(First of all, I am not an expert in this field, and I do not really understand neural-network learning. I wrote this article and built this demo — a React app — just for fun.)

I’m going to show you the renderings

Personally, I think the effect is ok. The recognition is not accurate because the app takes too little time to learn the images (the computer is too slow).


(I am on Windows 10.) Install the runtime environment:

  1. npm install --global windows-build-tools (this takes a long time…)
  2. npm install @tensorflow/tfjs-node (this also takes a long time…)

The project catalog is as follows

Train folder index.js (entry file)

const tf = require('@tensorflow/tfjs-node')
const getData = require('./data')

// NOTE(review): these relative paths were garbled in the original article;
// the train directory name should match your local dataset folder — confirm.
const TRAIN_DIR = '../garbage-sorting/train'
const OUTPUT_DIR = '../outputDir'
const MOBILENET_URL = 'http://ai-sample.oss-cn-hangzhou.aliyuncs.com/pipcook/models/mobilenet/web_model/model.json'

/**
 * Trains a garbage-classification model by transfer learning:
 * the first 87 MobileNet layers are frozen and reused as a feature
 * extractor, followed by a small trainable dense head. The trained
 * model is saved to OUTPUT_DIR.
 */
const main = async () => {
  // Load data
  const { ds, classes } = await getData(TRAIN_DIR, OUTPUT_DIR)

  // Define the model
  const mobilenet = await tf.loadLayersModel(MOBILENET_URL)
  mobilenet.summary()
  // console.log(mobilenet.layers.map((l, i) => [l.name, i]))
  const model = tf.sequential()
  for (let i = 0; i <= 86; i += 1) {
    const layer = mobilenet.layers[i]
    layer.trainable = false // freeze the pretrained weights
    model.add(layer)
  }
  model.add(tf.layers.flatten())
  model.add(tf.layers.dense({
    units: 10,
    activation: 'relu'
  }))
  model.add(tf.layers.dense({
    units: classes.length,
    activation: 'softmax' // one probability per garbage category
  }))

  // Train the model
  model.compile({
    // sparse labels: ys are integer class indices, not one-hot vectors
    loss: 'sparseCategoricalCrossentropy',
    optimizer: tf.train.adam(),
    metrics: ['acc']
  })
  await model.fitDataset(ds, { epochs: 20 })
  await model.save(`file://${process.cwd()}/${OUTPUT_DIR}`)
}

main()
Copy the code

Data.js (processing data)

const fs = require('fs')
const tf = require('@tensorflow/tfjs-node')

/**
 * Reads an image file and converts it into a normalized 4-D tensor
 * suitable as MobileNet input.
 *
 * @param {string} imgPath - path to a JPEG image on disk
 * @returns a float tensor of shape [1, 224, 224, 3] scaled to [-1, 1]
 */
const img2x = (imgPath) => {
  const buffer = fs.readFileSync(imgPath)
  return tf.tidy(() => {
    const imgTs = tf.node.decodeImage(new Uint8Array(buffer))
    const imgTsResized = tf.image.resizeBilinear(imgTs, [224, 224])
    // Map pixel values from [0, 255] to [-1, 1] and add a batch dimension.
    return imgTsResized.toFloat().sub(255 / 2).div(255 / 2).reshape([1, 224, 224, 3])
  })
}

/**
 * Builds a training dataset from a directory of per-class image folders.
 * Writes the discovered class names to `${outputDir}/classes.json` so the
 * browser app can map prediction indices back to labels.
 *
 * @param {string} trainDir - directory whose subdirectories are class names
 * @param {string} outputDir - directory to write classes.json into
 * @returns {{ds: *, classes: string[]}} a tf.data generator dataset and the class list
 */
const getData = async (trainDir, outputDir) => {
  const classes = fs.readdirSync(trainDir)
  fs.writeFileSync(`${outputDir}/classes.json`, JSON.stringify(classes))

  const data = []
  classes.forEach((dir, dirIndex) => {
    fs.readdirSync(`${trainDir}/${dir}`)
      .filter(n => n.match(/jpg$/))
      // Keep only the first 10 images per class to shorten training time.
      .slice(0, 10)
      .forEach(filename => {
        console.log('read', dir, filename)
        const imgPath = `${trainDir}/${dir}/${filename}`
        data.push({ imgPath, dirIndex })
      })
  })

  tf.util.shuffle(data)

  // Lazily yield batches of { xs, ys } so all images never sit in memory at once.
  const ds = tf.data.generator(function* () {
    const count = data.length
    const batchSize = 32
    for (let start = 0; start < count; start += batchSize) {
      const end = Math.min(start + batchSize, count)
      yield tf.tidy(() => {
        const inputs = []
        const labels = []
        for (let j = start; j < end; j += 1) {
          const { imgPath, dirIndex } = data[j]
          inputs.push(img2x(imgPath))
          labels.push(dirIndex)
        }
        const xs = tf.concat(inputs)   // [batch, 224, 224, 3]
        const ys = tf.tensor(labels)   // integer class indices
        return { xs, ys }
      })
    }
  })

  return {
    ds,
    classes
  }
}

module.exports = getData
Copy the code

Install some plug-ins needed to run the project

The app folder

import React, { PureComponent } from 'react'
import { Button, Progress, Spin, Empty } from 'antd'
import 'antd/dist/antd.css'
import * as tf from '@tensorflow/tfjs'
import { file2img, img2x } from './utils'
import intro from './intro'

const DATA_URL = 'http://127.0.0.1:8080/'
class App extends PureComponent {
  state = {}
  async componentDidMount() {
    this.model = await tf.loadLayersModel(DATA_URL + '/model.json')
    // this.model.summary()
    this.CLASSES = await fetch(DATA_URL + '/classes.json').then(res= > res.json())
  }
  predict = async (file) => {
    const img = await file2img(file)

    this.setState({
      imgSrc: img.src,
      isLoading: true
    })
    setTimeout(() = > {
      const pred = tf.tidy(() = > {
        const x = img2x(img)
        return this.model.predict(x)
      })

      const results = pred.arraySync()[0]
        .map((score, i) = > ({score, label: this.CLASSES[i]}))
        .sort((a, b) = > b.score - a.score)
      this.setState({
        results,
        isLoading: false})},0)
  }

  renderResult = (item) = > {
    const finalScore = Math.round(item.score * 100)
    return (
      <tr key={item.label}>
        <td style={{ width: 80.padding: '5px 0' }}>{item.label}</td>
        <td>
          <Progress percent={finalScore} status={finalScore= = =100 ? 'success' : 'normal'} / >
        </td>
      </tr>)}render() {
    const { imgSrc, results, isLoading } = this.state
    constfinalItem = results && {... results[0], ...intro[results[0].label]}

    return (
      <div style={{padding: 20}} >
        <span
          style={{ color: '#cccccc', textAlign: 'center', fontSize: 12.display: 'block'}} >Identification may be inaccurate</span>
        <Button
          type="primary"
          size="large"
          style={{width: '100% '}}onClick={()= >This.upload.click ()} > Select image recognition</Button>
        <input
          type="file"
          onChange={e= >this.predict(e.target.files[0])} ref={el => {this.upload = el}} style={{ display: 'none' }} /> { ! results && ! imgSrc &&<Empty style={{ marginTop: 40}} / >
        }
        {imgSrc && <div style={{ marginTop: 20.textAlign: 'center' }}>
          <img src={imgSrc} style={{ maxWidth: '100% '}} / >
        </div>}
        {finalItem && <div style={{marginTop: 20}} >Recognition results:</div>}
        {finalItem && <div style={{display: 'flex', alignItems: 'flex-start', marginTop: 20}} >
          <img
            src={finalItem.icon}
            width={120}
          />
          <div>
            <h2 style={{color: finalItem.color}} >
              {finalItem.label}
            </h2>
            <div style={{color: finalItem.color}} >
              {finalItem.intro}
            </div>
          </div>
        </div>}
        {
          isLoading && <Spin size="large" style={{display: 'flex', justifyContent: 'center', alignItems: 'center', marginTop: 40}} / >
        }
        {results && <div style={{ marginTop: 20}} >
          <table style={{width: '100'}} % >
            <tbody>
              <tr>
                <td>category</td>
                <td>compatibility</td>
              </tr>
              {results.map(this.renderResult)}
            </tbody>
          </table>
        </div>}
      </div>)}}export default App
Copy the code

index.html

<!DOCTYPE html>
<html>
  <head>
    <title>Garbage classification</title>
    <!-- "inital-scale" typo fixed: the attribute is "initial-scale" -->
    <meta name="viewport" content="width=device-width, initial-scale=1">
  </head>
  <body>
    <div id="app"></div>
    <script src="./index.js"></script>
  </body>
</html>
Copy the code

index.js

import React from 'react'
import ReactDOM from 'react-dom'
import App from './App'

ReactDOM.render(<App />.document.querySelector('#app'))
Copy the code

intro.js

/**
 * Display metadata for each garbage category: the badge icon URL, the theme
 * color, and a one-paragraph description shown under the top prediction.
 * Keys must exactly match the class names in classes.json.
 */
const intro = {
  'Recyclables': {
    icon: 'https://lajifenleiapp.com/static/svg/1_3F6BA8.svg',
    color: '#3f6ba8',
    intro: 'refers to articles produced in daily life or in activities providing services for daily life, which have lost all or part of their original use value and can be recycled as raw materials or reused after reprocessing, including waste paper, plastic, glass, metal and fabric. '
  },
  'Hazardous waste': {
    icon: 'https://lajifenleiapp.com/static/svg/2v_B43953.svg',
    color: '#b43953',
    intro: 'refers to the substances in household garbage that cause direct or potential harm to human health or natural environment, including waste rechargeable batteries, waste button batteries, waste lamps, discarded drugs, waste pesticides (containers), waste paint (containers), waste daily chemicals, waste silver products, waste electrical appliances and electronic products, etc. '
  },
  'Kitchen waste': {
    icon: 'https://lajifenleiapp.com/static/svg/3v_48925B.svg',
    color: '#48925b',
    intro: 'refers to the organic perishable garbage generated in the daily life of residents, including vegetable leaves, leftovers, leftovers, fruit peels, eggshells, tea leaves, bones and so on. '
  },
  'Other Rubbish': {
    icon: 'https://lajifenleiapp.com/static/svg/4_89918B.svg',
    color: '#89918b',
    intro: 'refers to other household wastes that are mixed, polluted and difficult to classify, except recyclables, hazardous wastes and kitchen waste. '
  }
}

export default intro

utils.js

import * as tf from '@tensorflow/tfjs'

/**
 * Reads a File (from an <input type="file">) into a 224x224 <img> element.
 *
 * @param {File} f - the user-selected image file
 * @returns {Promise<HTMLImageElement>} resolves once the image has loaded
 */
export const file2img = async (f) => {
  return new Promise(resolve => {
    const reader = new FileReader()
    reader.readAsDataURL(f)
    reader.onload = (e) => {
      const img = document.createElement('img')
      img.src = e.target.result
      img.width = 224
      img.height = 224
      img.onload = () => { resolve(img) }
    }
  })
}

/**
 * Converts an <img> element into a normalized model-input tensor.
 * Must match the preprocessing used at training time (data.js img2x).
 *
 * @param {HTMLImageElement} imgEl - a 224x224 image element
 * @returns a float tensor of shape [1, 224, 224, 3] scaled to [-1, 1]
 */
export function img2x(imgEl) {
  return tf.tidy(() => {
    return tf.browser.fromPixels(imgEl)
      .toFloat().sub(255 / 2).div(255 / 2)
      .reshape([1, 224, 224, 3])
  })
}

Before running the project code, run `node index.js` under `train` to generate the `model.json` used by the recognition system. Then run `hs outputDir --cors` in the root directory so the generated `model.json` is served over HTTP; only after that can `npm start` be run, otherwise the project will report an error.

The main code is this. The previous author also said. I don’t know anything about it, so I can’t explain the code. If you’re interested, do your own research. Code address attached.

Gitee.com/suiboyu/gar…