refact: Dependency Inversion #1

Merged
yehoshuasandler merged 1 commit from useCaseDrivenRefact into master 2021-01-17 02:38:02 -06:00
15 changed files with 235 additions and 49 deletions

View File

@ -0,0 +1,7 @@
import PredictedObject from '../Models/PredictedObject'
interface IObjectDetector {
getPredictionsFromImageData(videoImage: ImageData): Promise<PredictedObject[]>
}
export default IObjectDetector

View File

@ -0,0 +1,8 @@
import IOffset from "./IOffset"
import IPredictedObject from "./IPredictedObject"

/**
 * Boundary for translating a detection into a screen-space offset
 * (how far the object's center sits from the frame's center).
 */
interface IObjectLocator {
  // NOTE(review): the name is plural ("Predictions"/"Offsets") but the
  // contract maps ONE prediction to ONE offset — consider renaming in a
  // follow-up alongside all implementers and callers.
  getOffsetsFromPredictions(predictedObject: IPredictedObject): IOffset
}

export default IObjectLocator

View File

@ -0,0 +1,7 @@
/**
 * A 2D displacement, used to describe how far a predicted object's center
 * lies from the video frame's center (produced by IObjectLocator, drawn
 * by IUiRenderer as a line from the frame center).
 */
interface IOffset {
  x: number,          // horizontal component, in pixels
  y: number,          // vertical component, in pixels
  hypotenuse: number  // straight-line distance — presumably sqrt(x² + y²); TODO confirm in ObjectLocator
}

export default IOffset

View File

@ -0,0 +1,8 @@
import IOffset from "./IOffset"
import IPredictedObject from "./IPredictedObject"

/**
 * Boundary for presenting one processed frame to the user.
 */
interface IUiRenderer {
  /**
   * Draws a single frame together with its detections and offsets.
   * @param props.imageData pixels of the captured frame
   * @param props.predictedObjects bounding boxes to visualize
   * @param props.offsets per-object offsets from the frame center
   */
  render(props: { imageData: ImageData, predictedObjects: IPredictedObject[], offsets: IOffset[] }): void
}

export default IUiRenderer

View File

@ -0,0 +1,5 @@
/**
 * Boundary for a source of video frames.
 */
interface IVideoCapturer {
  // Latest captured frame, or null while the underlying stream is not ready.
  imageData: ImageData | null
}

export default IVideoCapturer

View File

@ -0,0 +1,14 @@
import { DetectedObject } from "@tensorflow-models/coco-ssd"
import ObjectDetector from "../ObjectDetector"

// Default filters: keep detections that are reasonably confident AND
// classified as a cat. Typed explicitly instead of relying on the loose
// Function type.
const defaultPredictions: Array<(prediction: DetectedObject) => boolean> = [
  (prediction: DetectedObject) => prediction.score > 0.6,
  (prediction: DetectedObject) => prediction.class === 'cat',
]

/**
 * Builds an ObjectDetector.
 * @param filterPredicates optional detection filters; the defaults above
 *   are used when omitted. (Kept as Function[] to match ObjectDetector's
 *   constructor signature — consider tightening both to
 *   (p: DetectedObject) => boolean in a follow-up.)
 * @returns a configured ObjectDetector instance
 */
function makeObjectDetector (filterPredicates?: Function[]): ObjectDetector {
  // Pick the effective filters once rather than reassigning the parameter.
  const predicates = filterPredicates ?? defaultPredictions
  return new ObjectDetector({ filterPredicates: predicates })
}

export default makeObjectDetector

View File

@ -0,0 +1,14 @@
import IVideo from "../../Interfaces/IVideo"
import ObjectLocator from "../ObjectLocator"

// Frame dimensions used when the caller does not supply any.
const fallbackDimensions: IVideo = {
  width: 640,
  height: 480
}

/**
 * Creates an ObjectLocator, defaulting to a 640x480 frame size.
 * @param props optional video dimensions the locator should assume
 */
function makeObjectLocator (props?: IVideo): ObjectLocator {
  return new ObjectLocator(props || fallbackDimensions)
}

export default makeObjectLocator

View File

@ -0,0 +1,15 @@
import IPredictedObject from "../../Interfaces/IPredictedObject"
import PredictedObject from "../../Models/PredictedObject"
// (removed unused import of DetectedObject — nothing in this file referenced it)

/**
 * Maps a plain IPredictedObject shape onto the PredictedObject model.
 * @param p field values describing one detection
 * @returns a PredictedObject carrying the same values
 */
function makePredictedObject (p: IPredictedObject): PredictedObject {
  return new PredictedObject({
    xOrigin: p.xOrigin,
    yOrigin: p.yOrigin,
    width: p.width,
    height: p.height,
    class: p.class
  })
}

export default makePredictedObject

View File

@ -0,0 +1,7 @@
import UiRenderer from "../UiRenderer";

/** Constructs a UiRenderer; it currently takes no configuration. */
const makeUiRenderer = (): UiRenderer => new UiRenderer()

export default makeUiRenderer

View File

@ -0,0 +1,14 @@
import IVideo from "../../Interfaces/IVideo"
import VideoCapturer from "../VideoCapturer"

// Capture size used when the caller supplies no dimensions.
const fallbackDimensions: IVideo = {
  width: 640,
  height: 480
}

/**
 * Creates a VideoCapturer, defaulting to a 640x480 capture size.
 * @param props optional capture dimensions
 */
function makeVideoCapturer (props?: IVideo): VideoCapturer {
  return new VideoCapturer(props || fallbackDimensions)
}

export default makeVideoCapturer

View File

@ -1,24 +1,20 @@
import * as tf from '@tensorflow/tfjs'
import * as cocossd from '@tensorflow-models/coco-ssd'
import PredictedObject from '../Models/PredictedObject'
import IObjectDetector from '../Interfaces/IObjectDetector'
import IPredictedObject from '../Interfaces/IPredictedObject'
import makePredictedObject from './Factories/makePredictedObject'
let instance: ObjectDetector | null = null
class ObjectDetector {
class ObjectDetector implements IObjectDetector {
private mlModel: cocossd.ObjectDetection | null = null
private filterPredicates: Function[] = []
constructor (props?: { filterPredicates?: Function[] }) {
if (!instance) instance = this
if (props?.filterPredicates) this.filterPredicates = props.filterPredicates
tf.getBackend()
return instance
}
private convertDetectedToPredictedObjects = (detectedObjects: cocossd.DetectedObject[]) => {
const predictedObjects: PredictedObject[] = detectedObjects.map(p => new PredictedObject({
private convertDetectedToPredictedObjects (detectedObjects: cocossd.DetectedObject[]) {
const predictedObjects: IPredictedObject[] = detectedObjects.map(p => makePredictedObject({
xOrigin: p.bbox[0],
yOrigin: p.bbox[1],
width: p.bbox[2],
@ -39,7 +35,7 @@ class ObjectDetector {
else return true
}
public predictImageStream = async (videoImage: ImageData) => {
public async getPredictionsFromImageData (videoImage: ImageData): Promise<IPredictedObject[]> {
const mlModel = await this.loadMlModel()
const detectedObjects = await mlModel.detect(videoImage)
const filteredDetections = detectedObjects.filter(p => this.doesDetectionPassFilterPredicates(p))

View File

@ -1,20 +1,19 @@
import PredictedObject from "../Models/PredictedObject"
import Video from "../Models/Video"
import IObjectLocator from "../Interfaces/IObjectLocator"
import IOffset from "../Interfaces/IOffset"
import IPredictedObject from "../Interfaces/IPredictedObject"
import IVideo from "../Interfaces/IVideo"
interface Offset {
x: number,
y: number,
hypotenuse: number
}
class ObjectLocator implements IObjectLocator {
private videoWidth: number
private videoHeight: number
class ObjectLocator {
private video: Video
constructor (video: Video) {
this.video = video
constructor (props: IVideo) {
this.videoWidth = props.width
this.videoHeight = props.height
}
detectPredictedObjectLocationFromVideo = (predictedObject: PredictedObject): Offset => {
const videoCenter = { x: this.video.width / 2, y: this.video.height / 2 }
getOffsetsFromPredictions = (predictedObject: IPredictedObject): IOffset => {
const videoCenter = { x: this.videoWidth / 2, y: this.videoHeight / 2 }
const objectCenter = {
x: predictedObject.xOrigin + (predictedObject.width / 2),
y: predictedObject.yOrigin + (predictedObject.height / 2)

View File

@ -0,0 +1,44 @@
import IOffset from "../Interfaces/IOffset"
import IPredictedObject from "../Interfaces/IPredictedObject"
import IUiRenderer from "../Interfaces/IUiRenderer"
class UiRenderer implements IUiRenderer {
render (props: { imageData: ImageData, predictedObjects: IPredictedObject[], offsets: IOffset[] }) {
const body: HTMLBodyElement = document.querySelector('body')!
let canvasElement: HTMLCanvasElement = document.querySelector('#videoOutput') as HTMLCanvasElement
if (!canvasElement) {
canvasElement = document.createElement('canvas')
canvasElement.id = 'videoOutput'
canvasElement.width = props.imageData.width
canvasElement.height = props.imageData.height
body.append(canvasElement)
}
const canvasContext = canvasElement.getContext('2d')!
canvasContext.clearRect(0, 0, canvasElement.width, canvasElement.height)
canvasContext.putImageData(props.imageData, 0, 0)
props.predictedObjects.forEach(obj => {
canvasContext.strokeStyle = 'rgb(0, 255, 0)'
canvasContext.strokeRect(obj.xOrigin, obj.yOrigin, obj.width, obj.height)
})
const startPoint = {
x: props.imageData.width / 2,
y: props.imageData.height / 2
}
props.offsets.forEach(offset => {
canvasContext.strokeStyle = 'rgb(255, 0, 0)'
canvasContext.beginPath()
canvasContext.moveTo(startPoint.x, startPoint.y)
canvasContext.lineTo(startPoint.x - offset.x, startPoint.y - offset.y)
canvasContext.closePath()
canvasContext.stroke()
})
}
}
export default UiRenderer

View File

@ -0,0 +1,49 @@
import IVideo from '../Interfaces/IVideo'
import IVideoCapturer from '../Interfaces/IVideoCapturer'

/**
 * Captures frames from the user's web camera. A hidden <video> element
 * (id '#videoView') plays the camera stream; each read of `imageData`
 * rasterizes the current frame through an off-screen canvas.
 */
class VideoCapturer implements IVideoCapturer {
  private videoWidth: number
  private videoHeight: number
  private videoStream: MediaStream | null = null

  /**
   * @param props desired capture dimensions
   */
  constructor (props: IVideo) {
    this.videoWidth = props.width
    this.videoHeight = props.height
    // Fire-and-forget: imageData stays null until the stream is ready.
    // The catch prevents a denied camera permission from surfacing as an
    // unhandled promise rejection (the original ignored the promise).
    this.enableCamera().catch(error => {
      console.error('VideoCapturer: could not open camera', error)
    })
  }

  // Requests the camera stream and stores it once granted.
  private enableCamera = async () => {
    const webCameraStream = await navigator.mediaDevices.getUserMedia({ video: true })
    this.videoStream = webCameraStream
  }

  /**
   * The current camera frame as ImageData, or null while the stream has
   * not been granted yet. Lazily creates the hidden playback element on
   * first successful read.
   */
  get imageData () {
    if (!this.videoStream) return null
    let videoElement: HTMLVideoElement = document.querySelector('#videoView') as HTMLVideoElement
    if (!videoElement) {
      videoElement = document.createElement('video')
      videoElement.width = this.videoWidth
      videoElement.height = this.videoHeight
      videoElement.autoplay = true
      videoElement.srcObject = this.videoStream
      videoElement.id = 'videoView'
      videoElement.style.display = 'none'
      const body = document.querySelector('body')!
      body.appendChild(videoElement)
    }
    // Rasterize the current video frame via a throwaway canvas.
    const canvasElement: HTMLCanvasElement = document.createElement('canvas')
    canvasElement.width = this.videoWidth
    canvasElement.height = this.videoHeight
    const canvasContext = canvasElement.getContext('2d')!
    canvasContext.drawImage(videoElement, 0, 0, this.videoWidth, this.videoHeight)
    return canvasContext.getImageData(0, 0, this.videoWidth, this.videoHeight)
  }
}

export default VideoCapturer

View File

@ -1,43 +1,42 @@
import { DetectedObject } from "@tensorflow-models/coco-ssd"
import PredictedObjectCollectionController from "./Controllers/PredictedObjectCollectionController"
import VideoController from './Controllers/VideoController'
import ObjectDetector from './UseCases/ObjectDetector'
import ObjectLocator from "./UseCases/ObjectLocator"
import IObjectDetector from './Interfaces/IObjectDetector'
import IObjectLocator from './Interfaces/IObjectLocator'
import IOffset from './Interfaces/IOffset'
import IUiRenderer from './Interfaces/IUiRenderer'
import IVideoCapturer from "./Interfaces/IVideoCapturer"
const defaultPredictions = [
(prediction: DetectedObject) => prediction.score > 0.6,
(prediction: DetectedObject) => prediction.class === 'person', // TODO: change to cat
]
import makeObjectDetector from './UseCases/Factories/makeObjectDetector'
import makeObjectLocator from './UseCases/Factories/makeObjectLocator'
import makeUiRenderer from './UseCases/Factories/makeUiRenderer'
import makeVideoCapturer from './UseCases/Factories/makeVideoCatpurer'
class App {
private predictedObjectCollectionController: PredictedObjectCollectionController
private videoController: VideoController
private objectDetector: ObjectDetector
private objectLocator: ObjectLocator
private objectDetector: IObjectDetector
private objectLocator: IObjectLocator
private videoCapturer: IVideoCapturer
private uiRenderer: IUiRenderer
constructor () {
this.objectDetector = new ObjectDetector({ filterPredicates: defaultPredictions })
this.predictedObjectCollectionController = new PredictedObjectCollectionController()
this.videoController = new VideoController({ width: 640, height: 480 })
this.objectLocator = new ObjectLocator(this.videoController.model)
this.videoCapturer = makeVideoCapturer()
this.objectDetector = makeObjectDetector()
this.objectLocator = makeObjectLocator()
this.uiRenderer = makeUiRenderer()
const eventTarget = new EventTarget()
eventTarget.addEventListener('onMediaStreamReady', this.predictImage)
this.predictImage()
}
predictImage = async () => {
const imageData = this.videoController.imageData
const imageData = this.videoCapturer.imageData
if (!imageData) {
window.requestAnimationFrame(this.predictImage)
return
}
const predictedObjects = await this.objectDetector.predictImageStream(imageData)
this.predictedObjectCollectionController.predictedObjects = predictedObjects
const offsets = predictedObjects.map(obj => {
return this.objectLocator.detectPredictedObjectLocationFromVideo(obj)
})
console.log(offsets)
const predictedObjects = await this.objectDetector.getPredictionsFromImageData(imageData)
const offsets: IOffset[] = predictedObjects.map(obj => this.objectLocator.getOffsetsFromPredictions(obj))
this.uiRenderer.render({ imageData, predictedObjects, offsets })
window.requestAnimationFrame(this.predictImage)
}