// Live RTSP camera object detection overlay (React Native + TensorFlow.js).
import React, { useEffect, useRef, useState } from 'react';
import { View, StyleSheet, Dimensions, Image } from 'react-native';
import VLCPlayer from 'react-native-vlc-player';
import * as tf from '@tensorflow/tfjs';
import { fetch } from '@tensorflow/tfjs-react-native';
import { decodeJpeg } from '@tensorflow/tfjs-react-native';
const ObjectDetection = () => {
const vlcPlayerRef = useRef(null);
const modelRef = useRef(null);
const streamingRef = useRef(false);
const [overlayImage, setOverlayImage] = useState(null);
useEffect(() => {
const loadModel = async () => {
try {
await tf.ready();
modelRef.current = await tf.loadGraphModel(
fetch(
'https://tfhub.dev/tensorflow/ssd_mobilenet_v2/2/default/1',
{ method: 'GET' }
)
);
streamingRef.current = true;
} catch (error) {
console.error('Error loading model:', error);
}
};
loadModel();
}, []);
const processFrame = async () => {
if (!streamingRef.current) {
return;
}
try {
const snapshot = await vlcPlayerRef.current.takeSnapshot(true);
const imageTensor = await decodeJpeg(new Uint8Array(snapshot));
const preprocessedTensor = imageTensor.resizeBilinear([300, 300]).expandDims();
const predictions = await modelRef.current.executeAsync(preprocessedTensor);
const processedImage = processPredictions(predictions, imageTensor);
setOverlayImage(processedImage);
imageTensor.dispose();
preprocessedTensor.dispose();
tf.dispose(predictions);
} catch (error) {
console.error('Error processing frame:', error);
}
requestAnimationFrame(processFrame);
};
const processPredictions = (predictions, imageTensor) => {
// Process the predictions and draw bounding boxes on the image
// You can filter predictions based on a certain threshold or other criteria
const canvas = document.createElement('canvas');
const ctx = canvas.getContext('2d');
canvas.width = imageTensor.shape[1];
canvas.height = imageTensor.shape[0];
// Draw the original image on the canvas
const imageData = new ImageData(new Uint8ClampedArray(imageTensor.dataSync()), canvas.width, canvas.height);
ctx.putImageData(imageData, 0, 0);
// Draw bounding boxes for detected objects
const boxes = predictions[0].dataSync();
const scores = predictions[1].dataSync();
const classes = predictions[2].dataSync();
for (let i = 0; i < scores.length; i++) {
if (scores[i] > 0.5) {
const [y1, x1, y2, x2] = boxes.slice(i * 4, i * 4 + 4);
ctx.strokeStyle = 'red';
ctx.lineWidth = 2;
ctx.strokeRect(x1 * canvas.width, y1 * canvas.height, (x2 - x1) * canvas.width, (y2 - y1) * canvas.height);
}
}
// Convert the canvas to a base64-encoded image
const processedImage = canvas.toDataURL('image/jpeg');
return processedImage;
};
useEffect(() => {
processFrame();
return () => {
streamingRef.current = false;
};
}, []);
return (
<View style={styles.container}>
<VLCPlayer
ref={vlcPlayerRef}
style={styles.videoPlayer}
videoAspectRatio="16:9"
source={{
uri: 'rtsp://admin:admin@192.168.184.64:1935',
autoplay: true,
initOptions: ['--rtsp-tcp'],
}}
/>
<View style={styles.overlay}>
<Image style={styles.processedImage} source={{ uri: overlayImage }} />
</View>
</View>
);
};
// Window-sized layout: the video fills the screen and the processed
// overlay image is absolutely positioned on top of it.
const { width: windowWidth, height: windowHeight } = Dimensions.get('window');

const styles = StyleSheet.create({
  container: {
    flex: 1,
  },
  videoPlayer: {
    flex: 1,
    width: windowWidth,
    height: windowHeight,
  },
  overlay: {
    position: 'absolute',
    top: 0,
    left: 0,
    width: windowWidth,
    height: windowHeight,
    justifyContent: 'center',
    alignItems: 'center',
  },
  processedImage: {
    width: windowWidth,
    height: windowHeight,
    resizeMode: 'cover',
  },
});

export default ObjectDetection;