Posts: collected React Native object-detection snippets (each pasted snippet below is truncated)

New approach: load a TensorFlow.js model from an online URL at runtime

// Streams video via VLCPlayer and loads an SSD MobileNet v2 graph model from TF Hub.
// NOTE(review): snippet is truncated as pasted ("catch ..." is cut off mid-statement).
// NOTE(review): tf.loadGraphModel expects a URL string or an io.IOHandler, not the
// Promise returned by fetch(); TF Hub models are normally loaded by passing the URL
// directly with {fromTFHub: true} — confirm against the @tensorflow/tfjs docs.
import React, { useEffect, useRef } from 'react'; import { View, StyleSheet, Dimensions, Image } from 'react-native'; import VLCPlayer from 'react-native-vlc-player'; import * as tf from '@tensorflow/tfjs'; import { fetch } from '@tensorflow/tfjs-react-native'; const ObjectDetection = () => {   const vlcPlayerRef = useRef(null);   const modelRef = useRef(null);   const streamingRef = useRef(false);   useEffect(() => {     const loadModel = async () => {       try {         await tf.ready();         modelRef.current = await tf.loadGraphModel(           fetch(             'https://tfhub.dev/tensorflow/ssd_mobilenet_v2/2/default/1',             { method: 'GET' }           )         );         streamingRef.current = true;       } catch ...

New approach: VLC media player snapshots with a locally bundled TF.js model

// Loads a local Layers model, then (in the truncated part) takes VLC snapshots for inference.
// NOTE(review): snippet is truncated as pasted ("takeSna..." is cut off mid-call).
// NOTE(review): 'path/to/your/model.json' is a placeholder — in React Native a bundled
// model is normally loaded via bundleResourceIO from @tensorflow/tfjs-react-native,
// not a bare filesystem path; confirm before using.
import React, { useEffect, useRef } from 'react'; import { View, StyleSheet, Dimensions, Image } from 'react-native'; import VLCPlayer from 'react-native-vlc-player'; import * as tf from '@tensorflow/tfjs'; import '@tensorflow/tfjs-react-native'; const ObjectDetection = () => {   const vlcPlayerRef = useRef(null);   const modelRef = useRef(null);   useEffect(() => {     const loadModel = async () => {       try {         await tf.ready();         const modelPath = 'path/to/your/model.json';         modelRef.current = await tf.loadLayersModel(modelPath);       } catch (error) {         console.error('Error loading model:', error);       }     };     loadModel();   }, []);   const processFrame = async () => {     try {       const snapshot = await vlcPlayerRef.current.takeSna...

React Native live camera with OpenCV (Haar-cascade full-body detection)

// Haar-cascade full-body detection on live camera frames via react-native-opencv.
// NOTE(review): snippet is truncated as pasted (cut off after bridgeRef.current.capture).
// NOTE(review): 'path/to/haarcascade_fullbody.xml' is a placeholder — the XML linked
// below must actually be bundled with the app and referenced by its real path; confirm.
link: https://github.com/opencv/opencv/blob/master/data/haarcascades/haarcascade_fullbody.xml  import React, { useEffect, useRef } from 'react'; import { View, StyleSheet, Dimensions } from 'react-native'; import { RNCamera } from 'react-native-camera'; import { Mat, CvType, CvSize, CvBridge, CvCascadeClassifier } from 'react-native-opencv'; const OpenCVObjectDetection = () => {   const bridgeRef = useRef(null);   const canvasRef = useRef(null);   useEffect(() => {     const runObjectDetection = async () => {       const cascadeClassifier = new CvCascadeClassifier();       await cascadeClassifier.load('path/to/haarcascade_fullbody.xml');       const processVideo = async () => {         try {           if (!bridgeRef.current) return;           const frame = await bridgeRef.current.capture(true);         ...

Camera frames piped into TF.js via cameraWithTensors

 // Wraps a video component with cameraWithTensors and runs a graph model on each frame.
 // NOTE(review): snippet is truncated as pasted ("executeAsync..." is cut off).
 // NOTE(review): cameraWithTensors is designed to wrap expo-camera's Camera component;
 // wrapping react-native-vlc-player's Video here is unlikely to work — verify against
 // the @tensorflow/tfjs-react-native documentation.
 import React, { useRef, useState, useEffect } from 'react'; import { StyleSheet, View } from 'react-native'; import Video from 'react-native-vlc-player'; import * as tf from '@tensorflow/tfjs'; import { cameraWithTensors } from '@tensorflow/tfjs-react-native'; const TensorCamera = cameraWithTensors(Video); const App = () => {   const [model, setModel] = useState(null);   const [objects, setObjects] = useState([]);   const cameraRef = useRef(null);   useEffect(() => {     // Load the object detection model     async function loadModel() {       const model = await tf.loadGraphModel('model.json');       setModel(model);     }     loadModel();   }, []);   const handleFrame = async (images, updatePreview, gl) => {     const image = images.next().value;     const tensor = tf.browser.fromPixels(image);     const predictions = await model.executeAsync...

Live RTSP stream fed to an object-detection camera component

 // Class component rendering an object-detection camera over an RTSP URL.
 // NOTE(review): snippet is truncated as pasted (styles object cut off mid-property).
 // NOTE(review): react-native-vision-camera does not export a default
 // ObjectDetectionCamera component, and react-native-camera is deprecated — verify
 // both imports before relying on this snippet.
 import React, { Component } from 'react'; import { StyleSheet, Text, View } from 'react-native'; import Camera from 'react-native-camera'; import ObjectDetectionCamera from 'react-native-vision-camera'; export default class App extends Component {   state = {      rtspUrl: 'rtsp://your_rtsp_url'     }   handleObjectsDetected = objects => {     console.log(objects);     }   render() {     return (       <View style={styles.container}>        <ObjectDetectionCamera          style={styles.preview}          uri={this.state.rtspUrl}          onObjectDetected={this.handleObjectsDetected}        />       </View>     );   } } const styles = StyleSheet.create({   container: {     flex: 1,     flexDirection: '...

With react-native-vision-camera (hooks API)

 // Renders vision-camera Camera components after checking/requesting permission.
 // NOTE(review): snippet is truncated as pasted (the <Camera ...> JSX is cut off).
 // NOTE(review): in react-native-vision-camera, useCameraDevices returns the device
 // list directly, and permissions go through Camera.getCameraPermissionStatus() /
 // Camera.requestCameraPermission() — the destructuring used here does not match the
 // published API; confirm against the library docs.
 import React, { useEffect, useState } from 'react'; import { View, StyleSheet } from 'react-native'; import { Camera, useCameraDevices } from 'react-native-vision-camera'; const styles = StyleSheet.create({   container: {     flex: 1,     backgroundColor: '#000',   }, }); function VideoScreen() {   const { cameraDevices, getCameraPermissions, requestCameraPermission } = useCameraDevices();   const [cameraActive, setCameraActive] = useState(false);   // Check camera permission and request if not granted   useEffect(() => {     getCameraPermissions().then((permissions) => {       if (!permissions.cameraPermission) {         requestCameraPermission();       }     });   }, []);   return (     <View style={styles.container}>       {cameraDevices.map((device) => (         <Camera     ...

Detection with TensorFlow (SSD MobileNet over react-native-video)

 import React, { Component } from 'react'; import { View, StyleSheet } from 'react-native'; import Video from 'react-native-video'; import * as tf from '@tensorflow/tfjs'; import '@tensorflow/tfjs-react-native'; // Constants for object detection const MODEL_URL = 'https://tfhub.dev/tensorflow/tfjs-model/ssd_mobilenet_v1/1/default/1/model.json'; const INPUT_SIZE = 224; // Input size required by the model const OBJECT_THRESHOLD = 0.5; // Minimum confidence threshold for detected objects class VideoScreen extends Component {   constructor(props) {     super(props);     this.state = {       isModelReady: false,       objects: [],     };     // Load the TensorFlow.js model     this.loadModel();   }   async loadModel() {     try {       await tf.ready();       const model = await tf.loadGraphModel(MODEL_URL);       this.setState({ isModelRe...