Posts

Showing posts from June, 2023

Live camera web app with Flask

index.html <! DOCTYPE html > < html lang = "en" > < head >     < meta charset = "UTF-8" >     < meta http-equiv = "X-UA-Compatible" content = "IE=edge" >     < meta name = "viewport" content = "width=device-width, initial-scale=1.0" >     < title > Camera Stream App </ title > </ head > < body >     < h1 > Camera Stream App </ h1 >     < form action = "{{ url_for('add_camera') }}" method = "post" >         < label for = "rtsp_link" > RTSP Link: </ label >         < input type = "text" id = "rtsp_link" name = "rtsp_link" required >         < button type = "submit" > Add Camera </ button >     </ form >     < div >         {% for camera_id, camera_info in camera_streams.items() %}             < div >              

Latest GPT-4 live camera version

 import React, { useEffect, useRef, useState } from 'react'; import { View, StyleSheet, Dimensions, Image } from 'react-native'; import VLCPlayer from 'react-native-vlc-player'; import * as tf from '@tensorflow/tfjs'; import { fetch } from '@tensorflow/tfjs-react-native'; import { decodeJpeg } from '@tensorflow/tfjs-react-native'; const ObjectDetection = () => {   const vlcPlayerRef = useRef(null);   const modelRef = useRef(null);   const streamingRef = useRef(false);   const [overlayImage, setOverlayImage] = useState(null);   useEffect(() => {     const loadModel = async () => {       try {         await tf.ready();         modelRef.current = await tf.loadGraphModel(           fetch(             'https://tfhub.dev/tensorflow/ssd_mobilenet_v2/2/default/1',             { method: 'GET' }           )         );         streamingRef.current = true;       } catch (error) {         console.error('Error loading model

New version using an online TensorFlow model

import React, { useEffect, useRef } from 'react'; import { View, StyleSheet, Dimensions, Image } from 'react-native'; import VLCPlayer from 'react-native-vlc-player'; import * as tf from '@tensorflow/tfjs'; import { fetch } from '@tensorflow/tfjs-react-native'; const ObjectDetection = () => {   const vlcPlayerRef = useRef(null);   const modelRef = useRef(null);   const streamingRef = useRef(false);   useEffect(() => {     const loadModel = async () => {       try {         await tf.ready();         modelRef.current = await tf.loadGraphModel(           fetch(             'https://tfhub.dev/tensorflow/ssd_mobilenet_v2/2/default/1',             { method: 'GET' }           )         );         streamingRef.current = true;       } catch (error) {         console.error('Error loading model:', error);       }     };     loadModel();   }, []);   const processFrame = async () => {     if (!streamingRef.current) {      

New version using the VLC media player

import React, { useEffect, useRef } from 'react'; import { View, StyleSheet, Dimensions, Image } from 'react-native'; import VLCPlayer from 'react-native-vlc-player'; import * as tf from '@tensorflow/tfjs'; import '@tensorflow/tfjs-react-native'; const ObjectDetection = () => {   const vlcPlayerRef = useRef(null);   const modelRef = useRef(null);   useEffect(() => {     const loadModel = async () => {       try {         await tf.ready();         const modelPath = 'path/to/your/model.json';         modelRef.current = await tf.loadLayersModel(modelPath);       } catch (error) {         console.error('Error loading model:', error);       }     };     loadModel();   }, []);   const processFrame = async () => {     try {       const snapshot = await vlcPlayerRef.current.takeSnapshot(true);       const imageTensor = tf.browser.fromPixels({ uri: `data:image/jpeg;base64,${snapshot}` });              // Preprocess the image fo

React Native live camera with OpenCV

link: https://github.com/opencv/opencv/blob/master/data/haarcascades/haarcascade_fullbody.xml  import React, { useEffect, useRef } from 'react'; import { View, StyleSheet, Dimensions } from 'react-native'; import { RNCamera } from 'react-native-camera'; import { Mat, CvType, CvSize, CvBridge, CvCascadeClassifier } from 'react-native-opencv'; const OpenCVObjectDetection = () => {   const bridgeRef = useRef(null);   const canvasRef = useRef(null);   useEffect(() => {     const runObjectDetection = async () => {       const cascadeClassifier = new CvCascadeClassifier();       await cascadeClassifier.load('path/to/haarcascade_fullbody.xml');       const processVideo = async () => {         try {           if (!bridgeRef.current) return;           const frame = await bridgeRef.current.capture(true);           const gray = new Mat();           const objects = new Mat();           frame.cvtColor(CvType.CV_RGBA2GRAY, gray);           cascadeC