JavaScript AI/ML

JavaScript Machine Learning: TensorFlow.js, Neural Networks, and AI in the Browser

Build machine learning applications with JavaScript. Learn TensorFlow.js, neural networks, computer vision, and AI model deployment in browsers.

By JavaScript Document Team
machine-learning, tensorflow-js, neural-networks, ai, computer-vision, ml-models

Machine Learning with JavaScript enables building AI applications that run directly in browsers and Node.js environments. TensorFlow.js brings powerful ML capabilities to JavaScript, allowing for model training, inference, and deployment without server dependencies. This comprehensive guide covers neural networks, computer vision, and practical ML implementations.
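
Before diving into the larger manager classes below, here is a minimal sketch of the npm-based setup (an alternative to the CDN script used later). Only core API calls are used: `tf.tensor2d`, `tf.matMul`, and `tf.tidy`, which automatically disposes intermediate tensors.

// Minimal sketch: npm-based setup and a first tensor operation
import * as tf from '@tensorflow/tfjs';

async function smokeTest() {
  await tf.ready();

  // tf.tidy disposes every intermediate tensor created inside the callback;
  // only the returned tensor survives the scope
  const result = tf.tidy(() => {
    const a = tf.tensor2d([
      [1, 2],
      [3, 4],
    ]);
    const b = tf.tensor2d([
      [5, 6],
      [7, 8],
    ]);
    return tf.matMul(a, b);
  });

  result.print(); // [[19, 22], [43, 50]]
  result.dispose();
}

smokeTest();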

TensorFlow.js Fundamentals

Setup and Basic Operations

// TensorFlow.js Manager for ML Operations
class TensorFlowManager {
  constructor() {
    this.tf = null;
    this.models = new Map();
    this.datasets = new Map();
    this.isInitialized = false;
  }

  async initialize() {
    if (typeof tf === 'undefined') {
      // Load TensorFlow.js
      await this.loadTensorFlow();
    }

    this.tf = tf;

    // Wait for the backend (webgl, cpu, or webgpu) to be ready
    await tf.ready();
    console.log('TensorFlow.js backend:', tf.getBackend());

    // Configure WebGL flags (tf.env() is the current API; tf.ENV is deprecated)
    tf.env().set('WEBGL_PACK', true);
    tf.env().set('WEBGL_CONV_IM2COL', true);

    this.isInitialized = true;
    return this;
  }

  async loadTensorFlow() {
    return new Promise((resolve, reject) => {
      const script = document.createElement('script');
      script.src =
        'https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@latest/dist/tf.min.js';
      script.onload = resolve;
      script.onerror = reject;
      document.head.appendChild(script);
    });
  }

  // Tensor operations
  createTensor(data, shape = null, dtype = 'float32') {
    if (!this.isInitialized) throw new Error('TensorFlow not initialized');

    if (shape) {
      return tf.tensor(data, shape, dtype);
    }
    return tf.tensor(data, undefined, dtype);
  }

  // Matrix operations
  matrixMultiply(a, b) {
    return tf.matMul(a, b);
  }

  // Element-wise operations
  add(a, b) {
    return tf.add(a, b);
  }

  subtract(a, b) {
    return tf.sub(a, b);
  }

  multiply(a, b) {
    return tf.mul(a, b);
  }

  divide(a, b) {
    return tf.div(a, b);
  }

  // Activation functions
  relu(x) {
    return tf.relu(x);
  }

  sigmoid(x) {
    return tf.sigmoid(x);
  }

  softmax(x) {
    return tf.softmax(x);
  }

  tanh(x) {
    return tf.tanh(x);
  }

  // Utility functions
  reshape(tensor, newShape) {
    return tf.reshape(tensor, newShape);
  }

  transpose(tensor) {
    return tf.transpose(tensor);
  }

  sum(tensor, axis = null) {
    return tf.sum(tensor, axis);
  }

  mean(tensor, axis = null) {
    return tf.mean(tensor, axis);
  }

  max(tensor, axis = null) {
    return tf.max(tensor, axis);
  }

  min(tensor, axis = null) {
    return tf.min(tensor, axis);
  }

  // Data preprocessing
  normalize(tensor, min = 0, max = 1) {
    const minVal = tf.min(tensor);
    const maxVal = tf.max(tensor);
    const range = tf.sub(maxVal, minVal);

    return tf.div(tf.mul(tf.sub(tensor, minVal), max - min), range).add(min);
  }

  standardize(tensor) {
    const mean = tf.mean(tensor);
    const std = tf.sqrt(tf.mean(tf.square(tf.sub(tensor, mean))));
    return tf.div(tf.sub(tensor, mean), std);
  }

  oneHotEncode(labels, numClasses) {
    return tf.oneHot(labels, numClasses);
  }

  // Memory management
  dispose(...tensors) {
    tensors.forEach((tensor) => {
      if (tensor && typeof tensor.dispose === 'function') {
        tensor.dispose();
      }
    });
  }

  getMemoryInfo() {
    return tf.memory();
  }

  cleanupMemory() {
    // Dispose any variables still registered with the engine
    if (tf.env().get('IS_BROWSER')) {
      tf.disposeVariables();
    }
  }
}

// Neural Network Builder
class NeuralNetworkBuilder {
  constructor(tfManager) {
    this.tf = tfManager.tf;
    this.tfManager = tfManager;
    this.model = null;
    this.layers = [];
    this.isCompiled = false;
  }

  // Sequential model builder
  createSequential() {
    this.model = tf.sequential();
    this.layers = [];
    return this;
  }

  // Layer types
  addDense(units, options = {}) {
    const config = {
      units,
      activation: options.activation || 'linear',
      useBias: options.useBias !== false,
      kernelInitializer: options.kernelInitializer || 'glorotUniform',
      biasInitializer: options.biasInitializer || 'zeros',
      kernelRegularizer: options.kernelRegularizer,
      biasRegularizer: options.biasRegularizer,
      activityRegularizer: options.activityRegularizer,
      kernelConstraint: options.kernelConstraint,
      biasConstraint: options.biasConstraint,
      ...options,
    };

    const layer = tf.layers.dense(config);
    this.model.add(layer);
    this.layers.push({ type: 'dense', config });
    return this;
  }

  addConv2D(filters, kernelSize, options = {}) {
    const config = {
      filters,
      kernelSize,
      strides: options.strides || [1, 1],
      padding: options.padding || 'valid',
      activation: options.activation || 'linear',
      useBias: options.useBias !== false,
      kernelInitializer: options.kernelInitializer || 'glorotUniform',
      biasInitializer: options.biasInitializer || 'zeros',
      ...options,
    };

    const layer = tf.layers.conv2d(config);
    this.model.add(layer);
    this.layers.push({ type: 'conv2d', config });
    return this;
  }

  addMaxPooling2D(poolSize = [2, 2], options = {}) {
    const config = {
      poolSize,
      strides: options.strides,
      padding: options.padding || 'valid',
      ...options,
    };

    const layer = tf.layers.maxPooling2d(config);
    this.model.add(layer);
    this.layers.push({ type: 'maxPooling2d', config });
    return this;
  }

  addFlatten() {
    const layer = tf.layers.flatten();
    this.model.add(layer);
    this.layers.push({ type: 'flatten', config: {} });
    return this;
  }

  addDropout(rate) {
    const layer = tf.layers.dropout({ rate });
    this.model.add(layer);
    this.layers.push({ type: 'dropout', config: { rate } });
    return this;
  }

  addBatchNormalization(options = {}) {
    const config = {
      axis: options.axis || -1,
      momentum: options.momentum || 0.99,
      epsilon: options.epsilon || 0.001,
      center: options.center !== false,
      scale: options.scale !== false,
      ...options,
    };

    const layer = tf.layers.batchNormalization(config);
    this.model.add(layer);
    this.layers.push({ type: 'batchNormalization', config });
    return this;
  }

  addLSTM(units, options = {}) {
    const config = {
      units,
      activation: options.activation || 'tanh',
      recurrentActivation: options.recurrentActivation || 'sigmoid',
      useBias: options.useBias !== false,
      returnSequences: options.returnSequences || false,
      returnState: options.returnState || false,
      goBackwards: options.goBackwards || false,
      stateful: options.stateful || false,
      dropout: options.dropout || 0,
      recurrentDropout: options.recurrentDropout || 0,
      ...options,
    };

    const layer = tf.layers.lstm(config);
    this.model.add(layer);
    this.layers.push({ type: 'lstm', config });
    return this;
  }

  // Compile model
  compile(options = {}) {
    if (!this.model) {
      throw new Error('Model not created. Call createSequential() first.');
    }

    const config = {
      optimizer: options.optimizer || 'adam',
      loss: options.loss || 'meanSquaredError',
      metrics: options.metrics || ['accuracy'],
      ...options,
    };

    this.model.compile(config);
    this.isCompiled = true;
    return this;
  }

  // Model summary
  summary() {
    if (!this.model) return null;
    this.model.summary();
    return this.getModelInfo();
  }

  getModelInfo() {
    if (!this.model) return null;

    return {
      layers: this.layers.length,
      parameters: this.model.countParams(),
      trainable: this.model.trainableWeights.length,
      nonTrainable: this.model.nonTrainableWeights.length,
      memoryUsage: this.estimateMemoryUsage(),
    };
  }

  estimateMemoryUsage() {
    // Rough estimation in bytes
    const params = this.model.countParams();
    return params * 4; // 4 bytes per float32
  }

  // Get the compiled model
  getModel() {
    return this.model;
  }
}

// Training Manager
class ModelTrainer {
  constructor(tfManager) {
    this.tf = tfManager.tf;
    this.tfManager = tfManager;
    this.model = null;
    this.trainingHistory = [];
    this.isTraining = false;
    this.callbacks = [];
  }

  setModel(model) {
    this.model = model;
    return this;
  }

  // Data preparation
  prepareData(features, labels, options = {}) {
    const { validationSplit = 0.2, shuffle = true, batchSize = 32 } = options;

    let featureTensor = Array.isArray(features)
      ? tf.tensor(features)
      : features;
    let labelTensor = Array.isArray(labels) ? tf.tensor(labels) : labels;

    // Shuffle data
    if (shuffle) {
      // createShuffledIndices returns a Uint32Array; convert to int32 for gather
      const shuffled = tf.util.createShuffledIndices(featureTensor.shape[0]);
      const indices = tf.tensor1d(new Int32Array(shuffled), 'int32');
      featureTensor = tf.gather(featureTensor, indices);
      labelTensor = tf.gather(labelTensor, indices);
      indices.dispose();
    }

    // Split into training and validation
    const numSamples = featureTensor.shape[0];
    const numValidation = Math.floor(numSamples * validationSplit);
    const numTraining = numSamples - numValidation;

    const trainFeatures = featureTensor.slice([0], [numTraining]);
    const trainLabels = labelTensor.slice([0], [numTraining]);
    const valFeatures = featureTensor.slice([numTraining], [numValidation]);
    const valLabels = labelTensor.slice([numTraining], [numValidation]);

    return {
      trainFeatures,
      trainLabels,
      valFeatures,
      valLabels,
      batchSize,
    };
  }

  // Training callbacks
  addCallback(callback) {
    this.callbacks.push(callback);
    return this;
  }

  createEarlyStopping(options = {}) {
    return tf.callbacks.earlyStopping({
      monitor: options.monitor || 'val_loss',
      minDelta: options.minDelta || 0.001,
      patience: options.patience || 10,
      verbose: options.verbose || 1,
      mode: options.mode || 'min',
      baseline: options.baseline,
      restoreBestWeights: options.restoreBestWeights !== false,
    });
  }

  // Note: unlike earlyStopping, reduceLROnPlateau is not shipped with TF.js;
  // this assumes a custom or ported callback with a Keras-style config
  createReduceLROnPlateau(options = {}) {
    return tf.callbacks.reduceLROnPlateau({
      monitor: options.monitor || 'val_loss',
      factor: options.factor || 0.1,
      patience: options.patience || 10,
      verbose: options.verbose || 1,
      mode: options.mode || 'min',
      minDelta: options.minDelta || 0.0001,
      cooldown: options.cooldown || 0,
      minLr: options.minLr || 0,
    });
  }

  // Custom training loop
  async train(trainData, options = {}) {
    if (!this.model) {
      throw new Error('Model not set. Call setModel() first.');
    }

    const {
      epochs = 100,
      batchSize = 32,
      validationData = null,
      verbose = 1,
      callbacks = [],
    } = options;

    this.isTraining = true;
    const allCallbacks = [...this.callbacks, ...callbacks];

    try {
      const history = await this.model.fit(
        trainData.trainFeatures,
        trainData.trainLabels,
        {
          epochs,
          batchSize: trainData.batchSize || batchSize,
          validationData: validationData || [
            trainData.valFeatures,
            trainData.valLabels,
          ],
          verbose,
          callbacks: allCallbacks,
        }
      );

      this.trainingHistory.push(history);
      return history;
    } catch (error) {
      console.error('Training error:', error);
      throw error;
    } finally {
      this.isTraining = false;
    }
  }

  // Advanced training with a custom loop
  async trainCustomLoop(dataset, options = {}) {
    const {
      epochs = 100,
      learningRate = 0.01,
      optimizer = null,
      // The loss is configurable; plain MSE is used as the default here
      lossFn = (labels, preds) => tf.losses.meanSquaredError(labels, preds),
    } = options;

    const opt = optimizer || tf.train.adam(learningRate);

    for (let epoch = 0; epoch < epochs; epoch++) {
      let totalLoss = 0;
      let batches = 0;

      await dataset.forEachAsync((batch) => {
        const { xs, ys } = batch;

        // minimize() expects a function that returns a scalar loss tensor;
        // passing returnCost = true makes it return that loss for logging
        const loss = opt.minimize(() => {
          const preds = this.model.predict(xs);
          return lossFn(ys, preds);
        }, true);

        totalLoss += loss.dataSync()[0];
        batches++;

        loss.dispose();
        xs.dispose();
        ys.dispose();
      });

      const avgLoss = totalLoss / batches;
      console.log(`Epoch ${epoch + 1}/${epochs}, Loss: ${avgLoss.toFixed(4)}`);
    }
  }

  // Evaluation
  async evaluate(testFeatures, testLabels, options = {}) {
    if (!this.model) {
      throw new Error('Model not set');
    }

    const results = await this.model.evaluate(
      testFeatures,
      testLabels,
      options
    );

    if (Array.isArray(results)) {
      return results.map((tensor) => tensor.dataSync()[0]);
    }

    return results.dataSync()[0];
  }

  // Prediction
  predict(inputs) {
    if (!this.model) {
      throw new Error('Model not set');
    }

    return this.model.predict(inputs);
  }

  // Model persistence
  async saveModel(savePath) {
    if (!this.model) {
      throw new Error('Model not set');
    }

    await this.model.save(savePath);
  }

  async loadModel(loadPath) {
    this.model = await tf.loadLayersModel(loadPath);
    return this.model;
  }

  getTrainingHistory() {
    return this.trainingHistory;
  }
}

// Usage example with a simple neural network
async function createSimpleClassifier() {
  // Initialize TensorFlow.js
  const tfManager = new TensorFlowManager();
  await tfManager.initialize();

  // Create model
  const builder = new NeuralNetworkBuilder(tfManager);
  builder
    .createSequential()
    .addDense(10, { inputShape: [4], activation: 'relu' })
    .addDense(8, { activation: 'relu' })
    .addDropout(0.2)
    .addDense(3, { activation: 'softmax' })
    .compile({
      optimizer: 'adam',
      loss: 'categoricalCrossentropy',
      metrics: ['accuracy'],
    });

  // Show model summary
  console.log(builder.summary());

  // Generate sample data (Iris-like dataset)
  const features = [];
  const labels = [];

  for (let i = 0; i < 150; i++) {
    const classId = Math.floor(i / 50);
    const baseFeature = [
      classId * 2,
      classId * 1.5,
      classId * 0.5,
      classId * 0.3,
    ];

    // Add noise
    const noisyFeature = baseFeature.map(
      (val) => val + (Math.random() - 0.5) * 0.5
    );
    features.push(noisyFeature);

    // One-hot encode labels
    const label = [0, 0, 0];
    label[classId] = 1;
    labels.push(label);
  }

  // Train model
  const trainer = new ModelTrainer(tfManager);
  trainer.setModel(builder.getModel());

  const trainData = trainer.prepareData(features, labels, {
    validationSplit: 0.2,
    shuffle: true,
  });

  const history = await trainer.train(trainData, {
    epochs: 50,
    batchSize: 16,
    verbose: 1,
  });

  console.log('Training completed:', history);

  // Make predictions
  const testInput = tfManager.createTensor([[5.1, 3.5, 1.4, 0.2]]);
  const prediction = trainer.predict(testInput);
  const predictionData = await prediction.data();

  console.log('Prediction:', predictionData);

  // Cleanup
  testInput.dispose();
  prediction.dispose();
  trainData.trainFeatures.dispose();
  trainData.trainLabels.dispose();
  trainData.valFeatures.dispose();
  trainData.valLabels.dispose();

  return { tfManager, builder, trainer };
}

// Run the example
createSimpleClassifier()
  .then((result) => {
    console.log('Simple classifier created successfully');
  })
  .catch((error) => {
    console.error('Error creating classifier:', error);
  });
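
The example above disposes each tensor by hand; `tf.tidy()` can shoulder most of that bookkeeping. A minimal sketch, assuming the `trainer` instance from `createSimpleClassifier` above:

// Hedged sketch: the prediction step from above, with tf.tidy handling cleanup
const probs = tf.tidy(() => {
  const testInput = tf.tensor2d([[5.1, 3.5, 1.4, 0.2]]);
  return trainer.predict(testInput); // the returned tensor escapes the tidy scope
});

probs.data().then((values) => {
  console.log('Prediction:', values);
  probs.dispose(); // only the escaping tensor needs manual disposal
});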

Computer Vision with TensorFlow.js

Image Processing and CNNs

// Computer Vision Manager
class ComputerVisionManager {
  constructor(tfManager) {
    this.tf = tfManager.tf;
    this.tfManager = tfManager;
    this.pretrainedModels = new Map();
    this.customModels = new Map();
  }

  // Image preprocessing
  async preprocessImage(imageElement, targetSize = [224, 224]) {
    // Convert image to tensor
    let tensor = tf.browser.fromPixels(imageElement);

    // Resize image
    tensor = tf.image.resizeBilinear(tensor, targetSize);

    // Normalize pixel values to [0, 1]
    tensor = tf.div(tensor, 255.0);

    // Add batch dimension
    tensor = tf.expandDims(tensor, 0);

    return tensor;
  }

  // Data augmentation
  // Note: several tf.image.* ops used below (flipUpDown, adjustBrightness,
  // adjustContrast, adjustSaturation, adjustHue) mirror the Python TensorFlow
  // API and are not in TF.js core; treat this method as a sketch of the approach
  augmentImage(imageTensor, options = {}) {
    const {
      rotation = 0,
      horizontalFlip = false,
      verticalFlip = false,
      brightness = 0,
      contrast = 1,
      saturation = 1,
      hue = 0,
      zoom = 1,
      shear = 0,
    } = options;

    let augmented = imageTensor;

    // Random rotation
    if (rotation > 0) {
      const angle = ((Math.random() - 0.5) * 2 * rotation * Math.PI) / 180;
      augmented = tf.image.rotateWithOffset(augmented, angle);
    }

    // Random flip
    if (horizontalFlip && Math.random() > 0.5) {
      augmented = tf.image.flipLeftRight(augmented);
    }

    if (verticalFlip && Math.random() > 0.5) {
      augmented = tf.image.flipUpDown(augmented);
    }

    // Brightness adjustment
    if (brightness > 0) {
      const delta = (Math.random() - 0.5) * 2 * brightness;
      augmented = tf.image.adjustBrightness(augmented, delta);
    }

    // Contrast adjustment
    if (contrast !== 1) {
      const factor = 1 + (Math.random() - 0.5) * 2 * (contrast - 1);
      augmented = tf.image.adjustContrast(augmented, factor);
    }

    // Saturation adjustment
    if (saturation !== 1) {
      const factor = 1 + (Math.random() - 0.5) * 2 * (saturation - 1);
      augmented = tf.image.adjustSaturation(augmented, factor);
    }

    // Hue adjustment
    if (hue > 0) {
      const delta = (Math.random() - 0.5) * 2 * hue;
      augmented = tf.image.adjustHue(augmented, delta);
    }

    return augmented;
  }

  // CNN model for image classification
  createImageClassifier(numClasses, inputShape = [224, 224, 3]) {
    const model = tf.sequential();

    // Convolutional base
    model.add(
      tf.layers.conv2d({
        inputShape,
        filters: 32,
        kernelSize: 3,
        activation: 'relu',
      })
    );

    model.add(tf.layers.batchNormalization());
    model.add(tf.layers.maxPooling2d({ poolSize: 2 }));

    model.add(
      tf.layers.conv2d({
        filters: 64,
        kernelSize: 3,
        activation: 'relu',
      })
    );

    model.add(tf.layers.batchNormalization());
    model.add(tf.layers.maxPooling2d({ poolSize: 2 }));

    model.add(
      tf.layers.conv2d({
        filters: 128,
        kernelSize: 3,
        activation: 'relu',
      })
    );

    model.add(tf.layers.batchNormalization());
    model.add(tf.layers.maxPooling2d({ poolSize: 2 }));

    model.add(
      tf.layers.conv2d({
        filters: 256,
        kernelSize: 3,
        activation: 'relu',
      })
    );

    model.add(tf.layers.batchNormalization());
    model.add(tf.layers.globalAveragePooling2d());

    // Classifier head
    model.add(tf.layers.dropout({ rate: 0.5 }));
    model.add(
      tf.layers.dense({
        units: 512,
        activation: 'relu',
      })
    );

    model.add(tf.layers.dropout({ rate: 0.3 }));
    model.add(
      tf.layers.dense({
        units: numClasses,
        activation: 'softmax',
      })
    );

    return model;
  }

  // Load pretrained models
  async loadMobileNet() {
    if (!this.pretrainedModels.has('mobilenet')) {
      // MobileNet v1 hosted as a TF.js layers model; TF Hub variants are
      // graph models and would require tf.loadGraphModel instead
      const model = await tf.loadLayersModel(
        'https://storage.googleapis.com/tfjs-models/tfjs/mobilenet_v1_0.25_224/model.json'
      );
      this.pretrainedModels.set('mobilenet', model);
    }

    return this.pretrainedModels.get('mobilenet');
  }

  async loadCocoSSD() {
    if (!this.pretrainedModels.has('cocossd')) {
      // Load object detection model
      const model = await tf.loadGraphModel(
        'https://tfhub.dev/tensorflow/tfjs-model/ssd_mobilenet_v2/1/default/1',
        {
          fromTFHub: true,
        }
      );
      this.pretrainedModels.set('cocossd', model);
    }

    return this.pretrainedModels.get('cocossd');
  }

  // Object detection
  async detectObjects(imageTensor, model = null) {
    const detectionModel = model || (await this.loadCocoSSD());

    // Preprocess image for object detection
    const resized = tf.image.resizeBilinear(imageTensor, [300, 300]);
    const normalized = tf.div(resized, 255.0);
    const batched = tf.expandDims(normalized, 0);

    // Run inference (SSD graph models contain control-flow ops, so
    // executeAsync is needed rather than predict)
    const predictions = await detectionModel.executeAsync(batched);

    // Process predictions (output ordering varies between model versions;
    // [boxes, scores, classes] is assumed here)
    const boxes = await predictions[0].data();
    const scores = await predictions[1].data();
    const classes = await predictions[2].data();

    const detections = [];
    for (let i = 0; i < scores.length; i++) {
      if (scores[i] > 0.5) {
        // Confidence threshold
        detections.push({
          bbox: [
            boxes[i * 4], // y1
            boxes[i * 4 + 1], // x1
            boxes[i * 4 + 2], // y2
            boxes[i * 4 + 3], // x2
          ],
          class: classes[i],
          score: scores[i],
        });
      }
    }

    // Cleanup
    resized.dispose();
    normalized.dispose();
    batched.dispose();
    predictions.forEach((p) => p.dispose());

    return detections;
  }

  // Feature extraction for transfer learning
  async extractFeatures(imageTensor, model = null) {
    const featureModel = model || (await this.loadMobileNet());

    // The feature layer name depends on the model; 'conv_pw_13_relu' is the
    // activation conventionally used with the MobileNet v1 layers model
    const featureLayer = featureModel.getLayer('conv_pw_13_relu');
    const featureExtractor = tf.model({
      inputs: featureModel.inputs,
      outputs: featureLayer.output,
    });

    const features = featureExtractor.predict(imageTensor);
    return features;
  }

  // Transfer learning classifier
  createTransferLearningModel(baseModel, numClasses, freezeBase = true) {
    // Freeze base model layers
    if (freezeBase) {
      baseModel.layers.forEach((layer) => {
        layer.trainable = false;
      });
    }

    // Create new model with additional layers
    const model = tf.sequential();

    // Add base model
    model.add(baseModel);

    // Add custom classifier
    model.add(tf.layers.dropout({ rate: 0.2 }));
    model.add(
      tf.layers.dense({
        units: 128,
        activation: 'relu',
      })
    );
    model.add(tf.layers.dropout({ rate: 0.2 }));
    model.add(
      tf.layers.dense({
        units: numClasses,
        activation: 'softmax',
      })
    );

    return model;
  }

  // Image segmentation
  async segmentImage(imageTensor) {
    // Load segmentation model (placeholder)
    // const segModel = await tf.loadLayersModel('path/to/segmentation/model');

    // For demonstration, run a simple Sobel edge detection instead.
    // Convert to grayscale with standard luminance weights (expects a
    // batched [1, h, w, 3] tensor)
    const gray = tf.sum(
      tf.mul(imageTensor, tf.tensor1d([0.299, 0.587, 0.114])),
      -1,
      true
    );

    // Sobel kernels as conv2d filters with shape
    // [filterHeight, filterWidth, inDepth, outDepth] = [3, 3, 1, 1]
    const sobelX = tf.tensor4d([-1, 0, 1, -2, 0, 2, -1, 0, 1], [3, 3, 1, 1]);

    const sobelY = tf.tensor4d([-1, -2, -1, 0, 0, 0, 1, 2, 1], [3, 3, 1, 1]);

    const edgesX = tf.conv2d(gray, sobelX, 1, 'same');
    const edgesY = tf.conv2d(gray, sobelY, 1, 'same');

    const edges = tf.sqrt(tf.add(tf.square(edgesX), tf.square(edgesY)));

    // Cleanup
    gray.dispose();
    sobelX.dispose();
    sobelY.dispose();
    edgesX.dispose();
    edgesY.dispose();

    return edges;
  }

  // Style transfer
  async styleTransfer(contentImage, styleImage) {
    // Load style transfer model (the exact TF Hub path and version of the
    // TF.js port may vary; this URL is illustrative)
    const styleModel = await tf.loadGraphModel(
      'https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2/default/1',
      {
        fromTFHub: true,
      }
    );

    // Preprocess images
    const contentTensor = tf.image.resizeBilinear(contentImage, [256, 256]);
    const styleTensor = tf.image.resizeBilinear(styleImage, [256, 256]);

    const contentNormalized = tf.div(contentTensor, 255.0);
    const styleNormalized = tf.div(styleTensor, 255.0);

    const contentBatched = tf.expandDims(contentNormalized, 0);
    const styleBatched = tf.expandDims(styleNormalized, 0);

    // Generate styled image
    const styledImage = styleModel.predict([contentBatched, styleBatched]);

    // Cleanup
    contentTensor.dispose();
    styleTensor.dispose();
    contentNormalized.dispose();
    styleNormalized.dispose();
    contentBatched.dispose();
    styleBatched.dispose();

    return styledImage;
  }
}

// Webcam integration for real-time inference
class WebcamClassifier {
  constructor(computerVisionManager) {
    this.cvManager = computerVisionManager;
    this.tf = computerVisionManager.tf;
    this.video = null;
    this.canvas = null;
    this.ctx = null;
    this.model = null;
    this.isRunning = false;
    this.predictions = [];
  }

  async setupWebcam() {
    // Create video element
    this.video = document.createElement('video');
    this.video.width = 640;
    this.video.height = 480;
    this.video.autoplay = true;

    // Create canvas for visualization
    this.canvas = document.createElement('canvas');
    this.canvas.width = 640;
    this.canvas.height = 480;
    this.ctx = this.canvas.getContext('2d');

    // Get user media
    const stream = await navigator.mediaDevices.getUserMedia({
      video: { width: 640, height: 480 },
    });

    this.video.srcObject = stream;

    return new Promise((resolve) => {
      this.video.onloadedmetadata = () => {
        resolve();
      };
    });
  }

  async loadModel(modelPath) {
    this.model = await tf.loadLayersModel(modelPath);
    return this.model;
  }

  async startRealTimeInference(options = {}) {
    if (!this.model) {
      throw new Error('Model not loaded');
    }

    const {
      fps = 30,
      confidenceThreshold = 0.5,
      onPrediction = null,
    } = options;

    this.isRunning = true;
    const intervalMs = 1000 / fps;

    const predict = async () => {
      if (!this.isRunning) return;

      try {
        // Capture and preprocess the current video frame
        const preprocessed = await this.cvManager.preprocessImage(this.video);

        // Make prediction
        const prediction = this.model.predict(preprocessed);
        const predictionData = await prediction.data();

        // Process results
        const results = Array.from(predictionData)
          .map((confidence, index) => ({
            class: index,
            confidence,
          }))
          .filter((result) => result.confidence > confidenceThreshold)
          .sort((a, b) => b.confidence - a.confidence);

        this.predictions = results;

        // Draw results
        this.drawPredictions(results);

        // Callback
        if (onPrediction) {
          onPrediction(results);
        }

        // Cleanup
        preprocessed.dispose();
        prediction.dispose();
      } catch (error) {
        console.error('Prediction error:', error);
      }

      setTimeout(predict, intervalMs);
    };

    predict();
  }

  stopInference() {
    this.isRunning = false;
  }

  drawPredictions(predictions) {
    // Clear canvas
    this.ctx.clearRect(0, 0, this.canvas.width, this.canvas.height);

    // Draw video frame
    this.ctx.drawImage(this.video, 0, 0);

    // Draw predictions
    this.ctx.font = '16px Arial';
    this.ctx.fillStyle = 'rgba(255, 255, 255, 0.8)';
    this.ctx.strokeStyle = 'rgba(0, 0, 0, 0.8)';

    predictions.slice(0, 3).forEach((prediction, index) => {
      const y = 30 + index * 25;
      const text = `Class ${prediction.class}: ${(prediction.confidence * 100).toFixed(1)}%`;

      this.ctx.fillText(text, 10, y);
      this.ctx.strokeText(text, 10, y);
    });
  }

  getCanvas() {
    return this.canvas;
  }

  getVideo() {
    return this.video;
  }

  cleanup() {
    this.stopInference();

    if (this.video && this.video.srcObject) {
      this.video.srcObject.getTracks().forEach((track) => track.stop());
    }
  }
}

// Usage example
async function createComputerVisionApp() {
  // Initialize TensorFlow.js
  const tfManager = new TensorFlowManager();
  await tfManager.initialize();

  // Create computer vision manager
  const cvManager = new ComputerVisionManager(tfManager);

  // Create image classifier
  const model = cvManager.createImageClassifier(10); // 10 classes
  model.compile({
    optimizer: 'adam',
    loss: 'categoricalCrossentropy',
    metrics: ['accuracy'],
  });

  model.summary(); // prints the layer table to the console

  // Example: Object detection
  const imageElement = document.createElement('img');
  imageElement.src = 'path/to/image.jpg';

  imageElement.onload = async () => {
    const imageTensor = await cvManager.preprocessImage(imageElement);
    const detections = await cvManager.detectObjects(imageTensor);

    console.log('Detected objects:', detections);

    // Cleanup
    imageTensor.dispose();
  };

  // Example: Webcam classifier
  const webcamClassifier = new WebcamClassifier(cvManager);

  try {
    await webcamClassifier.setupWebcam();
    // await webcamClassifier.loadModel('path/to/trained/model.json');

    // Add to DOM
    document.body.appendChild(webcamClassifier.getVideo());
    document.body.appendChild(webcamClassifier.getCanvas());

    // Start real-time inference
    // await webcamClassifier.startRealTimeInference({
    //   fps: 10,
    //   confidenceThreshold: 0.7,
    //   onPrediction: (predictions) => {
    //     console.log('Real-time predictions:', predictions);
    //   }
    // });
  } catch (error) {
    console.error('Webcam setup error:', error);
  }

  return { tfManager, cvManager, webcamClassifier, model };
}

// Initialize the computer vision app
createComputerVisionApp()
  .then((result) => {
    console.log('Computer vision app initialized');
  })
  .catch((error) => {
    console.error('Error initializing app:', error);
  });
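
For object detection specifically, the official @tensorflow-models/coco-ssd wrapper package hides the preprocessing and output decoding handled manually in detectObjects above. A minimal sketch (the package and its load/detect API are real; the image element is assumed to exist in the page):

// Hedged sketch: object detection via the official coco-ssd wrapper
import * as cocoSsd from '@tensorflow-models/coco-ssd';

async function detectWithWrapper(imageElement) {
  const model = await cocoSsd.load();
  const predictions = await model.detect(imageElement);

  // Each prediction looks like:
  // { bbox: [x, y, width, height], class: 'person', score: 0.97 }
  console.log(predictions);
}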

Natural Language Processing

Text Processing and RNNs

// Natural Language Processing Manager
class NLPManager {
  constructor(tfManager) {
    this.tf = tfManager.tf;
    this.tfManager = tfManager;
    this.vocabulary = new Map();
    this.reverseVocabulary = new Map();
    this.maxSequenceLength = 100;
    this.vocabSize = 10000;
    this.models = new Map();
  }

  // Text preprocessing
  preprocessText(text) {
    // Convert to lowercase and remove special characters
    return text
      .toLowerCase()
      .replace(/[^\w\s]/g, ' ')
      .replace(/\s+/g, ' ')
      .trim();
  }

  tokenize(text) {
    return this.preprocessText(text)
      .split(' ')
      .filter((word) => word.length > 0);
  }

  // Build vocabulary from texts
  buildVocabulary(texts) {
    const wordCounts = new Map();

    // Count word frequencies
    texts.forEach((text) => {
      const tokens = this.tokenize(text);
      tokens.forEach((token) => {
        wordCounts.set(token, (wordCounts.get(token) || 0) + 1);
      });
    });

    // Sort by frequency and take top words
    const sortedWords = Array.from(wordCounts.entries())
      .sort((a, b) => b[1] - a[1])
      .slice(0, this.vocabSize - 2); // Reserve space for special tokens

    // Build vocabulary mapping
    this.vocabulary.clear();
    this.reverseVocabulary.clear();

    // Add special tokens
    this.vocabulary.set('<PAD>', 0);
    this.vocabulary.set('<UNK>', 1);
    this.reverseVocabulary.set(0, '<PAD>');
    this.reverseVocabulary.set(1, '<UNK>');

    // Add words
    sortedWords.forEach(([word], index) => {
      const id = index + 2;
      this.vocabulary.set(word, id);
      this.reverseVocabulary.set(id, word);
    });

    return this.vocabulary;
  }

  // Text to sequences
  textsToSequences(texts) {
    return texts.map((text) => {
      const tokens = this.tokenize(text);
      return tokens.map((token) => {
        return this.vocabulary.get(token) || this.vocabulary.get('<UNK>');
      });
    });
  }

  // Pad sequences
  padSequences(sequences, maxLen = null) {
    const maxLength = maxLen || this.maxSequenceLength;

    return sequences.map((sequence) => {
      if (sequence.length > maxLength) {
        return sequence.slice(0, maxLength);
      } else {
        const padding = new Array(maxLength - sequence.length).fill(0);
        return [...sequence, ...padding];
      }
    });
  }

  // Sequence to text
  sequenceToText(sequence) {
    return sequence
      .map((id) => this.reverseVocabulary.get(id) || '<UNK>')
      .filter((token) => token !== '<PAD>')
      .join(' ');
  }

  // Word embeddings
  createEmbeddingLayer(embeddingDim = 100, inputLength = null) {
    return tf.layers.embedding({
      inputDim: this.vocabulary.size,
      outputDim: embeddingDim,
      inputLength: inputLength || this.maxSequenceLength,
      maskZero: true,
    });
  }

  // Text classification model
  createTextClassifier(numClasses, options = {}) {
    const {
      embeddingDim = 100,
      rnnUnits = 64,
      denseUnits = 32,
      dropoutRate = 0.5,
      rnnType = 'lstm',
    } = options;

    const model = tf.sequential();

    // Embedding layer
    model.add(this.createEmbeddingLayer(embeddingDim));

    // RNN layer
    if (rnnType === 'lstm') {
      model.add(
        tf.layers.lstm({
          units: rnnUnits,
          returnSequences: false,
          dropout: dropoutRate,
          recurrentDropout: dropoutRate,
        })
      );
    } else if (rnnType === 'gru') {
      model.add(
        tf.layers.gru({
          units: rnnUnits,
          returnSequences: false,
          dropout: dropoutRate,
          recurrentDropout: dropoutRate,
        })
      );
    } else {
      model.add(
        tf.layers.simpleRNN({
          units: rnnUnits,
          returnSequences: false,
          dropout: dropoutRate,
        })
      );
    }

    // Dense layers
    model.add(
      tf.layers.dense({
        units: denseUnits,
        activation: 'relu',
      })
    );

    model.add(tf.layers.dropout({ rate: dropoutRate }));

    model.add(
      tf.layers.dense({
        units: numClasses,
        activation: numClasses === 1 ? 'sigmoid' : 'softmax',
      })
    );

    return model;
  }

  // Sequence-to-sequence model
  createSeq2SeqModel(options = {}) {
    const {
      embeddingDim = 256,
      latentDim = 256,
      inputLength = this.maxSequenceLength,
      outputLength = this.maxSequenceLength,
    } = options;

    // Encoder
    const encoderInputs = tf.input({ shape: [inputLength] });
    const encoderEmbedding = tf.layers
      .embedding({
        inputDim: this.vocabulary.size,
        outputDim: embeddingDim,
        maskZero: true,
      })
      .apply(encoderInputs);

    const [encoderOutputs, stateH, stateC] = tf.layers
      .lstm({
        units: latentDim,
        returnState: true,
      })
      .apply(encoderEmbedding);

    const encoderStates = [stateH, stateC];

    // Decoder
    const decoderInputs = tf.input({ shape: [outputLength] });
    const decoderEmbedding = tf.layers
      .embedding({
        inputDim: this.vocabulary.size,
        outputDim: embeddingDim,
        maskZero: true,
      })
      .apply(decoderInputs);

    const decoderLstm = tf.layers.lstm({
      units: latentDim,
      returnSequences: true,
      returnState: true,
    });

    const [decoderOutputs] = decoderLstm.apply(decoderEmbedding, {
      initialState: encoderStates,
    });

    const decoderDense = tf.layers.dense({
      units: this.vocabulary.size,
      activation: 'softmax',
    });

    const decoderOutputsFinal = decoderDense.apply(decoderOutputs);

    // Define the model
    const model = tf.model({
      inputs: [encoderInputs, decoderInputs],
      outputs: decoderOutputsFinal,
    });

    return model;
  }

  // Attention mechanism
  // Note: tf.layers.attention mirrors the Python Keras API and is not part of
  // TF.js core; a custom layer would be needed in practice
  createAttentionLayer() {
    return tf.layers.attention();
  }

  // Transformer-like model
  createTransformerEncoder(options = {}) {
    const {
      dModel = 512,
      numHeads = 8,
      dff = 2048,
      dropoutRate = 0.1,
      numLayers = 6,
    } = options;

    // Multi-head attention
    // Note: tf.layers.multiHeadAttention also mirrors the Python Keras API and
    // is not available in TF.js core; treat this encoder as an architectural sketch
    const multiHeadAttention = (inputs, training = false) => {
      const attention = tf.layers.multiHeadAttention({
        numHeads,
        keyDim: dModel / numHeads,
        dropout: dropoutRate,
      });

      return attention.apply([inputs, inputs], { training });
    };

    // Position-wise feed forward
    const pointWiseFeedForward = (inputs) => {
      const dense1 = tf.layers.dense({
        units: dff,
        activation: 'relu',
      });
      const dense2 = tf.layers.dense({
        units: dModel,
      });

      const output1 = dense1.apply(inputs);
      return dense2.apply(output1);
    };

    // Encoder layer
    const encoderLayer = (inputs, training = false) => {
      // Multi-head attention
      const attention = multiHeadAttention(inputs, training);
      const dropout1 = tf.layers.dropout({ rate: dropoutRate });
      const attnOutput = dropout1.apply(attention, { training });

      // Add & Norm (tf.layers.add() is used because these are symbolic
      // tensors; tf.add() only works on concrete tensors)
      const norm1 = tf.layers.layerNormalization();
      const out1 = norm1.apply(tf.layers.add().apply([inputs, attnOutput]));

      // Feed forward
      const ffn = pointWiseFeedForward(out1);
      const dropout2 = tf.layers.dropout({ rate: dropoutRate });
      const ffnOutput = dropout2.apply(ffn, { training });

      // Add & Norm
      const norm2 = tf.layers.layerNormalization();
      return norm2.apply(tf.layers.add().apply([out1, ffnOutput]));
    };

    return { encoderLayer, multiHeadAttention, pointWiseFeedForward };
  }

  // Text generation
  async generateText(seedText, model, maxLength = 100, temperature = 1.0) {
    let generatedText = seedText;
    let currentSequence = this.padSequences([
      this.textsToSequences([seedText])[0],
    ])[0];

    for (let i = 0; i < maxLength; i++) {
      // Prepare input
      const inputTensor = tf.tensor2d([currentSequence]);

      // Predict next token
      const predictions = model.predict(inputTensor);
      const probabilities = await predictions.data();

      // Sample next token with temperature
      const nextTokenId = this.sampleWithTemperature(
        probabilities,
        temperature
      );
      const nextToken = this.reverseVocabulary.get(nextTokenId);

      if (!nextToken || nextToken === '<PAD>') break;

      generatedText += ' ' + nextToken;

      // Update sequence
      currentSequence = [...currentSequence.slice(1), nextTokenId];

      // Cleanup
      inputTensor.dispose();
      predictions.dispose();
    }

    return generatedText;
  }

  sampleWithTemperature(probabilities, temperature) {
    // Apply temperature
    const logits = Array.from(probabilities).map(
      (p) => Math.log(p + 1e-8) / temperature
    );

    // Softmax
    const maxLogit = Math.max(...logits);
    const expLogits = logits.map((l) => Math.exp(l - maxLogit));
    const sumExp = expLogits.reduce((sum, exp) => sum + exp, 0);
    const probs = expLogits.map((exp) => exp / sumExp);

    // Sample
    const random = Math.random();
    let cumSum = 0;

    for (let i = 0; i < probs.length; i++) {
      cumSum += probs[i];
      if (random < cumSum) {
        return i;
      }
    }

    return probs.length - 1;
  }

  // Sentiment analysis
  async analyzeSentiment(text, model) {
    const sequences = this.padSequences(this.textsToSequences([text]));
    const inputTensor = tf.tensor2d(sequences);

    const prediction = model.predict(inputTensor);
    const score = await prediction.data();

    inputTensor.dispose();
    prediction.dispose();

    return {
      text,
      sentiment: score[0] > 0.5 ? 'positive' : 'negative',
      confidence: Math.abs(score[0] - 0.5) * 2,
    };
  }

  // Named entity recognition (simplified)
  async recognizeEntities(text, model) {
    const tokens = this.tokenize(text);
    const sequences = this.padSequences([this.textsToSequences([text])[0]]);
    const inputTensor = tf.tensor2d(sequences);

    const predictions = model.predict(inputTensor);
    const entityProbabilities = await predictions.data();

    const entities = [];
    const numTokens = tokens.length;

    for (let i = 0; i < numTokens; i++) {
      const entityProb = entityProbabilities[i];
      if (entityProb > 0.5) {
        entities.push({
          token: tokens[i],
          position: i,
          type: 'ENTITY',
          confidence: entityProb,
        });
      }
    }

    inputTensor.dispose();
    predictions.dispose();

    return entities;
  }
}

// Usage example
async function createNLPApp() {
  // Initialize TensorFlow.js
  const tfManager = new TensorFlowManager();
  await tfManager.initialize();

  // Create NLP manager
  const nlpManager = new NLPManager(tfManager);

  // Sample training data
  const trainingTexts = [
    "I love this product, it's amazing!",
    'This is terrible, worst purchase ever.',
    'Great quality and fast shipping.',
    'Poor customer service, very disappointed.',
    'Excellent value for money, highly recommend.',
    'Not worth the price, very low quality.',
  ];

  const labels = [1, 0, 1, 0, 1, 0]; // 1 = positive, 0 = negative

  // Build vocabulary
  nlpManager.buildVocabulary(trainingTexts);
  console.log('Vocabulary size:', nlpManager.vocabulary.size);

  // Prepare training data
  const sequences = nlpManager.padSequences(
    nlpManager.textsToSequences(trainingTexts)
  );

  const xTrain = tf.tensor2d(sequences);
  // Shape [numSamples, 1] to match the model's single sigmoid output
  const yTrain = tf.tensor2d(labels, [labels.length, 1]);

  // Create sentiment analysis model
  const sentimentModel = nlpManager.createTextClassifier(1, {
    embeddingDim: 50,
    rnnUnits: 32,
    denseUnits: 16,
    dropoutRate: 0.3,
    rnnType: 'lstm',
  });

  sentimentModel.compile({
    optimizer: 'adam',
    loss: 'binaryCrossentropy',
    metrics: ['accuracy'],
  });

  sentimentModel.summary(); // prints the layer table to the console

  // Train the model
  await sentimentModel.fit(xTrain, yTrain, {
    epochs: 10,
    batchSize: 2,
    validationSplit: 0.2,
    verbose: 1,
  });

  // Test sentiment analysis
  const testText = 'This product is fantastic and works perfectly!';
  const sentiment = await nlpManager.analyzeSentiment(testText, sentimentModel);
  console.log('Sentiment analysis:', sentiment);

  // Cleanup
  xTrain.dispose();
  yTrain.dispose();

  return { tfManager, nlpManager, sentimentModel };
}

// Initialize NLP app
createNLPApp()
  .then((result) => {
    console.log('NLP app initialized successfully');
  })
  .catch((error) => {
    console.error('Error initializing NLP app:', error);
  });
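
For datasets too large to hold in one tensor, the tf.data API streams batches into training instead. The sketch below assumes the sequences, labels, and sentimentModel from createNLPApp and is a drop-in alternative to the fit call there (run inside an async context):

// Hedged sketch: streaming the same training data through tf.data
const ds = tf.data
  .zip({
    xs: tf.data.array(sequences),
    ys: tf.data.array(labels.map((l) => [l])), // shape [1] per sample
  })
  .shuffle(sequences.length)
  .batch(2);

// fitDataset consumes {xs, ys} batches directly
await sentimentModel.fitDataset(ds, { epochs: 10 });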

Model Deployment and Optimization

Production-Ready ML Applications

// Model Deployment Manager
class ModelDeploymentManager {
  constructor(tfManager) {
    this.tf = tfManager.tf;
    this.tfManager = tfManager;
    this.deployedModels = new Map();
    this.modelCache = new Map();
    this.performanceMonitor = new PerformanceMonitor();
    this.batchProcessor = new BatchProcessor();
  }

  // Model quantization for performance
  // Note: TF.js has no runtime quantization API; weights are quantized
  // offline with the tensorflowjs_converter tool (e.g. --quantize_uint8).
  // This method is kept as a deployment hook and returns the model unchanged.
  async quantizeModel(model, options = {}) {
    const {
      quantizationBytes = 1, // 1 for int8, 2 for int16
      optimizeForSize = true,
    } = options;

    console.warn(
      `Requested ${quantizationBytes}-byte quantization (optimizeForSize=${optimizeForSize}); ` +
        'apply it offline with tensorflowjs_converter.'
    );

    return model;
  }

  // Model pruning (simplified: zero out the weights with smallest magnitudes)
  async pruneModel(model, sparsity = 0.5) {
    const prunedWeights = [];

    // getWeights() returns concrete weight tensors (model.weights holds
    // LayerVariable wrappers, which must be read() before tensor ops)
    model.getWeights().forEach((values) => {
      const threshold = this.calculatePruningThreshold(values, sparsity);

      const prunedValues = tf.where(
        tf.greater(tf.abs(values), threshold),
        values,
        tf.zerosLike(values)
      );

      prunedWeights.push(prunedValues);
    });

    return prunedWeights;
  }

  calculatePruningThreshold(weights, sparsity) {
    const flatWeights = tf.reshape(weights, [-1]);
    const absWeights = tf.abs(flatWeights);
    // topk sorts in descending order, so keeping the top (1 - sparsity)
    // fraction prunes the smallest `sparsity` fraction of the weights
    const sortedWeights = tf.topk(absWeights, flatWeights.shape[0]);

    const thresholdIndex = Math.max(
      0,
      Math.floor(flatWeights.shape[0] * (1 - sparsity)) - 1
    );
    const threshold = sortedWeights.values.slice([thresholdIndex], [1]);

    flatWeights.dispose();
    absWeights.dispose();
    sortedWeights.values.dispose();
    sortedWeights.indices.dispose();

    return threshold;
  }

  // Model versioning
  deployModel(modelName, model, version = '1.0.0', metadata = {}) {
    const deploymentInfo = {
      model,
      version,
      deployedAt: new Date().toISOString(),
      metadata: {
        ...metadata,
        modelSize: this.estimateModelSize(model),
        parameters: model.countParams(),
        layers: model.layers.length,
      },
    };

    if (!this.deployedModels.has(modelName)) {
      this.deployedModels.set(modelName, new Map());
    }

    this.deployedModels.get(modelName).set(version, deploymentInfo);

    return deploymentInfo;
  }

  getModel(modelName, version = 'latest') {
    const modelVersions = this.deployedModels.get(modelName);
    if (!modelVersions) return null;

    if (version === 'latest') {
      // Note: lexicographic sort; use a semver-aware comparator once versions
      // reach double digits (e.g. '10.0.0' sorts before '2.0.0')
      const versions = Array.from(modelVersions.keys());
      version = versions.sort().pop();
    }

    return modelVersions.get(version);
  }

  // A/B testing support
  createABTestManager(modelA, modelB, splitRatio = 0.5) {
    return {
      predict: (input) => {
        const useModelA = Math.random() < splitRatio;
        const model = useModelA ? modelA : modelB;
        const prediction = model.predict(input);

        // Log for analysis
        this.performanceMonitor.logPrediction({
          model: useModelA ? 'A' : 'B',
          timestamp: Date.now(),
          input: input.shape,
          output: prediction.shape,
        });

        return prediction;
      },
    };
  }

  // Batch processing for efficiency
  async processBatch(inputs, model, batchSize = 32) {
    const results = [];

    for (let i = 0; i < inputs.length; i += batchSize) {
      const batch = inputs.slice(i, i + batchSize);
      const batchTensor = tf.stack(batch);

      const predictions = model.predict(batchTensor);
      const predictionArray = await predictions.data();

      results.push(...predictionArray);

      // Cleanup
      batchTensor.dispose();
      predictions.dispose();
    }

    return results;
  }

  // Model caching
  cacheModel(modelName, model, ttl = 3600000) {
    // 1 hour default TTL
    const cacheEntry = {
      model,
      cachedAt: Date.now(),
      ttl,
    };

    this.modelCache.set(modelName, cacheEntry);

    // Set cleanup timer
    setTimeout(() => {
      this.modelCache.delete(modelName);
    }, ttl);
  }

  getCachedModel(modelName) {
    const entry = this.modelCache.get(modelName);
    if (!entry) return null;

    if (Date.now() - entry.cachedAt > entry.ttl) {
      this.modelCache.delete(modelName);
      return null;
    }

    return entry.model;
  }

  // Performance optimization
  // Note: converting a LayersModel to a GraphModel for faster inference is an
  // offline step (tensorflowjs_converter --output_format=tfjs_graph_model);
  // TF.js has no runtime conversion API, so the model is returned unchanged
  optimizeForInference(model) {
    return model;
  }

  // Model monitoring
  monitorModel(modelName, model) {
    const monitor = {
      predict: (input) => {
        // WebGL execution is asynchronous, so this measures op-dispatch time;
        // await prediction.data() to capture end-to-end latency
        const startTime = performance.now();
        const prediction = model.predict(input);
        const endTime = performance.now();

        this.performanceMonitor.recordMetrics({
          modelName,
          latency: endTime - startTime,
          memoryUsage: tf.memory().numBytes,
          timestamp: Date.now(),
        });

        return prediction;
      },
    };

    return monitor;
  }

  estimateModelSize(model) {
    const weights = model.getWeights();
    let totalSize = 0;

    weights.forEach((weight) => {
      totalSize += weight.size * 4; // 4 bytes per float32
    });

    return totalSize;
  }
}

// Performance Monitoring
class PerformanceMonitor {
  constructor() {
    this.metrics = [];
    this.predictions = [];
    this.alerts = [];
  }

  recordMetrics(metric) {
    this.metrics.push(metric);

    // Check for performance issues
    this.checkPerformance(metric);

    // Keep only last 1000 metrics
    if (this.metrics.length > 1000) {
      this.metrics = this.metrics.slice(-1000);
    }
  }

  logPrediction(prediction) {
    this.predictions.push(prediction);

    if (this.predictions.length > 1000) {
      this.predictions = this.predictions.slice(-1000);
    }
  }

  checkPerformance(metric) {
    // Alert if latency is too high
    if (metric.latency > 1000) {
      // 1 second
      this.alerts.push({
        type: 'HIGH_LATENCY',
        message: `High latency detected: ${metric.latency}ms`,
        timestamp: Date.now(),
        metric,
      });
    }

    // Alert if memory usage is too high
    if (metric.memoryUsage > 1024 * 1024 * 100) {
      // 100MB
      this.alerts.push({
        type: 'HIGH_MEMORY_USAGE',
        message: `High memory usage: ${(metric.memoryUsage / 1024 / 1024).toFixed(2)}MB`,
        timestamp: Date.now(),
        metric,
      });
    }
  }

  getMetrics(timeRange = 3600000) {
    // 1 hour
    const cutoff = Date.now() - timeRange;
    return this.metrics.filter((m) => m.timestamp > cutoff);
  }

  getAverageLatency(timeRange = 3600000) {
    const recentMetrics = this.getMetrics(timeRange);
    if (recentMetrics.length === 0) return 0;

    const totalLatency = recentMetrics.reduce((sum, m) => sum + m.latency, 0);
    return totalLatency / recentMetrics.length;
  }

  getAlerts() {
    return this.alerts;
  }

  clearAlerts() {
    this.alerts = [];
  }
}

// Batch Processing
class BatchProcessor {
  constructor(maxBatchSize = 32, maxWaitTime = 100) {
    this.maxBatchSize = maxBatchSize;
    this.maxWaitTime = maxWaitTime;
    this.queue = [];
    this.processing = false;
  }

  async process(input, model) {
    return new Promise((resolve, reject) => {
      this.queue.push({ input, resolve, reject });

      if (!this.processing) {
        this.processBatch(model);
      }
    });
  }

  async processBatch(model) {
    this.processing = true;

    // Wait for batch to fill or timeout
    await this.waitForBatch();

    if (this.queue.length === 0) {
      this.processing = false;
      return;
    }

    // Extract inputs and callbacks for this batch (done outside the try
    // block so a failed batch can still be rejected below)
    const batch = this.queue.splice(0, this.maxBatchSize);
    const inputs = batch.map((item) => item.input);
    const callbacks = batch.map((item) => ({
      resolve: item.resolve,
      reject: item.reject,
    }));

    try {
      // Stack inputs into batch tensor
      const batchTensor = tf.stack(inputs);

      // Process batch
      const predictions = model.predict(batchTensor);
      const results = tf.unstack(predictions);

      // Resolve individual promises; callers take ownership of their result
      // tensors and are responsible for disposing them
      results.forEach((result, index) => {
        callbacks[index].resolve(result);
      });

      // Cleanup (the unstacked results now belong to the callers)
      batchTensor.dispose();
      predictions.dispose();
    } catch (error) {
      // Reject every promise in this batch (it was already removed from the queue)
      callbacks.forEach((cb) => cb.reject(error));
    }

    this.processing = false;

    // Process next batch if queue not empty
    if (this.queue.length > 0) {
      this.processBatch(model);
    }
  }

  async waitForBatch() {
    return new Promise((resolve) => {
      const checkBatch = () => {
        if (this.queue.length >= this.maxBatchSize) {
          resolve();
        } else {
          setTimeout(resolve, this.maxWaitTime);
        }
      };

      checkBatch();
    });
  }
}

// Usage example
async function setupProductionDeployment() {
  // Initialize TensorFlow.js
  const tfManager = new TensorFlowManager();
  await tfManager.initialize();

  // Create deployment manager
  const deploymentManager = new ModelDeploymentManager(tfManager);

  // Load a trained model (example)
  const model = tf.sequential({
    layers: [
      tf.layers.dense({ units: 64, activation: 'relu', inputShape: [10] }),
      tf.layers.dropout({ rate: 0.2 }),
      tf.layers.dense({ units: 32, activation: 'relu' }),
      tf.layers.dense({ units: 1, activation: 'sigmoid' }),
    ],
  });

  model.compile({
    optimizer: 'adam',
    loss: 'binaryCrossentropy',
    metrics: ['accuracy'],
  });

  // "Quantize" model for production (a pass-through at runtime; see the note
  // in quantizeModel, since real quantization happens offline in the converter)
  const quantizedModel = await deploymentManager.quantizeModel(model, {
    quantizationBytes: 1,
    optimizeForSize: true,
  });

  // Deploy model with versioning
  const deployment = deploymentManager.deployModel(
    'sentiment-classifier',
    quantizedModel,
    '1.0.0',
    {
      description: 'Sentiment classification model',
      accuracy: 0.95,
      trainingData: 'customer-reviews-v1',
    }
  );

  console.log('Model deployed:', deployment);

  // Set up monitoring
  const monitoredModel = deploymentManager.monitorModel(
    'sentiment-classifier',
    quantizedModel
  );

  // Example inference with monitoring
  const testInput = tf.randomNormal([1, 10]);
  const prediction = monitoredModel.predict(testInput);

  console.log('Prediction shape:', prediction.shape);

  // Get performance metrics
  const metrics = deploymentManager.performanceMonitor.getMetrics();
  console.log('Performance metrics:', metrics);

  // Cleanup
  testInput.dispose();
  prediction.dispose();

  return { deploymentManager, quantizedModel, monitoredModel };
}

// Initialize production deployment
setupProductionDeployment()
  .then((result) => {
    console.log('Production deployment setup complete');
  })
  .catch((error) => {
    console.error('Error setting up deployment:', error);
  });
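
One production detail worth calling out: on the WebGL backend, the first predict call pays a one-time cost for shader compilation and GPU upload, so real traffic should never see that first call. A minimal warm-up sketch, assuming a model with input shape [1, 10] as in the example above:

// Hedged sketch: warm up the model before serving real requests
async function warmUp(model) {
  const dummy = tf.zeros([1, 10]);
  const out = model.predict(dummy);
  await out.data(); // forces shader compilation and execution to complete
  dummy.dispose();
  out.dispose();
}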

Conclusion

JavaScript Machine Learning with TensorFlow.js opens up powerful possibilities for building AI applications that run directly in browsers and Node.js environments. From neural networks and computer vision to natural language processing and production deployment, JavaScript provides a complete ecosystem for machine learning development. The key advantages include client-side privacy, reduced server costs, and real-time inference capabilities.

When building ML applications with JavaScript, focus on efficient memory management, model optimization for web deployment, and proper performance monitoring. Consider using quantization and pruning techniques for production models, implement proper caching strategies, and monitor your models' performance in real-world scenarios to ensure optimal user experiences.
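
As a concrete instance of the caching advice above, TF.js can persist models in the browser through its built-in indexeddb:// URL scheme, so repeat visits skip the network fetch entirely (a minimal sketch, assuming a trained model and an async context):

// Minimal sketch: cache a trained model locally, then load it back
await model.save('indexeddb://sentiment-classifier-v1');

const cached = await tf.loadLayersModel('indexeddb://sentiment-classifier-v1');
console.log('Loaded from IndexedDB:', cached.countParams(), 'parameters');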