useMediaDevices
A hook for tracking connected media input and output devices
A React hook that provides a list of connected media input and output devices (cameras, microphones, speakers) using the MediaDevices API. It automatically updates when devices are connected or disconnected, making it perfect for building video conferencing apps, audio recording tools, or any application that needs to interact with media hardware.
Source Code
View the full hook implementation in the Hook Source Code section below.
Permission Required
Important: While you can enumerate devices without permission, the
label property (e.g., "Internal Microphone", "FaceTime HD Camera") will be
empty until the user grants access to the microphone or camera. Use the
built-in requestPermission function to prompt the user for access.
Features
- Real-time Updates - Automatically listens for `devicechange` events to keep the device list current when users plug in or remove devices
- SSR Safe - Gracefully handles server-side rendering environments where `navigator` is unavailable
- Type Safe - Full TypeScript support with properly typed `MediaDeviceInfo[]` arrays
- Options API - Filter devices by kind (`audioinput`, `audiooutput`, `videoinput`) to get only the devices you need
- Permission Handling - Built-in `requestPermission` function to request camera/microphone access and reveal device labels
- Manual Refresh - `refetch` function to manually update the device list on demand
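A minimal sketch of basic usage, using the same import path as the detailed examples below (the DeviceList component name is just illustrative):

```tsx
"use client";
import { useMediaDevices } from "@repo/hooks/device/use-media-devices";

export const DeviceList = () => {
  // Enumerate every connected camera, microphone, and speaker
  const { devices, isLoading, error } = useMediaDevices();

  if (isLoading) return <p>Loading devices...</p>;
  if (error) return <p>Could not list devices: {error.message}</p>;

  return (
    <ul>
      {devices.map((device, i) => (
        <li key={device.deviceId || i}>
          {device.kind}: {device.label || "(label hidden until permission is granted)"}
        </li>
      ))}
    </ul>
  );
};
```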
Learn More
Request Permissions
Before you can see device labels (like "MacBook Pro Microphone" or "FaceTime HD Camera"), you need to request permission from the user. This example shows how to use the built-in requestPermission function to prompt for camera and microphone access.
The hasPermission state tells you whether the user has already granted access - if true, device labels will be visible.
"use client";
import { useMediaDevices } from "@repo/hooks/device/use-media-devices";
import { Button } from "@repo/ui/components/button";
import { Shield, ShieldCheck, Mic, Camera } from "lucide-react";
/* REQUEST PERMISSIONS - Reveal Device Labels */
export const Example1 = () => {
const { devices, isLoading, hasPermission, requestPermission, refetch } =
useMediaDevices();
const handleRequestAudio = async () => {
await requestPermission({ audio: true, video: false });
};
const handleRequestVideo = async () => {
await requestPermission({ audio: false, video: true });
};
const handleRequestBoth = async () => {
await requestPermission({ audio: true, video: true });
};
if (isLoading) {
return (
<span className="text-muted-foreground text-sm">
Loading devices...
</span>
);
}
return (
<div className="flex w-full max-w-sm flex-col gap-4">
{/* Permission Status */}
<div
className={`flex items-center gap-2 rounded-lg border p-4 ${
hasPermission
? "border-green-500/50 bg-green-500/10"
: "border-yellow-500/50 bg-yellow-500/10"
}`}
>
{hasPermission ? (
<>
<ShieldCheck className="h-5 w-5 text-green-600 dark:text-green-400" />
<div>
<div className="text-sm font-medium text-green-600 dark:text-green-400">
Permission Granted
</div>
<div className="text-muted-foreground text-xs">
Device labels are now visible
</div>
</div>
</>
) : (
<>
<Shield className="h-5 w-5 text-yellow-600 dark:text-yellow-400" />
<div>
<div className="text-sm font-medium text-yellow-600 dark:text-yellow-400">
Permission Required
</div>
<div className="text-muted-foreground text-xs">
Grant access to see device names
</div>
</div>
</>
)}
</div>
{/* Permission Buttons */}
{!hasPermission && (
<div className="flex flex-wrap gap-2">
<Button
variant="outline"
size="sm"
onClick={handleRequestAudio}
className="gap-2"
>
<Mic className="h-4 w-4" />
Microphone
</Button>
<Button
variant="outline"
size="sm"
onClick={handleRequestVideo}
className="gap-2"
>
<Camera className="h-4 w-4" />
Camera
</Button>
<Button size="sm" onClick={handleRequestBoth}>
Request Both
</Button>
</div>
)}
{/* Device Count */}
<div className="text-muted-foreground flex items-center justify-between text-sm">
<span>
{devices.length} device{devices.length !== 1 ? "s" : ""}{" "}
found
</span>
<Button variant="ghost" size="sm" onClick={refetch}>
Refresh
</Button>
</div>
{/* Device List */}
<div className="divide-y rounded-lg border">
{devices.slice(0, 5).map((device, i) => (
<div key={device.deviceId + i} className="p-3 text-sm">
<div className="truncate font-medium">
{device.label || (
<span className="text-muted-foreground italic">
Hidden (permission required)
</span>
)}
</div>
<div className="text-muted-foreground mt-0.5 text-xs capitalize">
{device.kind
.replace("input", " Input")
.replace("output", " Output")}
</div>
</div>
))}
</div>
</div>
);
};
Filtering Devices
Often you don't need all devices - just cameras, or just microphones. Use the kind option to filter the device list. This example shows only video input devices (cameras).
You can pass a single kind ('videoinput') or an array of kinds (['audioinput', 'audiooutput']) to filter multiple types at once.
"use client";
import { useMediaDevices } from "@repo/hooks/device/use-media-devices";
import { Button } from "@repo/ui/components/button";
import { Camera, RefreshCw } from "lucide-react";
/* FILTERED USAGE - Cameras Only */
export const Example2 = () => {
const { devices: cameras, isLoading, refetch } =
useMediaDevices({ kind: "videoinput" });
return (
<div className="w-full max-w-sm rounded-lg border">
<div className="bg-muted/50 flex items-center justify-between border-b px-4 py-3">
<div className="flex items-center gap-2 font-medium">
<Camera className="h-4 w-4" />
<span>Cameras</span>
</div>
<Button
variant="ghost"
size="icon"
className="h-8 w-8"
onClick={() => refetch()}
title="Refresh device list"
>
<RefreshCw className="h-3.5 w-3.5" />
</Button>
</div>
<div className="p-4">
{isLoading ? (
<div className="text-muted-foreground text-sm">
Scanning...
</div>
) : cameras.length === 0 ? (
<div className="text-muted-foreground py-8 text-center text-sm">
No cameras found
</div>
) : (
<ul className="space-y-4">
{cameras.map((camera, i) => (
<li
key={camera.deviceId || i}
className="flex gap-3"
>
<div className="bg-muted flex h-10 w-10 shrink-0 items-center justify-center rounded">
<Camera className="text-muted-foreground h-5 w-5" />
</div>
<div className="min-w-0 flex-1">
<div className="truncate text-sm font-medium">
{camera.label || `Camera ${i + 1}`}
</div>
<div className="text-muted-foreground truncate text-xs">
{camera.deviceId
? `ID: ${camera.deviceId}`
: "System Default"}
</div>
</div>
</li>
))}
</ul>
)}
</div>
{cameras.length > 0 && cameras.some((c) => !c.label) && (
<div className="border-t bg-yellow-500/10 px-4 py-3 text-xs text-yellow-600 dark:text-yellow-400">
Camera labels may be hidden until you grant camera
permissions to this site.
</div>
)}
</div>
);
};
List All Devices
This example shows how to display all connected devices grouped by their type: microphones (audio input), speakers (audio output), and cameras (video input).
Each device includes a deviceId (unique identifier), kind (type of device), label (human-readable name), and groupId (devices in the same group belong to the same physical hardware).
"use client";
import { useMediaDevices } from "@repo/hooks/device/use-media-devices";
import { Mic, Speaker, Video } from "lucide-react";
/* LIST ALL DEVICES - Grouped by Type */
export const Example3 = () => {
const { devices, isLoading, isSupported } = useMediaDevices();
if (!isSupported) {
return (
<div className="rounded-md border border-yellow-500/50 bg-yellow-500/10 p-4">
<p className="text-sm text-yellow-600 dark:text-yellow-400">
Media Devices API is not supported in this browser
</p>
</div>
);
}
if (isLoading) {
return (
<span className="text-muted-foreground text-sm">
Loading devices...
</span>
);
}
const grouped = devices.reduce(
(acc, device) => {
if (!acc[device.kind]) acc[device.kind] = [];
acc[device.kind]?.push(device);
return acc;
},
{} as Record<string, MediaDeviceInfo[]>,
);
return (
<div className="flex w-full max-w-sm flex-col gap-6">
<DeviceGroup
title="Audio Input"
icon={<Mic className="h-4 w-4" />}
devices={grouped["audioinput"]}
/>
<DeviceGroup
title="Audio Output"
icon={<Speaker className="h-4 w-4" />}
devices={grouped["audiooutput"]}
/>
<DeviceGroup
title="Video Input"
icon={<Video className="h-4 w-4" />}
devices={grouped["videoinput"]}
/>
</div>
);
};
const DeviceGroup = ({
title,
icon,
devices,
}: {
title: string;
icon: React.ReactNode;
devices?: MediaDeviceInfo[];
}) => {
if (!devices?.length) return null;
return (
<div className="flex flex-col gap-2">
<div className="text-foreground flex items-center gap-2 text-sm font-medium">
{icon}
<span>{title}</span>
<span className="text-muted-foreground ml-auto text-xs">
{devices.length}
</span>
</div>
<div className="divide-y rounded-lg border">
{devices.map((device, i) => (
<div key={device.deviceId || i} className="p-3 text-sm">
<div className="truncate font-medium">
{device.label || `Unknown Device ${i + 1}`}
</div>
<div className="text-muted-foreground mt-0.5 truncate text-xs">
ID:{" "}
{device.deviceId
? device.deviceId.slice(0, 8) + "..."
: "N/A"}
</div>
</div>
))}
</div>
</div>
);
};
Device Selector
Build dropdown menus that let users choose their preferred microphone, speaker, and camera. This is a common pattern in video conferencing apps where users need to switch between devices.
The deviceId from the selected device can be passed to getUserMedia() constraints to use that specific device for audio/video capture.
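Once a device is chosen, its deviceId can be fed into getUserMedia() constraints. A minimal sketch, assuming an illustrative startCapture helper whose arguments mirror the selection state in the example below:

```tsx
// Capture from the exact devices the user picked; `exact` makes the browser
// fail rather than silently fall back to a different device.
const startCapture = async (selectedMic: string, selectedCamera: string) => {
  return navigator.mediaDevices.getUserMedia({
    audio: { deviceId: { exact: selectedMic } },
    video: { deviceId: { exact: selectedCamera } },
  });
};
```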
"use client";
import { useMediaDevices } from "@repo/hooks/device/use-media-devices";
import { useState } from "react";
import { Mic, Speaker, Camera, ChevronDown } from "lucide-react";
/* DEVICE SELECTOR - Dropdown Selects */
export const Example4 = () => {
const { devices, isLoading, isSupported } = useMediaDevices();
const [selectedMic, setSelectedMic] = useState<string>("");
const [selectedSpeaker, setSelectedSpeaker] = useState<string>("");
const [selectedCamera, setSelectedCamera] = useState<string>("");
if (!isSupported) {
return (
<div className="rounded-md border border-yellow-500/50 bg-yellow-500/10 p-4">
<p className="text-sm text-yellow-600 dark:text-yellow-400">
Media Devices API is not supported
</p>
</div>
);
}
if (isLoading) {
return (
<span className="text-muted-foreground text-sm">
Loading devices...
</span>
);
}
const microphones = devices.filter((d) => d.kind === "audioinput");
const speakers = devices.filter((d) => d.kind === "audiooutput");
const cameras = devices.filter((d) => d.kind === "videoinput");
return (
<div className="flex w-full max-w-sm flex-col gap-4">
{/* Microphone Select */}
<DeviceSelect
icon={<Mic className="h-4 w-4" />}
label="Microphone"
devices={microphones}
value={selectedMic}
onChange={setSelectedMic}
/>
{/* Speaker Select */}
<DeviceSelect
icon={<Speaker className="h-4 w-4" />}
label="Speaker"
devices={speakers}
value={selectedSpeaker}
onChange={setSelectedSpeaker}
/>
{/* Camera Select */}
<DeviceSelect
icon={<Camera className="h-4 w-4" />}
label="Camera"
devices={cameras}
value={selectedCamera}
onChange={setSelectedCamera}
/>
{/* Selection Summary */}
<div className="bg-muted/50 mt-2 rounded-lg border p-4">
<div className="text-muted-foreground mb-2 text-xs font-medium uppercase tracking-wide">
Selected Devices
</div>
<div className="space-y-1 text-sm">
<DeviceSelection
label="Mic"
deviceId={selectedMic}
devices={microphones}
/>
<DeviceSelection
label="Speaker"
deviceId={selectedSpeaker}
devices={speakers}
/>
<DeviceSelection
label="Camera"
deviceId={selectedCamera}
devices={cameras}
/>
</div>
</div>
</div>
);
};
const DeviceSelect = ({
icon,
label,
devices,
value,
onChange,
}: {
icon: React.ReactNode;
label: string;
devices: MediaDeviceInfo[];
value: string;
onChange: (value: string) => void;
}) => {
return (
<div className="flex flex-col gap-1.5">
<label className="text-muted-foreground flex items-center gap-2 text-sm font-medium">
{icon}
{label}
<span className="bg-muted ml-auto rounded-full px-2 py-0.5 text-xs">
{devices.length}
</span>
</label>
<div className="relative">
<select
value={value}
onChange={(e) => onChange(e.target.value)}
className="border-input bg-background focus:ring-ring w-full appearance-none rounded-md border py-2 pl-3 pr-10 text-sm focus:outline-none focus:ring-2"
>
<option value="">Select {label.toLowerCase()}...</option>
{devices.map((device, i) => (
<option
key={device.deviceId || i}
value={device.deviceId}
>
{device.label || `${label} ${i + 1}`}
</option>
))}
</select>
<ChevronDown className="text-muted-foreground pointer-events-none absolute right-3 top-1/2 h-4 w-4 -translate-y-1/2" />
</div>
</div>
);
};
const DeviceSelection = ({
label,
deviceId,
devices,
}: {
label: string;
deviceId: string;
devices: MediaDeviceInfo[];
}) => {
const device = devices.find((d) => d.deviceId === deviceId);
return (
<div className="flex items-center gap-2">
<span className="text-muted-foreground w-16">{label}:</span>
<span className="truncate font-medium">
{device?.label ||
device?.deviceId?.slice(0, 12) ||
"None selected"}
</span>
</div>
);
};
Audio Level Meter
An advanced example that combines device selection with real-time audio visualization. Select a microphone and see a live audio level meter that responds to your voice.
This uses the Web Audio API to analyze the audio stream and display the volume level - useful for helping users test their microphone before a call.
"use client";
import { useMediaDevices } from "@repo/hooks/device/use-media-devices";
import { Button } from "@repo/ui/components/button";
import { useState, useEffect, useRef, useCallback } from "react";
import { Mic, MicOff, ChevronDown } from "lucide-react";
/* MICROPHONE WITH AUDIO LEVEL - Real-time visualization */
export const Example5 = () => {
const {
devices,
isLoading,
isSupported,
hasPermission,
requestPermission,
} = useMediaDevices({ kind: "audioinput" });
const [selectedMic, setSelectedMic] = useState<string>("");
const [isListening, setIsListening] = useState(false);
const [audioLevel, setAudioLevel] = useState(0);
const streamRef = useRef<MediaStream | null>(null);
const analyserRef = useRef<AnalyserNode | null>(null);
const animationRef = useRef<number | null>(null);
const audioContextRef = useRef<AudioContext | null>(null);
const stopListening = useCallback(() => {
if (animationRef.current) {
cancelAnimationFrame(animationRef.current);
animationRef.current = null;
}
if (streamRef.current) {
streamRef.current.getTracks().forEach((track) => track.stop());
streamRef.current = null;
}
if (audioContextRef.current) {
audioContextRef.current.close();
audioContextRef.current = null;
}
analyserRef.current = null;
setIsListening(false);
setAudioLevel(0);
}, []);
const startListening = async () => {
if (!selectedMic) return;
try {
const stream = await navigator.mediaDevices.getUserMedia({
audio: { deviceId: { exact: selectedMic } },
});
streamRef.current = stream;
const audioContext = new AudioContext();
audioContextRef.current = audioContext;
const source = audioContext.createMediaStreamSource(stream);
const analyser = audioContext.createAnalyser();
analyser.fftSize = 256;
source.connect(analyser);
analyserRef.current = analyser;
setIsListening(true);
// Start animation loop
const dataArray = new Uint8Array(analyser.frequencyBinCount);
const updateLevel = () => {
if (!analyserRef.current) return;
analyserRef.current.getByteFrequencyData(dataArray);
const average =
dataArray.reduce((a, b) => a + b, 0) / dataArray.length;
setAudioLevel(Math.min(100, (average / 128) * 100));
animationRef.current = requestAnimationFrame(updateLevel);
};
updateLevel();
} catch (err) {
console.error("Failed to access microphone:", err);
}
};
// Cleanup on unmount
useEffect(() => {
return () => {
stopListening();
};
}, [stopListening]);
// Stop listening when mic changes
useEffect(() => {
stopListening();
}, [selectedMic, stopListening]);
if (!isSupported) {
return (
<div className="rounded-md border border-yellow-500/50 bg-yellow-500/10 p-4">
<p className="text-sm text-yellow-600 dark:text-yellow-400">
Media Devices API is not supported
</p>
</div>
);
}
if (isLoading) {
return (
<span className="text-muted-foreground text-sm">
Loading microphones...
</span>
);
}
if (!hasPermission) {
return (
<div className="flex w-full max-w-sm flex-col items-center gap-4 rounded-lg border p-6">
<MicOff className="text-muted-foreground h-8 w-8" />
<p className="text-muted-foreground text-center text-sm">
Microphone access is required for this demo
</p>
<Button
onClick={() =>
requestPermission({ audio: true, video: false })
}
>
Grant Permission
</Button>
</div>
);
}
return (
<div className="flex w-full max-w-sm flex-col gap-4">
{/* Microphone Select */}
<div className="flex flex-col gap-1.5">
<label className="text-muted-foreground flex items-center gap-2 text-sm font-medium">
<Mic className="h-4 w-4" />
Select Microphone
</label>
<div className="relative">
<select
value={selectedMic}
onChange={(e) => setSelectedMic(e.target.value)}
className="border-input bg-background focus:ring-ring w-full appearance-none rounded-md border py-2 pl-3 pr-10 text-sm focus:outline-none focus:ring-2"
>
<option value="">Choose a microphone...</option>
{devices.map((device, i) => (
<option
key={device.deviceId || i}
value={device.deviceId}
>
{device.label || `Microphone ${i + 1}`}
</option>
))}
</select>
<ChevronDown className="text-muted-foreground pointer-events-none absolute right-3 top-1/2 h-4 w-4 -translate-y-1/2" />
</div>
</div>
{/* Audio Level Meter */}
<div className="rounded-lg border p-4">
<div className="mb-3 flex items-center justify-between">
<span className="text-sm font-medium">Audio Level</span>
<span className="text-muted-foreground text-sm">
{Math.round(audioLevel)}%
</span>
</div>
{/* Level Bar */}
<div className="bg-muted h-4 overflow-hidden rounded-full">
<div
className="h-full transition-all duration-75"
style={{
width: `${audioLevel}%`,
backgroundColor:
audioLevel > 80
? "#ef4444"
: audioLevel > 50
? "#f59e0b"
: "#22c55e",
}}
/>
</div>
{/* Level Indicators */}
<div className="mt-2 flex h-3 justify-between">
{Array.from({ length: 20 }).map((_, i) => (
<div
key={i}
className={`w-1 rounded-full transition-colors ${
audioLevel > i * 5
? i >= 16
? "bg-red-500"
: i >= 10
? "bg-yellow-500"
: "bg-green-500"
: "bg-muted"
}`}
/>
))}
</div>
</div>
{/* Control Button */}
<Button
onClick={isListening ? stopListening : startListening}
isDisabled={!selectedMic}
variant={isListening ? "destructive" : "default"}
className="gap-2"
>
{isListening ? (
<>
<MicOff className="h-4 w-4" />
Stop Listening
</>
) : (
<>
<Mic className="h-4 w-4" />
Start Listening
</>
)}
</Button>
{!selectedMic && (
<p className="text-muted-foreground text-center text-xs">
Select a microphone to test audio levels
</p>
)}
</div>
);
};
API Reference
Hook Signature
function useMediaDevices(
options?: UseMediaDevicesOptions,
): UseMediaDevicesReturn;
Options
Configure the hook behavior by passing an options object:
| Property | Type | Default | Description |
|---|---|---|---|
| kind | MediaDeviceKind \| MediaDeviceKind[] | - | Filter devices to specific types. Use 'audioinput', 'audiooutput', or 'videoinput' |
| requestPermissionOnMount | boolean | false | When true, automatically prompts for camera/microphone permission when the component mounts |
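A short sketch combining both options, including the array form of kind (written as a fragment in the style of the hook's own JSDoc examples):

```tsx
// Only audio devices, and prompt for microphone/camera access on mount so
// device labels are available immediately.
const { devices } = useMediaDevices({
  kind: ["audioinput", "audiooutput"],
  requestPermissionOnMount: true,
});
```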
Return Value
The hook returns an object with the following properties:
| Property | Type | Description |
|---|---|---|
| devices | MediaDeviceInfo[] | Array of connected devices. If kind option is set, this is filtered to only matching device types |
| isLoading | boolean | true while the initial device enumeration is in progress |
| error | Error \| null | Contains an Error object if device enumeration failed, otherwise null |
| isSupported | boolean | false if the MediaDevices API is not available (e.g., older browsers or non-HTTPS contexts) |
| hasPermission | boolean | true if at least one device has a non-empty label, indicating the user has granted media access |
| refetch | () => Promise<void> | Call this function to manually refresh the device list. Useful after programmatically changing devices |
| requestPermission | (constraints?) => Promise<boolean> | Prompts the user for media access. Pass { audio: true } or { video: true } to request specific access. Returns true if permission was granted |
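The error value is not exercised by the demos above; a minimal sketch combining it with refetch for a retry flow (DevicePanel is an illustrative name):

```tsx
"use client";
import { useMediaDevices } from "@repo/hooks/device/use-media-devices";

export const DevicePanel = () => {
  const { devices, error, refetch } = useMediaDevices();

  if (error) {
    // enumerateDevices() failed; surface the message and offer a manual retry
    return (
      <div>
        <p>Could not list devices: {error.message}</p>
        <button onClick={() => refetch()}>Try again</button>
      </div>
    );
  }

  return <p>{devices.length} devices found</p>;
};
```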
MediaDeviceInfo
Each device in the devices array is a standard MediaDeviceInfo object from the browser:
| Property | Type | Description |
|---|---|---|
| deviceId | string | A unique identifier for the device. Use this when specifying a device in getUserMedia() constraints |
| kind | string | The type of device: 'audioinput' (microphone), 'audiooutput' (speaker), or 'videoinput' (camera) |
| label | string | Human-readable name like "MacBook Pro Microphone". Empty string until permission is granted |
| groupId | string | Devices with the same groupId belong to the same physical device (e.g., built-in mic and speakers on a laptop) |
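As one illustration of groupId, devices can be bucketed by physical hardware; a sketch, with a made-up helper name:

```tsx
// Group MediaDeviceInfo entries that share a groupId, i.e. that belong to the
// same physical hardware (e.g. a webcam exposing both a camera and a microphone).
const groupByHardware = (devices: MediaDeviceInfo[]) => {
  const groups = new Map<string, MediaDeviceInfo[]>();
  for (const device of devices) {
    const existing = groups.get(device.groupId) ?? [];
    groups.set(device.groupId, [...existing, device]);
  }
  return groups;
};
```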
Hook Source Code
import { useState, useEffect, useCallback, useMemo } from "react";
/**
* Device kind types for filtering
*/
export type MediaDeviceKind = "audioinput" | "audiooutput" | "videoinput";
/**
* Options for the useMediaDevices hook
*/
export interface UseMediaDevicesOptions {
/** Filter to specific device kinds. Can be a single kind or array of kinds. */
kind?: MediaDeviceKind | MediaDeviceKind[];
/** Whether to automatically request permissions on mount (default: false) */
requestPermissionOnMount?: boolean;
}
/**
* Return type for the useMediaDevices hook
*/
export interface UseMediaDevicesReturn {
/** Array of connected media devices */
devices: MediaDeviceInfo[];
/** Whether the device list is currently loading */
isLoading: boolean;
/** Error object if device enumeration failed */
error: Error | null;
/** Whether the MediaDevices API is supported */
isSupported: boolean;
/** Manually refresh the device list */
refetch: () => Promise<void>;
/** Request media permissions to get device labels */
requestPermission: (
constraints?: MediaStreamConstraints,
) => Promise<boolean>;
/** Whether permission has been granted (devices have labels) */
hasPermission: boolean;
}
/**
* A React hook that provides a list of connected media input and output devices
* (cameras, microphones, speakers) using the MediaDevices API.
*
* @param options - Configuration options for the hook
* @returns UseMediaDevicesReturn object with devices, states, and utility functions
*
* @example
* ```tsx
* // Basic usage - list all devices
* const { devices, isLoading, isSupported } = useMediaDevices();
*
* // Filter to specific device types
* const { devices: cameras } = useMediaDevices({ kind: 'videoinput' });
*
* // Request permissions to get device labels
* const { devices, requestPermission, hasPermission } = useMediaDevices();
* await requestPermission({ audio: true });
* ```
*/
export function useMediaDevices(
options: UseMediaDevicesOptions = {},
): UseMediaDevicesReturn {
const { kind, requestPermissionOnMount = false } = options;
const [allDevices, setAllDevices] = useState<MediaDeviceInfo[]>([]);
const [isLoading, setIsLoading] = useState(true);
const [error, setError] = useState<Error | null>(null);
const [hasPermission, setHasPermission] = useState(false);
// Check if API is supported
const isSupported =
typeof navigator !== "undefined" && !!navigator.mediaDevices;
// Normalize kind filter to array
const kindFilter = useMemo(() => {
if (!kind) return null;
return Array.isArray(kind) ? kind : [kind];
}, [kind]);
// Filter devices based on kind option
const devices = useMemo(() => {
if (!kindFilter) return allDevices;
return allDevices.filter((device) =>
kindFilter.includes(device.kind as MediaDeviceKind),
);
}, [allDevices, kindFilter]);
// Fetch devices from the API
const fetchDevices = useCallback(async () => {
if (!isSupported) {
setIsLoading(false);
return;
}
try {
setError(null);
const deviceInfos = await navigator.mediaDevices.enumerateDevices();
setAllDevices(deviceInfos);
// Check if we have permission (devices have labels)
const hasLabels = deviceInfos.some(
(d) => d.label && d.label.length > 0,
);
setHasPermission(hasLabels);
} catch (err) {
setError(
err instanceof Error
? err
: new Error("Failed to enumerate devices"),
);
} finally {
setIsLoading(false);
}
}, [isSupported]);
// Refetch function exposed to consumers
const refetch = useCallback(async () => {
setIsLoading(true);
await fetchDevices();
}, [fetchDevices]);
// Request permission to access media devices
const requestPermission = useCallback(
async (
constraints: MediaStreamConstraints = { audio: true, video: true },
): Promise<boolean> => {
if (!isSupported) return false;
try {
const stream =
await navigator.mediaDevices.getUserMedia(constraints);
// Stop all tracks immediately - we just needed permission
stream.getTracks().forEach((track) => track.stop());
// Refetch devices to get labels
await fetchDevices();
return true;
} catch {
return false;
}
},
[isSupported, fetchDevices],
);
// Initial fetch and device change listener
useEffect(() => {
if (!isSupported) {
setIsLoading(false);
return;
}
let mounted = true;
const initFetch = async () => {
await fetchDevices();
// Auto-request permission if option is set
if (requestPermissionOnMount && mounted) {
await requestPermission();
}
};
initFetch();
// Listen for device changes
const handleDeviceChange = () => {
if (mounted) {
fetchDevices();
}
};
navigator.mediaDevices.addEventListener(
"devicechange",
handleDeviceChange,
);
return () => {
mounted = false;
navigator.mediaDevices.removeEventListener(
"devicechange",
handleDeviceChange,
);
};
}, [
isSupported,
fetchDevices,
requestPermissionOnMount,
requestPermission,
]);
return {
devices,
isLoading,
error,
isSupported,
refetch,
requestPermission,
hasPermission,
};
}
export default useMediaDevices;