DreamLake

Examples

Load & Inspect Video

Basic loading and properties

import dreamlake as dl
 
# Load by resource ID or BSS URI
video = dl.load_video("v-BV1bW411n7fY9x01")
 
# Inspect properties (no download yet — metadata only)
print(video.fps)        # 30.0
print(video.duration)   # 60.0
print(video.frames)     # 1800
print(video.width)      # 1920
print(video.height)     # 1080

Slicing — float is time, int is frame

# Time-based slicing (seconds)
clip = video[10.0:20.0]       # 10s clip
print(clip.st)                 # 10.0
print(clip.duration)           # 10.0
 
# Frame-based slicing
clip = video[300:600]          # frame 300 to 600
print(clip.frames)             # 300
 
# Single frame — returns a single-frame Video (lazy)
frame_video = video[42]
print(frame_video.duration)    # 0.033 (1/30 fps)
frame_video.image              # triggers download → PIL Image
 
# Sub-slicing is relative to the parent slice
clip = video[10.0:20.0]
sub = clip[2.0:5.0]            # → Video(st=12.0, et=15.0)
sub_sub = sub[0.5:1.5]         # → Video(st=12.5, et=13.5)

Frame access

# Single frame as PIL Image
frame = video[0].image
frame.save("first_frame.png")
 
# Middle frame shortcut
thumb = video.thumbnail
thumb.save("thumbnail.png")
 
# All frames as numpy array
clip = video[0.0:2.0]
arr = clip.numpy()             # → (60, 1080, 1920, 3) for 2s at 30fps
 
# As torch tensor (requires torch installed)
tensor = clip.tensor()         # → (60, 3, 1080, 1920)
 
# Iterate frames one by one
for frame in clip.iterator():
    # frame is a PIL Image
    pass

Chunking and fancy indexing

# Split a clip into 200ms segments
clip = video[0.0:2.0]
chunks = clip.chunk(0.200)     # → VideoArray of 10 chunks
print(len(chunks))             # 10
 
# Index into the array
first_chunk = chunks[0]        # → Video (0.0–0.2s)
last_three = chunks[-3:]       # → VideoArray of 3
 
# Fancy indexing: first frame of every chunk
first_frames = chunks[:, 0]    # → VideoArray of 10 single-frame Videos
 
# Cast to numpy — batched
arr = chunks[:, 0].numpy()
print(arr.shape)               # (10, H, W, 3)
 
# Cast to tensor — feed directly to a model
# emb = model(chunks[:, 0].tensor().to('cuda'))
 
# Full batch: all frames from all chunks
batch = chunks.numpy()
print(batch.shape)             # (10, 6, H, W, 3)
 
# Snap chunks to keyframe boundaries
iframes = clip.chunk(0.200, by_iframe=True)