import os
import shutil
import re
from collections import defaultdict


def get_all_source_folders(scrape_dir):
    """
    Recursively scrapes a directory to get all folders.
    Returns a dictionary mapping: folder_name -> list of full_source_paths

    A name can map to multiple paths when the same folder name appears at
    several places in the tree; callers must handle that ambiguity.
    """
    source_folders = defaultdict(list)
    if not os.path.exists(scrape_dir):
        print(f"Error: Scrape directory '{scrape_dir}' does not exist.")
        return source_folders

    # Walk through the entire directory tree to map every available source folder
    for root, dirs, _files in os.walk(scrape_dir):
        for d in dirs:
            source_folders[d].append(os.path.join(root, d))
    return source_folders


def get_subfolder_names(folder_path):
    """Returns a list of subfolder names immediately inside a given directory."""
    return [d for d in os.listdir(folder_path)
            if os.path.isdir(os.path.join(folder_path, d))]


def process_folders(scrape_dir, dest_root_path, dry_run=True):
    """
    Map second-level destination folders to source folders by name and,
    unless dry_run is True, copy one file per hour from each matched
    source subfolder into the corresponding destination subfolder.

    Parameters:
        scrape_dir: root of the tree to scan for candidate source folders.
        dest_root_path: root whose two-level layout (level1/level2) defines
            which folder names to look up.
        dry_run: when True, only print the mapping ([MAPPED]/[UNMATCHED]/
            [CONFLICT]); no files are copied.

    Side effects: prints progress to stdout; in execution mode creates
    destination subfolders and copies files with shutil.copy2.
    """
    # 1. Scrape all folders from the source directory to build our lookup map
    source_folders = get_all_source_folders(scrape_dir)
    if not source_folders:
        print("No source folders found.")
        return

    if not os.path.exists(dest_root_path):
        print(f"Error: Destination root '{dest_root_path}' does not exist.")
        return

    print("=" * 60)
    print(f"MODE: {'DRY RUN (Mapping Only)' if dry_run else 'EXECUTION (Copying Files)'}")
    print("=" * 60)

    # Pattern to extract the date and hour: YYYY-MM-DD_HH
    # (filenames are assumed to start with YYYY-MM-DD_HH-MM-SS)
    file_pattern = re.compile(r'^(\d{4}-\d{2}-\d{2}_\d{2})-\d{2}-\d{2}')

    # 2. Iterate exactly to the second level of the destination directory
    for level1 in os.listdir(dest_root_path):
        level1_path = os.path.join(dest_root_path, level1)
        if not os.path.isdir(level1_path):
            continue

        for level2 in os.listdir(level1_path):
            dest_path = os.path.join(level1_path, level2)
            if not os.path.isdir(dest_path):
                continue

            folder_name = level2

            # 3. Look up the destination folder in our source map
            src_paths = source_folders.get(folder_name, [])

            # 4. Handle matching conditions
            if not src_paths:
                if dry_run:
                    print(f"[UNMATCHED] Dest: '{folder_name}' -> No source found.")
                continue

            if len(src_paths) > 1:
                # Ambiguous name: skip rather than guess which source is right.
                print(f"[CONFLICT] Dest: '{folder_name}' -> Matches {len(src_paths)} sources. Skipping.")
                if dry_run:
                    for p in src_paths:
                        print(f" - {p}")
                continue

            # We have exactly one match
            src_path = src_paths[0]

            if dry_run:
                print(f"[MAPPED] Dest: '{folder_name}' \n <- Source: '{src_path}'")
                continue  # Skip the actual file processing during a dry run

            # ---------------------------------------------------------
            # The following code only runs if dry_run=False
            # ---------------------------------------------------------

            # 5. Get the subfolders (e.g., 'camera') from the source
            src_subfolders = get_subfolder_names(src_path)

            # 6. Process files INSIDE the source subfolders
            for subfolder in src_subfolders:
                src_sub_path = os.path.join(src_path, subfolder)
                dest_sub_path = os.path.join(dest_path, subfolder)

                # Ensure the camera folder exists in the destination
                os.makedirs(dest_sub_path, exist_ok=True)

                # Get files and sort chronologically (lexicographic sort works
                # because names start with a zero-padded timestamp)
                files = [f for f in os.listdir(src_sub_path)
                         if os.path.isfile(os.path.join(src_sub_path, f))]
                files.sort()

                # Keep only the FIRST file seen for each date+hour key.
                selected_files = {}
                for file_name in files:
                    match = file_pattern.search(file_name)
                    if match:
                        hour_key = match.group(1)
                        if hour_key not in selected_files:
                            selected_files[hour_key] = file_name

                # 7. Copy the filtered files while checking for duplicates
                copied_count = 0
                for file_name in selected_files.values():
                    src_file = os.path.join(src_sub_path, file_name)
                    dest_file = os.path.join(dest_sub_path, file_name)
                    if os.path.exists(dest_file):
                        continue  # already copied on a previous run
                    shutil.copy2(src_file, dest_file)
                    copied_count += 1

                if copied_count > 0:
                    print(f"Copied {copied_count} new files into '{folder_name}/{subfolder}'.")

    print("\nProcess completed.")


if __name__ == "__main__":
    # Define your paths here
    SCRAPE_DIRECTORY = "/media/ngaggion/DATA/Raices/Datasets"
    DESTINATION_ROOT = "/media/ngaggion/DATA/Raices/NewData/ArabidopsisDataset"

    # Run first with dry_run=True to see the mappings.
    # Change to False when you are ready to copy files.
    process_folders(SCRAPE_DIRECTORY, DESTINATION_ROOT, dry_run=False)