import pandas as pd
import numpy as np
import networkx as nx
from typing import List, Dict, Optional
import json
import random
from collections import defaultdict, Counter
import multiprocessing as mp
import time
import requests
from time import sleep
import os
import fcntl  # POSIX-only advisory file locking; see _safe_merge_and_save_cache
import shutil


# Tunables: cap how many center proteins are processed, and middle-truncate
# protein sequences longer than this when formatting prompt context.
MAX_PROTEINS_TO_PROCESS = 1000
TRUNCATE_MIDDLE_ABOVE_THIS = 120


class ProteinDataEnricher:
    """
    Fetches and caches protein sequence and structural data, backed by a
    shared persistent cache on disk that is guarded by file locks.
    """

    def __init__(self, cache_dir: str = "protein_cache"):
        self.cache_dir = cache_dir
        self.uniprot_cache = {}
        self.alphafold_cache = {}
        self.session = requests.Session()
        self.session.headers.update({'User-Agent': 'ProteinNetworkDataset/1.0'})

        os.makedirs(cache_dir, exist_ok=True)

        self._load_caches()

    def _get_cache_files(self):
        """Return the (uniprot, alphafold) cache file paths."""
        uniprot_cache_file = os.path.join(self.cache_dir, "uniprot_cache.json")
        alphafold_cache_file = os.path.join(self.cache_dir, "alphafold_cache.json")
        return uniprot_cache_file, alphafold_cache_file

    def _load_caches(self):
        """Load existing caches from disk, tolerating missing or corrupt files."""
        uniprot_cache_file, alphafold_cache_file = self._get_cache_files()

        if os.path.exists(uniprot_cache_file):
            try:
                with open(uniprot_cache_file, 'r') as f:
                    self.uniprot_cache = json.load(f)
                print(f"Loaded UniProt cache with {len(self.uniprot_cache)} entries")
            except Exception as e:
                print(f"Error loading UniProt cache: {e}")
                self.uniprot_cache = {}

        if os.path.exists(alphafold_cache_file):
            try:
                with open(alphafold_cache_file, 'r') as f:
                    self.alphafold_cache = json.load(f)
                print(f"Loaded AlphaFold cache with {len(self.alphafold_cache)} entries")
            except Exception as e:
                print(f"Error loading AlphaFold cache: {e}")
                self.alphafold_cache = {}

    def _safe_merge_and_save_cache(self, cache_type: str, new_data: Dict):
        """
        Safely merge new cache data into the on-disk cache under an exclusive
        file lock (fcntl.flock, so POSIX-only), then write atomically via a
        temp file and rename.
        """
        if cache_type == "uniprot":
            cache_file = os.path.join(self.cache_dir, "uniprot_cache.json")
        elif cache_type == "alphafold":
            cache_file = os.path.join(self.cache_dir, "alphafold_cache.json")
        else:
            raise ValueError(f"Unknown cache type: {cache_type}")

        lock_file = cache_file + ".lock"

        try:
            # Hold an exclusive lock for the whole read-merge-write cycle so
            # concurrent workers cannot interleave partial updates.
            with open(lock_file, 'w') as lock:
                fcntl.flock(lock.fileno(), fcntl.LOCK_EX)

                # Re-read the cache under the lock: another process may have
                # written since this process loaded its in-memory copy.
                current_cache = {}
                if os.path.exists(cache_file):
                    try:
                        with open(cache_file, 'r') as f:
                            current_cache = json.load(f)
                    except Exception as e:
                        print(f"Error loading {cache_type} cache for merge: {e}")
                        current_cache = {}

                current_cache.update(new_data)

                # Write to a temp file, then rename over the original so
                # readers never see a half-written JSON file.
                temp_file = cache_file + ".tmp"
                with open(temp_file, 'w') as f:
                    json.dump(current_cache, f, indent=2)

                shutil.move(temp_file, cache_file)

                print(f"Merged and saved {cache_type} cache: {len(current_cache)} total entries")

        except Exception as e:
            print(f"Error saving {cache_type} cache: {e}")
        finally:
            # Best-effort cleanup of the lock file.
            try:
                os.remove(lock_file)
            except OSError:
                pass

    def get_uniprot_info(self, protein_name: str) -> Optional[Dict]:
        """
        Fetch UniProt information for a protein by gene symbol or name.
        """
        if protein_name in self.uniprot_cache:
            return self.uniprot_cache[protein_name]

        try:
            search_url = "https://rest.uniprot.org/uniprotkb/search"
            params = {
                'query': f'gene_exact:{protein_name} OR protein_name:{protein_name}',
                'format': 'json',
                'size': 1
            }

            response = self.session.get(search_url, params=params, timeout=10)
            if response.status_code == 200:
                data = response.json()
                if data.get('results'):
                    entry = data['results'][0]
                    uniprot_info = {
                        'uniprot_id': entry.get('primaryAccession'),
                        'protein_name': entry.get('proteinDescription', {}).get('recommendedName', {}).get('fullName', {}).get('value'),
                        'gene_names': [gn.get('geneName', {}).get('value') for gn in entry.get('genes', []) if gn.get('geneName')],
                        'organism': entry.get('organism', {}).get('scientificName'),
                        'sequence': entry.get('sequence', {}).get('value'),
                        'sequence_length': entry.get('sequence', {}).get('length'),
                        # Only the first comment block is inspected here.
                        'function': entry.get('comments', [{}])[0].get('texts', [{}])[0].get('value') if entry.get('comments') else None
                    }
                    self.uniprot_cache[protein_name] = uniprot_info
                    return uniprot_info

        except Exception as e:
            print(f"Error fetching UniProt data for {protein_name}: {e}")

        # Cache the miss so repeated lookups do not re-hit the API.
        self.uniprot_cache[protein_name] = None
        return None

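    # For reference, the parsing above assumes a UniProt search response
    # shaped roughly like the sketch below (abridged to the fields this code
    # actually reads; the concrete values are illustrative, e.g. human TP53):
    #
    #   {"results": [{
    #       "primaryAccession": "P04637",
    #       "proteinDescription": {"recommendedName": {"fullName": {"value": "Cellular tumor antigen p53"}}},
    #       "genes": [{"geneName": {"value": "TP53"}}],
    #       "organism": {"scientificName": "Homo sapiens"},
    #       "sequence": {"value": "MEEPQSDPSV...", "length": 393},
    #       "comments": [{"texts": [{"value": "..."}]}]
    #   }]}
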
    def get_alphafold_info(self, uniprot_id: str) -> Optional[Dict]:
        """
        Fetch AlphaFold structural information for a UniProt ID.
        """
        if not uniprot_id:
            return None

        cache_key = uniprot_id
        if cache_key in self.alphafold_cache:
            return self.alphafold_cache[cache_key]

        try:
            af_url = f"https://alphafold.ebi.ac.uk/api/prediction/{uniprot_id}"
            response = self.session.get(af_url, timeout=10)

            if response.status_code == 200:
                af_data = response.json()
                if af_data:
                    entry = af_data[0] if isinstance(af_data, list) else af_data

                    alphafold_info = {
                        'alphafold_id': entry.get('entryId'),
                        'model_confidence': entry.get('modelConfidence'),
                        'model_url': entry.get('pdbUrl'),
                        # Duplicate of 'model_confidence', kept so downstream
                        # consumers of either key keep working.
                        'confidence_score': entry.get('modelConfidence'),
                        'structure_coverage': f"{entry.get('uniprotStart', 1)}-{entry.get('uniprotEnd', 'end')}",
                        'has_structure': True
                    }
                    self.alphafold_cache[cache_key] = alphafold_info
                    return alphafold_info

        except Exception as e:
            print(f"Error fetching AlphaFold data for {uniprot_id}: {e}")

        # Cache the negative result so missing structures are not re-queried.
        self.alphafold_cache[cache_key] = {'has_structure': False}
        return {'has_structure': False}

    def get_protein_enriched_data(self, protein_name: str) -> Dict:
        """
        Get combined UniProt and AlphaFold data for a protein.
        """
        enriched_data = {'protein_name': protein_name}
        was_cached = protein_name in self.uniprot_cache

        uniprot_info = self.get_uniprot_info(protein_name)
        if uniprot_info:
            enriched_data.update(uniprot_info)

            # Only query AlphaFold once an accession has been resolved.
            if uniprot_info.get('uniprot_id'):
                alphafold_info = self.get_alphafold_info(uniprot_info['uniprot_id'])
                if alphafold_info:
                    enriched_data.update(alphafold_info)

        # Rate-limit only when the network was actually hit, not on cache hits.
        if not was_cached:
            sleep(0.1)
        return enriched_data

    def save_cache(self):
        """Persist in-memory caches using the safe merge strategy."""
        # Skip failed lookups so a transient API error is retried next run.
        uniprot_to_save = {k: v for k, v in self.uniprot_cache.items() if v is not None}
        alphafold_to_save = {k: v for k, v in self.alphafold_cache.items() if v is not None}

        if uniprot_to_save:
            self._safe_merge_and_save_cache("uniprot", uniprot_to_save)

        if alphafold_to_save:
            self._safe_merge_and_save_cache("alphafold", alphafold_to_save)


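# A minimal usage sketch (not invoked anywhere in this module): enrich a
# single protein end-to-end and persist the result. The symbol "TP53" is
# just an illustrative choice; any gene symbol should work.
def _demo_single_protein_enrichment(cache_dir: str = "protein_cache") -> Dict:
    enricher = ProteinDataEnricher(cache_dir)
    data = enricher.get_protein_enriched_data("TP53")  # hits UniProt + AlphaFold
    enricher.save_cache()  # merge into the shared on-disk cache under a file lock
    return data

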
def enrich_proteins_worker(args):
    """
    Worker function to enrich a batch of proteins with sequence/structure data.
    Each worker builds its own enricher (and requests.Session), since these
    objects are not shared across processes.
    """
    protein_names_batch, cache_dir, worker_id = args
    enricher = ProteinDataEnricher(cache_dir)
    enriched_proteins = {}

    print(f"Worker {worker_id}: Processing {len(protein_names_batch)} proteins")

    for i, protein_name in enumerate(protein_names_batch):
        enriched_proteins[protein_name] = enricher.get_protein_enriched_data(protein_name)

        # Periodic checkpoint so progress survives a crashed worker.
        if (i + 1) % 25 == 0:
            enricher.save_cache()
            print(f"Worker {worker_id}: Saved cache after {i + 1} proteins")

    enricher.save_cache()
    print(f"Worker {worker_id}: Completed batch, final cache save")

    return enriched_proteins


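# Hedged debugging sketch (hypothetical helper, not part of the pipeline):
# the worker takes a plain (batch, cache_dir, worker_id) tuple, so it can be
# run serially, which makes breakpoints and tracebacks easier to follow.
def _demo_serial_enrichment(proteins: List[str], cache_dir: str = "protein_cache") -> Dict:
    return enrich_proteins_worker((proteins, cache_dir, 0))

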
def extract_neighborhood_worker(args):
    """
    Worker function for parallel neighborhood extraction.
    """
    center_protein, interactions_by_protein, all_interactions, max_size = args

    # Collect the direct neighbors of the center protein.
    neighbors = set()
    for interaction in interactions_by_protein[center_protein]:
        other_protein = (interaction['protein_b'] if interaction['protein_a'] == center_protein
                         else interaction['protein_a'])
        neighbors.add(other_protein)

    # Downsample so the neighborhood (center + neighbors) stays within max_size.
    if len(neighbors) > max_size - 1:
        neighbors = set(random.sample(list(neighbors), max_size - 1))

    # Keep every interaction whose endpoints both fall inside the
    # neighborhood, including neighbor-neighbor edges.
    neighborhood_proteins = {center_protein} | neighbors
    neighborhood_interactions = []

    for interaction in all_interactions:
        if (interaction['protein_a'] in neighborhood_proteins and
                interaction['protein_b'] in neighborhood_proteins):
            neighborhood_interactions.append(interaction)

    return {
        'center_protein': center_protein,
        'proteins': sorted(list(neighborhood_proteins)),
        'interactions': neighborhood_interactions
    }


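# Toy example (hypothetical proteins A/B/C, not called anywhere) showing the
# worker's contract: it returns the center, the sorted member list, and the
# induced edge set.
def _demo_neighborhood_extraction() -> Dict:
    interactions = [
        {'protein_a': 'A', 'protein_b': 'B', 'interaction_type': 'physical'},
        {'protein_a': 'B', 'protein_b': 'C', 'interaction_type': 'physical'},
    ]
    by_protein = defaultdict(list)
    for inter in interactions:
        by_protein[inter['protein_a']].append(inter)
        by_protein[inter['protein_b']].append(inter)
    # Expected result: proteins ['A', 'B', 'C'] and both interactions, since
    # both edges lie entirely inside the neighborhood of center 'B'.
    return extract_neighborhood_worker(('B', by_protein, interactions, 10))

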
def create_conversation_examples_worker(args):
    """
    Worker function for parallel conversation creation.
    """
    neighborhood, enriched_proteins = args
    # An empty filename is fine here: the creator is only used for its
    # formatting methods, never to load BioGRID data.
    creator = ProteinNetworkConversationDataset("")
    creator.enriched_proteins = enriched_proteins
    conversations = []

    # One example of each task type per neighborhood.
    conversations.extend(creator.create_protein_list_to_network_examples(neighborhood))
    conversations.extend(creator.create_new_protein_prediction_examples(neighborhood))
    conversations.extend(creator.create_partial_network_completion_examples(neighborhood))
    conversations.extend(creator.create_network_property_examples(neighborhood))

    return conversations


class ProteinNetworkConversationDataset:
    def __init__(self, filename: str, cache_dir: str = "protein_cache"):
        """
        Create a conversational dataset for protein network prediction using
        diffusion models.
        """
        self.filename = filename
        self.cache_dir = cache_dir
        self.df = None
        self.graph = nx.Graph()
        self.protein_to_id = {}
        self.id_to_protein = {}
        self.interactions_by_protein = defaultdict(list)
        self.enriched_proteins = {}
        self.enricher = ProteinDataEnricher(cache_dir)

    def load_and_parse_biogrid(self):
        """Load and parse BioGRID tab3 data."""
        print("Loading BioGRID data...")
        self.df = pd.read_csv(
            self.filename,
            sep='\t',
            comment='#',  # also skips the '#BioGRID...' header row
            low_memory=False,
            dtype=str
        )

        # Positional columns in the tab3 format (assumed layout): official
        # symbols for interactors A and B, plus the experimental system.
        protein_a_col = 7
        protein_b_col = 8
        interaction_type_col = 11

        interactions = []
        protein_set = set()

        seen_interactions = set()

        for idx, row in self.df.iterrows():
            try:
                protein_a = str(row.iloc[protein_a_col]).strip()
                protein_b = str(row.iloc[protein_b_col]).strip()
                interaction_type = str(row.iloc[interaction_type_col]).strip()

                if protein_a in ['-', 'nan', ''] or protein_b in ['-', 'nan', '']:
                    continue

                # Skip self-interactions.
                if protein_a == protein_b:
                    continue

                # Deduplicate on the unordered protein pair plus the type.
                interaction_key = tuple(sorted([protein_a, protein_b]) + [interaction_type])

                if interaction_key in seen_interactions:
                    continue

                seen_interactions.add(interaction_key)
                protein_set.add(protein_a)
                protein_set.add(protein_b)

                interaction = {
                    'protein_a': protein_a,
                    'protein_b': protein_b,
                    'interaction_type': interaction_type
                }
                interactions.append(interaction)

                # Index the interaction under both endpoints for fast lookup.
                self.interactions_by_protein[protein_a].append(interaction)
                self.interactions_by_protein[protein_b].append(interaction)

            except Exception:
                continue

        print(f"Extracted {len(interactions)} valid unique interactions")
        print(f"Found {len(protein_set)} unique proteins")

        return interactions, sorted(list(protein_set))

    def build_network_neighborhoods(self, interactions, proteins, min_connections=3, max_connections=15):
        """
        Build subnetworks around mid-degree proteins for training examples.
        """
        protein_degrees = Counter()
        for interaction in interactions:
            protein_degrees[interaction['protein_a']] += 1
            protein_degrees[interaction['protein_b']] += 1

        # Centers with too few connections make trivial examples; extreme
        # hubs make intractably large ones.
        candidate_proteins = [
            protein for protein, degree in protein_degrees.items()
            if min_connections <= degree <= max_connections
        ]

        print(f"Found {len(candidate_proteins)} proteins with degree {min_connections}-{max_connections}")

        limited_proteins = candidate_proteins[:MAX_PROTEINS_TO_PROCESS]
        print(f"Processing {len(limited_proteins)} proteins with multiprocessing...")

        # Note: each task tuple carries the full interaction index, which is
        # pickled per task; fine for moderate datasets, costly for huge ones.
        worker_args = [
            (protein, self.interactions_by_protein, interactions, 10)
            for protein in limited_proteins
        ]

        num_processes = max(1, min(mp.cpu_count() // 2, len(limited_proteins)))
        print(f"Using {num_processes} processes")

        with mp.Pool(processes=num_processes) as pool:
            results = pool.map(extract_neighborhood_worker, worker_args)

        # Keep only neighborhoods large enough to be interesting.
        neighborhoods = [
            neighborhood for neighborhood in results
            if len(neighborhood['proteins']) >= 3
        ]

        return neighborhoods

    def enrich_proteins_with_data(self, proteins: List[str]):
        """
        Enrich proteins with UniProt and AlphaFold data using multiprocessing
        backed by the lock-protected persistent cache.
        """
        print(f"Enriching {len(proteins)} proteins with sequence and structural data...")

        # Serve cache hits immediately; queue the rest for parallel fetching.
        cached_count = 0
        proteins_to_fetch = []

        for protein in proteins:
            if protein in self.enricher.uniprot_cache:
                cached_count += 1
                self.enriched_proteins[protein] = self.enricher.get_protein_enriched_data(protein)
            else:
                proteins_to_fetch.append(protein)

        print(f"Found {cached_count} proteins in cache, need to fetch {len(proteins_to_fetch)} new proteins")

        if not proteins_to_fetch:
            print("All proteins found in cache!")
            return self.enriched_proteins

        # Split the remaining proteins into fixed-size batches, one per task.
        batch_size = 25
        protein_batches = [proteins_to_fetch[i:i + batch_size] for i in range(0, len(proteins_to_fetch), batch_size)]

        worker_args = [(batch, self.cache_dir, i) for i, batch in enumerate(protein_batches)]

        num_processes = max(1, min(mp.cpu_count() // 2, len(protein_batches)))
        print(f"Using {num_processes} processes for protein enrichment with lock-protected caching")

        enrichment_start = time.time()
        with mp.Pool(processes=num_processes) as pool:
            results = pool.map(enrich_proteins_worker, worker_args)

        for batch_result in results:
            self.enriched_proteins.update(batch_result)

        # Workers wrote to disk through the lock-protected merge; reload so
        # this process sees the union of everything they fetched.
        print("Reloading cache to get all merged data...")
        self.enricher._load_caches()

        # Backfill anything a worker cached but did not return (e.g. after a
        # partial failure).
        for protein in proteins:
            if protein not in self.enriched_proteins and protein in self.enricher.uniprot_cache:
                self.enriched_proteins[protein] = self.enricher.get_protein_enriched_data(protein)

        enrichment_time = time.time() - enrichment_start
        successful_enrichments = sum(1 for data in self.enriched_proteins.values()
                                     if data.get('uniprot_id') is not None)

        print(f"Protein enrichment completed in {enrichment_time:.2f} seconds")
        print(f"Successfully enriched {successful_enrichments}/{len(proteins)} proteins")
        print(f"Final cache sizes - UniProt: {len(self.enricher.uniprot_cache)}, AlphaFold: {len(self.enricher.alphafold_cache)}")

        return self.enriched_proteins

    def extract_neighborhood(self, center_protein, interactions, max_size=10):
        """
        Extract the neighborhood around a protein (serial counterpart of
        extract_neighborhood_worker).
        """
        neighbors = set()
        for interaction in self.interactions_by_protein[center_protein]:
            other_protein = (interaction['protein_b'] if interaction['protein_a'] == center_protein
                             else interaction['protein_a'])
            neighbors.add(other_protein)

        # Downsample so the neighborhood stays within max_size.
        if len(neighbors) > max_size - 1:
            neighbors = set(random.sample(list(neighbors), max_size - 1))

        neighborhood_proteins = {center_protein} | neighbors
        neighborhood_interactions = []

        for interaction in interactions:
            if (interaction['protein_a'] in neighborhood_proteins and
                    interaction['protein_b'] in neighborhood_proteins):
                neighborhood_interactions.append(interaction)

        return {
            'center_protein': center_protein,
            'proteins': sorted(list(neighborhood_proteins)),
            'interactions': neighborhood_interactions
        }

    def create_conversation_examples(self, neighborhoods):
        """
        Create the different types of conversation examples for diffusion
        training (parallelized across neighborhoods).
        """
        print(f"Creating conversation examples for {len(neighborhoods)} neighborhoods using multiprocessing...")

        worker_args = [(neighborhood, self.enriched_proteins) for neighborhood in neighborhoods]

        num_processes = max(1, min(mp.cpu_count() // 2, len(neighborhoods)))
        print(f"Using {num_processes} processes for conversation creation")

        conversations = []
        with mp.Pool(processes=num_processes) as pool:
            results = pool.map(create_conversation_examples_worker, worker_args)

        for result in results:
            conversations.extend(result)

        return conversations

    def create_protein_list_to_network_examples(self, neighborhood):
        """
        Context: list of proteins with sequence/structure info.
        Generation: complete interaction network.
        """
        examples = []
        proteins = neighborhood['proteins']
        interactions = neighborhood['interactions']

        network_text = self.format_enriched_network_as_text(proteins, interactions)

        system_msg = {
            "role": "system",
            "content": "You are a protein interaction prediction system. Given a list of proteins with their sequence and structural information, predict all likely interactions between them based on biological knowledge, sequence similarity, and structural compatibility."
        }

        protein_context = self.format_proteins_with_context(proteins)

        user_msg = {
            "role": "user",
            "content": f"Predict the protein interaction network for these proteins:\n\n{protein_context}"
        }

        assistant_msg = {
            "role": "assistant",
            "content": network_text
        }

        conversation = [system_msg, user_msg, assistant_msg]
        examples.append({"updated": conversation})

        return examples

    def create_new_protein_prediction_examples(self, neighborhood):
        """
        Context: known network + new protein with sequence/structure info.
        Generation: interactions for the new protein.
        """
        examples = []
        if len(neighborhood['proteins']) < 4:
            return examples

        proteins = neighborhood['proteins']
        interactions = neighborhood['interactions']

        # Hold out one protein to play the role of the "new" protein.
        target_protein = random.choice(proteins)
        remaining_proteins = [p for p in proteins if p != target_protein]

        known_interactions = [
            i for i in interactions
            if target_protein not in [i['protein_a'], i['protein_b']]
        ]

        target_interactions = [
            i for i in interactions
            if target_protein in [i['protein_a'], i['protein_b']]
        ]

        if not target_interactions:
            return examples

        known_network_text = self.format_enriched_network_as_text(remaining_proteins, known_interactions)
        target_network_text = self.format_interactions_as_text(target_interactions)

        target_context = self.format_proteins_with_context([target_protein])

        system_msg = {
            "role": "system",
            "content": "You are a protein interaction prediction system. Given a known protein network and a new protein with sequence and structural information, predict which proteins in the network the new protein will interact with based on sequence similarity, structural compatibility, and functional relationships."
        }

        user_msg = {
            "role": "user",
            "content": f"Known protein network:\n{known_network_text}\n\nNew protein to integrate:\n{target_context}\n\nPredict the interactions for {target_protein} based on its sequence, structure, and function:"
        }

        assistant_msg = {
            "role": "assistant",
            "content": target_network_text
        }

        conversation = [system_msg, user_msg, assistant_msg]
        examples.append({"updated": conversation})

        return examples

    def create_partial_network_completion_examples(self, neighborhood):
        """
        Context: partial network with some interactions hidden.
        Generation: the complete network.
        """
        examples = []
        proteins = neighborhood['proteins']
        interactions = neighborhood['interactions']

        if len(interactions) < 3:
            return examples

        # Hide roughly a third of the interactions.
        n_hidden = max(1, len(interactions) // 3)
        hidden_interactions = random.sample(interactions, n_hidden)
        visible_interactions = [i for i in interactions if i not in hidden_interactions]

        partial_network_text = self.format_network_as_text(proteins, visible_interactions)
        complete_network_text = self.format_network_as_text(proteins, interactions)

        system_msg = {
            "role": "system",
            "content": "You are a protein interaction prediction system. Given a partial protein network, predict the complete network including missing interactions."
        }

        user_msg = {
            "role": "user",
            "content": f"Complete this partial protein network:\n{partial_network_text}"
        }

        assistant_msg = {
            "role": "assistant",
            "content": complete_network_text
        }

        conversation = [system_msg, user_msg, assistant_msg]
        examples.append({"updated": conversation})

        return examples

    def create_network_property_examples(self, neighborhood):
        """
        Context: network properties and constraints.
        Generation: a network that satisfies those properties.
        """
        examples = []
        proteins = neighborhood['proteins']
        interactions = neighborhood['interactions']

        # Density of an undirected simple graph: 2E / (N * (N - 1)).
        n_proteins = len(proteins)
        n_interactions = len(interactions)
        density = (2 * n_interactions) / (n_proteins * (n_proteins - 1)) if n_proteins > 1 else 0

        protein_degrees = Counter()
        for interaction in interactions:
            protein_degrees[interaction['protein_a']] += 1
            protein_degrees[interaction['protein_b']] += 1

        hub_proteins = [p for p, degree in protein_degrees.most_common(2)]

        network_text = self.format_network_as_text(proteins, interactions)

        system_msg = {
            "role": "system",
            "content": "You are a protein interaction network generator. Given network constraints and properties, generate a biologically plausible protein network."
        }

        properties_text = (f"Generate a protein network with the following properties:\n"
                           f"- Proteins: {', '.join(proteins)}\n"
                           f"- Network density: approximately {density:.2f}\n"
                           f"- Hub proteins (highly connected): {', '.join(hub_proteins)}\n"
                           f"- Total interactions: approximately {n_interactions}")

        user_msg = {
            "role": "user",
            "content": properties_text
        }

        assistant_msg = {
            "role": "assistant",
            "content": network_text
        }

        conversation = [system_msg, user_msg, assistant_msg]
        examples.append({"updated": conversation})

        return examples

    def format_network_as_text(self, proteins, interactions):
        """
        Format a network as structured text for the model to predict.
        """
        proteins = sorted(proteins)

        # Group undirected edges by interaction type, canonicalizing each
        # pair alphabetically so A--B and B--A collapse to one edge.
        interactions_by_type = defaultdict(set)
        for interaction in interactions:
            if interaction['protein_a'] == interaction['protein_b']:
                continue

            int_type = interaction.get('interaction_type', 'physical')

            p1, p2 = sorted([interaction['protein_a'], interaction['protein_b']])
            interactions_by_type[int_type].add(f"{p1}--{p2}")

        result = f"PROTEINS: {', '.join(proteins)}\n\n"

        for int_type, edges in interactions_by_type.items():
            if edges:
                result += f"{int_type.upper()} INTERACTIONS:\n"
                for edge in sorted(edges):
                    result += f"  {edge}\n"
                result += "\n"

        total_interactions = sum(len(edges) for edges in interactions_by_type.values())
        result += f"NETWORK SUMMARY: {len(proteins)} proteins, {total_interactions} unique interactions"
        return result.strip()

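    # For reference, a hypothetical two-protein network would render as:
    #
    #   PROTEINS: BRCA1, TP53
    #
    #   TWO-HYBRID INTERACTIONS:
    #     BRCA1--TP53
    #
    #   NETWORK SUMMARY: 2 proteins, 1 unique interactions
    #
    # (BRCA1/TP53 and the "Two-hybrid" type are illustrative only.)
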
    def format_proteins_with_context(self, proteins: List[str]) -> str:
        """
        Format proteins with their enriched sequence and structural context.
        """
        protein_contexts = []

        for protein in sorted(proteins):
            enriched_data = self.enriched_proteins.get(protein, {})

            context_parts = [f"PROTEIN: {protein}"]

            if enriched_data.get('uniprot_id'):
                context_parts.append(f"  UniProt ID: {enriched_data['uniprot_id']}")

            if enriched_data.get('protein_name'):
                context_parts.append(f"  Full Name: {enriched_data['protein_name']}")

            if enriched_data.get('organism'):
                context_parts.append(f"  Organism: {enriched_data['organism']}")

            if enriched_data.get('sequence_length'):
                context_parts.append(f"  Sequence Length: {enriched_data['sequence_length']} amino acids")

            if enriched_data.get('sequence'):
                # Middle-truncate long sequences: keep the first 50% and last
                # 20% of the threshold length (60 + 24 residues at the
                # default threshold of 120).
                seq = enriched_data['sequence']
                if len(seq) > TRUNCATE_MIDDLE_ABOVE_THIS:
                    seq_preview = f"{seq[:int(TRUNCATE_MIDDLE_ABOVE_THIS * 0.5)]}...{seq[-int(TRUNCATE_MIDDLE_ABOVE_THIS * 0.2):]}"
                else:
                    seq_preview = seq
                context_parts.append(f"  Sequence: {seq_preview}")

            if enriched_data.get('has_structure'):
                context_parts.append("  AlphaFold Structure: Available")
                if enriched_data.get('model_confidence'):
                    context_parts.append(f"  Structure Confidence: {enriched_data['model_confidence']}")
                if enriched_data.get('structure_coverage'):
                    context_parts.append(f"  Structure Coverage: residues {enriched_data['structure_coverage']}")
            else:
                context_parts.append("  AlphaFold Structure: Not available")

            if enriched_data.get('function'):
                func_text = enriched_data['function'][:200] + "..." if len(enriched_data['function']) > 200 else enriched_data['function']
                context_parts.append(f"  Function: {func_text}")

            protein_contexts.append("\n".join(context_parts))

        return "\n\n".join(protein_contexts)

    def format_enriched_network_as_text(self, proteins, interactions):
        """
        Format a network plus a compact per-protein detail summary.
        """
        basic_network = self.format_network_as_text(proteins, interactions)

        enriched_summary = "\n\nPROTEIN DETAILS:\n"

        for protein in sorted(proteins):
            enriched_data = self.enriched_proteins.get(protein, {})
            details = [f"{protein}"]

            if enriched_data.get('sequence_length'):
                details.append(f"{enriched_data['sequence_length']}aa")

            if enriched_data.get('has_structure'):
                confidence = enriched_data.get('model_confidence', 'unknown')
                details.append(f"AlphaFold({confidence})")

            if enriched_data.get('organism'):
                # Genus only, to keep the summary line short.
                org = enriched_data['organism'].split()[0] if ' ' in enriched_data['organism'] else enriched_data['organism']
                details.append(f"{org}")

            enriched_summary += f"  {' | '.join(details)}\n"

        return basic_network + enriched_summary

    def format_interactions_as_text(self, interactions):
        """
        Format just the interactions as text.
        """
        if not interactions:
            return "No interactions predicted."

        # Same canonical edge grouping as format_network_as_text.
        interactions_by_type = defaultdict(set)
        for interaction in interactions:
            if interaction['protein_a'] == interaction['protein_b']:
                continue

            int_type = interaction.get('interaction_type', 'physical')
            p1, p2 = sorted([interaction['protein_a'], interaction['protein_b']])
            interactions_by_type[int_type].add(f"{p1}--{p2}")

        result = ""
        for int_type, edges in interactions_by_type.items():
            if edges:
                result += f"{int_type.upper()} INTERACTIONS:\n"
                for edge in sorted(edges):
                    result += f"  {edge}\n"
                result += "\n"

        return result.strip()

    def save_conversation_dataset(self, output_file="processed_dataset.json"):
        """
        Create and save the full conversation dataset with enriched protein data.
        """
        start_time = time.time()

        print("Step 1: Loading and parsing data...")
        load_start = time.time()
        interactions, proteins = self.load_and_parse_biogrid()
        load_time = time.time() - load_start
        print(f"Data loading completed in {load_time:.2f} seconds")

        print("Step 2: Building protein neighborhoods...")
        neighborhood_start = time.time()
        neighborhoods = self.build_network_neighborhoods(interactions, proteins)
        neighborhood_time = time.time() - neighborhood_start
        print(f"Built {len(neighborhoods)} protein neighborhoods in {neighborhood_time:.2f} seconds")

        print("Step 3: Enriching proteins with sequence and structural data...")
        # Only enrich proteins that actually appear in some neighborhood; the
        # enrichment call prints its own timing.
        unique_proteins = set()
        for neighborhood in neighborhoods:
            unique_proteins.update(neighborhood['proteins'])

        self.enrich_proteins_with_data(list(unique_proteins))

        print("Step 4: Creating conversation examples...")
        conversation_start = time.time()
        conversations = self.create_conversation_examples(neighborhoods)
        conversation_time = time.time() - conversation_start
        print(f"Created {len(conversations)} conversation examples in {conversation_time:.2f} seconds")

        print("Step 5: Shuffling and saving dataset...")
        random.shuffle(conversations)

        with open(output_file, 'w') as f:
            json.dump(conversations, f, indent=2)

        # Save the enriched protein data alongside the conversations.
        enriched_data_file = output_file.replace('.json', '_protein_data.json')
        with open(enriched_data_file, 'w') as f:
            json.dump(self.enriched_proteins, f, indent=2)

        total_time = time.time() - start_time
        print(f"Saved dataset to {output_file}")
        print(f"Saved enriched protein data to {enriched_data_file}")
        print(f"Total processing time: {total_time:.2f} seconds")

        print("\n=== Example Conversations ===")
        for i, conv in enumerate(conversations[:2]):
            print(f"\n--- Example {i+1} ---")
            for msg in conv["updated"]:
                print(f"{msg['role'].upper()}: {msg['content'][:300]}...")

        return conversations


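# A minimal offline sketch (hypothetical helper, not part of the pipeline):
# exercises the example-creation path on a synthetic neighborhood, with no
# BioGRID file and no network calls, which is handy for testing formatting.
def _demo_offline_conversations() -> List[Dict]:
    creator = ProteinNetworkConversationDataset("")  # filename unused here
    creator.enriched_proteins = {}  # no UniProt/AlphaFold context attached
    neighborhood = {
        'center_protein': 'B',
        'proteins': ['A', 'B', 'C'],
        'interactions': [
            {'protein_a': 'A', 'protein_b': 'B', 'interaction_type': 'physical'},
            {'protein_a': 'B', 'protein_b': 'C', 'interaction_type': 'physical'},
        ],
    }
    return creator.create_protein_list_to_network_examples(neighborhood)

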
if __name__ == "__main__":
    # Seed both RNGs so sampling and shuffling are reproducible.
    random.seed(42)
    np.random.seed(42)

    print(f"Configuration: Processing up to {MAX_PROTEINS_TO_PROCESS} proteins")
    print(f"Worker processes: up to {max(1, mp.cpu_count() // 2)} (half of {mp.cpu_count()} CPU cores)")
    print("Cache directory: protein_cache/")

    creator = ProteinNetworkConversationDataset(
        "./unzipped/BIOGRID-ALL-4.4.246.tab3/BIOGRID-ALL-4.4.246.tab3.txt",
        cache_dir="protein_cache"
    )

    conversations = creator.save_conversation_dataset("processed_dataset.json")

    print("\n=== Dataset Summary ===")
    print(f"Total conversations: {len(conversations)}")

    # Classify each conversation by distinctive phrases in its system prompt.
    task_types = Counter()
    for conv in conversations:
        system_content = conv["updated"][0]["content"]
        if "list of proteins" in system_content:
            task_types["protein_list_to_network"] += 1
        elif "new protein" in system_content:
            task_types["new_protein_integration"] += 1
        elif "partial" in system_content:
            task_types["partial_completion"] += 1
        elif "properties" in system_content:
            task_types["property_based_generation"] += 1

    print("\nTask distribution:")
    for task, count in task_types.items():
        print(f"  {task}: {count}")

    print("\n=== Protein Enrichment Summary ===")
    total_proteins = len(creator.enriched_proteins)
    denom = max(total_proteins, 1)  # guard against division by zero
    proteins_with_uniprot = sum(1 for data in creator.enriched_proteins.values()
                                if data.get('uniprot_id') is not None)
    proteins_with_sequence = sum(1 for data in creator.enriched_proteins.values()
                                 if data.get('sequence') is not None)
    proteins_with_structure = sum(1 for data in creator.enriched_proteins.values()
                                  if data.get('has_structure'))

    print(f"Total proteins processed: {total_proteins}")
    print(f"Proteins with UniProt data: {proteins_with_uniprot} ({proteins_with_uniprot/denom*100:.1f}%)")
    print(f"Proteins with sequences: {proteins_with_sequence} ({proteins_with_sequence/denom*100:.1f}%)")
    print(f"Proteins with AlphaFold structures: {proteins_with_structure} ({proteins_with_structure/denom*100:.1f}%)")

    print("\n=== Cache Statistics ===")
    print(f"UniProt cache entries: {len(creator.enricher.uniprot_cache)}")
    print(f"AlphaFold cache entries: {len(creator.enricher.alphafold_cache)}")
    print(f"Cache files location: {creator.cache_dir}/")