import json
import os
import tempfile

import gradio as gr
import spaces
import torch
from huggingface_hub import InferenceClient
|
|
| |
# Point downstream tooling at the CUDA toolkit and expose its binaries on PATH.
os.environ['CUDA_HOME'] = '/usr/local/cuda'
_cuda_bin = ':/usr/local/cuda/bin'
os.environ['PATH'] += _cuda_bin
|
|
| |
@spaces.GPU(duration=120)
def ai_fix_json(model_id, json_data):
    """Ask a hosted LLM to repair *json_data* into valid JSON.

    Args:
        model_id: Hugging Face model repo id passed to ``InferenceClient``.
        json_data: The (possibly malformed) JSON text to repair.

    Returns:
        ``(fixed_json, status_message)`` on success, or ``(None, error_message)``
        when generation fails on both the GPU and CPU paths.
    """
    client = InferenceClient(model=model_id)
    prompt = f"Fix the following JSON data and make it valid:\n\n{json_data}\n\nFixed JSON:"

    def _generate(device_label):
        # BUG FIX: InferenceClient.text_generation returns the generated text
        # as a plain *string* (when details/stream are off), not a list of
        # {'generated_text': ...} dicts.  The original indexed response[0]
        # (the first character) and looked for a dict key in it, so the
        # success path could never work.
        generated = client.text_generation(prompt, max_new_tokens=1024)
        if not generated:
            return None, f"Failed to process JSON with model {model_id}. Response was invalid."
        # Keep only the text after the final prompt marker.
        fixed = generated.split("Fixed JSON:")[-1].strip()
        return fixed, f"JSON fixed using AI on {device_label} with model: {model_id}"

    try:
        if torch.cuda.is_available():
            return _generate("GPU")
        # Mirror the original control flow: route no-GPU through the fallback.
        raise RuntimeError("GPU not available, falling back to CPU.")
    except Exception as gpu_error:
        print(f"Falling back to CPU due to: {gpu_error}")
        try:
            return _generate("CPU")
        except Exception as e:
            return None, f"Failed to process with model {model_id}. Error: {str(e)}"
|
|
def process_file(model_id, uploaded_file):
    """Read an uploaded JSON file, repair it with the AI model, and return
    ``(fixed_json, status_message, download_path)`` for the Gradio outputs.

    Args:
        model_id: Model repo id forwarded to ``ai_fix_json``.
        uploaded_file: A file-like object, a filesystem path (newer gradio),
            or raw JSON text.

    Returns:
        Pretty-printed JSON text, a status message, and the path to a temp
        file containing the cleaned JSON (or ``None``s on failure).
    """
    if hasattr(uploaded_file, 'read'):
        json_data = uploaded_file.read().decode("utf-8")
    elif isinstance(uploaded_file, str) and os.path.isfile(uploaded_file):
        # BUG FIX: newer gradio versions pass the upload as a filesystem
        # path; the original fed the *path string* to the model as if it
        # were the JSON content.
        with open(uploaded_file, encoding="utf-8") as f:
            json_data = f.read()
    else:
        json_data = uploaded_file

    cleaned_json, message = ai_fix_json(model_id, json_data)
    if not cleaned_json:
        return None, message, None

    try:
        # BUG FIX: json.loads() does not accept ensure_ascii (that is a
        # json.dumps() option); passing it raised TypeError on every call.
        parsed_data = json.loads(cleaned_json)
    except json.JSONDecodeError as e:
        return None, f"Failed to fix JSON: {str(e)}", None

    pretty_json = json.dumps(parsed_data, indent=4, ensure_ascii=False)
    # BUG FIX: the gr.File output component needs a filesystem path for the
    # download to work, not the raw JSON text.
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".json", delete=False, encoding="utf-8"
    ) as tmp:
        tmp.write(pretty_json)
        download_path = tmp.name
    return pretty_json, message, download_path
|
|
| |
# Text-generation models offered in the UI dropdown; the first entry is the
# default selection.
model_options = [
    "EleutherAI/gpt-neo-2.7B",
    "gpt2",
    "facebook/opt-1.3b",
    "EleutherAI/gpt-j-6B",
    "google/flan-t5-base",
]
|
|
# Web UI: model picker + JSON upload in; fixed JSON, status text, and a
# downloadable cleaned file out.
_model_dropdown = gr.Dropdown(
    label="Select Model",
    choices=model_options,
    value="EleutherAI/gpt-neo-2.7B",
)
_file_upload = gr.File(label="Upload your JSON file")

iface = gr.Interface(
    fn=process_file,
    inputs=[_model_dropdown, _file_upload],
    outputs=[
        gr.JSON(label="Fixed JSON"),
        "text",
        gr.File(label="Download cleaned JSON file"),
    ],
    title="AI-Powered JSON Cleaner with Model Selection",
    description="Upload a JSON file to automatically fix, remove duplicates, and download the cleaned version using AI with GPU/CPU fallback. Select any model from the dropdown list.",
)
|
|
if __name__ == "__main__":
    # NOTE(review): the original wrapped only this print in try/except, but a
    # bare print cannot raise, so the "falling back to CPU" handler was dead
    # code.  Actual GPU/CPU fallback happens inside ai_fix_json.
    print("ZeroGPU initialized.")
    iface.launch()