source stringlengths 4.8k 15.8k | file_name stringlengths 9 9 | cwe sequencelengths 1 1 |
|---|---|---|
"""
Implementation of the SHA1 hash function and gives utilities to find hash of string or
hash of text from a file. Also contains a Test class to verify that the generated hash
matches what is returned by the hashlib library
Usage: python sha1.py --string "Hello World!!"
python sha1.py --file "hello_world.txt"... | 916728.py | [
"CWE-327: Use of a Broken or Risky Cryptographic Algorithm"
] |
import os
import gc
import time
import numpy as np
import torch
import torchvision
from PIL import Image
from einops import rearrange, repeat
from omegaconf import OmegaConf
import safetensors.torch
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.util import instantiate_from_config, ismap
from modules impo... | 177699.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
# The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo
# The VQModel & VQModelInterface were subsequently removed from ldm/models/autoencoder.py when we moved to the stability-ai/stablediffusion repo
# As the LDSR upscaler relies on VQModel & VQModelInterface, the ... | 932523.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
# Vendored from https://raw.githubusercontent.com/CompVis/taming-transformers/24268930bf1dce879235a7fddd0b2355b84d7ea6/taming/modules/vqvae/quantize.py,
# where the license is as follows:
#
# Copyright (c) 2020 Patrick Esser and Robin Rombach and Björn Ommer
#
# Permission is hereby granted, free of charge, to any pers... | 570756.py | [
"Unknown"
] |
#!/usr/bin/python3
import argparse
import ctypes
import functools
import shutil
import subprocess
import sys
import tempfile
import threading
import traceback
import os.path
sys.path.insert(0, os.path.dirname(os.path.dirname((os.path.abspath(__file__)))))
from youtube_dl.compat import (
compat_input,
compat_h... | 093118.py | [
"CWE-276: Incorrect Default Permissions"
] |
from __future__ import unicode_literals
import errno
import hashlib
import json
import os.path
import re
import ssl
import sys
import types
import unittest
import youtube_dl.extractor
from youtube_dl import YoutubeDL
from youtube_dl.compat import (
compat_open as open,
compat_os_name,
compat_str,
)
from y... | 717170.py | [
"CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')"
] |
# coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
int_or_none,
try_get,
unified_strdate,
unified_timestamp,
)
class AmericasTestKitchenIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?(?:ameri... | 773378.py | [
"CWE-798: Use of Hard-coded Credentials"
] |
#!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless... | 627547.py | [
"CWE-676: Use of Potentially Dangerous Function"
] |
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable... | 624453.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
#! /usr/bin/python3
import argparse
import logging
import os
import sys
from collections import namedtuple
import torch
from modeling_bertabs import BertAbs, build_predictor
from torch.utils.data import DataLoader, SequentialSampler
from tqdm import tqdm
from transformers import BertTokenizer
from .utils_summarizati... | 884804.py | [
"CWE-676: Use of Potentially Dangerous Function"
] |
#!/usr/bin/env python3
import os
import shutil
import sys
from pathlib import Path
from subprocess import check_call
from tempfile import TemporaryDirectory
from typing import Optional
SCRIPT_DIR = Path(__file__).parent
REPO_DIR = SCRIPT_DIR.parent.parent
def read_triton_pin(device: str = "cuda") -> str:
trito... | 879024.py | [
"CWE-78: Improper Neutralization of Special Elements used in an OS Command ('OS Command Injection')"
] |
#!/usr/bin/env python3
import os
import sys
from dataclasses import asdict, dataclass, field
from pathlib import Path
from typing import Dict, Iterable, List, Literal, Set
from typing_extensions import TypedDict # Python 3.11+
import generate_binary_build_matrix # type: ignore[import]
import jinja2
Arch = Literal... | 938702.py | [
"CWE-79: Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting')"
] |
# Helper to get the id of the currently running job in a GitHub Actions
# workflow. GitHub does not provide this information to workflow runs, so we
# need to figure it out based on what they *do* provide.
import argparse
import json
import operator
import os
import re
import sys
import time
import urllib
import urlli... | 948858.py | [
"CWE-939: Improper Authorization in Handler for Custom URL Scheme"
] |
import hashlib
import time
import urllib
import uuid
from .common import InfoExtractor
from .openload import PhantomJSwrapper
from ..utils import (
ExtractorError,
UserNotLive,
determine_ext,
int_or_none,
js_to_json,
parse_resolution,
str_or_none,
traverse_obj,
unescapeHTML,
url... | 758317.py | [
"CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')"
] |
import functools
import hashlib
import json
import time
import urllib.parse
from .common import InfoExtractor
from ..utils import (
ExtractorError,
OnDemandPagedList,
int_or_none,
jwt_decode_hs256,
mimetype2ext,
qualities,
traverse_obj,
try_call,
unified_timestamp,
)
class IwaraBa... | 837764.py | [
"CWE-327: Use of a Broken or Risky Cryptographic Algorithm"
] |
import hashlib
import random
from .common import InfoExtractor
from ..utils import (
clean_html,
int_or_none,
try_get,
)
class JamendoIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
(?:
licensing\.jamendo\.com/[^/]+|
... | 530858.py | [
"CWE-327: Use of a Broken or Risky Cryptographic Algorithm"
] |
"""
Settings and configuration for Django.
Read values from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global_settings.py
for a list of all possible variables.
"""
import importlib
import os
import time
import traceback
import warnings
f... | 359100.py | [
"CWE-706: Use of Incorrectly-Resolved Name or Reference"
] |
"Misc. utility functions/classes for admin documentation generator."
import re
from email.errors import HeaderParseError
from email.parser import HeaderParser
from inspect import cleandoc
from django.urls import reverse
from django.utils.regex_helper import _lazy_re_compile
from django.utils.safestring import mark_sa... | 429723.py | [
"CWE-79: Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting')"
] |
"""
This module contains the spatial lookup types, and the `get_geo_where_clause`
routine for Oracle Spatial.
Please note that WKT support is broken on the XE version, and thus
this backend will not work on such platforms. Specifically, XE lacks
support for an internal JVM, and Java libraries are required to use... | 783587.py | [
"CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')"
] |
A dataset of 76 Python programs taken from real Python open source projects (top 100 on GitHub), where each program is a file that has exactly 1 vulnerability as detected by a particular static analyzer (Semgrep), used in the paper Patched MOA: optimizing inference for diverse software development tasks.
OpenAI used the synth-vuln-fixes dataset to fine-tune a new version of gpt-4o, which is now the SOTA on this benchmark. More details and code are available from their repo.
More details on the benchmark are available in our blog.
New Version of Static Analysis Eval (Aug 20, 2024)
We have created a new version of the benchmark with instances that are harder than the previous ones. There has been a lot of progress in models over the last year; as a result, the previous version of the benchmark was saturated. The methodology is the same, and we have also released the dataset generation script, which scans the top 100 Python projects to generate the instances. You can see it here. The same eval script works as before. You no longer need to log in to Semgrep, as we only use their OSS rules for this version of the benchmark.
The highest score a model can get on this benchmark is 100%, you can see the oracle run logs here.
New Evaluation
| Model | Score | Logs |
|---|---|---|
| o1-mini-2024-09-12 | 51.33 | link |
| gpt-4o-mini | 52.21 | link |
| gpt-4o-mini + 3-shot prompt | 53.10 | link |
| gpt-4o-mini + rag (embedding & reranking) | 58.41 | link |
| gpt-4o-mini + fine-tuned with synth-vuln-fixes | 53.98 | link |
| Model | Score | Logs |
|---|---|---|
| gpt-4o | 53.10 | link |
| gpt-4o + 3-shot prompt | 53.98 | link |
| gpt-4o + rag (embedding & reranking) | 56.64 | link |
| gpt-4o + fine-tuned with synth-vuln-fixes | 61.06 | link |
Mixture of Agents (MOA)
We also benchmarked gpt-4o with Patched MOA. This demonstrates that an inference optimization technique like MOA can improve performance without fine-tuning.
| Model | Score | Logs |
|---|---|---|
| moa-gpt-4o | 53.98 | link |
| moa-gpt-4o + 3-shot prompt | 60.18 | link |
| moa-gpt-4o + rag (embedding & reranking) | 61.06 | link |
Static Analysis Eval Benchmark
You can run the _script_for_eval.py script to check the results.
python3 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
python _script_for_eval.py
For all supported options, run with --help:
usage: _script_for_eval.py [-h] [--model MODEL] [--cache] [--n_shot N_SHOT] [--use_similarity] [--oracle]
Run Static Analysis Evaluation
options:
-h, --help show this help message and exit
--model MODEL OpenAI model to use
--cache Enable caching of results
--n_shot N_SHOT Number of examples to use for few-shot learning
--use_similarity Use similarity for fetching dataset examples
--oracle Run in oracle mode (assume all vulnerabilities are fixed)
We need to use the logged in version of Semgrep to get access to more rules for vulnerability detection. So, make sure you login before running the eval script.
% semgrep login
API token already exists in /Users/user/.semgrep/settings.yml. To login with a different token logout use `semgrep logout`
After the run, the script will also create a log file which captures the stats for the run and the files that were fixed. You can see an example here. Due to the recent versions of Semgrep not detecting a few of the samples in the dataset as vulnerable anymore, the maximum score possible on the benchmark is 77.63%. You can see the oracle run log here.
Evaluation
We did some detailed evaluations recently (19/08/2024):
| Model | Score | Logs |
|---|---|---|
| gpt-4o-mini | 67.11 | link |
| gpt-4o-mini + 3-shot prompt | 71.05 | link |
| gpt-4o-mini + rag (embedding & reranking) | 72.37 | link |
| gpt-4o-mini + fine-tuned with synth-vuln-fixes | 77.63 | link |
| Model | Score | Logs |
|---|---|---|
| gpt-4o | 68.42 | link |
| gpt-4o + 3-shot prompt | 77.63 | link |
| gpt-4o + rag (embedding & reranking) | 77.63 | link |
| gpt-4o + fine-tuned with synth-vuln-fixes | 77.63 | link |
Leaderboard
The top models on the leaderboard are all fine-tuned using the same dataset that we released called synth vuln fixes.
You can read about our experience with fine-tuning them on our blog.
You can also explore the leaderboard with this interactive visualization.

| Model | StaticAnalysisEval (%) | Time (mins) | Price (USD) |
|---|---|---|---|
| gpt-4o-mini-fine-tuned | 77.63 | 21:0 | 0.21 |
| gemini-1.5-flash-fine-tuned | 73.68 | 18:0 | |
| Llama-3.1-8B-Instruct-fine-tuned | 69.74 | 23:0 | |
| gpt-4o | 69.74 | 24:0 | 0.12 |
| gpt-4o-mini | 68.42 | 20:0 | 0.07 |
| gemini-1.5-flash-latest | 68.42 | 18:2 | 0.07 |
| Llama-3.1-405B-Instruct | 65.78 | 40:12 | |
| Llama-3-70B-instruct | 65.78 | 35:2 | |
| Llama-3-8B-instruct | 65.78 | 31.34 | |
| gemini-1.5-pro-latest | 64.47 | 34:40 | |
| gpt-4-1106-preview | 64.47 | 27:56 | 3.04 |
| gpt-4 | 63.16 | 26:31 | 6.84 |
| claude-3-5-sonnet-20240620 | 59.21 | 23:59 | 0.70 |
| moa-gpt-3.5-turbo-0125 | 53.95 | 49:26 | |
| gpt-4-0125-preview | 53.94 | 34:40 | |
| patched-coder-7b | 51.31 | 45.20 | |
| patched-coder-34b | 46.05 | 33:58 | 0.87 |
| patched-mix-4x7b | 46.05 | 60:00+ | 0.80 |
| Mistral-Large | 40.80 | 60:00+ | |
| Gemini-pro | 39.47 | 16:09 | 0.23 |
| Mistral-Medium | 39.47 | 60:00+ | 0.80 |
| Mixtral-Small | 30.26 | 30:09 | |
| gpt-3.5-turbo-0125 | 28.95 | 21:50 | |
| claude-3-opus-20240229 | 25.00 | 60:00+ | |
| Llama-3-8B-instruct.Q4_K_M | 21.05 | 60:00+ | |
| Gemma-7b-it | 19.73 | 36:40 | |
| gpt-3.5-turbo-1106 | 17.11 | 13:00 | 0.23 |
| Codellama-70b-Instruct | 10.53 | 30.32 | |
| CodeLlama-34b-Instruct | 7.89 | 23:16 | |
The price is calculated by assuming 1000 input and output tokens per call, as all examples in the dataset are < 512 tokens (OpenAI cl100k_base tokenizer).
Some models timed out during the run or had intermittent API errors. We try each example 3 times in such cases. This is why some runs are reported to be longer than 1 hr (60:00+ mins).
If you want to add your model to the leaderboard, you can send in a PR to this repo with the log file from the evaluation run.
- Downloads last month
- 577
