"""Detect bugs with CodeBERT and suggest fixes with CodeT5+."""
import torch
from transformers import AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoTokenizer

# Load CodeT5+ (seq2seq) for code fixing
fix_ckpt = "Salesforce/codet5p-220m"
fix_tokenizer = AutoTokenizer.from_pretrained(fix_ckpt)
fix_model = AutoModelForSeq2SeqLM.from_pretrained(fix_ckpt)

def fix_code(code):
    """Ask CodeT5+ to rewrite the given snippet with its bug fixed."""
    prompt = f"fix: {code}"
    inputs = fix_tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():  # inference only; also passes the attention mask to generate()
        output = fix_model.generate(**inputs, max_length=256)
    return fix_tokenizer.decode(output[0], skip_special_tokens=True).strip()

# Load CodeBERT with a two-way classification head for bug detection.
# NOTE: the head is randomly initialized (codebert-base ships without one),
# so predictions are only meaningful after fine-tuning on labeled bug data.
detect_ckpt = "microsoft/codebert-base"
detect_tokenizer = AutoTokenizer.from_pretrained(detect_ckpt)
detect_model = AutoModelForSequenceClassification.from_pretrained(detect_ckpt, num_labels=2)

def detect_bug(code):
    """Classify a code snippet as 'buggy' or 'correct'."""
    inputs = detect_tokenizer(code, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = detect_model(**inputs)
    probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
    return "buggy" if probabilities[0][1] > probabilities[0][0] else "correct"

# Optional smoke test: classify a deliberately buggy snippet.
if __name__ == "__main__":
    sample = "def multiply(a, b): return a + b"  # bug: adds instead of multiplying
    print(detect_bug(sample))
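    # Follow-up sketch: run the same snippet through the CodeT5+ fixer above
    # and print its suggested repair (quality depends on the checkpoint).
    print(fix_code(sample))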
