# Assumed context (the enclosing function definition is not part of this
# dump): iterate over every basic block `bb` of the function being scanned
# for opaque predicates. `func` is an assumed name for that function.
for bb in func.basic_blocks:
    # bb does not end with a JCC (conditional jump), so ignore
    if len(bb.outgoing_edges) != 2:
        continue

    # skip basic blocks we have already evaluated
    if bb in seen_bbs:
        continue

    total_conds_seen += 1

    # evaluate both branches
    # (does not differentiate between the True/False branch,
    # but for our purpose we don't need to)
    for branch in bb.outgoing_edges:
        # the branch target has multiple incoming edges, so ignore
        if len(branch.target.incoming_edges) != 1:
            continue

        # ignore authentic (known-good) basic blocks
        if branch.target.start in metadata.good_bbs:
            continue

        # core analysis
        alerted_rules_in_bb = list()  # reset to empty
        last_instr_addr = get_last_bb_instr(bb)  # if_addr

        for analysis in analyses:
            analysis_result = analysis(bv, branch.target,
                                       branch.target.start, metadata)
            if analysis_result:
                # add the rules alerted in the current basic block
                alerted_rules_in_bb.extend(analysis_result)

        # list not empty: there are alerted rules in the current basic block
        if alerted_rules_in_bb:
            # format: (OP addr, binja branch object, rule list)
            cur_pass_patch_locations.append(
                OpaquePredicateInfo(last_instr_addr, branch,
                                    alerted_rules_in_bb)
            )

    seen_bbs.add(bb)

return (cur_pass_patch_locations, total_conds_seen)
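
# `get_last_bb_instr` is referenced above but its definition did not make it
# into this dump. A minimal sketch, assuming `bb` is a Binary Ninja
# `BasicBlock` (iterating one yields a (tokens, size) pair per instruction):
def get_last_bb_instr(bb):
    """Return the address of the last instruction in a basic block."""
    addr = bb.start
    last_addr = bb.start
    for _tokens, size in bb:  # walk the instructions, accumulating their sizes
        last_addr = addr
        addr += size
    return last_addr
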
# <FILESEP>
from enum import Enum
import argparse
import dataclasses
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser, TrainingArguments

from tasks.utils import *


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class into argparse arguments
    to be able to specify them on the command line.
    """

    task_name: str = field(
        metadata={
            "help": "The name of the task to train on: " + ", ".join(TASKS),
            "choices": TASKS,
        },
    )
    dataset_name: str = field(
        metadata={
            "help": "The name of the dataset to use: " + ", ".join(DATASETS),
            "choices": DATASETS,
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None,
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
                    "than this will be truncated, sequences shorter will be padded."
        },
    )
    overwrite_cache: bool = field(
        default=False,
        metadata={"help": "Overwrite the cached preprocessed datasets or not."},
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. "
                    "If False, will pad the samples dynamically when batching to the maximum length in the batch."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
                    "value if set."
        },
    )