text stringlengths 0 93.6k |
|---|
def canon_name(archh, offseth):
    """Return the canonical name of the register at *offseth*, or None.

    A name is canonical when the register in ``archh.registers`` sits at
    exactly that offset AND the name appears in the ``arch_data`` table
    for this architecture (keyed by the VEX arch string minus its
    "VexArch" prefix, lowercased).
    """
    for reg_name, (reg_offset, _size) in archh.registers.items():
        if reg_offset != offseth:
            continue
        # Only consult arch_data once an offset actually matches, so an
        # arch with no matching register never triggers the table lookup.
        if reg_name in arch_data[archh.vex_arch[7:].lower()]:
            return reg_name
    return None
# Regenerate the register_names / registers tables in each archinfo source
# module so they match the canonical layout recorded in arch_data.
for archname in arch_data:
    try:
        arch = archinfo.arch_from_id(archname)
    except (archinfo.ArchError, RuntimeError):
        print("Skipping", archname)
        continue

    new_register_names = {}      # canonical offset -> VEX field name
    new_registers_reverse = {}   # canonical offset -> [(size, alias), ...]
    misses = []                  # VEX registers with no canonical entry

    for a_offset, a_fieldname in arch.register_names.items():
        cname = canon_name(arch, a_offset)
        if cname is None:
            misses.append((a_offset, a_fieldname))
            continue
        new_offset, new_size = arch_data[archname][cname]
        # deal with picking up subregisters? shouldn't need to, beyond the above...
        new_register_names[new_offset] = a_fieldname
        # Carry every alias whose offset falls inside this register across
        # to the corresponding canonical offset.
        for alt_name, (sub_offset, sub_size) in arch.registers.items():
            if a_offset <= sub_offset < a_offset + new_size:
                new_sub_offset = new_offset + (sub_offset - a_offset)
                new_registers_reverse.setdefault(new_sub_offset, []).append(
                    (sub_size, alt_name)
                )

    # A missed register is acceptable only if its VEX field name shows up
    # among the aliases we already placed; otherwise the tables are
    # inconsistent and we bail out loudly.
    for misso, miss in misses:
        for dlist in new_registers_reverse.values():
            if any(alt_name == miss for _, alt_name in dlist):
                new_register_names[misso] = miss
                break
        else:
            raise Exception(f"Arch {arch.name}: {miss} has no name that matches vex")

    # get ready to write back to archinfo source
    arch_fname = "archinfo/%s.py" % arch.__class__.__module__.split(".")[-1]
    # BUG FIX: the original opened these files in "rb"/"wb" yet wrote and
    # compared str data, which raises TypeError on Python 3 (this file uses
    # f-strings, so it is Python 3).  Open in text mode, and use context
    # managers so the handles are always closed — also guaranteeing the
    # input is fully read before the same path is reopened for writing.
    with open(arch_fname, "r") as orig_fp:
        orig_lines = iter(orig_fp.readlines())
    with open(arch_fname, "w") as file_fp:
        # copy initial lines, up to (not including) the old register_names
        for line in orig_lines:
            if " register_names = " in line:
                break
            file_fp.write(line)
        # discard lines we want to replace: the rest of register_names up to
        # the registers table, then the registers table up to its closing brace
        for line in orig_lines:
            if " registers = " in line:
                break
        for line in orig_lines:
            if "}" in line:
                break
        # dump out the new data
        file_fp.write(" register_names = {\n")
        for new_offset in sorted(new_register_names):
            file_fp.write(" %d: '%s',\n" % (new_offset, new_register_names[new_offset]))
        file_fp.write(" }\n\n")
        file_fp.write(" registers = {\n")
        for new_offset, dlist in sorted(new_registers_reverse.items()):
            for new_size, new_name in sorted(dlist):
                file_fp.write(" '%s': (%d, %d),\n" % (new_name, new_offset, new_size))
        file_fp.write(" }\n")
        # dump out the rest of the file
        for line in orig_lines:
            file_fp.write(line)
# <FILESEP> |
# triplet loss |
import tensorflow.keras.backend as K |
from itertools import permutations |
import random |
import tensorflow as tf |
import numpy as np |
def generate_triplet(x, y, ap_pairs=10, an_pairs=10): |
data_xy = tuple([x, y]) |
trainsize = 1 |
triplet_train_pairs = [] |
y_triplet_pairs = [] |
#triplet_test_pairs = [] |
for data_class in sorted(set(data_xy[1])): |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.