query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict)
|---|---|---|---|
Autocomplete typing for the command prefixes in a guild. | async def auto_complete_type_guild_prefixes(
inter: disnake.AppCmdInter, user_input: str
) -> List[str]:
await create_guild_model(inter.guild)
guild = await Guild.get(inter.guild_id)
return guild.prefixes[:24] | [
"async def prefix(self,ctx):\r\n\t\ttry:\r\n\t\t\tprefixes = self.bot.config[f\"{ctx.guild.id}\"][\"prefix\"]\r\n\t\texcept KeyError:\r\n\t\t\tself.bot.config[f\"{ctx.guild.id}\"][\"prefix\"] = ['$','!','`','.','-','?']\r\n\t\t\tawait self._save()\r\n\t\t\tprefixes = self.bot.config[f\"{ctx.guild.id}\"][\"prefix\"]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
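A minimal sketch of how a coroutine like `auto_complete_type_guild_prefixes` is typically registered as an autocomplete source in disnake; the slash command and option names here are hypothetical, not part of the dataset row.

```python
from typing import List

import disnake
from disnake.ext import commands

bot = commands.InteractionBot()

@bot.slash_command(description="Remove a command prefix from this guild.")
async def remove_prefix(
    inter: disnake.AppCmdInter,
    # disnake invokes the autocomplete coroutine as the user types and
    # shows the returned strings (Discord caps suggestions at 25).
    prefix: str = commands.Param(autocomplete=auto_complete_type_guild_prefixes),
):
    await inter.response.send_message(f"Removing prefix {prefix!r}")
```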
Post the reaction roles message | async def reaction_roles_post(inter: MessageInteraction, description, roles):
view = disnake.ui.View(timeout=None)
for role in roles:
view.add_item(disnake.ui.Button(label=role.name, custom_id=role.id))
messages = await send_message(msg=description, channel=inter.channel, view=view)
for message ... | [
"async def handle_role_reaction_press(interaction: disnake.MessageInteraction):\n if interaction.message not in await ReactionRoleMessage.get_all():\n return\n\n role_id = int(interaction.component.custom_id)\n member: disnake.Member = interaction.author\n user = await User.get(member.id)\n ro... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handles role reaction button presses. A 'on_button_click' listener. | async def handle_role_reaction_press(interaction: disnake.MessageInteraction):
if interaction.message not in await ReactionRoleMessage.get_all():
return
role_id = int(interaction.component.custom_id)
member: disnake.Member = interaction.author
user = await User.get(member.id)
role = member.... | [
"async def button_logic(self,\n *args) -> None:\n try:\n btn_ctx: ComponentContext = await button.wait_for_component(\n client=self.bot,\n components=args[0]\n )\n except:\n pass\n else:\n if btn... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plateau in refractive index below 330nm for Glass, edge of data artifact | def refractive_index(self):
wd = np.arange(80,820,10)
nd = self.boundary.imat.refractive_index(wd)
plt.plot(wd, nd)
return wd, nd | [
"def extract_grasspollen(idx_grasspollen, data):",
"def get_index_of_surface_gate(data, setup={}):\n alts = data['alt']\n return np.argmin(np.abs(alts), 1)",
"def alpha41(df):\n return (((df.high * df.low)**0.5) - df.vwap)",
"def bndy_plasma(self):\n self.ne[0], self.ne[-1] = 1e11, 1e11\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read h5 format data file | def read_data(path):
with h5py.File(path, 'r') as hf:
data = np.array(hf.get('data'))
return data | [
"def read_hdf5(path_to_file):\n\n print(\"\\nReading HDF5 file: \", path_to_file)\n file = h5py.File(path_to_file, 'r')\n\n # List the groups\n groups = list(file.keys())\n print(\"Groups available: \", groups)\n\n # Read Zemax Metadata\n zemax_metadata = {}\n print(\"\\nZemax Metadata:\")\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make input data in h5 file format. Depending on the 'is_train' flag value, the save path changes. | def make_data(sess, data, data_dir):
if FLAGS.is_train:
#savepath = os.path.join(os.getcwd(), os.path.join('checkpoint',data_dir,'train.h5'))
savepath = os.path.join('.', os.path.join('checkpoint',data_dir,'train.h5'))
if not os.path.exists(os.path.join('.',os.path.join('checkpoint',data_dir))):
o... | [
"def make_h5data(self, input, label, save_dir):\r\n \r\n savepath = os.path.join(os.getcwd(), save_dir)\r\n \r\n with h5py.File(savepath, 'w') as hf:\r\n hf.create_dataset('input', data=input)\r\n hf.create_dataset('label', data=label)",
"def save_to_hd5(out_file,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
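The two rows above pair naturally: `make_data` writes training arrays to a `train.h5` checkpoint file and `read_data` loads the `data` dataset back. A minimal round-trip sketch with h5py; the array shapes and the `label` key are illustrative, not taken from the truncated sample.

```python
import h5py
import numpy as np

def write_data(path, data, label):
    # Store paired arrays under fixed keys, as make_data does for train.h5.
    with h5py.File(path, "w") as hf:
        hf.create_dataset("data", data=data)
        hf.create_dataset("label", data=label)

write_data("train.h5", np.zeros((10, 33, 33, 1)), np.zeros((10, 21, 21, 1)))
assert read_data("train.h5").shape == (10, 33, 33, 1)
```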
Read an image using its path. The default is grayscale, and the image is read in YCbCr format, as the paper describes. | def imread(path, is_grayscale=True):
if is_grayscale:
return scipy.misc.imread(path, flatten=True, mode='YCbCr').astype(np.float)
else:
return scipy.misc.imread(path, mode='YCbCr').astype(np.float) | [
"def image_read(path, is_grayscale=True):\n if is_grayscale:\n return imread(path, flatten=True, mode='YCbCr').astype(np.float)\n else:\n return imread(path, mode='YCbCr').astype(np.float)",
"def imread(path, is_grayscale=True):\n if is_grayscale:\n #flatten=True 以灰度图的形式读取 \n return scipy.misc.imre... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
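Note that `scipy.misc.imread` was removed from SciPy (1.2+) and `np.float` from NumPy (1.24+), so the sample above no longer runs on current versions. One possible Pillow-based stand-in, where taking the luma (Y) channel approximates `flatten=True`:

```python
import numpy as np
from PIL import Image

def imread(path, is_grayscale=True):
    # Convert to YCbCr as in the original; Y (luma) stands in for flatten=True.
    arr = np.asarray(Image.open(path).convert("YCbCr"), dtype=np.float64)
    return arr[..., 0] if is_grayscale else arr
```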
Dynamically generate options for resource group form field based on the user's selection for Environment. This method requires the user to set the resource_group parameter as dependent on environment. | def generate_options_for_resource_group(control_value=None, **kwargs):
if control_value is None:
return []
env = Environment.objects.get(id=control_value)
if CB_VERSION_93_PLUS:
# Get the Resource Groups as defined on the Environment. The Resource Group is a
# CustomField that is o... | [
"def generate_options_for_resource_group(field, control_value=None, **kwargs):\n options = []\n \n if not control_value:\n options.insert(0, ('', '--- First, Select an Region ---'))\n return options\n\n # get env object by id\n env = Environment.objects.get(id=control_value)\n \n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the initialized output formatter based upon the configuration. | def initialize_formatter(config):
if config.json: # pylint: disable=R1705
return formatters.JsonFormatter()
elif config.severity: # pylint: disable=R1705
return formatters.SeverityFormatter(config.colored)
return formatters.Formatter(config.colored) | [
"def getFormatter(self):\n\n return self.__formatter;",
"def sys_formatter(self) -> logging.Formatter:\n if self._sys_formatter is None:\n self._sys_formatter = logging.Formatter(self.log_format)\n return self._sys_formatter",
"def configure_formatter(self, config):\n if '... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the sorted list of problems. | def sort_problems(problems):
# Note: sort() doesn't return the sorted list; rather, it sorts the list
# in place
problems.sort(
key=lambda problem: (
problem.filename,
problem.linenumber,
problem.rule.id
)
)
return problems | [
"def get_problems(self):\n\n with self.__orm.session_scope() as session:\n try:\n problems = session.query(Problem.name).all()\n return [problem[0] for problem in problems]\n except NoResultFound:\n return []",
"def errors_sorted(self) -> L... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the name of the class attribute to be used for classification. | def get_class_attribute(self):
return self.class_attr | [
"def get_attribute_class(self, attr_name):\n return self.attrs.get_attribute_class(attr_name)",
"def class_name(self) -> str:\n return pulumi.get(self, \"class_name\")",
"def attribute_name(self) -> str:\n return self._attribute_name",
"def get_attribute_class(self):\n return self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the value used in the dataset to indicate the positive classification choice. | def get_positive_class_val(self, tag):
# FIXME this dependence between tags and metadata is bad; don't know how to fix it right now
if tag == 'numerical-binsensitive':
return 1
else:
return self.positive_class_val | [
"def predict_class(self):\n output_val = self.network[len(self.network) -1][0].get_value\n if output_val == 0:\n return 0.0\n else:\n return 1.0\n\n # for 3-class dataset \n # ",
"def class_prediction(self,feat_ind,cl):\n if self.class_counts[cl] == ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list of the names of any sensitive / protected attribute(s) that will be used for a fairness analysis and should not be used to train the model. | def get_sensitive_attributes(self):
return self.sensitive_attrs | [
"def __listAttr(self):\n attr = dir(self) # already sorted\n filter = []\n for name in attr:\n if name[:2] == '__': pass\n elif name[:10] == '_HelpDoc__': pass # used to mask private attr\n elif name in self.__exclude: pass\n else: filter.append(name)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Same as get_sensitive_attributes, but also includes the joint sensitive attribute if there is more than one sensitive attribute. | def get_sensitive_attributes_with_joint(self):
if len(self.get_sensitive_attributes()) > 1:
return self.get_sensitive_attributes() + ['-'.join(self.get_sensitive_attributes())]
return self.get_sensitive_attributes() | [
"def get_sensitive_attributes(self):\n return self.sensitive_attrs",
"def get_sensitive_terms(self):\n sensitive_terms_dict = {}\n for attribute in self.__non_redundant_entity_attributes:\n for record_id, sensitive_terms in self.__df[attribute].dropna().iteritems():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list in the same order as the sensitive attributes list above of the privileged class name (exactly as it appears in the data) of the associated sensitive attribute. | def get_privileged_class_names(self, tag):
# FIXME this dependence between tags and privileged class names is bad; don't know how to
# fix it right now
if tag == 'numerical-binsensitive':
return [1 for x in self.get_sensitive_attributes()]
else:
return self.privil... | [
"def getDataAttributes(self):\n asRet = [];\n asAttrs = dir(self);\n for sAttr in asAttrs:\n if sAttr[0] == '_' or sAttr[0] == 'k':\n continue;\n if sAttr in self.kasInternalAttributes:\n continue;\n oValue = getattr(self, sAttr);... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Same as get_privileged_class_names, but also includes the joint sensitive attribute if there is more than one sensitive attribute. | def get_privileged_class_names_with_joint(self, tag):
priv_class_names = self.get_privileged_class_names(tag)
if len(priv_class_names) > 1:
return priv_class_names + ['-'.join(str(v) for v in priv_class_names)]
return priv_class_names | [
"def get_privileged_class_names(self, tag):\n # FIXME this dependence between tags and privileged class names is bad; don't know how to\n # fix it right now\n if tag == 'numerical-binsensitive':\n return [1 for x in self.get_sensitive_attributes()]\n else:\n return ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A passing grade in the Ricci data is defined as any grade above a 70 in the combined oral and written score. (See Miao 2010.) | def passing_grade(row):
if row['Combine'] >= 70.0:
return 1
else:
return 0 | [
"def grade(score) :\n if score >= 90 :\n return \"A\"\n elif score >= 80 :\n return \"B\"\n elif score >= 70 :\n return \"C\"\n elif score >= 60 :\n return \"D\"\n else :\n return \"F\"",
"def test_adjusted_grade(self):\n self.assertEqual(hw4.adjusted_grade... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
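Typical usage of `passing_grade` over the Ricci data, assuming a pandas DataFrame with a 'Combine' column:

```python
import pandas as pd

df = pd.DataFrame({"Combine": [69.9, 70.0, 84.5]})
df["Pass"] = df.apply(passing_grade, axis=1)  # -> [0, 1, 1]
```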
Given a zero-indexed position `pos` on the contig, what is the relative zero-indexed nucleotide position within this annotation's coding sequence? | def nt_pos(self, pos):
seq_consumed = 0
if self.coding_blocks is None or len(self.coding_blocks) == 0:
return int(self.end - pos - 1 if self.rev_strand else pos - self.start)
for block in (reversed(self.coding_blocks) if self.rev_strand else self.coding_blocks):
if pos >=... | [
"def compute_offset_pos(seq, pos):\n \n nogap_seq = transform_seq(seq)\n assert(pos >= 0 and pos < len(nogap_seq))\n\n maps = dict()\n cnt = 0\n maxi = 0\n for i in range(len(seq)):\n if seq[i] not in msa_characters:\n maps[i-cnt] = i\n maxi = i\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given an iterable `alts` of nucleotides to be substituted at contig position `pos`, return a list of the corresponding amino acid changes that would occur. `transl_table` is the NCBI genetic code to use when translating the coding sequence. | def aa_alts(self, alts, pos, transl_table=11):
aa_alts = []
nt_pos = self.nt_pos(pos)
aa_pos = self.aa_pos(pos)
for i, allele in enumerate(alts):
mut_seq = str(self.seq_record.seq)
if self.rev_strand:
allele = str(Seq(allele, generic_dna).reverse_c... | [
"def get_AA_subs(s):\r\n test_seq = s.toseq()[70:217].translate() #Translate the mutated region\r\n substitutions = []\r\n \r\n for i in range(len(test_seq)):\r\n if test_seq[i] != align_temp[i]:\r\n substitutions.append(''.join([str(align_temp[i]),\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load all genes in the BED file as SeqRecords, fetching their sequence data from the reference. ref_contigs is a dictionary of ref contig sequences created with BioPython's SeqIO.to_dict(). | def get_bed_annots(bed_path, ref_contigs, quiet=False):
annots = defaultdict(list)
with open(bed_path) as f:
for line in f:
line = line.strip().split("\t")
# Note: BED coordinates are 0-indexed, right-open.
chrom, start, end, name, strand = line[0], int(line[1]), int(... | [
"def loadReferenceContigs(referencePath, alignmentSet, windows=None):\n # FIXME we should get rid of this entirely, but I think it requires\n # fixing the inconsistency in how contigs are referenced here versus in\n # pbcore.io\n\n # Read contigs from FASTA file (or XML dataset)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load all genes in the Sequin table as SeqRecords, fetching their sequence data from the reference. ref_contigs is a dictionary of ref contig sequences created with BioPython's SeqIO.to_dict(). | def get_sequin_annots(sequin_path, ref_contigs, quiet=False):
annots = defaultdict(list)
# We need a dummy class to hold the current state while parsing
# (otherwise the below private functions can't modify it; there's no "nonlocal" in python 2.x)
class _:
in_contig = None
in_featur... | [
"def loadReferenceContigs(referencePath, alignmentSet, windows=None):\n # FIXME we should get rid of this entirely, but I think it requires\n # fixing the inconsistency in how contigs are referenced here versus in\n # pbcore.io\n\n # Read contigs from FASTA file (or XML dataset)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create event files containing millisecond-level data of a running train at one-minute intervals | def runEventCreation():
config = CONFIG['steps']['EventCreation']
ci = config['inputs']
co = config['outputs']
min_window_size = ci['min_window_size']
change_speed_by = ci['change_speed_by']
speed_ratio = ci['train_zero_speed_ratio']
datetime_limit = ci['datetime_limit']
csv_na... | [
"def main():\r\n # handle arguments\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument('-t', '--time', help = 'start time', default = \"2018-12-26 18:11:08.509654\")\r\n parser.add_argument('-bd', '--min_duration', type = int, help = 'minimum duration', default = 25)\r\n parser.add_a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Flattens each 2D detector layer into a 1D array | def flatten_layers(data):
return data.reshape((data.shape[0], data.shape[1], -1)) | [
"def _extract_array(tiffs: list[np.ndarray], idx: int, shape: tuple[int, ...], dtype: type | np.dtype) -> np.ndarray:\n feature_arrays = (np.atleast_3d(img)[..., idx] for img in tiffs)\n return np.asarray(list(feature_arrays), dtype=dtype).reshape(*shape, 1)",
"def _flatten_data(self, features, _):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
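A quick shape check for `flatten_layers`, assuming input of shape `(events, layers, height, width)`:

```python
import numpy as np

data = np.zeros((32, 4, 16, 16))   # 32 events, 4 detector layers of 16x16
flat = flatten_layers(data)
assert flat.shape == (32, 4, 256)  # each 2D layer flattened to 256 values
```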
Allow dumping the packed files to a folder. Returns a zipfile.write() method. | def get_zip_writer(zipfile: ZipFile):
dump_folder = CONF['packfile_dump', '']
if not dump_folder:
return zipfile.write
dump_folder = os.path.abspath(dump_folder)
# Delete files in the folder, but don't delete the folder itself.
try:
dump_files = os.listdir(dump_folder)
except F... | [
"def zip_files(folder_name):\n screenzip = random_word(7) + '.7z'\n\n with py7zr.SevenZipFile(screenzip, 'w', password='%w4A8gd-v') as archive:\n archive.writeall(folder_name, 'base')\n return screenzip",
"def _zip_files(self):\n\n zip_file = Path(self.build_directory.parent).joinpath(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a new game_sounds_manifest.txt file. This includes all the current scripts defined, plus any custom ones. Excludes is a list of scripts to remove from the listing; this allows overriding the sounds without VPK overrides. | def gen_sound_manifest(additional, excludes):
if not additional:
return # Don't pack, there aren't any new sounds..
orig_manifest = os.path.join(
'..',
SOUND_MAN_FOLDER.get(CONF['game_id', ''], 'portal2'),
'scripts',
'game_sounds_manifest.txt',
)
try:
w... | [
"def export_sounds(names, path, base_label='Sound_'):\n\tfor filename, output in dump_sounds(names, base_label):\n\t\twith open(os.path.join(path, filename), 'w') as out:\n\t\t\tout.write(output)",
"def generate_music_script(data: Property, pack_list):\n # We also pack the filenames used for the tracks - that ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a new particle system manifest file. This includes all the current ones defined, plus any custom ones. | def gen_part_manifest(additional):
if not additional:
return # Don't pack, there aren't any new particles..
orig_manifest = os.path.join(
'..',
GAME_FOLDER.get(CONF['game_id', ''], 'portal2'),
'particles',
'particles_manifest.txt',
)
try:
with open(orig... | [
"def create_puppet_manifest(self):\n\t\tfilename = '/etc/puppet/manifests/cpanel.d/postunsuspendacct/%s.pp' % self.argv.get('user')\n\t\tfileobj = open(filename, 'w')\t\t\t\t\n\t\tfileobj.write(self.title)\n\t\tfileobj.write(self.puppet_resource)\n\t\tfileobj.close()\n\t\tprint \"[%s] Saved puppet manifest '%s'\" %... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a soundscript file for music. | def generate_music_script(data: Property, pack_list):
# We also pack the filenames used for the tracks - that way funnel etc
# only get packed when needed. Stock sounds are in VPKS or in aperturetag/,
# we don't check there.
# The voice attrs used in the map - we can skip tracks
voice_attr = CONF['V... | [
"def _generate_audio_file(self):\n\n polly = boto3.client('polly', \n aws_access_key_id=self.aws_access_key_id, \n aws_secret_access_key=self.aws_secret_access_key,\n region_name= self.region_name)\n \n spoken_t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write either a single sound, or multiple rndsound. snd_prefix is the prefix for each filename (*, #, @, etc.). | def write_sound(file, snds: Property, pack_list, snd_prefix='*'):
if snds.has_children():
file.write('"rndwave"\n\t{\n')
for snd in snds:
file.write(
'\t"wave" "{sndchar}{file}"\n'.format(
file=snd.value.lstrip(SOUND_CHARS),
sndchar... | [
"def write(self, *args):\n return _yarp.Sound_write(self, *args)",
"def create_random_wav(file_name):\n sample_rate = 44100.0\n sound_length = 50\n duration = 3000 #MS\n sounds_arr = create_sounds_arr(sample_rate, duration, sound_length)\n wav_file = create_wav_file(file_name)\n save_wav(sounds_arr, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate the names of files to inject, if they exist. | def inject_files():
for filename, arcname in INJECT_FILES.items():
filename = os.path.join('bee2', 'inject', filename)
if os.path.exists(filename):
yield filename, arcname
# Additionally add files set in the config.
for prop in CONF.find_children('InjectFiles'):
filename... | [
"def find_output_files():",
"def collect_filenames(s):\n filenames = []\n if os.path.isdir(s):\n for root, dirs, files in os.walk(s):\n for fname in files:\n if not fname.lower().endswith(('.txt', '.yaml')):\n continue\n filenames.append(os.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find candidate screenshots to overwrite. | def find_screenshots():
# Inside SCREENSHOT_DIR, there should be 1 folder with a
# random name which contains the user's puzzles. Just
# attempt to modify a screenshot in each of the directories
# in the folder.
for folder in os.listdir(SCREENSHOT_DIR):
full_path = os.path.join(SCREENSHOT_DI... | [
"def update_screenshot_num(self):\n for filename in glob.glob(r'Cropped_Screenshots/*.png'): # assuming png\n with open(filename, \"rb\") as f:\n file_name = f.name\n file_name = file_name.replace(\".png\", \"\")\n file_str_arr = file_name.split(\"_\")... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the UofT Graduate GPA for a given grade. | def grade_to_gpa(grade):
letter_grade = ""
gpa = 0.0
if type(grade) is str:
accepted_values = ["A+", "A", "A-", "B+", "B", "B-", "FZ"]
# check that the grade is one of the accepted values
if grade in accepted_values:
# assign grade to letter_grade
letter_grade... | [
"def grade_to_gpa(grade):\n\n letter_grade = \"\"\n gpa = 0.0\n\n if type(grade) is str:\n if grade == \"A+\" or grade == \"A\" or grade == \"A-\" or grade == \"B+\" or grade == \"B\" or grade == \"B-\" or grade == \"FZ\":\n letter_grade = grade\n else:\n raise ValueErro... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
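The document above is cut off before the actual grade-to-GPA mapping. A minimal sketch of the likely remainder, assuming the standard UofT graduate scale (A+/A = 4.0, A- = 3.7, B+ = 3.3, B = 3.0, B- = 2.7, FZ = 0.0); the numbers are an assumption, not read from the sample:

```python
def grade_to_gpa(grade):
    # Assumed mapping - verify against the official UofT graduate scale.
    scale = {"A+": 4.0, "A": 4.0, "A-": 3.7,
             "B+": 3.3, "B": 3.0, "B-": 2.7, "FZ": 0.0}
    if not isinstance(grade, str):
        raise TypeError("grade must be a letter grade string")
    if grade not in scale:
        raise ValueError("Invalid letter grade")
    return scale[grade]
```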
Connect a datacenter to this endpoint. An endpoint can only be connected to a single datacenter. | def connect_datacenter(self, dc):
self.compute.dc = dc
for ep in self.openstack_endpoints.values():
ep.manage = self.manage
logging.info \
("Connected DC(%s) to API endpoint %s(%s:%d)" % (dc.label, self.__class__.__name__, self.ip, self.port)) | [
"def connect_dc_network(self, dc_network):\n self.manage.net = dc_network\n self.compute.nets[self.manage.floating_network.id] = self.manage.floating_network\n logging.info(\"Connected DCNetwork to API endpoint %s(%s:%d)\" % (\n self.__class__.__name__, self.ip, self.port))",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Connect the datacenter network to the endpoint. | def connect_dc_network(self, dc_network):
self.manage.net = dc_network
self.compute.nets[self.manage.floating_network.id] = self.manage.floating_network
logging.info("Connected DCNetwork to API endpoint %s(%s:%d)" % (
self.__class__.__name__, self.ip, self.port)) | [
"def connect_datacenter(self, dc):\n self.compute.dc = dc\n for ep in self.openstack_endpoints.values():\n ep.manage = self.manage\n logging.info \\\n (\"Connected DC(%s) to API endpoint %s(%s:%d)\" % (dc.label, self.__class__.__name__, self.ip, self.port))",
"def connec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Start all connected OpenStack endpoints that are connected to this API endpoint. | def start(self, wait_for_port=False):
for c in self.openstack_endpoints.values():
c.compute = self.compute
c.manage = self.manage
c.server_thread = threading.Thread(target=c._start_flask, args=())
c.server_thread.daemon = True
c.server_thread.name = c.... | [
"async def start_all(self):\n for transport in self.transports:\n await transport.start()",
"def _register_endpoints(self):\n self.api_server.start_api()\n # Register controller endpoints as /api/kytos/core/...\n self.api_server.register_core_endpoint('config/',\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Stop all connected OpenStack endpoints that are connected to this API endpoint. | def stop(self):
for c in self.openstack_endpoints.values():
c.stop()
#for c in self.openstack_endpoints.values():
# if c.server_thread:
# print("Waiting for WSGIServers to be stopped ...")
# c.server_thread.join() | [
"def shutdown_all_endpoints(self):\n logger.debug('Removing all endpoints')\n endpoints = []\n with self._endpoint_lock:\n endpoints = list(self._endpoints)\n # be sure we're not holding the lock when shutdown calls\n # _remove_endpoint.\n for e in endpoints:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Download and generate Alexia top 1 million url lists | def get_alexia_urls():
#download top 1 million site urls
zip_top_urls = requests.get(ALEXIA_URL)
response_buf = StringIO.StringIO(zip_top_urls.content)
# unzip contents
zfile = zipfile.ZipFile(response_buf)
buf = StringIO.StringIO(zfile.read('top-1m.csv'))
for line in buf.readlines():
(rank,domain) = line.spl... | [
"def __init__(self, maxurls=1000):\n self.maxurls = maxurls\n self.seed_url = \"http://s3.amazonaws.com/alexa-static/top-1m.csv.zip\"",
"def fetch_50(url):\n\n results = requests.get(url,headers = headers).json()\n return results",
"def scrape(n=50, sub_dir=\"topsites\", local=\"global\", su... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Format new sequence so it matches the type of the original sequence. | def format_seq(seq, new_seq):
if type(seq) == str:
return "".join(new_seq)
elif type(seq) == tuple:
return tuple(new_seq)
else:
return new_seq | [
"def reformat(self, newformat):\n # check whether the column is defined\n if self._defined:\n # get the appropriate null-format\n nullformat = self._get_nullformat(newformat)\n # set the new formats\n self._format = [newformat, nullformat]\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return sequence with every other item removed. | def remove_every_other(seq):
# Make a copy of the original sequence and step by 2
new_seq = seq[::2]
return new_seq | [
"def remove_every_other_item(seq):\n seq_copy = seq [0::2]\n return seq_copy",
"def every_other_removed(seq):\n return seq[::2]",
"def remove_every_other(seq):\n\n # print(seq[::2])\n return seq[::2]",
"def every_other_removed(seq):\n return(seq[::2])",
"def remove_four_and_every_other(seq... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return sequence with the first four and last four items removed, and then every other item of the remaining sequence. | def remove_four_and_every_other(seq):
# Make a copy of the original sequence, but omit the first four and last four elements
new_seq = seq[4:-4]
# Make a copy of new sequence and step by 2
new_seq = new_seq[::2]
return new_seq | [
"def remove_4s_every_other_in_between(seq):\n seq_copy = seq [4:-4:2]\n return seq_copy",
"def remove_firstLast4_every_other(seq):\n first_last_removed = seq[4:-4]\n a_new_seq3 = first_last_removed[1::2]\n print(a_new_seq3)\n return a_new_seq3",
"def remove_every_other_item(seq):\n seq_copy = s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a sequence with the last third, then first third, then middle third in the new order. | def last_first_middle_third(seq):
# Using the length of the sequence, figure out roughly what one third should be
one_third = len(seq) // 3
new_seq = list(seq[-one_third:])
new_seq.extend(seq[:-one_third])
return format_seq(seq, new_seq) | [
"def third_reorder(seq):\n third = len(seq)//3\n return seq[third:-third]+seq[-third:]+seq[:third]",
"def middle_last_first_third(seq):\n a_new_seq5 = seq[int(len(seq) / 3):int((len(seq) / 3) * 2)] + seq[-int(len(seq) / 3):] + seq[:int(len(seq) / 3)]\n print(a_new_seq5)\n return a_new_seq5",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
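A worked example of the three slicing helpers above on a 12-item list:

```python
seq = list(range(12))

remove_every_other(seq)           # [0, 2, 4, 6, 8, 10]
remove_four_and_every_other(seq)  # seq[4:-4] -> [4, 5, 6, 7], then [::2] -> [4, 6]
last_first_middle_third(seq)      # [8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 6, 7]
```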
Given the zenith PWV (reported by APEX) and altitude of source, returns the real amount of water between the telescope and space. Basically returns pwv/cos(zenith_angle) | def get_real_pwv(pwv, altitude):
zenith_angle = 90-altitude
airmass = 1/np.cos(zenith_angle*np.pi/180)
return pwv*airmass | [
"def VaporPressure(dwpt):\n\n return 611.2*exp(17.67*dwpt/(243.5+dwpt))",
"def pressure_from_altitude(alt): \n return 1013.25 * (1-0.0065*alt/288.15)**(5.255)",
"def seeing_at_pointing(zenith_seeing, altitude):\n X = altitude_to_airmass(altitude)\n return zenith_seeing * (X**(3. / 5.))",
"def s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
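A worked example: a source at 30° altitude has a zenith angle of 60°, so the airmass is 1/cos(60°) = 2 and the line-of-sight water column doubles.

```python
get_real_pwv(1.0, 30.0)  # 1.0 mm zenith PWV -> ~2.0 mm along the line of sight
get_real_pwv(1.0, 90.0)  # at zenith (altitude 90) the airmass is 1: unchanged
```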
Insert a Follower into the database | def fillFollowerInDB(self):
sqlInsertFollowers = "INSERT INTO follower (screen_name) VALUES (%s)"
mycursor.execute(sqlInsertFollowers, (self.screen_name,))
mydb.commit() | [
"def insert_follower(conn, follower):\n sql = ''' INSERT INTO followers(follower_id,messaged,shared_likes,shared_retweets)\n VALUES(?,?,?,?) '''\n cur = conn.cursor()\n try:\n cur.execute(sql, follower)\n except Error as e:\n print(e)\n \n #return cur.lastrowid unsure if... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the total loss on a single tower running the CIFAR model. | def tower_loss(scope):
# Get images and flows for Flownet.
img1, img2, flo = flownet_input.inputs(False, FLAGS.data_dir, FLAGS.batch_size)
# Build a Graph that computes predictions from the inference model.
logits = flowNet.inference(img1, img2, FLAGS.batch_size)
# Add to the Graph the Ops for loss calculation... | [
"def calc_loss_total(self):\n loss = self.calc_loss()\n return tf.reduce_sum(loss)",
"def tower_loss(scope, images, labels):\n \n # build a inference graph\n logits = cifar10.inference(images)\n \n # build the portion of the graph calculating the losses\n _ = cifar10.loss(logits, labels)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Random Subdomain attack packet builder | def randomSubBuilder(dom: string, src_ip: string, dst_ip: string, src_port: int, t: float, seed: float):
id_IP = int(RandShort()) #id for IP layer
id_DNS = int(RandShort()) #id for DNS layer
sub = randomSub(seed) #Random subdomain
q_name = sub + '.' + dom #Complete domain request
ans = Ether(src= '1... | [
"def get_random_domain(self):\n domain = random.choice(self.get_data('all_domain.txt', DOMAIN))\n if domain in BLACK_DOMAIN:\n self.get_random_domain()\n else:\n return domain",
"def generate_domainname():\n domainname = ''.join(generate_string(10, valid_domain_name_c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
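The builder above calls a `randomSub(seed)` helper that is not shown in the sample. A hypothetical deterministic version (the name is from the visible call; the label length is an assumption):

```python
import random
import string

def randomSub(seed: float, length: int = 12) -> str:
    # Hypothetical helper: derive a pseudo-random lowercase label from the
    # seed so repeated runs with the same seed build identical query names.
    rng = random.Random(seed)
    return "".join(rng.choice(string.ascii_lowercase) for _ in range(length))
```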
Gives an array of arguments to create packets | def argsBuilder(target_dom:string, server_ip: string, domain_ip:string, server_dom_ip:string, ti:float, d:int, packets:int, n_bot:int):
tf = ti + d #End time of the attack
new_packets_args = []
if n_bot == 1: #If dos attack
ips = randomIP(n_bot, Time.time(), False)
else: #If ddos attack
... | [
"def give_arg_space(self, template):\n answer = []\n for arg in self.listed_args:\n data = []\n for place in template:\n data += [arg[place]]\n answer += [data]\n return answer",
"def create_argument_list(self):\n pyfile = os.path.join(os... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Start the stopwatch if it is not running; stop it if it is running. | def start_stop( self ):
if self.stop_event.is_set():
# Stopwatch was stopped, so start it.
self.stop_event.clear()
self.timer_thread = Thread( target=self.run_stopwatch, args=( time(), ) )
self.timer_thread.start()
else:
# Stopwatch was ... | [
"def start(self):\n\n if not self.running:\n self.running = True\n self.start_stopwatch()",
"def start( self ):\n self.__startTime = time.time()\n if self.__showStartAndStop:\n LOGGER.debug( \"StopWatch [%s] has started at %s\" % ( self.__watchName, datetime.datet... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Runs a stopwatch loop showing the time elapsed at regular intervals. | def run_stopwatch( self, start_time ):
self.start_time = start_time
while not self.stop_event.is_set():
sleep( 0.01 ) # Accurate to about 1/100th of a second.
self.gui.time_label.setText( "{:.2f}".format( time() - self.start_time ) ) | [
"def stopwatch_loop(self, s):\r\n\t\thours, seconds = divmod(s, 3600)\r\n\t\tminutes, seconds = divmod(seconds, 60)\r\n\t\tx = 0\r\n\t\tself.task_time = s\r\n\t\tif self.mode == RUNNING:\r\n\t\t\tself.update_crash_file()\r\n\t\t\tself._redraw_clock_label(hours, minutes, seconds)\r\n\t\t\tx = 1\r\n\t\t\t\r\n\t\telif... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return True if |val| is an instance of list, False otherwise | def _is_list(val):
return isinstance(val, list) | [
"def is_list(value):\n return isinstance(value, list)",
"def isList(self):\n return _yarp.Value_isList(self)",
"def isList(self):\r\n return self._wrap(type(self.obj) is list)",
"def is_list(obj):\n return type(obj) is list",
"def is_tuple_or_list(val):\n return isinstance(val, (list,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return True if |val| is an instance of dict, False otherwise | def _is_dict(val):
return isinstance(val, dict) | [
"def _is_dict(v):\n return isinstance(v, dict)",
"def is_dict(value):\n return isinstance(value, dict)",
"def isdictinstance(obj):\n return isinstance(obj, dict) or isinstance(obj, DotDict)",
"def is_dict(obj):\n return type(obj) == type({})",
"def is_dict(o):\n return isinstance(o, dict)",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return True if |wildcard| string matches |s| string. A valid wildcard | def _is_wildcard_match(s, wildcard):
wildcard = wildcard.strip()
glob_pat = re.compile(r'\*(:(?P<type>\w+))?$')
m = glob_pat.match(wildcard)
if m:
if m.group('type'):
type_to_meth = globals()['__builtins__']
type_to_meth = {k:v for k,v in type_to_meth.items()
... | [
"def __reWildcard(self, regexp, string):\n regexp = re.sub(\"\\*+\", \"*\", regexp)\n match = True\n if regexp.count(\"*\") == 0:\n if regexp == string:\n return True\n else:\n return False\n blocks = regexp.split(\"*\")\n start ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return True if regex pattern string |pat| matches string |s|. A valid | def _is_regex_match(s, pat):
pat = pat.rstrip()
m = re.search(Settings._REPAT, pat)
if m:
flags_combined = 0
if m.group('flag'):
char_to_flag = {
'A':re.A, 'I':re.I, 'L':re.L, 'M':re.M, 'S':re.S, 'X':re.X}
for flag in list(m.group('flag')):
... | [
"def somePatternMatches(patterns, s):\n for each in patterns:\n if re.match(each, s):\n return True\n return False",
"def _re_match(self, pattern, string):\n RV = False\n\n match_obj = re.match(pattern, string, re.U)\n\n if match_obj:\n RV = True\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return True if |v| is in |valid_v|. |v| should be a primitive of either int, float, str, or bool. |valid_v| should be a list of any possible legal primitive, wildcard, or regex values. |valid_v| can also be a single primitive value, which will implicitly be converted to a list containing one element. Return False other... | def _is_in_prim(v, valid_v):
if not isinstance(valid_v, list):
valid_v = [valid_v]
for pat in valid_v:
if isinstance(pat, str):
if '*' in pat:
if Settings._is_wildcard_match(v, pat):
return True
elif re.search(Settings._REPAT, pat)... | [
"def has(self, v):\n return v in self.values",
"def _primitive_validity_check(v, valid_v):\n\n if not Settings._is_in_prim(v, valid_v):\n raise InvalidSettingError()",
"def _is_in_list(l, valid_l):\n\n for elem in l:\n if Settings._is_primitive(elem):\n if not Setting... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return True if every element in list |sublist| is in one of the lists contained in |lists|, False otherwise. Legal elements in |sublist| or the lists in |lists| are any primitive (int, float, str, bool), list, or dict. If an illegal element exists in |sublist|, an InvalidSettingError is raised | def _is_sublist_in_one_of_lists(sublist, lists):
type_to_one_of = Settings._get_type_to_one_of()
for vl in lists:
next_vl = False
for e in sublist:
if Settings._is_primitive(e):
t = 'primitive'
elif Settings._is_list(e):
vl = [l for l... | [
"def sublist_in(lst, sublst):\n for i in sublst:\n if i not in lst:\n return False\n return True",
"def _is_in_list(l, valid_l):\n\n for elem in l:\n if Settings._is_primitive(elem):\n if not Settings._is_in_prim(elem, valid_l):\n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return True if dict |d| is in one of the dicts in |dicts|, False otherwise. |dicts| is obviously just a list of dictionaries. Legal elements in the dictionaries are the typical primitives (int, float, bool, str), lists, and dicts. | def _is_dict_in_one_of_dicts(d, dicts):
for vd in dicts:
if Settings._is_in_dict(d, vd):
return True
return False | [
"def _is_in_dict(d, valid_d):\n\n for k, v in d.items():\n if k not in valid_d:\n return False\n else:\n if Settings._is_primitive(v):\n if not Settings._is_in_prim(v, valid_d[k]):\n return False\n elif Settings._is_list(v):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return True if all elements in list |l| is in one of the lists contained in |valid_l|, False otherwise. Legal elements in the lists are the typical primitives (int, float, bool, str), lists, and dicts. | def _is_in_list(l, valid_l):
for elem in l:
if Settings._is_primitive(elem):
if not Settings._is_in_prim(elem, valid_l):
return False
elif Settings._is_list(elem):
valid_lists = [l for l in valid_l if isinstance(l, list)]
if not Settings._is_su... | [
"def _list_validity_check(l, valid_l):\n\n if not Settings._is_in_list(l, valid_l):\n raise InvalidSettingError()",
"def valid(l):\n return all([x is not None for x in l])",
"def _list_check_subset(valid_super_list):\n valid_superset = set(valid_super_list)\n\n def validate(value):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return True if dict |d| has all keys in dict |valid_d|. False otherwise. | def _has_all_keys_from(d, valid_d):
for k, v in valid_d.items():
if k not in d:
return False
return True | [
"def _is_in_dict(d, valid_d):\n\n for k, v in d.items():\n if k not in valid_d:\n return False\n else:\n if Settings._is_primitive(v):\n if not Settings._is_in_prim(v, valid_d[k]):\n return False\n elif Settings._is_list(v):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return True if all dict |d| keys are in dict |valid_d|, values in |d| are legal values with respect to the valid values defined in |valid_d|, and all |valid_d| keys are in |d|. Values in |d| are determined legal based on Settings._is_in_prim(), Settings._is_list(), or recursively Settings._is_in_dict(). False otherwise... | def _is_in_dict(d, valid_d):
for k, v in d.items():
if k not in valid_d:
return False
else:
if Settings._is_primitive(v):
if not Settings._is_in_prim(v, valid_d[k]):
return False
elif Settings._is_list(v):
if no... | [
"def _dict_validity_check(d, valid_d):\n\n if not Settings._is_in_dict(d, valid_d):\n raise InvalidSettingError()",
"def _has_all_keys_from(d, valid_d):\n\n for k, v in valid_d.items():\n if k not in d:\n return False\n return True",
"def valid_entries(d):\n\n if d i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
raise InvalidSettingError if primitive (int, float, bool, str) value |v| is not in list |valid_v| | def _primitive_validity_check(v, valid_v):
if not Settings._is_in_prim(v, valid_v):
raise InvalidSettingError() | [
"def _list_validity_check(l, valid_l):\n\n if not Settings._is_in_list(l, valid_l):\n raise InvalidSettingError()",
"def __validate_dimensions(self, v):\n return \\\n is_positive_float(v) and self.__are_parameters_consistent()",
"def _check_value(self,val):\n if self.allow_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
raise InvalidSettingError if list |l| is not in list |valid_l| where \"in\" semantics are aligned with Settings._is_in_list(), so see the doc for that | def _list_validity_check(l, valid_l):
if not Settings._is_in_list(l, valid_l):
raise InvalidSettingError() | [
"def _is_in_list(l, valid_l):\n\n for elem in l:\n if Settings._is_primitive(elem):\n if not Settings._is_in_prim(elem, valid_l):\n return False\n elif Settings._is_list(elem):\n valid_lists = [l for l in valid_l if isinstance(l, list)]\n if not Se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
raise InvalidSettingError if dict |d| is not in dict |valid_d| where \"in\" semantics are aligned with Settings._is_in_dict(), so see the doc for that | def _dict_validity_check(d, valid_d):
if not Settings._is_in_dict(d, valid_d):
raise InvalidSettingError() | [
"def _is_in_dict(d, valid_d):\n\n for k, v in d.items():\n if k not in valid_d:\n return False\n else:\n if Settings._is_primitive(v):\n if not Settings._is_in_prim(v, valid_d[k]):\n return False\n elif Settings._is_list(v):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
error check |settings| and |valid|. Both are dict types. |settings| represents the user settings where each pair is a setting name associated to a chosen setting value. |valid| represents all valid user settings where each pair is a setting name associated to legal valid setting values. | def _validity_check(settings, valid):
Settings._dict_validity_check(settings, valid) | [
"def validate_settings(self, settings):\n pass",
"def validate_settings(_cfg, _ctx):\n pass",
"def _dict_validity_check(d, valid_d):\n\n if not Settings._is_in_dict(d, valid_d):\n raise InvalidSettingError()",
"def check_settings_syntax(settings_dict: dict, settings_metadata_dict: dict):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
inject any defaults specified in |defaults| into settings. Default values will only be applied if a key exists in |defaults| and doesn't exist in |settings|, or if a key in |settings| has an associating value of None. If |defaults| is None, |settings| is returned as is. | def _inject_defaults(settings, defaults):
new_settings = {}
if defaults is None:
return settings
elif settings is None or len(settings) == 0:
new_settings = defaults
else:
for k, v in settings.items():
if isinstance(v, dict) or v is None:
ne... | [
"def setDefaults(self, defaults):\n assert isinstance(defaults, dict);\n\n self.__defaults = dict();\n\n return self.addDefaults(defaults);",
"def addDefaults(self, defaults):\n assert isinstance(defaults, dict);\n\n for name, default in defaults.items():\n self.__def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
create a Settings object. |settings| can be a dict or path to json file. If a dict, then values in |settings| must be a primitive (int, float, bool, str), list, or dict. |valid| must be a dict. |settings| represents the user settings where each pair is a setting name associated to a chosen setting value. |valid| repres... | def __init__(self, settings, valid, defaults=None):
try:
with open(settings, 'r') as settings_file:
self._settings = json.load(settings_file)
except TypeError:
self._settings = dict(settings)
self._settings = Settings._inject_defaults(self._settings, defaults)
Sett... | [
"def _validity_check(settings, valid):\n\n Settings._dict_validity_check(settings, valid)",
"def _read_settings_file(cls, settings_path=''):\n if not settings_path:\n return {}\n\n if os.path.isdir(settings_path):\n settings_path = os.path.join(settings_path, '.' + cls.__n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return the number of settings | def __len__(self):
return len(self._settings) | [
"def get_num_of_entries():\n try:\n number_of_entries = len(sg.user_settings_get_entry('-cb value-'))\n except TypeError:\n number_of_entries = 0\n return number_of_entries",
"def number_of_sections(self):\n #print (len(self.config.sections()))\n return len... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Push a single ElasticSearchObject to index. Assumes objects do NOT have an id. | def push(self, es_obj, doc_type=None, refresh=True):
doc_type, es_repr = self._validate_doc_and_get_type_and_repr(es_obj, doc_type)
response = self.conn.elastic_search_client.index(index=self.index_name, doc_type=doc_type,
body=es_repr, refresh=u'true' if refre... | [
"def add_object(self, content, object_id = None):\n if object_id is None:\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"POST\", \"/1/indexes/%s\" % self.url_index_name, self.client.timeout, content)\n else:\n return AlgoliaUtils_request(self.client.header... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the text of a child node found by name. Only one such named child is expected. | def getSingleChildTextByName(rootNode, name):
try:
nodeList = [e.firstChild.data for e in rootNode.childNodes if e.localName == name]
if len(nodeList) > 0:
return nodeList[0]
else:
return None
except AttributeError:
return None | [
"def get_child(self, name):\n for n in self.children:\n if n.name == name:\n return n\n\n raise ChildError(\"Can't find child node '{name}'\".format(**locals()))",
"def get_child(node, name):\r\n for child in node.childNodes:\r\n if child.localName == name:\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the text of a child node found by name and namespaceURI. Only one such named child is expected. | def getSingleChildTextByNameNS(rootNode, ns, name):
try:
nodeList = [e.firstChild.data for e in rootNode.childNodes if e.localName == name and e.namespaceURI == ns]
if len(nodeList) > 0:
return nodeList[0]
else:
return None
except AttributeError:
return No... | [
"def get_child(node, name):\r\n for child in node.childNodes:\r\n if child.localName == name:\r\n return child",
"def getSingleChildTextByName(rootNode, name):\n try:\n nodeList = [e.firstChild.data for e in rootNode.childNodes if e.localName == name]\n if len(nodeList) > 0:\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a descendent node found by a list of names and namespaceURIs forming a path. The path is expected to define a unique node. | def getSingleChildByPathNS(rootNode, path):
parentNode = rootNode
for (ns, name) in path:
node = getSingleChildByNameNS(parentNode, ns, name)
if node == None:
return None
else:
parentNode = node
return node | [
"def _get_child_by_path(element,path,namePrefix=\"{http://gtr.rcuk.ac.uk/api}\"):\r\n\r\n pathChildren = path.split('/')\r\n for childName in pathChildren:\r\n if(element.tag == namePrefix + childName):\r\n return element\r\n element = element.find(namePrefix + childName)\r\n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns all child nodes of a specified name. | def getChildrenByName(rootNode, name):
return [e for e in rootNode.childNodes if e.localName == name] | [
"def _find_children_named(self, parent, name):\n for node in parent.childNodes:\n if node.nodeName == name:\n yield node",
"def children(self, name=None):\n for child in [c for c in self._node.childNodes if c.nodeType == 1]:\n if name in (None, child.tagName):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
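A small usage sketch of the DOM helpers above against `xml.dom.minidom`; the XML content is illustrative:

```python
from xml.dom import minidom

doc = minidom.parseString("<root><a>x</a><a>y</a><b>z</b></root>")
root = doc.documentElement

getChildrenByName(root, "a")         # the two <a> elements
getSingleChildTextByName(root, "b")  # "z"
```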
Returns all child nodes of a specified name and namespaceURI. | def getChildrenByNameNS(rootNode, ns, name):
return [e for e in rootNode.childNodes if e.localName == name and e.namespaceURI == ns] | [
"def getChildrenByName(rootNode, name):\n return [e for e in rootNode.childNodes if e.localName == name]",
"def children(self, name=None):\n for child in [c for c in self._node.childNodes if c.nodeType == 1]:\n if name in (None, child.tagName):\n yield ParsedElement(child)",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add new node to the Pipeline | def add_node(self, new_node: 'GraphNode'):
self.operator.add_node(new_node) | [
"def add_node(self, node):",
"def add(self, node):\n pass",
"def add_node(self, *args, **kwargs):\n raise NotImplementedError",
"def add_node(self, node):\n self.nodes.add(node)",
"def AddNode(self, node):\n self.nodes.append(node)\n return node",
"def insert_node(self, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Replace the subtrees with old and new nodes as subroots | def update_subtree(self, old_subroot: 'GraphNode', new_subroot: 'GraphNode'):
self.operator.update_subtree(old_subroot, new_subroot) | [
"def root_replace(self,node):\r\n self.feature_index = node.feature_index\r\n self.threshold = node.threshold\r\n self.label = node.label\r\n self.left = node.left\r\n self.right = node.right\r\n self.substitute = node.substitute\r\n if node.left is not None and node... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete the subtree with node as subroot. | def delete_subtree(self, subroot: 'GraphNode'):
self.operator.delete_subtree(subroot) | [
"def delete_root(self, node):\n current = node\n successor = self.find_successor(current) \n temp_height = current.height\n current.height = successor.height\n successor.height = temp_height\n\n if successor != None:\n self.root = successor\n parent = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
compute the hs300 and zz500 weekly exposure on style factors | def factor_exposure(self):
exp_hs_all = pd.DataFrame([])
exp_zz_all = pd.DataFrame([])
for i in range(len(self.weekly_date)):
date = self.weekly_date.iloc[i,0]
factor = get_barra_factor_from_sql(date)
factor['secID'] = factor.index.tolist()
stockli... | [
"def get_weight(ew1, ew2):\n dw = flu.delta_epiweeks(ew1, ew2)\n yr = 52.2\n hl1, hl2, bw = yr, 1, 4\n a = 0.05\n #b = (np.cos(2 * np.pi * (dw / yr)) + 1) / 2\n b = np.exp(-((min(dw % yr, yr - dw % yr) / bw) ** 2))\n c = 2 ** -(dw / hl1)\n d = 1 - 2 ** -(dw / hl2)\n retu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read taxonomy nodes.dmp file into pandas DataFrame | def read_nodes_dmp(fname):
df = pd.read_csv(fname, sep="|", header=None, index_col=False,
names=['tax_id',
'parent_tax_id',
'rank',
'embl_code',
'division_id',
... | [
"def _read_node_file(self):\n self.node_df = gt.remove_colons(pd.read_csv(self.node_file, dtype=str))",
"def get_node_data(file: str, sep=\"\\t\") -> pd.DataFrame:\n return pd.read_csv(file, sep=sep)",
"def nodes_df_creation(self, path: str) -> pyspark.sql.dataframe.DataFrame:\n try:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
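NCBI taxonomy dump files delimit fields with `\t|\t`, so reading with `sep="|"` leaves tab padding inside string columns. A minimal post-read cleanup, mirroring the `.str.strip()` used by `read_names_dmp` in the next row:

```python
nodes = read_nodes_dmp("nodes.dmp")
for col in ("rank", "embl_code"):
    # Strip the residual tab characters around each field value.
    nodes[col] = nodes[col].str.strip()
```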
Read taxonomy names.dmp file into pandas DataFrame | def read_names_dmp(fname):
df = pd.read_csv(fname, sep="|", header=None, index_col=False,
names=["tax_id",
"name_txt",
"unique_name",
"name_class"])
return df.assign(name_txt = lambda x: x['name_txt'].str.strip(... | [
"def read_nodes_dmp(fname):\n df = pd.read_csv(fname, sep=\"|\", header=None, index_col=False,\n names=['tax_id', \n 'parent_tax_id',\n 'rank', \n 'embl_code',\n 'division_id', \n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks authorization of a rule against the target in this context. This function is not to be called directly. Calling the function with a target that evaluates to None may result in policy bypass. Use 'authorize_on_' calls instead. | def __authorize(context, rule, target=None):
target = target or {'tenant': context.tenant}
return get_enforcer().authorize(
rule, target, context.to_dict(), do_raise=True,
exc=trove_exceptions.PolicyNotAuthorized, action=rule) | [
"def authorize(rule, target, creds, do_raise=False, *args, **kwargs):\n enforcer = get_enforcer()\n try:\n return enforcer.authorize(rule, target, creds, do_raise=do_raise,\n *args, **kwargs)\n except policy.PolicyNotAuthorized:\n raise exception.HTTPForbidden... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
'To assume as true in the absence of proof to the contrary.' Returns a modified transaction with this value set if the value of the item is not already known. If a value has already been fetched or presumed, this will be a noop. If modified, the presumed value will be available via `get`, and will additionally check yo... | def presume(
transaction: VersionedTransaction,
table: TableNameOrResource,
item_key: ItemKey,
item_value: Optional[Item],
) -> VersionedTransaction:
if item_value is not None:
for key_attr, key_val in item_key.items():
assert item_value[key_attr] == key_val, "Item key must match... | [
"def test_update_transaction_dispute_item(self):\n pass",
"def test_partial_update_transaction(self):\n pass",
"async def test_store_ensure_with_primary_key(\n keyed_store: Store[CoolModel], mock_filesystem: AsyncMock\n) -> None:\n default_item = CoolModel(foo=\"foo\", bar=0)\n existing_i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Idempotent definition of key attribute schema for the given table without forcing any IO operations/effects up front. The main reason you might want to do this is if you need to do a `put`, because `put` cannot infer the shape of your key. If the table definition is already present, this is a noop. | def define_table(
transaction: VersionedTransaction, table: TableNameOrResource, *key_attributes: str,
) -> VersionedTransaction:
assert len(key_attributes) > 0 and len(key_attributes) <= 2
if _table_name(table) in transaction.tables:
return transaction
return VersionedTransaction(
table... | [
"def _get_table_schema(self):\n\n return {\n 'AttributeDefinitions': [\n {\n 'AttributeName': self._key_field.name,\n 'AttributeType': self._key_field.data_type\n }\n ],\n 'TableName': self.table_name,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a relpath like drake/pkg/res.txt or external/repo/pkg/res.txt, find the data file and return its path | def find_data(relpath):
# Because we are in a py_binary, Bazel's wrapper script sets up our
# $PYTHONPATH to have our resources somewhere on a sys.path entry.
for one_path in sys.path:
possible = os.path.join(one_path, relpath)
if os.path.exists(possible):
return possible
rai... | [
"def path_in_data(rel_path):\n return os.path.join(os.path.dirname(__file__), 'data', rel_path)",
"def _get_data_path(data_file):\n this_file_dir = os.path.dirname(__file__)\n return this_file_dir + \"/data/{}\".format(data_file)",
"def get_data_file(f):\n if os.path.isfile(f):\n path = f\n\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build a user_input_listener coroutine. | def user_input_listener(state: SharedState): | [
"def call_get_user_input_event(self):\n ...",
"def listen_for_input(self, prompt='> '):\n Thread(target=self.__input_thread, args=[prompt]).start()",
"def listen(self):\n while self.active:\n self.handle_input()",
"def start_listener():\n listener = keyboard.Listener(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create an Action from this intent, filling missing data from state | def at(self, state):
self.complete_data(state)
self.check_duplicate(state)
action = entities.Action(
action_id=new_id(state),
type=self.get_type_name(),
data=pmap(self.data),
time=state.context.time,
randomness=state.context.randomness,... | [
"def _make_action_class(self) -> None:\n # get list of action property names, containing legal chars only\n legal_attribute_names = [prop.get_legal_name() for prop in\n self.action_variables]\n self.Action = namedtuple('Action', legal_attribute_names)",
"def __... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Match the calibSources and sources, and propagate Interesting Flags (e.g. PSF star) to the sources | def propagateCalibFlags(keysToCopy, calibSources, sources, matchRadius=1):
if calibSources is None or sources is None:
return
closest = False # return all matched objects
matched = afwTable.matchRaDec(calibSources, sources, matchRadius*afwGeom.arcseconds, closest)
#
# Becaus... | [
"def find_sources(\n self,\n thresh=3.5,\n extname='sci',\n extnum=1,\n dq_mask=None,\n use_kernel=True,\n deblend_cont=0.01,\n kernel_size=5,\n save=True\n ):\n if self.data[f\"{extname}{extnum}\"] is None:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks that the Drake-created flavor of nlopt.cpp (via a patch file) is consistent with the upstream-generated flavor of same (via CMake). If this test fails during an NLopt version pin upgrade, you will need to update patches/gen_enums.patch with the reported differences. | def test_enum_cross_check(self):
# Load both input files.
# "actual" refers to the the Drake-created flavor (via a patch file).
# "expected" refers to the upstream-generated flavor (via CMake).
manifest = runfiles.Create()
actual_file = manifest.Rlocation(
"nlopt_inte... | [
"def rpn_version_check(self):",
"def test_valid_min_cppstd_from_outdated_settings(cppstd):\n conanfile = _create_conanfile(\"gcc\", \"9\", \"Linux\", cppstd, \"libstdc++\")\n assert not valid_min_cppstd(conanfile, \"17\", False)",
"def CheckOriginals(Opts):\r\n CPath = os.path.join(os.environ['BASE_TOO... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Evaluate the given distribution function in the point(s) (r, ppar, pperp). Use the vector 'v' to specify the parameters of this distribution function. | def Eval(self, r, ppar, pperp, v, gamma=None, p2=None, p=None, xi=None):
while False:
yield None | [
"def perp_vector(p, q, r):\n v = cross(q - r, q - p)\n return v / mod(v) + q",
"def pareto_distribution(v, p=0.8):\n thr = np.sum(v)*p\n cumsum = 0\n for i, _v in enumerate(v, 1):\n cumsum += _v\n if cumsum >= thr:\n return i * 1.0 / len(v)",
"def EvaluateFunction(self, p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate phase autocorrelation for each signal in seismic_stream | def acorr(seismic_signal, **kwargs):
# if seismic signal is a trace object, we pack it to a stream
if isinstance(seismic_signal, _tr.Trace):
sources = _st.Stream([seismic_signal])
else:
sources = seismic_signal
if not isinstance(sources, _st.Stream):
raise TypeError('seismic_str... | [
"def autocorrelation(series):\n from scipy import signal\n\n x=series-np.mean(series)\n y=np.conj(x[::-1])\n\n acf=np.fft.ifftshift(signal.fftconvolve(y,x,mode='full'))\n\n N=series.shape[0]\n\n acf = acf[0:N]\n\n return acf/acf[0]",
"def step_autocorrelation(self):\n\n max_hops = max(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate phase cross-correlation (pcc) between signal1 and signal2. For this purpose signal2 is shifted in time and compared to the corresponding portion in signal1 | def _xcorr_trace(signal1, signal2, **kwargs):
kwargs['mode'] = 'pcc'
kwargs['lags'] = __default_lags_if_not_set(signal1, signal2, **kwargs)
pcc_signal = phasecorr.xcorr(signal1.data, signal2.data, **kwargs)
trace = _tr.Trace(data=pcc_signal)
__writeheader(trace, signal1, **kwargs)
return tra... | [
"def phase_shift(trajectory_1, trajectory_2, t, nsamples, period):\n # regularize datasets by subtracting mean and dividing by s.d.\n trajectory_1 -= trajectory_1.mean(); trajectory_1 /= trajectory_1.std()\n trajectory_2 -= trajectory_2.mean(); trajectory_2 /= trajectory_2.std()\n corr = correlate(traje... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate phase autocorrelation (pac) of signal1. For this purpose a time-shifted copy of signal1 is compared to the corresponding portion in signal1 | def _acorr_trace(signal1, **kwargs):
kwargs['mode'] = 'pac'
kwargs['lags'] = __default_lags_if_not_set(signal1, signal1, **kwargs)
pac_signal = phasecorr.acorr(signal1.data, **kwargs)
trace = _tr.Trace(data=pac_signal)
__writeheader(trace, signal1, **kwargs)
return trace | [
"def phase_shift(trajectory_1, trajectory_2, t, nsamples, period):\n # regularize datasets by subtracting mean and dividing by s.d.\n trajectory_1 -= trajectory_1.mean(); trajectory_1 /= trajectory_1.std()\n trajectory_2 -= trajectory_2.mean(); trajectory_2 /= trajectory_2.std()\n corr = correlate(traje... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
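The three phase-correlation rows above (acorr, _xcorr_trace, _acorr_trace) all delegate to a phasecorr module whose internals are not shown. For orientation, a minimal NumPy/SciPy sketch of Schimmel-style phase cross-correlation follows; the function name and signature are illustrative, not the module's real API, and phase autocorrelation is just the special case phase_xcorr(x, x, max_lag).

import numpy as np
from scipy.signal import hilbert

def phase_xcorr(x, y, max_lag):
    """Phase cross-correlation (after Schimmel, 1999) between x and y.

    Only the instantaneous phases of the analytic signals are compared;
    amplitude information is deliberately discarded.
    """
    # Unit phasors e^{i*phi(t)} of the analytic signals
    cx = np.exp(1j * np.angle(hilbert(x)))
    cy = np.exp(1j * np.angle(hilbert(y)))
    lags = np.arange(-max_lag, max_lag + 1)
    pcc = np.empty(len(lags))
    for k, lag in enumerate(lags):
        if lag >= 0:
            a, b = cx[lag:], cy[:len(cy) - lag]
        else:
            a, b = cx[:lag], cy[-lag:]
        n = min(len(a), len(b))
        a, b = a[:n], b[:n]
        # PCC(tau) = 1/(2N) * sum(|a + b| - |a - b|), bounded in [-1, 1]
        pcc[k] = np.sum(np.abs(a + b) - np.abs(a - b)) / (2 * n)
    return lags, pcc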
get random proxy from proxypool | def get_random_proxy():
    url = requests.get(proxypool_url).text.strip()
#logger.info("now url is",url)
return url | [
"def get_random_proxy(self):\n return requests.get(self.__proxypool_url).text.strip()",
"def get_random_proxy(proxypool_url):\n proxy = requests.get(proxypool_url).text.strip()\n proxies = {\n 'http': 'https://' + proxy}\n return proxies",
"def get_random(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
use proxy to crawl page | def crawl(url):
while True:
try:
proxy=get_random_proxy()
proxies = {'http': 'http://' + proxy}
logger.info(proxies)
            resp = requests.get(url, proxies=proxies, timeout=3)  # use the proxy to fetch each company's link
            resp.encoding = resp.apparent_encoding  # so the content decodes correctly
... | [
"def __spider(self, url):\n while 1:\n try:\n response = self.s.get(url, proxies=self.proxies, timeout=2)\n if response.status_code == 200:\n break\n except Exception as e:\n print(e)\n self.count += 1\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
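The crawl row above mixes the retry logic with logging and is cut off before its exit condition. A cleaned-up sketch of the same retry-through-a-random-proxy loop, with the retry cap and return value as assumptions:

import logging
import requests

logger = logging.getLogger(__name__)

def crawl(url, proxypool_url, max_retries=5):
    """Fetch url through a random proxy, drawing a new proxy on each failure."""
    for _ in range(max_retries):
        proxy = requests.get(proxypool_url).text.strip()
        proxies = {'http': 'http://' + proxy}
        logger.info('trying proxy %s', proxy)
        try:
            resp = requests.get(url, proxies=proxies, timeout=3)
            resp.encoding = resp.apparent_encoding  # let requests guess the charset
            if resp.status_code == 200:
                return resp.text
        except requests.RequestException:
            continue  # dead proxy, draw another one
    return None  # all retries exhausted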
Checks if given position is empty ("") in the board. | def _position_is_empty_in_board(position, board):
return board[position[0]][position[1]] == "-" | [
"def emptyAt(self, position):\n\n #check for any sprites at the position\n for key in self.sprites:\n s = self.sprites[key]\n if s.position == position and s.visible: #not visible means it isn't taking up the tile\n return False\n\n #check whether the position is reserved ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if given position is valid. To consider a position as valid, it must be a two-element tuple, containing values from 0 to 2. | def _position_is_valid(position):
# Make sure that...
# position is a tuple
# position's length is 2
# every value in the tuple is an int
# every int in the tuple is either 0, 1 or 2
# if not, return False
if not isinstance(position, tuple) \
or len(position) != 2 \... | [
"def check_valid_position_tuple(pos):\n try: chrom, start_pos, end_pos, strand = pos\n except (TypeError, ValueError): raise MutantError(\"Didn't get a correct position tuple! %s\"%pos)\n if strand not in SEQ_STRANDS: raise MutantError(\"Invalid strand %s!\"%strand)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if all 3 positions in given combination are occupied by given player. | def _is_winning_combination(board, combination, player):
"""
### Code before refactoring into a comprehension list:
for a_tuple in combination:
# e.g. a_tuple = (0,0)
# if board[0][0] != "X"
if board[a_tuple[0]][a_tuple[1]] != player:
return False
"""
if any(... | [
"def _check_winning_combinations(board, player):\n winning_combinations = (\n ((0, 0), (0, 1), (0, 2)),\n ((1, 0), (1, 1), (1, 2)),\n ((2, 0), (2, 1), (2, 2)),\n ((0, 0), (1, 0), (2, 0)),\n ((0, 1), (1, 1), (2, 1)),\n ((0, 2), (1, 2), (2, 2)),\n ((0, 0), (1, 1), (... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
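The row above breaks off inside the comprehension. A complete equivalent of the refactor its comment describes (any cell not owned by the player means no win) can be written with all():

def _is_winning_combination(board, combination, player):
    """True iff the player occupies all three cells of the combination."""
    return all(board[row][col] == player for row, col in combination)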
There are 8 possible combinations (3 horizontal, 3 vertical and 2 diagonal) to win the Tic-tac-toe game. This helper loops through all these combinations and checks if any of them belongs to the given player. | def _check_winning_combinations(board, player):
winning_combinations = (
((0, 0), (0, 1), (0, 2)),
((1, 0), (1, 1), (1, 2)),
((2, 0), (2, 1), (2, 2)),
((0, 0), (1, 0), (2, 0)),
((0, 1), (1, 1), (2, 1)),
((0, 2), (1, 2), (2, 2)),
((0, 0), (1, 1), (2, 2)),
... | [
"def check_win(board):\n\n is_winner = False\n\n vertical_list_first_column = []\n vertical_list_second_column = []\n vertical_list_third_column = []\n column_list = [vertical_list_first_column, vertical_list_second_column, vertical_list_third_column]\n\n cross_from_left = [board[0][0], board[1][1], boa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
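The combination table in the row above is likewise truncated after the first diagonal. Filling in the missing anti-diagonal and reusing the helper sketched earlier gives the complete check; returning the player on a win (and None otherwise) is an assumption based on similar implementations:

WINNING_COMBINATIONS = (
    ((0, 0), (0, 1), (0, 2)),  # three rows
    ((1, 0), (1, 1), (1, 2)),
    ((2, 0), (2, 1), (2, 2)),
    ((0, 0), (1, 0), (2, 0)),  # three columns
    ((0, 1), (1, 1), (2, 1)),
    ((0, 2), (1, 2), (2, 2)),
    ((0, 0), (1, 1), (2, 2)),  # two diagonals
    ((0, 2), (1, 1), (2, 0)),
)

def _check_winning_combinations(board, player):
    for combination in WINNING_COMBINATIONS:
        if _is_winning_combination(board, combination, player):
            return player
    return None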
Used to build the gender branch of our face recognition network. This branch is composed of three Conv > BN > Pool > Dropout blocks, followed by the Dense output layer. | def build_gender_branch(self, inputs, num_genders=2):
x = Lambda(lambda c: tf.image.rgb_to_grayscale(c))(inputs)
        x = self.make_default_hidden_layers(x)  # feed the grayscale tensor, not the raw inputs
x = Flatten()(x)
x = Dense(128)(x)
x = Activation("relu")(x)
x = BatchNormalization()(x)
x = Dropout(0.5)... | [
"def discriminator_block(in_filters, out_filters):\n layers = [ nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1),\n nn.LeakyReLU(0.01)]\n return layers",
"def discriminator_block(in_filters, out_filters, stride, normalize):\n layers = [nn.Conv2d(in_filters... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Used to build the age branch of our face recognition network. This branch is composed of three Conv > BN > Pool > Dropout blocks, followed by the Dense output layer. | def build_age_branch(self, inputs):
x = self.make_default_hidden_layers(inputs)
x = Flatten()(x)
x = Dense(128)(x)
x = Activation("relu")(x)
x = BatchNormalization()(x)
x = Dropout(0.5)(x)
x = Dense(1)(x)
x = Activation("linear", name="age_output")(x)
... | [
"def discriminator_block(in_filters, out_filters):\n layers = [ nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1),\n nn.LeakyReLU(0.01)]\n return layers",
"def LadderNet(input_size = (256, 256, 1), num_classes=2, filters=30): \n \n # X's denote standard flow\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
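Both branch-builder rows above call self.make_default_hidden_layers, which never appears in the dump. A plausible standalone sketch of such a helper (three Conv > BN > Pool > Dropout blocks, matching the docstrings) follows; the filter counts, kernel sizes and dropout rate are assumptions:

from tensorflow.keras.layers import (Activation, BatchNormalization, Conv2D,
                                     Dropout, MaxPooling2D)

def make_default_hidden_layers(inputs):
    """Three Conv -> BN -> Pool -> Dropout blocks shared by every branch."""
    x = inputs
    for filters in (16, 32, 32):
        x = Conv2D(filters, (3, 3), padding='same')(x)
        x = Activation('relu')(x)
        x = BatchNormalization(axis=-1)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(0.25)(x)
    return x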
raise WinproxyError if result is 0 | def fail_on_zero(func_name, result, func, args):
if not result:
raise WinproxyError(func_name)
return args | [
"def winhttp_WinHttpFreeProxyResult(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"pProxyResult\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def _check_call(self, ret):\n if ret != 0:\n raise DLRError(self.lib.DLRGetLastError().d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
LciaEngine.__getitem__ retrieves a canonical context by more intensively searching for matches from a given context. Adds foreign context's full name as synonym if one is affirmatively found. If one is not found, returns the NullContext. None is returned as None, to represent 'unspecified' (i.e. accept all) as opposed ... | def __getitem__(self, item):
if item is None:
return None
try:
return self._cm.__getitem__(item)
except KeyError:
if isinstance(item, Context):
return self._cm.find_matching_context(item)
elif isinstance(item, tuple) and len(item) >... | [
"def __dis_context__(self, context, word):\n senses = self.vs.get_senses(word, self.ignore_case)\n if self.verbose:\n print(\"Senses of a target word:\")\n print(senses)\n\n if len(senses) == 0: # means we don't know any sense for this word\n return None\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a quantity, import its CFs into the local database. Unfortunately this is still going to be slow because every part of the CF still needs to be canonicalized. The only thing that's saved is creating a new Characterization instance. | def import_cfs(self, quantity):
try:
qq = self._canonical_q(quantity)
except KeyError:
qq = self.add_quantity(quantity)
count = 0
for cf in quantity.factors():
count += 1
# print(cf)
try:
fb = self._fm[cf.flowab... | [
"def import_currency():\n csv_file = os.path.join(app.root_path, \"ingest/currency/currency.csv\")\n df = pd.read_csv(csv_file)\n df = df.dropna()\n\n MAX_SYMBOL_LENGTH = 3\n\n for index, row in df.iterrows():\n unicode_decimal = str(row[\"_unicode-decimal\"])\n unicode_as_array = unico... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a str (one line for each function). | def text_for_funcs_in_script(filename, prefix):
funcs = funcs_in_script(filename)
###################################################
# FIND LENGTH OF LONGEST FUNCTION NAME #
###################################################
maxlen = 0
for func in funcs:
name, header = func... | [
"def whereami(level=0):\n\n # string = module_name(level=level+1)+', '+func_name(level=level+1)+', '+line_no(level=level+1)+': '\n string = line_no(level=level+1)+', '+func_name(level=level+1)+', '+module_name(level=level+1)+':\\t'\n return string",
"def __str__(self):\n header = [\n ' Ob... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
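The final row is cut off while computing the length of the longest function name. The padding idea it sets up can be finished in a couple of lines; the two-space gutter is an assumption:

def text_for_funcs(funcs):
    """One line per (name, header) pair, names padded to a common width."""
    maxlen = max((len(name) for name, _ in funcs), default=0)
    return '\n'.join(name.ljust(maxlen) + '  ' + header for name, header in funcs)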