query (string, 9–9.05k chars) | document (string, 10–222k chars) | negatives (list, 19–20 items) | metadata (dict)
|---|---|---|---|
Return a list of files below each of the given paths with the given extensions. | def _collect_files(paths, extensions):
files = list()
for ext in extensions:
for path in paths:
index_path, glob = pathlib.Path(path), r'**/*' + ext
files += index_path.glob(glob)
return [str(f) for f in sorted(set(files))] | [
"def _get_files(path, extensions=image_extension):\n path = Path(path)\n files = [Path(file.path) for file in os.scandir(path) if Path(file).suffix\n in extensions]\n return files",
"def collect_files_by_ext(path, ext=[]):\n if isinstance(ext, str):\n ext = [ext]\n collected_file... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
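The `_collect_files` row above pairs its query with a glob-per-extension scan; the same pattern works as a standalone helper using only the standard library. A minimal sketch (names are illustrative, not from the dataset):

```python
import pathlib

def collect_files(paths, extensions):
    """Return a sorted, de-duplicated list of files under each path
    whose names end with any of the given extensions."""
    found = set()
    for path in paths:
        for ext in extensions:
            # '**/*' makes the glob recursive; ext is e.g. '.html'
            found.update(pathlib.Path(path).glob('**/*' + ext))
    return [str(f) for f in sorted(found)]

# e.g. collect_files(['docs', 'site'], ['.html', '.css'])
```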
Create a nested dict made up of the components of the links' paths. | def _create(links):
nested = dict()
for link in links:
nested = Webpages._nest(link.split(os.sep), nested)
return nested | [
"def wrap_with_links(obj, links, val, root_path, many=False):\n if many:\n for item in obj:\n item['links'] = {}\n for key in links:\n item['links'][key] = root_path + links[key].format(item[val])\n else:\n obj['links'] = {}\n for key in links:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return True if the directory contains any of the defaults. | def _has_default(directory, defaults):
return any(os.path.isfile(os.path.join(directory, default))
for default in defaults) | [
"def defaults_installed(self):\n defaults = []\n for skill in self.msm.default_skills.values():\n if not skill_is_blacklisted(skill):\n defaults.append(skill)\n return all([skill.is_local for skill in defaults])",
"def dir_exists(self, path=''):\n if path == '... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return formatted, nested, HTML ULs matching nested dicts. link is a list of directory components up until nested. indent is UL indent level. Each branch and leaf is made an anchor with self.abs_top replaced by self.rel_top if that directory has any of self.defaults. | def _lists(self, nested, link=list(), indent=3):
abs_top, rel_top, defaults = self.abs_top, self.rel_top, self.defaults
if nested:
items = ''
for key in sorted(nested):
kp = self._path(*[key] + [''], old=abs_top, new=rel_top)
nest = nested[key]
... | [
"def exportDirPage(self, linkDict, level=0):\n title = self.title()\n lines = [u'<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 '\\\n 'Transitional//EN\">', u'<html>', u'<head>',\n u'<meta http-equiv=\"Content-Type\" content=\"text/html; '\\\n 'charset=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write webpage to outpath, or print if outpath is None. | def write_webpage(self, outpath=None):
if outpath:
with open(outpath, 'w') as outfile:
outfile.write(self.webpage)
print(outpath)
else:
print(self.webpage) | [
"def write_to_output(self, out=None):\n # Check the output file\n if out is None:\n out = sys.stdout\n\n # Generate header string and body\n header_string = self._get_header_string()\n if self.view is None:\n body = ''\n else:\n body = self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write files list to outpath, or print if outpath is None. | def write_files(self, outpath=None):
files = '\n'.join(self.files + [''])
if outpath:
with open(outpath, 'w') as outfile:
outfile.write(files)
print(outpath)
else:
print(files) | [
"def write_list(path_out, image_list):\n filename = os.path.join(args.root, path_out)\n print('filename=', filename)\n with open(filename, 'w') as fout:\n for i, item in enumerate(image_list):\n line = '%s\\t' % item[1]\n line += '%f\\n' % item[2]\n fout.write(line)"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts a dict-based example to a tf.Example proto. | def as_tf_example(example):
return tf.train.Example(features=tf.train.Features(feature={
'sequence': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[example['sequence']])),
'mutation_sequence': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[example['mutation_sequence']])... | [
"def _parse_tf_imp_dict(example_proto):\n image_feature_description = {\n 'height': tf.io.FixedLenFeature([], tf.int64),\n 'width': tf.io.FixedLenFeature([], tf.int64),\n 'depth': tf.io.FixedLenFeature([], tf.int64),\n 'label': tf.io.FixedLenFeature([], tf.int64),\n 'class_labe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses a tf.Example proto string. | def parse_tf_example(tf_example_str):
return tf.parse_single_example(
serialized=tf_example_str,
features={
'sequence': tf.FixedLenFeature([], dtype=tf.string),
'mutation_sequence': tf.FixedLenFeature([], dtype=tf.string),
'partition': tf.FixedLenFeature([], dtype=tf.string),... | [
"def parse_example_proto(example_serialized):\n # Dense features in Example proto.\n feature_map = {\n 'image/encoded': tf.io.FixedLenFeature([], dtype=tf.string,\n default_value=''),\n 'image/class/label': tf.io.FixedLenFeature([1], dtype=tf.int64,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts a tf.Dataset of examples to a corresponding dataframe. | def as_dataframe(dataset, batch_size=1024):
# Note pulling examples in batches is done here purely for efficiency, versus
# pulling examples one-by-one.
it = dataset.batch(batch_size).make_one_shot_iterator()
examples = None
with tf.Session() as sess:
while True:
try:
batch_examples = sess.r... | [
"def _dataframe_to_dataset(X: pd.DataFrame, y: np.ndarray, batch_size: int) -> tf.data.Dataset:\n features = X.copy()\n target = y.copy()\n dataset = tf.data.Dataset.from_tensor_slices((dict(features), target))\n dataset = dataset.shuffle(buffer_size=len(features))\n dataset = dat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Writes the given example protos to TFRecord format. | def write_tfrecord_dataset(filepath, examples):
with tf.python_io.TFRecordWriter(filepath) as writer:
for example in examples:
writer.write(example.SerializeToString()) | [
"def _write_examples(examples_file, examples):\n with tf.io.TFRecordWriter(examples_file) as writer:\n for example in examples:\n writer.write(example.SerializeToString())",
"def save_as_tfrecord(data, output, name):\n writer = tf.python_io.TFRecordWriter(name);\n for i in range(0, len(data)):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads a dataset of tf.Example protos from TFRecord-formatted files. | def read_tfrecord_dataset(filepaths):
return tf.data.TFRecordDataset(filenames=filepaths).map(parse_tf_example) | [
"def _read_tfrecord_examples(filename):\n result = {}\n for raw_record in tf.data.TFRecordDataset([filename]):\n tf_example = tf.train.Example()\n tf_example.ParseFromString(raw_record.numpy())\n id_feature = tf_example.features.feature['id'].bytes_list\n result[id_feature.value[0].decode('utf-8')] = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
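The TFRecord rows above (serialize, write, read-and-parse) compose into a round trip. A minimal sketch of that composition, using the TF2-era `tf.io` names rather than the older `tf.python_io`/`tf.parse_single_example` aliases shown in the rows; the single `sequence` feature key is illustrative:

```python
import tensorflow as tf

def to_example(seq: bytes) -> tf.train.Example:
    return tf.train.Example(features=tf.train.Features(feature={
        'sequence': tf.train.Feature(bytes_list=tf.train.BytesList(value=[seq])),
    }))

# Write a couple of serialized protos to a TFRecord file.
with tf.io.TFRecordWriter('demo.tfrecord') as writer:
    for seq in [b'ACGT', b'TTGA']:
        writer.write(to_example(seq).SerializeToString())

# Read them back, parsing each proto string into a feature dict.
def parse(record):
    return tf.io.parse_single_example(
        serialized=record,
        features={'sequence': tf.io.FixedLenFeature([], dtype=tf.string)})

for ex in tf.data.TFRecordDataset(['demo.tfrecord']).map(parse):
    print(ex['sequence'].numpy())  # b'ACGT', then b'TTGA'
```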
Encodes an example to a fixed-length vector representation. The fixed-length representation is useful for models that need all examples to have the same shape. | def encode_fixedlen(example, encoder, sequence_key='mutation_sequence'):
encoded_sequence = tf.cast(
tf.py_func(encoder.encode, [example[sequence_key]], tf.float64),
tf.float32)
features = {
'sequence': encoded_sequence,
}
label = tf.cast(example['is_viable'], tf.int64)
return features, l... | [
"def encode_varlen(example, encoder, sequence_key='sequence'):\n encoded_sequence = tf.cast(\n tf.py_func(encoder.encode, [example[sequence_key]], tf.float64),\n tf.float32)\n\n features = {\n 'sequence': encoded_sequence,\n 'sequence_length': tf.shape(encoded_sequence)[0],\n }\n\n label = t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Encodes an example to a variable length vector representation. | def encode_varlen(example, encoder, sequence_key='sequence'):
encoded_sequence = tf.cast(
tf.py_func(encoder.encode, [example[sequence_key]], tf.float64),
tf.float32)
features = {
'sequence': encoded_sequence,
'sequence_length': tf.shape(encoded_sequence)[0],
}
label = tf.cast(example[... | [
"def encode_variable(name, value):\n vb = VecBuf()\n _encode_variable_name(vb, name)\n _encode_single(vb, value)\n return vb",
"def encode_fixedlen(example, encoder, sequence_key='mutation_sequence'):\n encoded_sequence = tf.cast(\n tf.py_func(encoder.encode, [example[sequence_key]], tf.float64)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a report for profiled entities grouped by year that contains the attributes provided by _get_insights_for_filtered. The filtered entities must have a get_time attribute which returns a datetime or date object. | def get_yearly_report(self):
ans = dict()
for year in range(Settings.CF_OPENED_YEAR, date.today().year+1):
current_year_stats = self._get_insights_for_filtered(lambda entity: entity.get_time().year == year)
if current_year_stats:
ans[year] = current_year_stats
... | [
"def ytd(self, year=None):\n if year is None:\n year = date.today().year\n return self.filter(time__year=year)",
"def statistic_per_timeslice(self, statistic, timeslice, timeslice_is_static, start_datetime, end_datetime, filters={}):\n\n if timeslice_is_static and (start_datetime !... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns true if lizard can parse the file extension in the given file path | def lizard_can_parse(file_path):
parseable_extensions = [
'.c',
'.h',
'.cpp',
'.hpp',
'.java',
'.cs',
'.js',
'.m',
'.mm',
'.swift',
'.py',
'.rb',
'.ttcn',
... | [
"def has_valid_ext(path: str) -> bool:\n\n return is_valid_ext(get_extension(path))",
"def allowed_file(filename):\n\n\tif '.' in filename and filename.rsplit('.', 1)[1] == 'dae':\n\t\t# Extract the file extension and return true if dae\n\t\treturn True\n\n\treturn False",
"def _is_with_extension(self, filen... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
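The `lizard_can_parse` check boils down to a set lookup on the file suffix, which `pathlib` extracts more robustly than manual string slicing. A minimal sketch with an abridged, illustrative extension set:

```python
from pathlib import Path

PARSEABLE = {'.c', '.h', '.cpp', '.hpp', '.java', '.cs', '.js',
             '.m', '.mm', '.swift', '.py', '.rb', '.ttcn'}

def can_parse(file_path: str) -> bool:
    # Path('src/Main.CPP').suffix == '.CPP'; normalise case before the lookup.
    return Path(file_path).suffix.lower() in PARSEABLE

# can_parse('src/Main.CPP') -> True; can_parse('README') -> False
```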
Creates a new time of day instance. Remember to call load() before using this instance. | def __init__(self):
DebugObject.__init__(self, "TimeOfDay")
self._createProperties() | [
"def create(self, validated_data):\n return TimeSlot.objects.create(**validated_data)",
"def _add_tummytime_entry(self):\n milestone = \"\"\n if choice([True, False]):\n milestone = self.faker.sentence()\n start = self.time + timedelta(minutes=randint(1, 60))\n end = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Binds the shader inputs to a node. This only has to be done once. | def bindTo(self, node, uniformName):
for propid, prop in self.properties.iteritems():
name = propid.replace(".", "_")
node.setShaderInput(name, prop.getPTA()) | [
"def bind_to(self, target):\r\n for key, val in iteritems(self.inputs):\r\n target.set_shader_input(self.name + \".\" + key, val)",
"def _setShaderInputs(self):\n\n # Shader inputs for the light-culling pass\n if self.haveLightingPass:\n self.lightBoundsComputeBuff.setSh... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns all property keys, ordered | def getPropertyKeys(self):
return self.propertiesOrdered | [
"def _get_keys(self):\n # NB You can get the keys of an instance more directly, via\n # Path\\_.Keys but this doesn't apply to classes. The technique\n # here appears to work for both.\n if self._keys is None:\n _set(self, \"_keys\", [])\n for property in self.ole_o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Writes the GLSL structure representation to a given location | def saveGlslInclude(self, dest):
output = "// Autogenerated by Time of Day Manager\n"
output += "// Do not edit! Your changes will be lost.\n\n\n"
output += "struct TimeOfDay {\n\n"
for propid, prop in self.properties.iteritems():
name = propid.replace(".", "_")
... | [
"def generate_shader_code(self):\r\n\r\n content = \"#pragma once\\n\\n\"\r\n content += \"// Autogenerated by the render pipeline\\n\"\r\n content += \"// Do not edit! Your changes will be lost.\\n\\n\"\r\n\r\n structs = {}\r\n inputs = []\r\n\r\n for input_name, handle in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load DeCS ontology from local file 'DeCS_2019.obo' or from online source. | def load_decs():
print("Loading ES DeCS...")
graph = obonet.read_obo("./data/vocabularies/DeCS_2019.obo") # Load the ontology from local file
graph = graph.to_directed()
name_to_id, synonym_to_id, edges = dict(), dict(), list()
#print(len(graph.nodes()))
for node in graph.nodes(data=True... | [
"def loadDOFile(filename):\n\n\tscriptDir = os.path.dirname(__file__)\n\tabsFilename = os.path.join(os.path.dirname(scriptDir),'src','ontology', filename)\n\treturn pronto.Ontology(absFilename)",
"def load_ontology(self):\n multipart_data = MultipartEncoder(fields={'file': ('FinancialNewsOntology_beta3.owl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the best Spanish or Portuguese DeCS matches for entity text according to lexical similarity (edit distance). | def map_to_decs(entity_text, name_to_id,synonym_to_id):
global decs_cache
entity_text_spaces = entity_text.replace("_"," ")
if entity_text_spaces.replace(',','') in map(str.lower,name_to_id): #There is an exact match for this entity in name_to_id
codes = process.extract(entity_text_spaces.rep... | [
"def diversity(sentence: str, tokenized_sentences: str, similarity_metric: str) -> float:\n # sentences = nltk.sent_tokenize(document)\n max_sim_sentence = ''\n sentence = sentence.lower()\n tokenized_sentences = [sent.lower() for sent in tokenized_sentences]\n\n if similarity_metric == 'jaccard':\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
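`map_to_decs` above leans on a fuzzywuzzy-style `process.extract` for edit-distance ranking; the same idea can be sketched with the standard library's `difflib`, which ranks candidates by sequence similarity. The toy vocabulary here is illustrative:

```python
import difflib

name_to_id = {'asma bronquial': 'D001249', 'anemia': 'D000740'}

def best_matches(entity_text, vocabulary, n=3, cutoff=0.6):
    """Return up to n vocabulary terms most lexically similar to entity_text."""
    entity_text = entity_text.replace('_', ' ').lower()
    return difflib.get_close_matches(entity_text, vocabulary, n=n, cutoff=cutoff)

print(best_matches('asma_bronquial', list(name_to_id)))  # ['asma bronquial']
```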
Transform a speed into an RGB (or RGBA) color. | def speed2color(speed: float, speed_limit: float, cmap: Colormap = None, alpha: bool = True) -> tuple:
# TODO: vectorize this s.t. 'speed' can be an array
if cmap is None:
# DIVERGING -> ['coolwarm', 'RdBu_r', 'jet']
# SEQUENTIAL -> ['gist_heat', 'autumn', 'hot']
num_colors = 256 / 2
... | [
"def set_rgb_animation(self, anim_type, color, speed, timeout=RESPONSE_DELAY):\n\n value = bytearray()\n value.append(int(anim_type))\n value.append(int(color))\n value.append(int(speed))\n\n command.create_set_command(command.PROTOCOL_COMMAND_SET_RGB_ANIMATION, value, 3)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
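`speed2color` is essentially "clamp and normalise, then index a colormap", and a Matplotlib `Colormap` already returns an RGBA tuple when called with a value in [0, 1]. A minimal sketch (the colormap name is illustrative; requires Matplotlib >= 3.5 for the `colormaps` registry):

```python
from matplotlib import colormaps

def speed_to_color(speed: float, speed_limit: float,
                   cmap_name: str = 'coolwarm', alpha: bool = True) -> tuple:
    cmap = colormaps[cmap_name]
    frac = min(max(speed / speed_limit, 0.0), 1.0)  # clamp to [0, 1]
    rgba = cmap(frac)                               # (r, g, b, a), floats in [0, 1]
    return rgba if alpha else rgba[:3]

# speed_to_color(30, 50) -> a colour 60% of the way through 'coolwarm'
```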
Populate a cycle dict | def _init_cycle_dict(self):
dict_arr = np.zeros(self.epochs, dtype=int)
length_arr = np.zeros(self.epochs, dtype=int)
start_arr = np.zeros(self.epochs, dtype=int)
c_len = self.cycle_len
idx = 0
for i in range(self.cycles):
current_start = idx
for... | [
"def generate_cycle(self, workflow=None):\n if not workflow:\n _, workflow = self.generate_workflow()\n\n workflow = self._session_add(workflow) # this should be nicer\n\n obj_name = \"cycle\"\n\n obj_dict = {\n obj_name: {\n \"workflow\": {\n \"id\": workflow.id,\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The main part of the stemming algorithm starts here. b is a buffer holding a word to be stemmed. The letters are in b[k0], b[k0+1] ... ending at b[k]. In fact k0 = 0 in this demo program. k is readjusted downwards as the stemming progresses. Zero termination is not in fact used in the algorithm. Note that only lower ca... | def __init__(self):
self.b = "" # buffer for word to be stemmed
self.k = 0
self.k0 = 0
self.j = 0 # j is a general offset into the string | [
"def get_stem(word):\r\n #stub\r\n #PLACEHOLDER\r\n\r\n ps = PorterStemmer()\r\n \r\n return word",
"def test_stem_words():\n\n # check for stemmed version of certain words\n test_str = 'The dog was running up the sidewalk.' # should be stemmed\n stemmed_text = text_utilities.stem_words(t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
cons(i) is TRUE <=> b[i] is a consonant. | def cons(self, i):
if self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' or self.b[i] == 'o' or self.b[i] == 'u':
return 0
if self.b[i] == 'y':
if i == self.k0:
return 1
else:
return (not self.cons(i - 1))
return 1 | [
"def is_consonant(word, i):\n\n vowels = {\"a\", \"e\", \"i\", \"o\", \"u\"}\n\n if word[i] in vowels:\n return False\n\n if word[i] == \"y\":\n if i == 0:\n return True\n else:\n return not (is_consonant(word, i - 1))\n\n return True",
"def conjugate_grad(A,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
doublec(j) is TRUE <=> j,(j-1) contain a double consonant. | def doublec(self, j):
if j < (self.k0 + 1):
return 0
if (self.b[j] != self.b[j-1]):
return 0
return self.cons(j) | [
"def ends_double_consonant(word):\n\n if len(word) >= 2:\n if word[-1] == word[-2]:\n return is_consonant(word, len(word) - 1)\n\n return False",
"def isconsonant(letter):\n for l in consonants:\n if letter == l:\n return True\n for L in capconsonants:\n if l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
cvc(i) is TRUE <=> i-2,i-1,i has the form consonant - vowel - consonant and also if the second c is not w, x or y. This is used when trying to restore an e at the end of a short word, e.g. cav(e), lov(e), hop(e), crim(e), but snow, box, tray. | def cvc(self, i):
if i < (self.k0 + 2) or not self.cons(i) or self.cons(i-1) or not self.cons(i-2):
return 0
ch = self.b[i]
if ch == 'w' or ch == 'x' or ch == 'y':
return 0
return 1 | [
"def ends_cvc(word):\n\n if len(word) == 2:\n return not is_consonant(word, 0) and is_consonant(word, 1)\n\n elif len(word) >= 3:\n return (is_consonant(word, len(word) - 3) and\n not is_consonant(word, len(word) - 2) and\n is_consonant(word, len(word) - 1) and\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
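The `cons` and `cvc` predicates above index into the stemmer's buffer; rewritten over a plain string they are easier to test in isolation. A minimal sketch mirroring the same logic (the Porter rule that 'y' counts as a consonant at position 0 or after a vowel is preserved):

```python
VOWELS = set('aeiou')

def cons(word: str, i: int) -> bool:
    """True if word[i] is a consonant under Porter's rules."""
    if word[i] in VOWELS:
        return False
    if word[i] == 'y':
        return i == 0 or not cons(word, i - 1)
    return True

def cvc(word: str, i: int) -> bool:
    """True if word[i-2:i+1] is consonant-vowel-consonant and the
    final consonant is not w, x or y."""
    if i < 2 or not cons(word, i) or cons(word, i - 1) or not cons(word, i - 2):
        return False
    return word[i] not in 'wxy'

print(cvc('hop', 2), cvc('snow', 3))  # True False
```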
ends(s) is TRUE <=> k0,...k ends with the string s. | def ends(self, s):
length = len(s)
if s[length - 1] != self.b[self.k]: # tiny speed-up
return 0
if length > (self.k - self.k0 + 1):
return 0
if self.b[self.k-length+1:self.k+1] != s:
return 0
self.j = self.k - length
return 1 | [
"def str_ends_with(str, end):\n return str[-len(end):] == end",
"def both_ends(s): \n l = len(s)\n new_s = '' \n \n if l < 2:\n return new_s\n else:\n new_s = s[0] + s[1] + s[l-2] + s[l-1]\n return new_s",
"def str_ends_with(s, val, length=None):\n if length is None:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
setto(s) sets (j+1),...k to the characters in the string s, readjusting k. | def setto(self, s):
length = len(s)
self.b = self.b[:self.j+1] + s + self.b[self.j+length+1:]
self.k = self.j + length | [
"def set_to(self, s):\n length = len(s)\n self.word = self.word[:self.offset + 1] + s + self.word[self.offset + length + 1:]\n self.end = self.offset + length",
"def subst_string(s,j,ch):\n res = ''\n ls = list(s)\n for i in range(len(s)):\n if i == j:\n res = res +... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
step1ab() gets rid of plurals and -ed or -ing. e.g. caresses -> caress, ponies -> poni, ties -> ti, caress -> caress, cats -> cat, feed -> feed, agreed -> agree, disabled -> disable, matting -> mat, mating -> mate, meeting -> meet, milling -> mill, messing -> mess, meetings -> meet | def step1ab(self):
if self.b[self.k] == 's':
if self.ends("sses"):
self.k = self.k - 2
elif self.ends("ies"):
self.setto("i")
elif self.b[self.k - 1] != 's':
self.k = self.k - 1
if self.ends("eed"):
if self.m... | [
"def step1(self): # real signature unknown; restored from __doc__\n pass",
"def main(step_list):",
"def sandhiPrepCompoundAdjust(self):\n # c is last character of Pada1\n c = self.Linary[self.Index - 1]\n if c == sktc:\n if self.Pada1 == \"aYc\":\n # PMS: aYc -> ak\n self.Linary[self.Index - 1... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
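The example mappings in the `step1ab` docstring can be sanity-checked against NLTK's reference Porter implementation, assuming `nltk` is installed. NLTK's default mode adds extensions (e.g. it stems ties -> tie), so the original-algorithm mode is selected here:

```python
from nltk.stem.porter import PorterStemmer

stemmer = PorterStemmer(mode=PorterStemmer.ORIGINAL_ALGORITHM)
for word in ['caresses', 'ponies', 'ties', 'cats', 'matting', 'meetings']:
    print(word, '->', stemmer.stem(word))
# caresses -> caress, ponies -> poni, ties -> ti,
# cats -> cat, matting -> mat, meetings -> meet
```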
step1c() turns terminal y to i when there is another vowel in the stem. | def step1c(self):
if (self.ends("y") and self.vowelinstem()):
self.b = self.b[:self.k] + 'i' + self.b[self.k+1:] | [
"def apply_rule_1c(word):\n\n if word.endswith(\"y\"):\n stem = word[:-1]\n\n if len(stem) > 1 and is_consonant(stem, len(stem) - 1):\n return stem + \"i\"\n\n return word",
"def first_vowel(s):\n result=len(s) # In case there is no 'a'\n \n if 'a' in s:\n result=int... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
step2() maps double suffixes to single ones, so -ization (= -ize plus -ation) maps to -ize etc. Note that the string before the suffix must give m() > 0. | def step2(self):
if self.b[self.k - 1] == 'a':
if self.ends("ational"): self.r("ate")
elif self.ends("tional"): self.r("tion")
elif self.b[self.k - 1] == 'c':
if self.ends("enci"): self.r("ence")
elif self.ends("anci"): self.r("ance")
el... | [
"def apply_rule_2(word):\n\n # \"alli\" --> \"al\" and then re-apply Rule 2.\n if word.endswith(\"alli\") and measure(word[:-4]) > 0:\n return apply_rule_2(word[:-2])\n\n for suffix, replacement in [\n (\"ational\", \"ate\"),\n (\"tional\", \"tion\"),\n (\"enci\", \"ence\"),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
step3() deals with -ic-, -full, -ness etc., using a similar strategy to step2. | def step3(self):
if self.b[self.k] == 'e':
if self.ends("icate"): self.r("ic")
elif self.ends("ative"): self.r("")
elif self.ends("alize"): self.r("al")
elif self.b[self.k] == 'i':
if self.ends("iciti"): self.r("ic")
elif self.b[self.k]... | [
"def main(step_list):",
"def conduct_experiment_3(self):\n self.experiment_3.conduct_experiment()",
"def step(self):\n\t\tpass",
"def em_step(t, eng, fre):\n\t# TODO",
"def step1(self): # real signature unknown; restored from __doc__\n pass",
"def test3():\n\n #TODO: Test for a couple nor... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
step4() takes off -ant, -ence etc., in context <c>vcvc<v>. | def step4(self):
if self.b[self.k - 1] == 'a':
if self.ends("al"): pass
else: return
elif self.b[self.k - 1] == 'c':
if self.ends("ance"): pass
elif self.ends("ence"): pass
else: return
elif self.b[self.k - 1] == 'e':
if sel... | [
"def step(self):\n\t\tpass",
"def step1(self): # real signature unknown; restored from __doc__\n pass",
"def main(step_list):",
"def conduct_experiment_4(self):\n self.experiment_4.conduct_experiment()",
"def step(self):\n \tif not self.is_done():\n actions = [agent.program(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
step5() removes a final e if m() > 1, and changes ll to l if m() > 1. | def step5(self):
self.j = self.k
if self.b[self.k] == 'e':
a = self.m()
if a > 1 or (a == 1 and not self.cvc(self.k-1)):
self.k = self.k - 1
if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:
self.k = self.k -1 | [
"def _5(self, _5):\n\n self.__5 = _5",
"def __case_5(self, node):\n sibling, direction = self._get_sibling(node)\n closer_node = sibling.right if direction == 'L' else sibling.left\n outer_node = sibling.left if direction == 'L' else sibling.right\n if closer_node.color == RED a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
In stem(p,i,j), p is a char pointer, and the string to be stemmed is from p[i] to p[j] inclusive. Typically i is zero and j is the offset to the last character of a string, (p[j+1] == '\0'). The stemmer adjusts the characters p[i] ... p[j] and returns the new endpoint of the string, k. Stemming never increases word len... | def stem(self, p, i=None, j=None):
if i is None:
i = 0
if j is None:
j = len(p) - 1
# copy the parameters into statics
self.b = p
self.k = j
self.k0 = i
if self.k <= self.k0 + 1:
return self.b # --DEPARTURE--
# With thi... | [
"def get_stem(word):\r\n #stub\r\n #PLACEHOLDER\r\n\r\n ps = PorterStemmer()\r\n \r\n return word",
"def stem(word):\n global _stemmer\n if _stemmer is None:\n _stemmer = nltk.stem.porter.PorterStemmer()\n return _stemmer.stem(word)",
"def stem(s):\r\n if(s[-1:] == 's'):\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates a 3-digit list for the code. | def generate_code():
digits = list(range(10))
random.shuffle(digits)  # shuffle in place; shuffling the slice copy digits[:3] was a no-op
code = digits[:3]
print(code)
return code | [
"def main(self):\n result = []\n rawCodes = self.randomize()\n for code in rawCodes:\n code36 = self.base36_encode(code)\n #Be sure to have X characters in the code [ugly check]\n nbCharLeft = self.nbChar - len(code36)\n while nbCharLeft > 0:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
read a spectrum from a csv/tab delimited txt file, returning an array | def readSpectrumFile(filename):
sp = []
# use ValueError to deal with varied header length/format
with open(filename, 'r') as csvf:
rdr = csv.reader(csvf, delimiter='\t')
for row in rdr:
try:
# test that [float...] is 2 elements; otherwise skip. Deals wit... | [
"def array(file):\n\tsequences = []\n\trecSite = []\n\tfreq = []\n\twith open(file, 'r') as csv_file:\n\t\tfileReader = csv.reader(csv_file, delimiter = \"|\")\n\t\tfileReader.next() # throwaway header row\n\n\t\tfor row in fileReader:\n\t\t\tstrippedRow = row[0].strip(\",\").split(',')\n\t\t\tsequences.append(stri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
display spectra input as a list on shared axes | def displaySpectra(spectra):
colList = ['r', 'g', 'b', 'm', 'c', 'y', 'k']
for idx, spectrum in enumerate(spectra):
#assign color
c = colList[idx % len(colList)]
plt.plot(spectrum[:,0], spectrum[:,1], c)
plt.show() | [
"def plot_spectra(self, **kwargs):\n import matplotlib.pyplot as plt\n\n fig, ax = plt.subplots()\n ax.set(xlabel=\"Wavelength ($\\\\AA$)\", ylabel=\"Intensity (a.u.)\")\n\n for spinch in [self.dd, self.du, self.ud, self.uu]:\n if spinch is None:\n continue\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
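`displaySpectra` picks colours by index arithmetic (`colList[idx % len(colList)]`); `itertools.cycle` expresses the same wrap-around directly and pairs naturally with `zip`. A minimal sketch:

```python
import itertools
import matplotlib.pyplot as plt

def display_spectra(spectra):
    """Plot a list of Nx2 [wavelength, intensity] spectra on shared axes."""
    colors = itertools.cycle(['r', 'g', 'b', 'm', 'c', 'y', 'k'])
    for spectrum, c in zip(spectra, colors):  # cycle() never runs out
        plt.plot(spectrum[:, 0], spectrum[:, 1], c)
    plt.show()
```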
take input spectrum and interpolate to sample every dlambda; be careful of cases with spectra narrower than dlambda | def interpolateSpectrum(spectrum, dlambda):
wlIn = spectrum[:,0]
wlInterp = dlambda * ( np.arange( np.floor(min(wlIn/dlambda)),
np.ceil(max(wlIn/dlambda))))
spectrumIn = spectrum[:,1] ... | [
"def integrateSpectra(spectra, dlambda):\n \n \"\"\"\n spectra = list of Nx2 arrays describing filter or dye spectra, or laser wavelength profile\n dlambda = wavelength difference betweeen adjacent values in the spectra\n \"\"\"\n\n lowerLimit = min( [min(spectrum[:,0]) for spectrum in spectra] )\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
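`interpolateSpectrum` resamples an Nx2 spectrum onto a uniform wavelength grid; `numpy.interp` performs the same one-dimensional linear interpolation directly. A minimal sketch (note that `np.interp` clamps to the endpoint values outside the input range, which matters for spectra narrower than the grid):

```python
import numpy as np

def interpolate_spectrum(spectrum: np.ndarray, dlambda: float) -> np.ndarray:
    """Resample an Nx2 [wavelength, intensity] array every dlambda."""
    wl = spectrum[:, 0]
    # Uniform grid aligned to multiples of dlambda, spanning the input range.
    grid = dlambda * np.arange(np.floor(wl.min() / dlambda),
                               np.ceil(wl.max() / dlambda) + 1)
    return np.column_stack([grid, np.interp(grid, wl, spectrum[:, 1])])

sp = np.array([[400.0, 0.1], [405.0, 0.9], [410.0, 0.4]])
print(interpolate_spectrum(sp, 2.5))
```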
take list of spectra, and return integral of their product over the largest possible range | def integrateSpectra(spectra, dlambda):
"""
spectra = list of Nx2 arrays describing filter or dye spectra, or laser wavelength profile
dlambda = wavelength difference between adjacent values in the spectra
"""
lowerLimit = min( [min(spectrum[:,0]) for spectrum in spectra] )
upperLimit = m... | [
"def multiplySpectra(spectra, dl = 0.5):\n \"\"\" dl = optional parameter to control in-built interpolation\"\"\"\n interpSpectra = [interpolateSpectrum(sp, dl) for sp in spectra]\n \n lowerLimit = min( [min(spectrum[:,0]) for spectrum in interpSpectra] )\n upperLimit = max( [max(spectrum[:,0]) for s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
spectra = list of Nx2 arrays describing filter or dye spectra to be multiplied | def multiplySpectra(spectra, dl = 0.5):
""" dl = optional parameter to control in-built interpolation"""
interpSpectra = [interpolateSpectrum(sp, dl) for sp in spectra]
lowerLimit = min( [min(spectrum[:,0]) for spectrum in interpSpectra] )
upperLimit = max( [max(spectrum[:,0]) for spectrum in inter... | [
"def spectrum_splice(*spectra):\n\n wavelengths = np.concatenate([x.wavelengths for x in spectra])\n values = np.concatenate([x.values for x in spectra])\n value_errors = np.concatenate([x.value_errors for x in spectra])\n\n return Spectrum(wavelengths=wavelengths,\n values=values,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Normalise maximum of spectrum to 1 | def normaliseSpectrum(spectrum):
m = max(spectrum[:,1])
spectrum[:,1] = spectrum[:,1] / m
return spectrum | [
"def normalize(signal):\n return signal / np.abs(signal).max()",
"def normalize_intensity(x: np.ndarray, max_intensity: float) -> np.ndarray:\n return (x / max_intensity).clip(0, 1)",
"def channel_normalization(x):\n max_values = tf.reduce_max(tf.abs(x), axis = 2, keepdims = True) + 1e-5\n out = x ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Pad spectra with zeros for undefined values between min and max | def padWithZeros(spectrum, min_lambda, max_lambda):
dl = np.diff(spectrum[:,0])[0]
# TODO: check and throw error if dl isn't constant throughout spectrum
min_included_l = min(spectrum[:,0])
max_included_l = max(spectrum[:,0])
l_low = np.linspace(min_lambda, (min_included_l - dl), int(((min_incl... | [
"def zero_out_min(input, spectrum, max=None):\n assert len(input.size()) == 5\n assert len(spectrum.size()) == 4\n idx = global_arg(spectrum, is_min=True)\n if max is None:\n spectrum_max = spectrum.max()\n if spectrum_max < float(\"inf\"):\n max = spectrum_max + 1.0\n el... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Collect integration files from changed files in the pull request. | def collect_integration_files(input_files: List[str]) -> Set[Path]:
integration_dir = Path("/validate", "components")
integration_files = set(integration_dir.glob("**/*.json"))
changed_integration_files = set(
[
integration_dir / fil.split("/")[-1]
for fil in input_files
... | [
"def get_list_of_changed_files() -> None:\n start_log_group(\"Get list of specified source files\")\n files_link = f\"{GITHUB_API_URL}/repos/{GITHUB_REPOSITORY}/\"\n if GITHUB_EVENT_NAME == \"pull_request\":\n files_link += f\"pulls/{Globals.EVENT_PAYLOAD['number']}/files\"\n elif GITHUB_EVENT_NA... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make the corresponding optimizer from the flags. Only the optimizers below are allowed; feel free to add more. | def make_optimizer(self):
# parameters = [self.encoder.parameters(), self.decoder.parameters(), self.spec_enc.parameters()]
if self.flags.optim == 'Adam':
op = torch.optim.Adam(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)
elif self.flags.optim == 'RMS... | [
"def make_optimizer(self):\n raise NotImplementedError",
"def _create_optimizer(self) -> None:\r\n if self.args.optimizer_name == \"adamw\":\r\n self.optimizer = create_optimizer(\r\n self.args.learning_rate, self.train_steps, self.args.warmup_steps, self.args.end_lr\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
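`make_optimizer` dispatches on a flag string via an if/elif chain; a dictionary of constructors keeps the dispatch table declarative and the "unknown optimizer" failure explicit. A minimal sketch (the flag names mirror the row above; 'RMS' is assumed to mean RMSprop):

```python
import torch

OPTIMIZERS = {
    'Adam': torch.optim.Adam,
    'RMS': torch.optim.RMSprop,
    'SGD': torch.optim.SGD,
}

def make_optimizer(name, model, lr, weight_decay):
    try:
        cls = OPTIMIZERS[name]
    except KeyError:
        raise ValueError(f'Unknown optimizer {name!r}') from None
    return cls(model.parameters(), lr=lr, weight_decay=weight_decay)

# e.g. make_optimizer('Adam', model, lr=1e-3, weight_decay=5e-4)
```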
Get the selected data and update the data in the source | def update():
df_active = select_reviews()
source.data = ColumnDataSource(data=df_active).data | [
"def UpdateData(self, event = None):\n #currentSelection = self.confList.GetStringSelection()\n #self.state.Edit(\"JconfSelection\", currentSelection)\n self.React()\n self.UpdateDisplay()\n return",
"def btn_update_clicked(self):\n selected_index = self.gui.tbldata.selec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
takes a current cell and a character, and outputs a CSV file of the key | def cells_to_csv(cell, pwd, character):
flatten_list = [int(item) for sublist in cell for item in sublist]
flatten_list.insert(0,character)
with open(pwd + '/data/gathered_data.csv', 'a', newline='',encoding='utf-8') as fd:
writer = csv.writer(fd)
writer.writerow(flatten_list) | [
"def __output_to_file(self):\n\n fn = self.out_dir + self.output_file\n map_keys = self.ordered_keys\n row_count = len(self.output_map[map_keys[0]])\n\n with open(fn, 'w') as csvfile:\n wr = writer(csvfile)\n wr.writerow(map_keys)\n\n for row in range(row_count):\n temp = []\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
takes an image and analyzes its cells, saving the cells in a folder | def analyze_cells(img,pwd,character):
TARGET = 100 #number of cells
percentage = 15
percentage = percentage / 200
kernels = [x for x in range(3,249) if x%2 != 0]
kernel = kernels[round(len(kernels)/2)]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
heirarchy = [[],[]]
while ... | [
"def process_test():\n\n test_entry = unpickle(test_file)\n test_dataset = test_entry[b'data']\n test_targets = test_entry[b'fine_labels']\n test_dataset = np.vstack(test_dataset).reshape(-1, 3, 32, 32)\n test_dataset = test_dataset.transpose((0, 2, 3, 1)) \n\n root_path = data_dir + '/cifar100/te... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
View and modify your current BotAccess server(s). | async def _servers(self, ctx: commands.Context):
user_settings = await self.config.user(ctx.author).all()
if user_settings["supporting_in"]:
await ctx.send(embed=discord.Embed(
title="BotAccess Servers",
description=f"{humanize_list([f'`{gu.name}` (`{g}`)' if ... | [
"async def _main_servers(self, ctx: commands.Context):\n settings = await self.config.main_servers()\n servers = \"\"\n for g, d in settings.items():\n if s := self.bot.get_guild(int(g)):\n roles = []\n for r in d:\n if ro := s.get_rol... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add to your allowed BotAccess server(s). | async def _servers_add(self, ctx: commands.Context, *servers: int):
async with self.config.user(ctx.author).all() as user_settings:
if user_settings["supporting_in"]:
if user_settings["end_timestamp"]:
return await ctx.send("You are no longer a supporter, and cann... | [
"async def _allowlist_add(self, ctx: commands.Context, *servers: int):\n async with self.config.allowed() as settings:\n for server in servers:\n if server not in settings:\n settings.append(server)\n return await ctx.tick()",
"async def _allowlist(self, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove from your allowed BotAccess server(s). | async def _servers_remove(self, ctx: commands.Context, *servers: int):
main_servers = await self.config.main_servers()
allowed = await self.config.allowed()
async with self.config.user(ctx.author).all() as user_settings:
if user_settings["supporting_in"]:
for server i... | [
"async def _allowlist_remove(self, ctx: commands.Context, *servers: int):\n async with self.config.allowed() as settings:\n for server in servers:\n if server in settings:\n settings.remove(server)\n return await ctx.tick()",
"async def unignore_server(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
View and set your designated BotAccess main server(s). | async def _main_servers(self, ctx: commands.Context):
settings = await self.config.main_servers()
servers = ""
for g, d in settings.items():
if s := self.bot.get_guild(int(g)):
roles = []
for r in d:
if ro := s.get_role(r):
... | [
"async def _servers(self, ctx: commands.Context):\n user_settings = await self.config.user(ctx.author).all()\n if user_settings[\"supporting_in\"]:\n await ctx.send(embed=discord.Embed(\n title=\"BotAccess Servers\",\n description=f\"{humanize_list([f'`{gu.name... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the access roles in a BotAccess main server. | async def _access_roles(self, ctx: commands.Context, server: discord.Guild, *roles: discord.Role):
async with self.config.main_servers() as settings:
if str(server.id) not in settings.keys():
return await ctx.send(f"{server.name} is not a BotAccess main server!")
settings... | [
"def setRoles(self, roles):\n pass",
"def set_server_roles(ssh_client, roles):\n yaml = get_vmdb_yaml_config(ssh_client)\n yaml['server']['role'] = ','.join(roles)\n set_vmdb_yaml_config(ssh_client, yaml)",
"async def roles(self, ctx):\n pass",
"async def set_permissions(self, permissio... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Toggle whether to automatically leave users' servers if they stop supporting. | async def _auto_leave(self, ctx: commands.Context, true_or_false: bool):
await self.config.auto_leave.toggle.set(true_or_false)
return await ctx.tick() | [
"def turn_off(self) -> None:\n self.delayed_turn_on = None\n\n if not Network.is_someone_home():\n super().turn_off()",
"def turn_off(self):\n self.status = False",
"def shutdown(self):\n\t\tbody = dict()\n\t\tbody[\"stop_server\"] = {\n\t\t\t\"stop_type\" : \"soft\",\n \t\t\t\"t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the delay to wait before automatically leaving a BotAccess server (requires autoleave to be toggled on). | async def _leave_delay(self, ctx: commands.Context, delay_in_hours: int):
if delay_in_hours < 1:
return await ctx.send("Please enter a number greater than 0!")
await self.config.auto_leave.delay.set(delay_in_hours)
return await ctx.tick() | [
"def delay_timeout(self, delay_timeout):\n\n self._delay_timeout = delay_timeout",
"def delay(self, delay):\n delay = int(delay)\n if self.__handler.delay != int(delay):\n self.__handler.delay = delay\n debug('ReplyServer.delay: set to %d ms', delay)",
"async def timeo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Toggle whether to send the Thank You and Expiration messages. | async def _message_toggles(self, ctx: commands.Context, thanks: bool, expire: bool):
await self.config.messages.thanks.toggle.set(thanks)
await self.config.messages.expire.toggle.set(expire)
return await ctx.tick() | [
"def toggle_recharge_on(self):\n self.will_recharge = True",
"def thank_you_all():\n donor_dict.email_all()",
"async def _welcometoggle(self, ctx):\n welcometoggle = await self.config.guild(ctx.guild).autowelcome()\n if welcometoggle:\n await self.config.guild(ctx.guild).autow... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
View and set the BotAccess server allowlist. | async def _allowlist(self, ctx: commands.Context):
settings = await self.config.allowed()
await ctx.send(embed=discord.Embed(
title="BotAccess Allowed Servers",
description=f"{humanize_list([f'`{gu.name}` (`{g}`)' if (gu := self.bot.get_guild(g)) else f'`{g}`' for g in settings])... | [
"def allow_access(self, share, access, share_server):",
"def fetch_allow_list(self) -> None:\n\n logging.info(\"fetching valid allow list\")\n ipfs_client = ipfshttpclient.connect()\n res = ipfs_client.get(self.robonomics_allow_list_hash)\n pass",
"def getAllow(self):\n return... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add to the BotAccess server allowlist. | async def _allowlist_add(self, ctx: commands.Context, *servers: int):
async with self.config.allowed() as settings:
for server in servers:
if server not in settings:
settings.append(server)
return await ctx.tick() | [
"async def _allowlist(self, ctx: commands.Context):\n settings = await self.config.allowed()\n await ctx.send(embed=discord.Embed(\n title=\"BotAccess Allowed Servers\",\n description=f\"{humanize_list([f'`{gu.name}` (`{g}`)' if (gu := self.bot.get_guild(g)) else f'`{g}`' for g i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove from the BotAccess server allowlist. | async def _allowlist_remove(self, ctx: commands.Context, *servers: int):
async with self.config.allowed() as settings:
for server in servers:
if server in settings:
settings.remove(server)
return await ctx.tick() | [
"async def RemoveBlackList(self, ctx, server):\r\n\t\tBL = self.BotConfig('BlacklistedServers')\r\n\t\tif int(server) in BL:\r\n\t\t\tBL.remove(int(server))\r\n\r\n\t\tself.BotConfig('BlacklistedServers', BL)\r\n\t\tawait ctx.send('Removed {} from Blacklisted servers'.format(server))",
"async def unignore_server(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the amount of BotAccess servers a user is allowed to have. | async def _server_limit(self, ctx: commands.Context, num_servers: int):
if num_servers < 1:
return await ctx.send("Please enter a number greater than 0!")
await self.config.limit.set(num_servers)
return await ctx.tick() | [
"async def limit(self, ctx: commands.Context, limit: int = 0):\n await self.config.limit.set(limit)\n await ctx.send(\n f\"The server limit has been set to {limit}.\"\n if limit\n else \"The server limit has been disabled.\"\n )\n await self.build_cache()... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Refresh current BotAccess supporters. | async def _refresh(self, ctx: commands.Context):
async with ctx.typing():
await self._refresh_supporters()
return await ctx.send("BotAccess supporters have been refreshed!") | [
"def refresh(self):\r\n self._agents = self._get_agents()",
"def refresh(self):\r\n self._media_agents = self._get_media_agents()",
"def update_info():\n\n update_channels_list()\n update_user_list()",
"def refresh_tokens(self):\n _log.info('Refreshing Ecobee auth tokens.')\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reset BotAccess settings for a user or for everything. | async def _reset(self, ctx: commands.Context, user: typing.Optional[discord.User], leave_servers: bool, enter_true_to_confirm: bool):
if not enter_true_to_confirm:
return await ctx.send("Please provide `true` as the parameter to confirm.")
async with ctx.typing():
to_leave: list... | [
"def reset_user(self):\n self.user_model = None",
"def reset(\n self,\n username: Optional[str],\n password: Optional[str],\n ) -> None:\n self.password_mgr.rb_user = username\n self.password_mgr.rb_pass = password\n self.used = False",
"async def reset(self, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses an NCBI TSV file and inserts the relevant values from it into the database. | def ParseAndInsertTSVintoDB(path, cur):
df = pd.read_csv(path, delimiter="\t")
for _, row in tqdm(df.iterrows()):
InsertRow(row, cur) | [
"def generate_db(tsv_file, db_file):\n logger.info(\"Converting tsv %s to db file %s\", tsv_file, db_file)\n if os.path.exists(db_file):\n os.remove(db_file)\n db = TinyDB(db_file)\n with codecs.open(tsv_file, \"rb\", encoding=\"utf-8\") as f:\n row = f.readline().s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
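`ParseAndInsertTSVintoDB` streams the frame row-by-row through a cursor; when no per-row logic is needed, `DataFrame.to_sql` does the same bulk load in one call. A minimal sketch against SQLite (the table name is illustrative):

```python
import sqlite3
import pandas as pd

def load_tsv_into_db(path: str, db_path: str, table: str = 'records') -> int:
    df = pd.read_csv(path, delimiter='\t')
    with sqlite3.connect(db_path) as conn:
        # Append every row in a single bulk insert.
        df.to_sql(table, conn, if_exists='append', index=False)
    return len(df)
```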
Get epoch with best accuracy. | def get_best_epoch_and_accuracy(path_to_model_files):
all_models = os.listdir(path_to_model_files)
while '_' not in all_models[-1]:
all_models = all_models[:-1]
best_model = all_models[-1]
all_us = list(find_all_substr(best_model, '_'))
return int(best_model[5:all_us[0]]), float(best_model[a... | [
"def best_epoch(self):\n return(self.__best_epoch)",
"def get_best_epoch(self) -> Tuple[int, float]:\n best_epoch = np.argmin(self.history[\"val_loss\"])\n score = self.history[\"val_loss\"][best_epoch]\n return best_epoch, score",
"def get_epoch_accuracy(directory, param, ens_size, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get Optimizer based on name. | def get_optimizer(optimizer_name):
if optimizer_name == "sgd":
return optim.SGD
elif optimizer_name == "adam":
return optim.Adam
else:
raise ValueError('Unknown Optimizer ' + optimizer_name) | [
"def get_optimizer(optimizer_name='Adam'):\n optimizer_name = optimizer_name.capitalize()\n return getattr(torch.optim, optimizer_name)",
"def get_optimizer(optimizer, **kwargs):\n return optimizers[optimizer](**kwargs)",
"def optimizer(self):\n return self.optimizers[0]",
"def optimizer(self)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
extract platform from the user agent: ios, android, web, wechat | def get_client_platform(request):
if hasattr(request_context, 'ctx'):
return request_context.ctx.platform
userAgent = request.META.get('HTTP_USER_AGENT', '').upper()
app = get_param_string(request, 'app')
# header.put("User-Agent", "volley/1.0.0 Android HJC/" + BuildConfig.VERSION_NAME);; 这是安... | [
"def get_OS(user_agent):\n for p in compiled_patterns_os:\n m = p[1].search(user_agent)\n if m:\n return p[0]\n return ''",
"def platform() -> list:\n if GetOS.OS == \"Linux\":\n x = InformationManager(SysFiles.ver.value)\n return x.openF().read().split(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the observation for the next step from the data frame; we support pandas for now. The data can either be downloaded live online via the yfinance library or be read from a CSV file. | def _getNextObservation(self, getData="fromCSV", mode="step"):
if self.pandasData is None :
#Download the data if csv file is not available
self.getStockObservation_fromCSV(mode=self.agent_mode)
self.groupData() # Groups the data in subsets grouped on daily basis
... | [
"def askForData(self,nbNeededObservation=32):\n numberOfDays = int(nbNeededObservation/8)+1 # the theorical number of days we need + 1 day for safety\n try:\n response = requests.get(self.infos[\"URL\"]+\"prediction/{}\".format(numberOfDays))\n if response.ok:\n da... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the stock observation directly from yfinance or other frameworks. If you already have your data written to a CSV file, please use the method 'getStockObservation_fromCSV'. | def getStockObservation_online(self):
print("Downloading following stock data now --> {}".format(self.stockTicker))
self.pandasData = yf.download(tickers=self.stockTicker, interval=self.trade_mode,
group_by=self.stockTicker,
... | [
"def get_stock_data(x):",
"def retrieve_OHLC_data(inputs):\n global stock_dict,symbol,CURRENT_DATE\n stock_dict=dict()\n \n for i in inputs['stock_list']:\n # send_results_to_file({'TRADE DATA FOR------>':i.upper()},'a')\n symbol = i.upper() \n stock_name=symbol\n stock =pd... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Picks a random number. Marks dubs. Generates a meter that shows how close it was to the maximum. The meter will be yellow if you got dubs, green if you were one off, aqua if you were two off, or blue if you were further. Pink replaces anything but yellow if you got the minimum or maximum. Start with [unclear] to get em... | async def roll(self, context):
q = mf.contentq(context.message.content)
high = 100
low = 0
cm = False
fl = False
highroll = False
for x in range(0, 5):
q = " ".join(q)
if q.startswith("[unclear]"):
q = q[9:]
... | [
"def next_float(self, min, max):\n\t\treturn min + (max * self.__rand.random())",
"def next_float(min, max = None):\n if max == None:\n max = min\n min = 0\n\n if max - min <= 0:\n return min\n\n return min + random.random() * (max - min)",
"def random_int_r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
model.feed_manager.FeedManager.add Add a new article to a feed ONLY if the feed already exists and the feed does not already have the article. Returns False if no feed could be found corresponding to the article given. Also returns False if the article already exists. | def add(self, new_article: Article, feed_name: str) -> bool:
fm_logger.debug('FeedManager.add')
if self.is_empty() or self.contains(new_article, feed_name):
return False
try:
feed: Feed = self.__get_feed(feed_name)
feed.add_new(new_article)
retu... | [
"def add_new(self, new_article: Article) -> bool:\n\n f_logger.debug('Feed.add_new')\n\n if self.contains(new_article):\n return False\n\n self.__list_of_articles.append(new_article)\n self.__sort()\n return True",
"def feed_exists(self, feed):\n if not self.li... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
model.feed_manager.FeedManager.contains Determines whether an article exists in the feed indicated. Returns True if a match was found. | def contains(self, article: Article, feed_name: str) -> bool:
fm_logger.debug('FeedManager.contains')
if self.is_empty():
return False
for feed in self.__list_of_feeds:
if feed.name == feed_name:
return feed.contains(article)
# No feed matched ... | [
"def contains(self, article: Article) -> bool:\n\n f_logger.debug('Feed.contains')\n\n for list_article in self.__list_of_articles:\n if list_article.title == article.title:\n return True\n\n return False",
"def article_exists(article):\n articles = db_session().q... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
model.feed_manager.FeedManager.get_current_article Attempts to get the current article that is displayed. Raises an exception if the FeedManager is empty. Returns the current article which is displayed. | def get_current_article(self) -> Article:
fm_logger.debug('FeedManager.get_current_article')
if self.__current_feed_index == -1:
raise FeedManagerEmptyException("This FeedManager is empty. Current article does not exist.")
current_feed: Feed = self.__list_of_feeds[self.__current_f... | [
"def get_current_article(self) -> Article:\n\n f_logger.debug('Feed.get_current_article')\n\n return self.__list_of_articles[self.__current_article_index]",
"def get_next_article(self) -> Article:\n\n fm_logger.debug('FeedManager.get_next_article')\n\n if self.is_empty():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
model.feed_manager.FeedManager.get_next_article Attempts to get the next article to display. Raises an exception if the FeedManager is empty. Returns the next article to display. May return the currently displayed article if only one feed exists and that feed only contains one article. | def get_next_article(self) -> Article:
fm_logger.debug('FeedManager.get_next_article')
if self.is_empty():
raise FeedManagerEmptyException("This FeedManager is empty. Could not get next article.")
else:
# current feed is at last entry of list, wrap to beginning
... | [
"def get_next_article(self) -> Article:\n\n f_logger.debug('Feed.get_next_article')\n\n if len(self.__list_of_articles) == 1:\n return self.get_current_article()\n\n # The current article is at the end.\n if self.__current_article_index == (len(self.__list_of_articles) - 1):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
model.feed_manager.FeedManager.is_empty Determines whether the model contains any feeds. Returns True if the FeedManager is empty | def is_empty(self) -> bool:
fm_logger.debug('FeedManager.is_empty')
if self.size() == 0:
return True
return False | [
"def is_empty(self):\r\n return len(list(self.__iterable)) == 0",
"def is_empty( self ):\n\t\treturn not self.guard.is_connected()",
"def isEmpty(self):\n return len(self._widgets) == 0",
"def is_empty(self):\n return self.is_leaf(self.root)",
"def empty(self):\n return self.page... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
model.feed_manager.FeedManager.remove Removes the indicated feed from the manager and updates the current feed if another exists. Returns False if no feed matched the name given. | def remove(self, feed_name: str) -> bool:
fm_logger.debug('FeedManager.remove')
try:
matched_feed: Feed = self.__get_feed(feed_name)
except FeedNotFoundException:
return False
# feed_manager will be empty after removal
if self.size() == 1:
s... | [
"def remove(self, feed):\n if isinstance(feed, dict):\n feed = feed.get('id')\n return kaa.feedmanager.remove_feed(feed)",
"def remove_feed(self, feed: Union[str, Feed]):\n url = feed_argument(feed)\n return self._storage.remove_feed(url)",
"def delete_feed(self, feed: Fee... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
model.feed_manager.FeedManager.size Returns the number of feeds currently held. | def size(self) -> int:
fm_logger.debug('FeedManager.size')
return len(self.__list_of_feeds) | [
"def getFeedLen(self, feedId):\n\t\ttry:\n\t\t\treturn len(self.getFeed(feedId=feedId).entries)\n\t\texcept Exception:\n\t\t\tprint \"The ID %r doesn't exist.\" % feedId",
"def stats_monitored_feeds(self):\n # open the articles database and return the nulber of articles inside\n with shelve.open(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
model.feed_manager.FeedManager.update Creates a new Feed object if one doesn't already exist, or updates an existing feed with the contents given. Will not update if the feed_contents list is empty. | def update(self, feed_name: str, feed_link: str, feed_contents: List[Article]):
fm_logger.debug('FeedManager.update')
if len(feed_contents) == 0:
# DO not add the articles if the list of articles given is empty
return
try:
feed = self.__get_feed(feed_name)
... | [
"def update_feed(self, feed: Union[str, Feed]):\n url = feed_argument(feed)\n rows = list(self._storage.get_feeds_for_update(url))\n if len(rows) == 0:\n raise FeedNotFoundError(url)\n elif len(rows) == 1:\n self._update_feed(rows[0])\n else:\n ass... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
model.feed_manager.FeedManager.__get_feed Returns the feed which matches the given name. Raises an exception if a match could not be found. | def __get_feed(self, feed_name: str) -> Feed:
fm_logger.debug('FeedManager.__get_feed')
for feed in self.__list_of_feeds:
if feed.name == feed_name:
return feed
raise FeedNotFoundException("No feed found with the name: %s" % feed_name) | [
"def getFeed(self, feedId):\n\t\ttry:\n\t\t\treturn self.getFeeds()[feedId]\n\t\texcept Exception:\n\t\t\tprint \"The ID %r doesn't exist.\" % feedId",
"def get_feed(\n self, feed: FeedInput, default: Union[MissingType, _T] = MISSING\n ) -> Union[Feed, _T]:\n return zero_or_one(\n self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
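`__get_feed` is a linear scan that raises on a miss; the same lookup reads compactly with `next` over a generator expression. A minimal standalone sketch (the exception class mirrors the one used in the rows above):

```python
class FeedNotFoundException(Exception):
    pass

def get_feed(feeds, feed_name):
    """Return the first feed whose .name matches feed_name, else raise."""
    feed = next((f for f in feeds if f.name == feed_name), None)
    if feed is None:
        raise FeedNotFoundException('No feed found with the name: %s' % feed_name)
    return feed
```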
model.feed_manager.create_feed_manager Uses the model.parser module to download the contents of the indicated feed and load it into a new instance of FeedManager. Returns a newly created FeedManager | def create_feed_manager(feed_url: str):
fm_logger.debug('model.feed_manager.create_feed_manager')
feed_name = parser.get_feed_name(feed_url)
feed_contents = parser.get_feed_contents(feed_url)
feed_manager = FeedManager()
feed_manager.update(feed_name, feed_url, feed_contents)
return feed_mana... | [
"def get( self ):\n #using urlgrabber so it doesn't matter whether feed is a file or a url\n logger.debug(\"Opening feed: \" + self.feed)\n fd = urlopen( self.feed )\n feed = {}\n #is this an OPML file?\n try:\n outlines = OPML.parse( fd ).outlines\n l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Try to open 'f' as a Dataset if it's a string; else let it pass through. | def _try_open(f):
if isinstance(f, str):
f = xr.open_dataset(f)
return f | [
"def maybe_read(dataset_cls):\n try:\n return dataset_cls.read()\n except IOError, e:\n print >> sys.stderr, 'Warning: skipping example data tests due to:\\n%s' % e\n return None",
"def from_asdf(cls, filepath):\n from .loader import load_dataset\n return load_dataset(filepath)",
"def t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remap a source dataset to the horizontal grid of the given target dataset. The source dataset should be a Dataset; it can have multiple fields and multiple dimensions although we assume the horizontal grid is defined by 'lat' and 'lon'. The target data only needs to have 'lat' and 'lon' coordinates. The weights provide... | def remap_dataset(src_data, target_data, weights):
src_data = _try_open(src_data)
target_data = _try_open(target_data)
weights = _try_open(weights)
snlon, snlat = len(src_data.lon), len(src_data.lat)
tnlon, tnlat = len(target_data.lon), len(target_data.lat)
# Stack the source data so that we ... | [
"def transform_bounds(ds, src_crs=None, trg_crs=None, trg_dims=None, bnds_dim=None):\n ds = ds.copy(deep=False)\n\n if src_crs is None:\n src_crs = CRS.from_cf(ds.cf[\"grid_mapping\"].attrs)\n if trg_crs is None:\n # default target crs\n trg_crs = CRS(\"EPSG:4326\")\n if trg_dims is... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
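remap_dataset is cut off above, but the core operation — applying precomputed regridding weights to a stacked horizontal grid — can be sketched independently. This assumes SCRIP/ESMF-style weights (variables S, row, col with 1-based indices), which is a guess since the actual weight-file layout is not shown in the record.

```python
import numpy as np
import scipy.sparse as sp

def apply_regrid_weights(src_field, weights_S, weights_row, weights_col,
                         target_shape):
    """Apply a sparse (n_target x n_source) weight map to a 2-D field.

    src_field    : 2-D array (nlat_src, nlon_src)
    weights_*    : SCRIP-style triplets; row/col are 1-based indices
    target_shape : (nlat_tgt, nlon_tgt)
    """
    n_src = src_field.size
    n_tgt = target_shape[0] * target_shape[1]
    W = sp.coo_matrix(
        (weights_S, (weights_row - 1, weights_col - 1)),
        shape=(n_tgt, n_src),
    ).tocsr()
    # Flatten source to a points vector, multiply, reshape to target grid.
    return (W @ src_field.ravel()).reshape(target_shape)

# Tiny smoke test: identity weights leave the field unchanged.
field2d = np.arange(6.0).reshape(2, 3)
idx = np.arange(1, 7)  # 1-based indices
out = apply_regrid_weights(field2d, np.ones(6), idx, idx, (2, 3))
assert np.allclose(out, field2d)
```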
Resample data from a model to the obs grid. | def model_to_obs_grid(model_data, obs_def, mod_def, coords={}):
data_model_rs = xr.Dataset(coords=coords)
resample_to_obs = lambda data2d: pyresample.kd_tree.resample_nearest(
mod_def, data2d, obs_def, radius_of_influence=500000, fill_value=None
)
for field in model_data.data_vars:
p... | [
"def resample(self):\n for unit in self.model:\n if isinstance(unit, Unit):\n unit.sample()\n else:\n for u in unit:\n u.sample()",
"def resample_grid(self, name=None, **kwargs):\n # Make an empty index\n new_index = copy(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
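For context, the pyresample call in the record expects geometry definitions for both grids. A minimal usage sketch with made-up coordinates (grid sizes and values are placeholders, but the resample_nearest call mirrors the one above):

```python
import numpy as np
import pyresample

# Hypothetical 1-degree model grid and a coarser "obs" grid.
mod_lon, mod_lat = np.meshgrid(np.arange(0.0, 10.0), np.arange(0.0, 10.0))
obs_lon, obs_lat = np.meshgrid(np.arange(0.5, 9.5, 2.0), np.arange(0.5, 9.5, 2.0))

mod_def = pyresample.geometry.SwathDefinition(lons=mod_lon, lats=mod_lat)
obs_def = pyresample.geometry.SwathDefinition(lons=obs_lon, lats=obs_lat)

data2d = np.random.rand(*mod_lon.shape)
resampled = pyresample.kd_tree.resample_nearest(
    mod_def, data2d, obs_def, radius_of_influence=500000, fill_value=None
)
print(resampled.shape)  # matches the obs grid
```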
Called before the routes map is generated. ``before_map`` runs before any other mappings are created, so it can override all other mappings. | def before_map(self, map): | [
"def before_map(self, dmrs, nodeid):\n pass",
"def after_map(self, map):",
"def set_initial_map(self,map0,initial_inverse_map=None):\n\n if self.opt is not None:\n self.opt.set_initial_map(map0, initial_inverse_map)\n # self.opt.set_initial_inverse_map(initial_inverse_map)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Called after routes map is set up. ``after_map`` can be used to add fallback handlers. | def after_map(self, map): | [
"def after_map(self, dmrs, nodeid):\n pass",
"def before_map(self, map):",
"def before_map(self, dmrs, nodeid):\n pass",
"def test_route_added_callback(self):\n self.ht.add_route('/blah/<param>', callback=dummy)\n\n resp = requests.get(self.ht.base + '/blah/12345')\n\n last_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Credentials authentication key (e.g. an AWS ARN) | def credentials_key(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "credentials_key") | [
"def get_access_key():\n return environ['AWS_ACCESS_KEY_ID']",
"def aws_access_key(self) -> 'outputs.AwsAccessKeyResponse':\n return pulumi.get(self, \"aws_access_key\")",
"def authorizer_credentials(self) -> str:\n return pulumi.get(self, \"authorizer_credentials\")",
"def get_meta_access_se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an existing Connector resource's state with the given name, id, and optional extra properties used to qualify the lookup. | def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Connector':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ConnectorArgs.__new__(ConnectorArgs)
__props__.__dict__["collection"] =... | [
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n desired_state: Optional[pulumi.Input[str]] = None,\n properties: Optional[pulumi.Input[str]] = None,\n role_arn: Optional[pulumi.Input[str]] = None,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Connector providerAccountId (determined from credentials) | def provider_account_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "provider_account_id") | [
"def account_id(config):\n try:\n account = config.get('CONFIG', 'account-id')\n except NoOptionError:\n account = None\n regions = [region.strip()\n for region in config.get('CONFIG', 'regions').split(',')]\n while (account is None) and regions:\n regi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Solve for the localization precision of a point source with a given flux. | def compute_ps_loc(egy, flux):
pass | [
"def actualSolve(self, lp):\n raise PulpSolverError(\"CPLEX_DLL: Not Available\")",
"def Resolve(splu,RHS):\n\t# array 2D -> array 1D\n\tf2 = RHS.ravel()\n\n\t# Solving the linear system\n\tx = lg.lsqr(splu.tocsc(),f2)\n\n\treturn x[0].reshape(RHS.shape)",
"def actualSolve(self, lp):\n rai... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the observed signal and background counts given models for the exposure, background intensity, PSF, and source flux. | def compute_ps_counts(ebins, exp, psf, bkg, fn, egy_dim=0, spatial_model='PointSource',
spatial_size=1E-3):
ewidth = utils.edge_to_width(ebins)
ectr = np.exp(utils.edge_to_center(np.log(ebins)))
r68 = psf.containment_angle(ectr, fraction=0.68)
if spatial_model != 'PointSource':
... | [
"def model_counts(self, name, ib=0):\n def select_band(x):\n return x[ib] if ib is not None else x.sum()\n if name=='observed':\n return np.array([ select_band(self.df.ix[i]['counts']['observed']) for i in range(1728)])\n def source_counts(i):\n m = self.df.ix[i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
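The counts computation above is truncated; schematically, the expected signal in each energy bin is flux × bin width × exposure × PSF containment fraction, while the background scales with the containment solid angle. A hedged numpy sketch of that bookkeeping — every model input below is a toy function, not the Fermi-ST machinery the record actually uses:

```python
import numpy as np

def expected_counts(ebins, exposure_fn, bkg_rate_fn, r68_fn, dnde_fn):
    """Toy signal/background counts per energy bin.

    ebins       : bin edges [MeV]
    exposure_fn : exposure(E) [cm^2 s]
    bkg_rate_fn : background intensity(E) [1/(cm^2 s MeV sr)]
    r68_fn      : 68% containment radius(E) [deg]
    dnde_fn     : source dN/dE(E) [1/(cm^2 s MeV)]
    """
    ectr = np.sqrt(ebins[:-1] * ebins[1:])   # geometric bin centers
    ewidth = np.diff(ebins)
    # Solid angle of the 68% containment cone [sr]
    domega = 2 * np.pi * (1 - np.cos(np.radians(r68_fn(ectr))))
    sig = 0.68 * dnde_fn(ectr) * ewidth * exposure_fn(ectr)
    bkg = bkg_rate_fn(ectr) * ewidth * exposure_fn(ectr) * domega
    return sig, bkg

ebins = np.logspace(2, 5, 13)                # 100 MeV - 100 GeV
sig, bkg = expected_counts(
    ebins,
    exposure_fn=lambda e: 3e10 * np.ones_like(e),
    bkg_rate_fn=lambda e: 1e-5 * (e / 1e3) ** -2.4,
    r68_fn=lambda e: 5.0 * (e / 1e2) ** -0.8 + 0.1,
    dnde_fn=lambda e: 1e-11 * (e / 1e3) ** -2.0,
)
print(sig.sum(), bkg.sum())
```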
Evaluate the bin-averaged PSF model over the energy bins ``egy_bins``. | def interp_bin(self, egy_bins, dtheta, scale_fn=None):
npts = 4
egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))
egy = np.exp(utils.edge_to_center(np.log(egy_bins)))
log_energies = np.log10(egy)
vals = self.interp(egy[None, :], dtheta[:, None],
... | [
"def calc_bins(self):\n \n if self.is_fitted:\n self.bins_est = calc_log_bins(e_min = self.e_min_est,\n e_max = self.e_max_est, over_under = True,\n n_bins = self.n_bins_est)\n\n self.bins_true = ca... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
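interp_bin averages the PSF over each energy bin by subdividing the bins in log space (npts=4 above). A standalone sketch of that bin-averaging idea, with a generic function in place of the PSF interpolator:

```python
import numpy as np

def bin_average(fn, bin_edges, npts=4):
    """Average fn(E) over each bin, sampling npts log-spaced sub-bins."""
    log_edges = np.log(bin_edges)
    averaged = []
    for lo, hi in zip(log_edges[:-1], log_edges[1:]):
        sub_edges = np.linspace(lo, hi, npts + 1)
        # Evaluate at the log-space centers of the sub-bins
        sub_ctr = np.exp(0.5 * (sub_edges[:-1] + sub_edges[1:]))
        averaged.append(fn(sub_ctr).mean())
    return np.array(averaged)

# Example: average a power law over decade-wide bins.
vals = bin_average(lambda e: e ** -2.0, np.array([1e2, 1e3, 1e4]))
print(vals)
```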
Evaluate the PSF containment angle at a sequence of energies. | def containment_angle(self, energies=None, fraction=0.68, scale_fn=None):
if energies is None:
energies = self.energies
vals = self.interp(energies[np.newaxis, :], self.dtheta[:, np.newaxis],
scale_fn=scale_fn)
dtheta = np.radians(self.dtheta[:, np.newaxi... | [
"def test_propose_angle():\n NSAMPLES = 1000 # number of samples to draw\n NDIVISIONS = 180 # number of divisions for angle\n\n from perses.rjmc.geometry import FFAllAngleGeometryEngine\n import scipy.stats as stats\n geometry_engine = FFAllAngleGeometryEngine()\n\n #Create a test system with only... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
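containment_angle inverts the cumulative radial PSF profile. A self-contained numpy sketch of computing R68 from a profile — a toy Gaussian stands in for the interpolated model, so the numbers are illustrative only:

```python
import numpy as np

def containment_radius(theta_deg, psf_vals, fraction=0.68):
    """Radius containing `fraction` of a radially symmetric PSF.

    theta_deg : offsets [deg]; psf_vals : PSF density [1/sr] at those offsets.
    """
    theta = np.radians(theta_deg)
    # Cumulative integral of 2*pi*sin(theta)*PSF(theta) d(theta), trapezoid rule
    integrand = 2 * np.pi * np.sin(theta) * psf_vals
    steps = 0.5 * (integrand[1:] + integrand[:-1]) * np.diff(theta)
    cdf = np.concatenate([[0.0], np.cumsum(steps)])
    cdf /= cdf[-1]                    # normalize total containment to 1
    return np.interp(fraction, cdf, theta_deg)

theta = np.linspace(0.0, 5.0, 501)
sigma = 0.8                           # toy Gaussian width [deg]
psf = np.exp(-0.5 * (theta / sigma) ** 2)
print(containment_radius(theta, psf))  # ~1.2 deg for this toy profile
```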
Create an array of effective areas versus energy and incidence angle. Binning in energy and incidence angle is controlled with the egy and cth input parameters. | def create_aeff(event_class, event_type, egy, cth):
irf = create_irf(event_class, event_type)
irf.aeff().setPhiDependence(False)
theta = np.degrees(np.arccos(cth))
# Exposure Matrix
# Dimensions are Etrue and incidence angle
m = np.zeros((len(egy), len(cth)))
for i, x in enumerate(egy):
... | [
"def get_ee_t_csys(self):\n robot_t_ee = ros2np(self.robot.move_group.get_current_pose())\n self.acam_t_csys = np.load(\"/home/fasod/fasod_ws/src/project_arbeit/Data/head_coordinate/acam_t_csys.npy\")\n ee_t_csys = np.matmul(la.inv(robot_t_ee),np.matmul(self.robot_t_acam, self.acam_t_csys))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a model for the exposure-weighted PSF averaged over incidence angle. | def create_avg_psf(skydir, ltc, event_class, event_types, dtheta,
egy, cth_bins, npts=None):
return create_avg_rsp(create_psf, skydir, ltc,
event_class, event_types,
dtheta, egy, cth_bins, npts) | [
"def test_WFI_psf():\n wi = wfirst.WFI()\n wi.calc_psf(fov_pixels=4)",
"def model_SA():\n m = 7 # The parameter within the distribution of E\n n = 7 # The parameter within the distribution of I\n\n mean_exposed_days = 6\n mean_infectious_days = 20\n # The result is that the median time in I... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Coroutine that creates a JSON-RPC server; returns an instance of asyncio Server. | async def create_server_coro(cls, addr):
app = web.Application()
app.router.add_post('/', cls.handle)
loop = asyncio.get_event_loop()
server = await loop.create_server(app.make_handler(), addr[0], addr[1])
rpc_logger.info("RPC server is serving on %s", addr)
return server... | [
"async def __aenter__(self):\n return Server(self)",
"async def listen_asyncio(self):\n # NOTE: First thing, realize the server from the Future created during __init__().\n self.server = await self.server\n \n if PYTHON_IS_311:\n await self.run_311()\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
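The record's app.make_handler() call is a long-deprecated aiohttp API. A minimal sketch of the same server with the current AppRunner interface — the handler body is a placeholder, not a real JSON-RPC dispatcher:

```python
from aiohttp import web

async def handle(request: web.Request) -> web.Response:
    payload = await request.json()
    # A real JSON-RPC handler would dispatch on payload["method"] here.
    return web.json_response(
        {"jsonrpc": "2.0", "id": payload.get("id"), "result": None}
    )

async def create_server(host: str = "127.0.0.1", port: int = 8080) -> web.AppRunner:
    app = web.Application()
    app.router.add_post("/", handle)
    runner = web.AppRunner(app)
    await runner.setup()
    await web.TCPSite(runner, host, port).start()
    return runner

# Usage: asyncio.run(create_server()), then keep the event loop alive.
```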
Calculate the words per minute for a given text. | def words_per_minute(data, pause_thresh=10**10, window=10**10):
pause_thresh = pause_thresh / 10**9
window = window / 10**9
# windows per minute
win_per_min = 60 / window
# print(data)
all_words = data[-1]["words"]
# for item in data:
# all_words += item["words"]
... | [
"def calcWpm(text, test, time):\n time = time.total_seconds() / 60\n words = text.split()\n nWords = len(words)\n wpm = round(nWords / time)\n\n return wpm",
"def __produce_text_stats(self, text):\n\n char_count = len(text)\n # next line works because stats are only built for strings ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
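Stripped of the windowing and pause handling above, the core words-per-minute arithmetic is just word count divided by elapsed minutes. A minimal sketch:

```python
def words_per_minute_simple(text: str, elapsed_seconds: float) -> float:
    """Plain WPM: word count over elapsed time in minutes."""
    if elapsed_seconds <= 0:
        raise ValueError("elapsed time must be positive")
    return len(text.split()) / (elapsed_seconds / 60.0)

# 30 words typed in 15 seconds -> 120 WPM
sample = " ".join(["word"] * 30)
print(words_per_minute_simple(sample, 15.0))
```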
Calculate the diffusive flux matrix without the shock capturing contribution. | def ComputeDiffusiveFlux(dofs, dUdx, params):
print("\nCompute diffusive flux (without shock capturing)\n")
## Auxiliary variables
dim = params["dim"]
gamma = params["gamma"]
rho = dofs[0]
mom = []
vel = []
for i in range(dim):
mom.append(dofs[i + 1])
vel.append(dofs[i ... | [
"def flux(self):\n #flux = self.data[self.select]['flux'] * self.units['flux']\n flux = self.data['flux'][self.select].compressed() * self.units['flux']\n if self.normed and self.co_is_set:\n # Avoid dividing by zero\n co = self.data['co'][self.select].compressed()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the diffusive flux matrix with a physics-based shock capturing contribution. See "A physics-based shock capturing method for large-eddy simulation", Fernandez, Nguyen and Peraire (2018). | def ComputeDiffusiveFluxWithPhysicsBasedShockCapturing(dofs, dUdx, params, beta_sc, lamb_sc, mu_sc):
print("\nCompute diffusive flux (with physics-based shock capturing)\n")
## Auxiliary variables
dim = params["dim"]
rho = dofs[0]
mom = []
vel = []
for i in range(dim):
mom.append(d... | [
"def ComputeDiffusiveFlux(dofs, dUdx, params):\n\n print(\"\\nCompute diffusive flux (without shock capturing)\\n\")\n\n ## Auxiliary variables\n dim = params[\"dim\"]\n gamma = params[\"gamma\"]\n rho = dofs[0]\n mom = []\n vel = []\n for i in range(dim):\n mom.append(dofs[i + 1])\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Auxiliary function to calculate the viscous stress tensor for the given dynamic and bulk viscosity values | def CalculateViscousStressTensor(mu, beta, rho, mom, dim, dUdx):
## Calculate velocity divergence
## Note that this is computed as div(mom/rho) = (dx(mom)*rho - mom*dx(rho))/rho**2
div_vel = 0.0
for d in range(dim):
div_vel += (dUdx[d + 1, d] * rho - mom[d] * dUdx[0, d])
div_vel /= rho**2
... | [
"def calc_stress(self) -> None:\n\n n_dof = self._mesh.n_dof\n n_dfdof = self._mesh.n_dfdof\n n_element = self._mesh.n_element\n connectivity = self._mesh.connectivity\n\n for ielm in range(n_element):\n\n mater_id = self._mesh.material_numbers[ielm]\n n_node... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
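For reference, the tensor assembled by CalculateViscousStressTensor follows the standard compressible Navier-Stokes form with a separate bulk viscosity. This is a reconstruction from the code comments (μ dynamic viscosity, β bulk viscosity, m = ρu the momentum), so treat the sign conventions as assumed:

```latex
\tau_{ij} = \mu\left(\frac{\partial u_i}{\partial x_j}
          + \frac{\partial u_j}{\partial x_i}\right)
          + \left(\beta - \tfrac{2}{3}\,\mu\right)
            \frac{\partial u_k}{\partial x_k}\,\delta_{ij},
\qquad
\frac{\partial u_i}{\partial x_j}
  = \frac{1}{\rho}\,\frac{\partial m_i}{\partial x_j}
  - \frac{m_i}{\rho^2}\,\frac{\partial \rho}{\partial x_j}.
```

The second identity is exactly the div(mom/rho) = (dx(mom)*rho - mom*dx(rho))/rho**2 expansion computed in the code above.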
Auxiliary function to calculate the heat flux vector with Fourier's law | def CalculateHeatFluxVector(c_v, lamb, rho, mom, e_tot, dim, dUdx):
## Calculate the heat flux vector (Fourier's law q = -lambda * grad(theta))
## Note that the temperature is expressed in terms of the total energy
heat_flux = []
for d in range(dim):
aux_1 = (dUdx[dim + 1, d]*rho - e_tot * dUdx... | [
"def heat_func(self):\n return self.Q.val + self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI)",
"def ConvectiveFlux(self):\n\n e = self.e[:,:,75]; uy = self.vy[:,:,75]; ux = self.vx[:,:,75]\n Fc_up = 0; Fc_down = 0; e_list = []; uy_up = []; uy_down = []\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
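CalculateHeatFluxVector expresses the temperature in terms of the total energy before applying Fourier's law. With c_v the specific heat and e_tot the total energy per unit volume, the relation being discretized is (reconstructed from the code comments, so normalization is assumed):

```latex
q_i = -\lambda\,\frac{\partial T}{\partial x_i},
\qquad
T = \frac{1}{c_v}\left(\frac{e_{\mathrm{tot}}}{\rho}
    - \frac{1}{2}\,\frac{m_k m_k}{\rho^2}\right),
```

i.e. the kinetic energy m·m/(2ρ²) is subtracted from the specific total energy to isolate the internal energy whose gradient drives the heat flux.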