| content | sha1 | id |
|---|---|---|
import argparse
def separator(string):
"""Parse the separator, possibly raising error"""
options = {"tab": "\t", "comma": ","}
s = options.get(string, string)
if len(s) == 1:
return s
else:
raise argparse.ArgumentTypeError("must be a 1-character string")
|
df4712f01a552911d9d9d8bc70b4b3f31415ec4c
| 11,508
|
def read_params_Coulomb(fname='in.params.Coulomb'):
"""
Read in.params.Coulomb. But only for fixed or fixed_bvs charges.
"""
with open(fname,'r') as f:
lines = f.readlines()
mode = None
chgtype = None
chgs = {}
coul = {}
bvs = {}
for line in lines:
if line[0] in ('#','!'): continue
data = line.split()
if len(data) == 0:
mode = None
continue
if data[0] == 'charges':
mode = 'charges'
chgtype = data[1]
continue
elif data[0] == 'terms':
mode = None
continue
elif data[0] == 'interactions':
mode = None
continue
elif data[0] == 'fbvs':
mode = None
coul['fbvs'] = float(data[1])
continue
if mode == 'charges' and chgtype == 'fixed':
if len(data) == 2:
csp = data[0]
chg = float(data[1])
chgs[csp] = chg
elif mode == 'charges' and chgtype == 'fixed_bvs':
if len(data) == 4:
csp = data[0]
vid = float(data[1])
rad = float(data[2])
npq = int(data[3])
bvs[csp] = (vid,rad,npq)
if chgtype == 'fixed':
coul['charges'] = chgs
elif chgtype == 'fixed_bvs':
coul['bvs'] = bvs
return coul
|
a870d1a7b504fdbbc87f09db758bfd11b770be63
| 11,509
|
import json
def mac_entry_info_get(client, vlan_name, port, peer_mac_addr):
"""based on the input information,found any mac entry is matched.
:returns: True if one entry is matched
"""
status = False
# show ethernet-switching table interface xxx
command = 'exec cli show ethernet-switching table interface ' + \
port + ' \| display json'
stdin, stdout, stderr = client.exec_command(command)
output = json.loads(stdout.read())
mac_db = output["l2ng-l2ald-interface-macdb-vlan"][0]
mac_entry_info = mac_db["l2ng-l2ald-mac-entry-vlan"][0]
# "vlan_name, port_name, mac_addr" is matched, then return True
for mac_entry in mac_entry_info["l2ng-mac-entry"]:
if mac_entry["l2ng-l2-mac-vlan-name"][0]["data"] == vlan_name and \
mac_entry["l2ng-l2-mac-address"][0]["data"] == peer_mac_addr:
status = True
break
return status
|
d8e01b4aa60741c355d80125d87b0a1ae02ee6f6
| 11,510
|
import sys
def _catch_thread_exception(fn):
"""Sets self._worker_exception when fn raises an exception"""
def wrapped(self, *args, **kwargs):
try:
ret = fn(self, *args, **kwargs)
except Exception:
self._worker_exception = sys.exc_info()
else:
return ret
return wrapped
|
9c1f43c1c3237102c60367b6b5dc2967a74faec0
| 11,511
|
def generate_graph(data, course_to_id):
"""
Method to read the JSON and build the directed graph of courses (= DAG of course IDs)
The graph is represented in an adjacency list format
:return: A graph in adjacency list format, with vertices as course IDs and directed edges implying prerequisite relation
:rtype: List[List[int]]
"""
graph = []
ctr = 0
for obj in data:
graph.append([])
for prereq in obj["prerequisites"]:
graph[ctr].append(course_to_id[prereq.strip()])
ctr += 1
return graph
|
0a6fe305e39bd812e000a1d22d2374636a7900b2
| 11,513
|
from typing import Optional
import math
def round_for_theta(
v: float,
theta: Optional[float]
) -> float:
"""
Round a value based on the precision of theta.
:param v: Value.
:param theta: Theta.
:return: Rounded value.
"""
if theta is None:
return v
else:
return round(v, int(abs(math.log10(theta)) - 1))
|
ef6d18df588df8a80be9dc6c9b7f9104e3109d69
| 11,514
|
from typing import List
def split_badge_list(badges: str, separator: str) -> List[str]:
"""
Splits a string of badges into a list, removing all empty badges.
"""
if badges is None:
return []
return [badge for badge in badges.split(separator) if badge]
|
6dc53d45cc8390422e5a511e39475ae969bf37c9
| 11,516
|
def get_teams_of_coach(cursor, coach):
"""gets all the teams a coach has
Args:
cursor: the cursor
coach: the name of the coach
Returns:
a list of all the teams a coach has
"""
cursor.execute(
"Select team_name from TeamsCoaches where coach_name = '{}'".format(coach))
teams = []
while True:
res = cursor.fetchone()
if res is None:
break
else:
# append_this = res[0] + ",coached by " + coach
teams.append(res[0])
return teams
|
3d189a20ee23a2ace4b16bb66aca134f65635efb
| 11,520
|
import random
def seq_generator():
"""
Generate each part of sequence.
:return: sequence part as str
"""
return random.choice(["T", "H"])
|
4714194dea06611b269a7157ac6f0f0cb7c1415c
| 11,521
|
import re
import html
def convert_html_escapes_to_xml(html_text):
"""Avoid XML parsing errors by converting HTML escape codes to XML."""
html_entities = set(
re.findall(r'&(?!quot|lt|gt|amp|apos)[a-zA-Z]{1,30};', html_text)
)
for entity in html_entities:
html_text = html_text.replace(entity, html.unescape(entity))
return html_text
|
0ada386b69c8249db7cf1b356051e167a1b8dbdf
| 11,522
|
def column(matrix, col):
"""Returns a column from a matrix given the (0-indexed) column number."""
res = []
for r in range(len(matrix)):
res.append(matrix[r][col])
return res
|
f5214b5a41bf265f6892fdfd9422061b02f61c63
| 11,523
|
import re
def parse_scenario_gtest_name(name):
"""Parse scenario name to form acceptable by gtest"""
# strip
name = name.strip()
# space and tab to _
name = name.replace(" ", "_").replace("\t", "_")
# remove dots, commas and any other non-letter characters
name = re.sub('[^a-zA-Z_]+', '', name)
# make lower
name = name.lower()
return name
|
faca8406a6b033536c3f9e0da21d9dc818fd2a7e
| 11,524
|
import json
def load_json(filepath):
"""Load a json file.
Args:
filepath (str): A path to an input json file.
Returns:
dict: A dictionary data loaded from a json file.
"""
with open(filepath, 'r') as f:
ret = json.load(f)
return ret
|
c64fec0c861d223e28001051906e32d3aa9ffc7a
| 11,525
|
def sort_states(state, columns, reverse=True):
"""
Sort the states according to the list given by prioritize_jobs.
prioritize_jobs (here called columns) is a list of keys according to
which states should be prioritized for job submission. The position
in the list indicates the prioritization. columns = ["memory", "disk"]
with reverse=True means jobs with high memory will be submitted before
jobs with lower memory requirements, followed by jobs with high disk vs.
low disk requirement. Jobs with high memory and disk requirements
will be submitted first then jobs with high memory and medium disk
requirements, and so on and so forth.
Args:
state: List of states
columns: List of keys in the dict by which dict is sorted
reverse: Reverse the sorting or not. True = Bigger first,
False = smaller first
"""
key_cache = {}
col_cache = dict([(c[1:],-1) if c[0] == '-' else (c,1) for c in columns])
def comp_key(key):
if key in key_cache:
return key_cache[key]
if key in col_cache:
ret = len(columns)-columns.index(key if col_cache[key] == 1 else '-'+key)
else:
ret = 0
key_cache[key] = ret
return ret
def compare(row):
ret = []
for k in sorted(row, key=comp_key, reverse=True):
v = row[k]
if k in col_cache:
v *= col_cache[k]
ret.append(v)
return ret
return sorted(state, key=compare, reverse=reverse)
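# Illustrative usage (not part of the original snippet): with reverse=True the
# entry with the larger "memory" value is submitted first, then "disk" breaks ties.
_example_jobs = [{"memory": 2, "disk": 8}, {"memory": 4, "disk": 1}]
assert sort_states(_example_jobs, ["memory", "disk"]) == [{"memory": 4, "disk": 1}, {"memory": 2, "disk": 8}]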
|
ae8c042d87e5bd2553ceaa08f5e7c88ea7ac2909
| 11,527
|
def statement(prop, value, source):
"""
Returns a statement in the QuickStatements expected format
"""
return "LAST\t{}\t{}\tS854\t\"{}\"".format(prop, value, source)
|
2c0b6ea8f757c8586ca462d85fca3b9d281adb26
| 11,529
|
import json
def get_config_db_json_obj(dut):
"""
Get config_db content from dut
Args:
dut (SonicHost): The target device
"""
config_db_json = dut.shell("sudo sonic-cfggen -d --print-data")["stdout"]
return json.loads(config_db_json)
|
92920197b2056d889ffb76ff66e01061f0654032
| 11,530
|
import argparse
def get_arg_parser(parser=None):
"""Parse the command line arguments for merge using argparse
Args:
parser (argparse.ArgumentParser or CompliantArgumentParser):
an argument parser instance
Returns:
ArgumentParser: the argument parser instance
Notes:
if parser is None, creates a new parser instance
"""
# add arguments that are specific to the component
if parser is None:
parser = argparse.ArgumentParser(description="Training arg parser")
parser.add_argument(
"--train_dir", type=str, help="Directory where training data is stored"
)
parser.add_argument(
"--valid_dir", type=str, help="Directory where validation data is stored"
)
parser.add_argument(
"--model_dir", type=str, help="Directory to output the model to"
)
parser.add_argument("--num_epochs", type=int, help="Number of epochs to train for")
parser.add_argument("--batch_size", type=int, help="Train batch size")
parser.add_argument(
"--learning_rate", type=float, help="Learning rate of optimizer"
)
parser.add_argument("--momentum", type=float, help="Momentum of optimizer")
return parser
|
f66745ab2d2d5ed80b4efc59bd90b5dfbefb1370
| 11,531
|
def check_sender(tx: dict, sender: str) -> bool:
"""
:param tx: transaction dictionary
:param sender: string containing sender's address
:return: boolean
"""
if tx["events"][0]["sub"][0]["sender"][0]["account"]["id"] == sender:
return True
return False
|
785c8453b876a2ca2b14f374836987c77a01f882
| 11,534
|
def is_prime_det(n):
"""
Returns <True> if <n> is a prime number, returns <False> otherwise. It
uses an (optimized) deterministic method to guarantee that the result
will be 100% correct.
"""
# 2 and 3 are prime numbers
if (n <= 3):
return (n > 1)
# Corner cases to speed up the next loop
if (n % 2 == 0) or (n % 3 == 0):
return False
# Using (6k-1) and (6k+1) optimization
i = 5
while (i * i <= n):
if (n % i == 0) or (n % (i + 2) == 0): # Not a prime number
return False
i += 6
return True
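# Illustrative check (not part of the original snippet): 97 is prime, 91 = 7 * 13 is not.
assert is_prime_det(97)
assert not is_prime_det(91)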
|
65ee46364510f45485c7d95ee0973d42ebc81d34
| 11,535
|
def count_indents(text):
"""Takes a string and counts leading white spaces, return int count"""
return len(text) - len(text.lstrip(" "))
|
1c0466c932f0a4204dc704d9718781821246ee89
| 11,538
|
def zippify(iterable, len=2, cat=False):
"""
Zips an iterable with arbitrary length pieces
e.g. to create a moving window with len n
Example:
zippify('abcd',2, cat=False)
--> [('a', 'b'), ('b', 'c'), ('c', 'd')]
If cat = True, joins the moving windows together
zippify('abcd',2, cat=True)
--> ['ab', 'bc', 'cd']
"""
iterable_collection = [iterable[i:] for i in range(len)]
res = list(zip(*iterable_collection))
return [''.join(r) for r in res] if cat else res
|
def01ff9f940009f80c312fe9ee07b282733d99b
| 11,540
|
import os
def find_paths(directory,stipe):
"""""
Arguments:
directory {str} -- taranmak istenen ust dizin
stipe {str} -- taranan uzanti
Returns:
path_list {list} -- path list for stipe
"""
if stipe.islower():
stipe_u = stipe.upper()
else:
stipe_u = stipe.lower()
if directory:
path_list = []
for root, subdirectory, files in os.walk(directory):
for f in files:
if f.endswith(stipe) or f.endswith(stipe_u):
path_list.append(os.path.join(root,f))
return path_list
else:
return None
|
e30b7ee0427db2b4fa45d3944524e056043888f3
| 11,541
|
def sanitize_int(value):
"""
Sanitize an input value to an integer.
:param value: Input value to be sanitized to an integer
:return: Integer, or None of the value cannot be sanitized
:rtype: int or None
"""
if isinstance(value, str):
try:
return int(value)
except ValueError:
return None
elif isinstance(value, int):
return value
|
5c0481c0c7414f5fdba946f66346b3233ffe6644
| 11,542
|
import sys
import os
def getResourcePath(relative_path):
"""Get absolute path to resource"""
# if cx_freeze attribute found, use frozen resources
if getattr(sys, 'frozen', False):
base_path = os.path.dirname(sys.executable)
else: # use path to development resources
base_path = os.path.abspath(os.path.dirname(sys.argv[0]))
return os.path.join(base_path, 'resources', relative_path)
|
fe7c087a81eef0e002f893a3f9a4e2a3a8b40ff9
| 11,543
|
def MeanIoUConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Computes the mean Intersection-Over-Union metric.
Mean Intersection-Over-Union is a common evaluation metric for semantic image
segmentation, which first computes the IOU for each semantic class and then
computes the average over classes. IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by
`sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Standalone usage:
>>> # cm = [[1, 1],
>>> # [1, 1]]
>>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
>>> # iou = true_positives / (sum_row + sum_col - true_positives))
>>> # result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2 = 0.33
>>> m = tf.keras.metrics.MeanIoU(num_classes=2)
>>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1])
>>> m.result().numpy()
0.33333334
>>> m.reset_states()
>>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1],
... sample_weight=[0.3, 0.3, 0.3, 0.1])
>>> m.result().numpy()
0.23809525
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanIoU(num_classes=2)])
```"""
argument_parser.add_argument(
"--num_classes",
help="""The possible number of labels the prediction task can have. This value must be provided, since a
confusion matrix of dimension = [num_classes, num_classes] will be allocated.""",
required=True,
)
argument_parser.add_argument(
"--name", help="(Optional) string name of the metric instance.", required=True
)
argument_parser.add_argument(
"--dtype", help="(Optional) data type of the metric result.", required=True
)
return argument_parser
|
119b3c005a47e92dad2d298befa330ddab33128e
| 11,545
|
def run_part2(_):
"""Implmentation for Part 2."""
# Reverse-engineered the input, it simply does:
return sum(val for val in range(1, 10551340 + 1) if 10551340 % val == 0)
|
75cd89377d4500c8455b96ef1dcf010215e9d058
| 11,547
|
def _get_dir_names(param):
"""Gets the names of the directories for testing the given parameter
:param param: The parameter triple to be tested.
:returns: A list of directories names to be used to test the given
parameter.
"""
return [
'-'.join([param[0], str(i)])
for i in range(0, 5)
]
|
8ef2ad44127402987a26ec0bc6bfffc372f3c64d
| 11,548
|
def bool_converter(value: str) -> bool:
"""
:param value: a string to convert to bool.
:return: False if lower case value in "0", "n", "no" and "false", otherwise, returns the value returned
by the bool builtin function.
"""
if value.lower() in ['n', '0', 'no', 'false']:
return False
return bool(value)
|
10251dfbb0200297d191dce1eb20237aed9e5ac9
| 11,550
|
def heat_stor(cp, spm, tdif):
"""
Calculate the heat storage
Args:
cp: specific heat of layer (J/kg K)
spm: layer specific mass (kg/m^2)
tdif: temperature change (K)
"""
return cp * spm * tdif
|
397736e091a2cea09328cbf7f6e81c99db6fe384
| 11,551
|
from typing import Any
from typing import Optional
from typing import IO
import yaml
def dump_yaml(data: Any, stream: Optional[IO[str]] = None) -> Optional[str]:
"""Dump YAML to stream or return as string."""
if stream is not None:
return yaml.safe_dump(data, stream, default_flow_style=False)
else:
return yaml.safe_dump(data, default_flow_style=False)
|
7ff998722e5d7754f83e960127bbed39d4a8d728
| 11,552
|
def get_link_to_referenced_comment(subreddit_name, user_note):
"""Extracts the id of the comment referenced by the usernote and formats it as a reddit-internal link"""
link_code_segments = user_note['l'].split(',')
if len(link_code_segments) == 3:
submission_id = link_code_segments[1]
comment_id = link_code_segments[2]
return '/r/{0}/comments/{1}/x/{2}/?context=3'.format(subreddit_name, submission_id, comment_id)
else:
return None
|
cb41189bc05962e37fdf91aa3e5c53c64acb9400
| 11,553
|
def _detects_peaks(ecg_integrated, sample_rate):
"""
Detects peaks from local maximum
----------
Parameters
----------
ecg_integrated : ndarray
Array that contains the samples of the integrated signal.
sample_rate : int
Sampling rate at which the acquisition took place.
Returns
-------
chosen_peaks : list
List of local maximums that pass the first stage of conditions needed to be considered as
a R peak.
possible_peaks : list
List with all the local maximums in the signal.
"""
# Minimum RR interval = 200 ms
min_rr = (sample_rate / 1000) * 200
# Computes all possible peaks and their amplitudes
possible_peaks = [i for i in range(0, len(ecg_integrated)-1)
if ecg_integrated[i-1] < ecg_integrated[i] and
ecg_integrated[i] > ecg_integrated[i+1]]
possible_amplitudes = [ecg_integrated[k] for k in possible_peaks]
chosen_peaks = []
# Starts with first peak
if not possible_peaks:
raise Exception("No Peaks Detected.")
peak_candidate_i = possible_peaks[0]
peak_candidate_amp = possible_amplitudes[0]
for peak_i, peak_amp in zip(possible_peaks, possible_amplitudes):
if peak_i - peak_candidate_i <= min_rr and peak_amp > peak_candidate_amp:
peak_candidate_i = peak_i
peak_candidate_amp = peak_amp
elif peak_i - peak_candidate_i > min_rr:
chosen_peaks += [peak_candidate_i - 6] # Delay of 6 samples
peak_candidate_i = peak_i
peak_candidate_amp = peak_amp
else:
pass
return chosen_peaks, possible_peaks
|
ab9dd461f65095f048942ab8b8c069c456ea4933
| 11,554
|
import re
def pascal2title(string: str) -> str:
"""Splits on whitespace between capital and lowercase letters."""
# https://stackoverflow.com/a/29922050
return " ".join(re.findall(r"[A-Z](?:[a-z]+|[A-Z]*(?=[A-Z]|$))", string))
|
c91019552d1d0a3e58b65b3d2d84ec78a48fec52
| 11,555
|
import copy
def flatten_samplesets(samplesets):
"""
Takes a list of SampleSets (with one or multiple samples in each SampleSet)
and returns a list of SampleSets (with one sample in each SampleSet)
Parameters
----------
samplesets: list of SampleSets
Input list of SampleSets (with one or multiple samples in each SampleSet)
"""
new_samplesets = []
# Iterating over the samplesets
for sset in samplesets:
# Iterating over the samples, and deep copying each sampleset
# for each sample
for i, s in enumerate(sset):
new_sset = copy.deepcopy(sset)
new_sset.samples = [s]
# Very important step
# We need to redo the keys
new_sset.key = f"{new_sset.key}-{i}"
new_samplesets.append(new_sset)
return new_samplesets
|
d89c5b58db69caa7570088fbfae4d7e414c5b9af
| 11,556
|
import torch
def center_conv_point(bboxes, kernel_size=3, c_min=0, c_max=1, v3_form=False):
"""In a parallel manner also keeps the gradient during BP"""
#bboxes.clamp_(min=c_min, max=c_max)
if v3_form:
base = torch.cat([bboxes[:, :2][:, (1, 0)]] * (kernel_size ** 2), dim=1)
else:
base = torch.cat([bboxes[:, :2]] * (kernel_size ** 2), dim=1)
multiplier = torch.tensor([(2 * i + 1) / kernel_size / 2
for i in range(kernel_size)]).cuda(bboxes.device.index)
# multiplier is generated counting top-to-bottom first, then left-to-right;
# it should count left-to-right first, then top-to-bottom, hence the [:, :, (1, 0)]
multiplier = torch.stack(torch.meshgrid([multiplier, multiplier]),
dim=-1).contiguous().view(-1)
multiplier = multiplier.unsqueeze(0).repeat(bboxes.size(0), 1)
if v3_form:
center = torch.stack([bboxes[:, 3] - bboxes[:, 1], bboxes[:, 2] - bboxes[:, 0]], dim=-1)
else:
center = torch.stack([bboxes[:, 2] - bboxes[:, 0], bboxes[:, 3] - bboxes[:, 1]], dim=-1)
center = torch.cat([center] * (kernel_size ** 2), dim=1)
return base + center * multiplier
|
a5b14c544525672f8f47fbdbb31a1ccb96ff8e8b
| 11,557
|
def auto_type_convert(value):
"""Try to convert 'value' to a int, float, or bool. Otherwise leave
as a string. This is done recursively with complex values."""
if value is None:
return None
if isinstance(value, list):
return [auto_type_convert(item) for item in value]
elif isinstance(value, dict):
return {key: auto_type_convert(val) for key, val in value.items()}
if isinstance(value, (int, float, bool)):
return value
# Probably a string?
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
if value in ('True', 'False'):
return value == 'True'
return value
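# Illustrative usage (not part of the original snippet): values are converted
# recursively; anything that is not an int, float, or bool stays a string.
assert auto_type_convert({"a": "1", "b": "2.5", "c": "True", "d": "text"}) == {"a": 1, "b": 2.5, "c": True, "d": "text"}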
|
1fe873dca7017d25128c30cc5144c3cf890319a1
| 11,559
|
import itertools
def normalise_text(text):
"""
Removes leading and trailing whitespace from each line of text.
Removes leading and trailing blank lines from text.
"""
stripped_lines = [line.strip() for line in text.split("\n")]
# remove leading and trailing empty lines
stripped_head = list(itertools.dropwhile(lambda s: not s, stripped_lines))
stripped_tail = itertools.dropwhile(lambda s: not s, reversed(stripped_head))
return "\n".join(reversed(list(stripped_tail)))
|
52ab40cfa4c39ed194fa71aa28d70f5fa737d70c
| 11,560
|
def remove_stale_events(data):
"""
Parse an event dictionary and remove transitions to the same state:
.. code::
event_dict = {
(entity_id, cohort interval) : [(time, state), ..., (time, state)]
(entity_id, cohort interval) : [(time, state), ..., (time, state)]
}
:param data: a pandas dataframe
:return: dict
"""
event_dict = {}
for event_key in data.keys():
# Pull the list of all events for this entity in this time
event_list = data[event_key]
new_event_list = []
# Iterate over all events and only keep those leading to changed state
for i in range(len(event_list) - 1):
if event_list[i][1] != event_list[i + 1][1]:
new_event_list.append(event_list[i])
# Last event added by default
i = len(event_list) - 1
new_event_list.append(event_list[i])
event_dict[event_key] = new_event_list
return event_dict
|
be80965efc7077e06d6e9b2ab5fb8ddc65df79a9
| 11,562
|
import sqlite3
def sqlite3get_table_names(sql_conn: sqlite3.Connection):
"""For a given sqlite3 database connection object, returns the table names within that database.
Examples:
>>> db_conn = sqlite3.connect('places.sqlite')\n
>>> sqlite3get_table_names(db_conn)\n
['moz_origins', 'moz_places', 'moz_historyvisits', 'moz_inputhistory', 'moz_bookmarks', 'moz_bookmarks_deleted', 'moz_keywords', 'moz_anno_attributes', 'moz_annos', 'moz_items_annos', 'moz_meta', 'sqlite_stat1']
References:
# How to list tables for a given database:\n
https://techoverflow.net/2019/10/14/how-to-list-tables-in-sqlite3-database-in-python/\n
# Firefox forensics - what sqlite databases store which artifacts\n
https://www.foxtonforensics.com/browser-history-examiner/firefox-history-location\n
Args:
sql_conn (sqlite3.Connection): Reference an existing slite3 Connection object.
Returns:
list: Returns a list of the table names in the database.
"""
cursor = sql_conn.execute("SELECT name FROM sqlite_master WHERE type='table';")
table_names = [
name[0] for name in cursor.fetchall() if name[0] != 'sqlite_sequence'
]
cursor.close()
return table_names
|
0cb1ba2ba475268d39fef8bdbe044436f51c4f07
| 11,563
|
def make_key_frames():
"""
make key frames for the regions
"""
key_frames = {}
key_frames[0] = [100, 200, 300]
key_frames[1] = [50, 150]
return key_frames
|
3b551b7bb7fb22fab8e35abc54b2c524bdfa36fd
| 11,565
|
from typing import List
from typing import Any
def sort(xs: List[Any]) -> List[Any]:
"""
Sort the list `xs` with the python `sort` function
and return the sorted list. The given list is not modified.
Parameters
----------
xs
A list.
Returns
-------
xs_sorted
A sorted copy of `xs`.
"""
xs_copy = xs.copy()
xs_copy.sort()
return xs_copy
|
706190a533c19b6a7ea4baf540823ca73406560b
| 11,566
|
def response_200(result=None):
"""GET에 대한 반환"""
return {
'msg': 'success',
'result': result
}, 200
|
2c2acbfed5ee678536671a367c67cc7d0b1fb6ba
| 11,567
|
def limit():
"""The number of corpus files to process, e.g ``--limit 100`."""
return '--limit 50'
|
45638e81c16cc9a89a7137566581b43d7e952f46
| 11,568
|
def add_pacient(ime, priimek, cepivo=None):
"""Funkcija v bazo vstavlja novega pacienta za sprejem v bolnišnico."""
return None
|
18d302d4a926d7af194850f68425137f5ef48968
| 11,569
|
import pickle
def read_solver(filename):
"""
This function reads a .pk1 file.
Parameters:
filename (str): A file name or path to file excluding file extension.
Returns:
Content of the loaded .pk1 file.
"""
try:
file = open(filename+'.pk1', 'rb')
except IOError:
print('unable to open file')
return False
else:
solver_obj = pickle.load(file)
file.close()
return solver_obj
|
da7bcfb75af1edde3d43091fdbc5c1829c13e5e0
| 11,570
|
def sentence_length_feature(sentences):
""" List of values from 0 to 1 rating the length of the sentence in comparation with the longest one """
sentence_length_feature_values = []
max_length_sentence = len(sentences[0].original.split(' '))
# Find the longest sentence
for sentence in sentences[1:]:
if len(sentence.original.split(' ')) > max_length_sentence:
max_length_sentence = len(sentence.original.split(' '))
# Normalize the length of every sentence
for sentence in sentences:
sentence_length_feature_values.append(len(sentence.original.split(' ')) / max_length_sentence)
return sentence_length_feature_values
|
89e705a6b12b30f29352d70977bd80db45a14b6a
| 11,571
|
import os
def system_terminal_emulator() -> str:
"""
Goal: find the default terminal emulator
"""
try:
return os.environ["TERM"]
except KeyError:
raise EnvironmentError("Cannot find default terminal")
|
21f6914a656fb018289b9193b9ab378de2bcf7ff
| 11,572
|
import inspect
def get_argument_help_string(param: inspect.Parameter) -> str:
"""Get a default help string for a parameter
:param param: Parameter object
:type param: inspect.Parameter
:return: String describing the parameter based on the annotation and default value
:rtype: str
"""
help_str = ""
if param.annotation is not param.empty:
help_str = inspect.formatannotation(param.annotation)
if param.default is not param.empty:
if param.annotation is not param.empty:
help_str += ", "
help_str = f"{help_str}defaults to {param.default!r}"
return help_str
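# Illustrative usage (not part of the original snippet), using a throwaway function:
def _demo(x: int = 3):
    pass
assert get_argument_help_string(inspect.signature(_demo).parameters["x"]) == "int, defaults to 3"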
|
7381591ce45591bf1aef62280fc4b98e57969576
| 11,573
|
def _dask_array_fft_has_norm_kwarg():
"""returns True if dask.array's fft supports the norm keyword argument
"""
return False
|
93e111839aa5a175ebcdb995193c7774c45e25ec
| 11,574
|
def nested_select(d, v, default_selected=True):
"""
Recursively select part of the object d with the indicator v. If d is a dictionary, the selection continues into the
child values. The function returns the selected parts as well as the dropped parts.
:param d: The dictionary to be selected
:param v: The indicator showing which part should be selected
:param default_selected: Specify whether an entry is selected by default
:return: A tuple of two elements, the selected part and the dropped part
Examples:
>>> person = {'profile': {'name': 'john', 'age': 16, 'weight': 85}, \
'relatives': {'father': 'bob', 'mother': 'alice'}}
>>> nested_select(person, True)
({'profile': {'name': 'john', 'age': 16, 'weight': 85}, 'relatives': {'father': 'bob', 'mother': 'alice'}}, {})
>>> nested_select(person, {'profile': False})
({'relatives': {'father': 'bob', 'mother': 'alice'}}, {'profile': {'name': 'john', 'age': 16, 'weight': 85}})
>>> nested_select(person, {'profile': {'name': False}, 'relatives': {'mother': False}})
({'profile': {'age': 16, 'weight': 85}, 'relatives': {'father': 'bob'}},
{'profile': {'name': 'john'}, 'relatives': {'mother': 'alice'}})
"""
if isinstance(v, dict):
assert isinstance(d, dict)
choosed = d.__class__()
dropped = d.__class__()
for k in d:
if k not in v:
if default_selected:
choosed.setdefault(k, d[k])
else:
dropped.setdefault(k, d[k])
continue
if isinstance(v[k], dict):
assert isinstance(d[k], dict)
child_choosed, child_dropped = nested_select(d[k], v[k])
if child_choosed:
choosed.setdefault(k, child_choosed)
if child_dropped:
dropped.setdefault(k, child_dropped)
else:
if v[k]:
choosed.setdefault(k, d[k])
else:
dropped.setdefault(k, d[k])
return choosed, dropped
else:
other = d.__class__() if isinstance(d, dict) else None
return (d, other) if v else (other, d)
|
84aa16adfb324fef8452966087cbf446d57893d2
| 11,575
|
import time
def timestamp_2_datetime_str(timestamp):
"""将时间戳转为日期时间字符串"""
datetime_tuple = time.localtime(timestamp)
return time.strftime("%Y-%m-%d %H:%:%S", datetime_tuple)
|
1b8b5e4338d1fa49729fa253b029c0521a7f5f55
| 11,576
|
import os
import pickle
def load_parameters():
"""Read the "bridge file to get back the arguments."""
parameter_file_name = os.path.abspath("gdb_driver.params")
parameter_file = open(parameter_file_name, "rb")
return pickle.loads(parameter_file.read())
|
521f2e71eee324ec048e79a300abdcfd26b73066
| 11,577
|
def is_trans(reaction):
"""Check if a reaction is a transporter."""
return len(reaction.metabolites) == 1
|
a0b9be30b4a88156e7a89cf48b8ecb76345e4cab
| 11,578
|
def format_tile(tile, tile_format, format_str='{x} {y} {z}'):
"""Convert tile to necessary format.
Parameters
----------
tile: pygeotile.tile.Tile
Tile object to be formatted.
tile_format: str
Desired tile format. `google`, `tms`, or `quad_tree`
format_str: str
String to guide formatting. Only used for `google` or `tms`
(as quad_tree is one value).
Default: "{x} {y} {z}". Example: "{z}-{x}-{y}"
"""
if tile_format == 'google':
td = {key: val for key, val
in zip(['x', 'y', 'z'], list(tile.google) + [tile.zoom])}
return format_str.format(**td)
elif tile_format == 'tms':
td = {key: val for key, val
in zip(['x', 'y', 'z'], list(tile.tms) + [tile.zoom])}
return format_str.format(**td)
elif tile_format == 'quad_tree':
return tile.quad_tree
else:
raise ValueError('`tile_format`: {} not recognized'.format(tile_format))
|
e7ce2653f49f0c535bb3ea0a0c4bec3b48a74331
| 11,579
|
def percent(a, b):
"""
Given a and b, this function returns a% of b
"""
return (a/100)*b
|
d8adae353bb1b7aba6eb0149af065182bf752449
| 11,580
|
import math
def filter_none(mongo_dict):
"""Function to filter out Nones and NaN from a dict."""
for key in list(mongo_dict.keys()):
val = mongo_dict[key]
try:
if val is None or math.isnan(val):
mongo_dict.pop(key)
except Exception:
continue
return mongo_dict
|
6853a7153f98466dc00d262bfaf4a63db1b0cd5c
| 11,581
|
import configparser
def read_configfile():
"""Read the config file
:returns: The ConfigParser object
"""
c = configparser.ConfigParser()
c.read("config.conf")
return c
|
cdc8b63cca40af1cc516161746ffd016aeb4d65d
| 11,583
|
import csv
def read_us_counties_csv(file_name):
"""
"""
# read the data
with open(file_name) as csv_file:
# convert each row to a list of strings
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
counties_with_deaths = []
for row in csv_reader:
if line_count == 0:
line_count += 1
continue
if int(row[5]) > 0:
counties_with_deaths.append(row)
return counties_with_deaths
|
2b203f200d7efbe26cbcaff17c150afde388f7e3
| 11,584
|
def ARGmetric_params_from_row(row):
"""
Create some ARGmetric params (see ARGmetrics.py) from a row
We hack the same polytomy seed as inference seed
This stupidly has to be defined at the top level, since it is
part of the function passed in to a multiprocessing Pool, and
hence needs to be 'pickle'able :(
"""
return {'make_bin_seed':row.seed, 'reps':row.tsinfer_biforce_reps}
|
eeb43b5fdf0b1fd8705d95ab7dd6a2d7f6422801
| 11,585
|
def argument(*name_or_flags, **kwargs):
"""Helper function to satisfy argparse.ArgumentParser.add_argument()'s
input argument syntax"""
return (list(name_or_flags), kwargs)
|
24748b59202964d1bca05c5c24dac144e67d964e
| 11,586
|
import re
def fetch_geneid(fn):
"""
fetch gene id from GTF records
gene_id "FBgn0267431"; gene_name "Myo81F"; gene_source "FlyBase"; gene_biotype "protein_coding";
"""
assert isinstance(fn, str)
dn = {}
for i in fn.split(';'):
if len(i) < 8: # shortest field
continue
id = i.strip().split(' ')
id = re.sub('"', '', id[1])
if 'gene_id' in i:
dn['gene_id'] = id
elif 'gene_name' in i:
dn['gene_name'] = id
elif 'transcript_id' in i:
dn['transcript_id'] = id
elif 'transcript_name' in i:
dn['transcript_name'] = id
elif 'exon_id' in i:
dn['exon_id'] = id
elif 'gene_biotype' in i:
dn['gene_biotype'] = id
else:
pass
return dn
|
c3ddb9c5c820799800131c6e11dc2ef91d4556c1
| 11,587
|
def fieldsMatch(f0, f1):
"""
Checks whether any entry in one list appears in a second list.
"""
for f in f0:
if f in f1: return True
return False
|
172772fca613fe97e32bb6860c3bd217b643fe2d
| 11,588
|
def list_to_str(lst):
"""convert list/tuple to string split by space"""
result = " ".join(str(i) for i in lst)
return result
|
1e6c59f834b98b1d49ca1c0919d2d976fd1de1de
| 11,589
|
def extract_bias_diagonal(module, S, sum_batch=True):
"""Extract diagonal of ``(Jᵀ S) (Jᵀ S)ᵀ`` where ``J`` is the bias Jacobian.
Args:
module (torch.nn.Conv1d or torch.nn.Conv2d or torch.nn.Conv3d): Convolution
layer for which the diagonal is extracted w.r.t. the bias.
S (torch.Tensor): Backpropagated (symmetric factorization) of the loss Hessian.
Has shape ``(V, *module.output.shape)``.
sum_batch (bool, optional): Sum out the batch dimension of the bias diagonals.
Default value: ``True``.
Returns:
torch.Tensor: Per-sample bias diagonal if ``sum_batch=False`` (shape
``(N, module.bias.shape)`` with batch size ``N``) or summed bias
diagonal if ``sum_batch=True`` (shape ``module.bias.shape``).
"""
start_spatial = 3
sum_before = list(range(start_spatial, S.dim()))
sum_after = [0, 1] if sum_batch else [0]
return S.sum(sum_before).pow_(2).sum(sum_after)
|
b2f8aaefa22d1ae915c59f80f98341059a4d1ad3
| 11,590
|
import time
def read_device(net_connect, sleep=1):
"""Sleep and read channel."""
time.sleep(sleep)
output = net_connect.read_channel()
print(output)
return output
|
b0c4c300a088886558203649f57c10cc34b88e21
| 11,592
|
import os
def __work_mode__(path: str):
"""Determines the desired mode of operation"""
if os.path.isfile(path): # Input is file
return "file"
if os.path.isdir(path): # Input is dir
return "dir"
else:
return "no"
|
a26a214653855244cd9b0e9ed7fa5ca058e9d81d
| 11,593
|
def is_numeric_array(x):
"""
Returns whether an object is a ground numeric array.
"""
return False
|
8d119f10f08f08552c9dfb59fa13dd0bf644f47c
| 11,594
|
from gzip import GzipFile
from typing import Any
import json
def gzip_load_var(gzip_file:str) -> Any:
"""
Load variable from .json.gz file (arbitrary extension!)
Parameters
----------
gzip_file : str
Filename containing the gzipped JSON variable
Returns
-------
var : Any
Variable as decoded from gzipped JSON content
"""
# IMPORT DONE HERE TO SAVE TIME AT MODULE INIT
try:
with GzipFile(gzip_file, 'r') as gzip_in:
json_var = json.loads(gzip_in.read().decode('utf-8'))
except:
raise
return json_var
|
b9f6fbad33b40bb18a1418f3134b0889d5344183
| 11,595
|
import os
def verify_directory_layout(project_dir):
"""
Verifies if directory from path has Maven's "Standard Directory Layout" (src/main/java)
:param project_dir: path to the project
:raises FileNotFoundError: when a directory is requested but doesn't exist
:raises NotADirectoryError: when path leads to something which is not a directory
:return: True if directory from path has Maven's "Standard Directory Layout"; else False
"""
try:
os.chdir(project_dir)
except FileNotFoundError as fnfe:
raise fnfe
except NotADirectoryError as nade:
raise nade
maven_standard_directory_layout = os.path.join("src", "main", "java")
path_to_analyse = os.path.join(project_dir, maven_standard_directory_layout)
return os.path.exists(path_to_analyse)
|
583f3523df356a9ff30f488d1f5d400baef18701
| 11,596
|
def five_point_derivative(f,x,h):
"""#5-point function to calaculate 1st derivative"""
h = float(h)
return (1/(12*h))*(f(x-2*h)-f(x+2*h)-
8*f(x-h)+8*f(x+h))
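# Illustrative check (not part of the original snippet): the stencil is exact for
# low-degree polynomials, so d/dx of x**2 at x = 3 gives 6.
assert abs(five_point_derivative(lambda x: x ** 2, 3.0, 0.1) - 6.0) < 1e-6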
|
186eb1ad0a01d96a85fb0d52bf0def8d3a0d00c0
| 11,597
|
def unique_count(value):
"""Count the number of unique characters in the string representation of a
scalar value.
Parameters
----------
value: scalar
Scalar value in a data stream.
Returns
-------
int
"""
unique = set()
for c in str(value):
unique.add(c)
return len(unique)
|
3a1481e06e6b5a57a2e4c28dc3b9b34dd5c3a977
| 11,599
|
import os
def compare_file_contents(path_a, path_b) -> bool:
"""
Compares two files and returns whether the files are equal.
"""
if os.stat(path_a).st_size != os.stat(path_b).st_size:
return False
with open(path_a, 'rb') as file_a, open(path_b, 'rb') as file_b:
while True:
buf_a = file_a.read(1024*1024)
buf_b = file_b.read(1024*1024)
if buf_a != buf_b:
return False
if buf_a == b'' and buf_b == b'':
return True
|
372aa296b1daa6a5e60083e65c0e6b5847bc03a8
| 11,600
|
def category_table_build(osm_table_name, categorysql):
"""
Returns an SQL OSM category table builder
Args:
osm_table_name (string): a OSM PostGIS table name
Returns:
sql (string): a sql statement
"""
sql = ("INSERT INTO category_%s (osm_id, cat) "
"SELECT DISTINCT osm_id, "
"%s as cat "
"FROM %s as osm")
sql = sql % (osm_table_name, categorysql, osm_table_name)
return sql
|
147812bb68845e3fd0dca7e805f610d26428eac3
| 11,601
|
import time
import re
def get_uniqueID(idPrefix):
"""Generates a unique id
"""
t = time.ctime()
uid = '_'.join([idPrefix, t])
return re.sub(r'[: ]', '_', uid)
|
b36bdd7548343f69cd2c093f66c74314d37b9665
| 11,602
|
def count_helices(d):
"""Return the helix count from structure freq data."""
return d.get('H', 0) + d.get('G', 0) + d.get('I', 0)
|
8469adabac42937009a1f8257f15668d630a7ca8
| 11,603
|
from datetime import datetime
import itertools
def satisfies(query, **kwargs):
"""Check whether a given datetime object satisfies day and time predicates
Keyword Args:
month: The month predicate (January, February, ...)
day: The day of month predicate [1, 31)
weekday: The day of week predicate (Sunday, Monday, ...)
hour: The hour of day predicate [0, 24)
minute: The minute of hour predicate [0, 60)
"""
formatters = {
'month': lambda: datetime.strftime(query, '%B'),
'weekday': lambda: datetime.strftime(query, '%A'),
'day': lambda: query.day,
'hour': lambda: query.hour,
'minute': lambda: query.minute
}
attributes = kwargs.keys()
predicates = itertools.product(*kwargs.values())
for values in predicates:
if all([formatters[attr]() == value for attr,value in zip(attributes,values)]):
return True
return False
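# Illustrative usage (not part of the original snippet): each keyword takes an
# iterable of acceptable values (assumes an English locale for month/weekday names).
assert satisfies(datetime(2021, 3, 15, 10, 30), month=["March"], hour=[10])
assert not satisfies(datetime(2021, 3, 15, 10, 30), weekday=["Sunday"])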
|
e36ff908a31e71eab242b9ff6cf4df6721a1fbf7
| 11,604
|
import os
import json
def check_completeness_single_job(out_file: str,
job_no: int,
iteration: int) -> bool:
"""
Check whether an individual inference job has been completed.
Args:
out_file: n5 file to save inference to.
job_no: Number/id of the individual inference job.
iteration: Iteration of the inference.
Returns:
If True, inference job has been completed.
"""
if os.path.exists(os.path.join(out_file, 'list_gpu_{0:}.json'.format(job_no))) and os.path.exists(
os.path.join(out_file, 'list_gpu_{0:}_{1:}_processed.txt'.format(job_no, iteration))):
block_list = os.path.join(out_file, 'list_gpu_{0:}.json'.format(job_no))
block_list_processed = os.path.join(out_file, 'list_gpu_{0:}_{1:}_processed.txt'.format(job_no, iteration))
with open(block_list, 'r') as f:
block_list = json.load(f)
block_list = {tuple(coo) for coo in block_list}
with open(block_list_processed, 'r') as f:
list_as_str = f.read()
list_as_str_curated = '[' + list_as_str[:list_as_str.rfind(']') + 1] + ']'
processed_list = json.loads(list_as_str_curated)
processed_list = {tuple(coo) for coo in processed_list}
if processed_list < block_list:
complete = False
else:
complete = True
else:
complete = False
return complete
|
a9ce3adab9121ac6044d535e8e4e4d3f7781a371
| 11,606
|
import requests
import json
def duckduckgo(search, appName=""):
"""Gives instant answers from DuckDuckGo (https://duckduckgo.com/).
Keyword arguments:
search -- <str>; what you are searching for (case sensitive)
appName -- <str>; the name of your app
Return value:
{
"AbstractText": <str>; topic summary,
"AbstractSource": <str>; name of <AbstractText> source,
"Heading": <str>; name of topic that goes with
<AbstractText>,
"Answer": <str>; instant answer,
"Definition": <str>; dictionary definition (may differ from
<AbstractText>),
"DefinitionSource": <str>; name of <Definition> source,
"DefinitionURL": <str>; deep link to expanded definition
page in <DefinitionSource>
"URL": <str>; URL associated with <AbstractText>,
"URLText": <str>; text from <FirstURL>
}
"""
url = "http://api.duckduckgo.com/?q={}&format=json&t={}"
url = url.format(search, appName)
data = requests.get(url).text
data = json.loads(data)
items = {"AbstractText": data["AbstractText"],
"AbstractSource": data["AbstractSource"],
"Heading": data["Heading"],
"Answer": data["Answer"],
"Definition": data["Definition"],
"DefinitionSource": data["DefinitionSource"],
"DefinitionURL": data["DefinitionURL"]}
exists = data["Results"]
items["URL"] = data["Results"][0]["FirstURL"] if exists else ""
items["URLText"] = data["Results"][0]["Text"] if exists else ""
return items
|
7bb2958986b0e1b0219e7d1a63e475ff840f0136
| 11,607
|
def binary_to_decimal(number):
"""
Calculates the decimal of the given binary number.
:param number: binary number in string or integer format
:return integer of the equivalent decimal number
"""
decimal = []
number = list(str(number)[::-1])
for i in range(len(number)):
decimal.append(int(number[i]) * (2 ** i))
return sum(decimal)
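# Illustrative usage (not part of the original snippet): accepts strings or integers.
assert binary_to_decimal("1011") == 11
assert binary_to_decimal(100) == 4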
|
6d615b9bc5a50cc9d2a970fa77c989fa95d0d77e
| 11,608
|
def get_dom_attr_value(dom, tag_name, attr_name):
"""
Return value of a tag's attribute from dom (XML file).
Arguments:
tag_name -- name of dom tag in which the attribute is found.
attr_name -- name of dom attribute for which the value should be returned.
"""
tag=dom.getElementsByTagName(tag_name)
value=tag[0].attributes[attr_name].value
return value
|
26f31fd7db526bb5503b7ce156b19a99705041d1
| 11,609
|
def plaintext():
"""Test 6: Plaintext"""
return b"Hello, World!", {"Content-Type": "text/plain"}
|
8005ee608a45956d5aa9fc4edf34c9ee3c8605d6
| 11,611
|
def EGCD(a, b):
"""
Extended Euclidean algorithm
computes common divisor of integers a and b.
a * x + b * y = gcd(a, b)
returns gcd, x, y or gcd, y, x.
Sorry, I can't remember
"""
if a == 0:
return (b, 0, 1)
else:
b_div_a, b_mod_a = divmod(b, a)
g, x, y = EGCD(b_mod_a, a)
return (g, y - b_div_a * x, x)
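# Illustrative check (not part of the original snippet): the returned coefficients
# satisfy a * x + b * y == gcd(a, b).
_g, _x, _y = EGCD(240, 46)
assert _g == 2 and 240 * _x + 46 * _y == _g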
|
81a9365bfe2b82f0856489710afb7186d1fcf803
| 11,613
|
def filter_attr(the_dict, attr_names):
"""
Only return the item that in the attr_names list
:param the_dict:
:param attr_names: comma separated names
:return: dict
"""
if isinstance(the_dict, dict):
attrs = attr_names.split(',')
return dict((k, v) for k, v in the_dict.items() if k.strip() in attrs)
return the_dict
|
1ca5ad38af808c6a533c3abb22ce7cc26d5f82f2
| 11,615
|
import torch
def word_to_one_hot(word, word2idx):
"""Return the one hot encoding of the word given to this function.
Args:
word (str): The word for which one hot representation is required.
word2idx (Dict): The dictionary mapping from word to indices.
Returns:
x (torch.Tensor): The one hot representation for the word.
"""
# Create a vector of zeros equal to the length of the vocab
x = torch.zeros(len(word2idx)).float()
# Set the value at the index of the word to 1
x[word2idx[word]] = 1.0
return x
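# Illustrative usage (not part of the original snippet), with a hypothetical two-word vocabulary:
_word2idx = {"hello": 0, "world": 1}
assert word_to_one_hot("world", _word2idx).tolist() == [0.0, 1.0]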
|
6628f25695cacb202dd51be070ff03f9a594654d
| 11,618
|
def rev_comp(seq: str) -> str:
"""
Generates the reverse complement of a sequence.
"""
comp = {
"A": "T",
"C": "G",
"G": "C",
"T": "A",
"B": "N",
"N": "N",
"R": "N",
"M": "N",
"Y": "N",
"S": "N",
"W": "N",
"K": "N",
"a": "t",
"c": "g",
"g": "c",
"t": "a",
"n": "n",
" ": "",
}
rev_seq = "".join(comp.get(base, base) for base in reversed(seq))
return rev_seq
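# Illustrative usage (not part of the original snippet):
assert rev_comp("ATGC") == "GCAT"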
|
cb6b95d2d3f15910ff3ad793d99bb56de898026e
| 11,619
|
def strip_variables(raw_df, targ_hd):
"""
Some of the ncdf files do not have a <complete> collection of variables
(e.g. Rnet, Qg), such that the matrices are different shapes. In order
to be able to concatenate all fluxnet datasets together, this function
strips out the uncommon variables.
"""
# include the related QC flag columns as well for masking
take_these = targ_hd + [hd + "_qc" for hd in targ_hd]
# limit dataset to only these columns
new_df = raw_df[take_these]
return new_df
|
b732faf83d5946807c6e74b13297e52ced18ff1d
| 11,620
|
def find_nuc_indel(gapped_seq, indel_seq):
"""
This function finds the entire indel missing from a gapped sequence
compared to the indel sequence. It is assumed that the sequences start
with the first position of the gap.
"""
ref_indel = indel_seq[0]
for j in range(1,len(gapped_seq)):
if gapped_seq[j] == "-":
ref_indel += indel_seq[j]
else:
break
return ref_indel
|
d1f258229300f965d9e6d0a8fba0a7cb6a071131
| 11,621
|
def _match_identifier_re():
"""
Match the BundleId
"""
return r'^Identifier:\s*([a-zA-Z0-9_\-\.]+)\s*$'
|
cbca0e2924e77c43211bdadba4d434c178eb8e18
| 11,622
|
def get_stats_for_unweighted_hpo_aggregate_metric(
hpo_object, metric, date, hpo_name,
tables_and_classes_counted, statistics_to_average):
"""
Function is used once an HPO is found and warrants its own
AggregateMetricForHPO because it has a unique set of date
and metric parameters.
This function, however, differs from
get_stats_for_weighted_hpo_aggregate_metric in that it
weights all of the different classes equally.
The other function instead creates an 'aggregate'
metric and weights the tables/categories by their
relative row contributions.
Parameters
----------
hpo_object (HPO): object of class HPO that has all the
information we want to sort across (and ultimately
average across all of the applicable tables)
metric (string): represents the kind of metric that
is to be investigated (e.g. duplicates)
date (datetime): the datetime that should be unique
for the AggregateMetricForHPO to be created.
hpo_name (string): name of the HPO object
tables_and_classes_counted (list): list of tables that should not
be counted in the 'overall tally'. this is used to
prevent the same table from being counted more than
once
statistics_to_average (list): list of the 'values'
associated with various DQM objects. this is
to grow across the metric (through all tables)
for the HPO.
Returns
-------
statistics_to_average (list): list of the 'values'
associated with the HPO object for the relevant
data quality metrics. these will all ultimately
be averaged to create an 'aggregate unweighted'
data quality metric for the HPO.
tables_and_classes_counted (list): list of tables that
should not be counted in the 'overall tally'.
now also contains the tables that contributed to
the overall tally for the particular HPO on the
particular date
"""
relevant_dqms = hpo_object.use_string_to_get_relevant_objects(
metric=metric)
for dqm in relevant_dqms:
# regardless of dqm.table_or_class
if (dqm.date == date and
dqm.hpo == hpo_name and
dqm.metric_type == metric) and \
(hpo_object.date == date) and \
dqm.table_or_class not in tables_and_classes_counted:
# add the value
statistics_to_average.append(dqm.value)
# prevent double counting
tables_and_classes_counted.append(dqm.table_or_class)
return statistics_to_average, tables_and_classes_counted
|
2b451f6fc15b9f96e343d5577626a62fd57662b7
| 11,623
|
def codeblock(text):
"""
Returns text in a Markdown-style code block
:param text: Str
:return: Str
"""
return "```\n" + text + "\n```"
|
fb8fd7314273b47ace577a72c705742d6646aa0d
| 11,626
|
def generate_ensemble(num_layers, *activations):
"""Given a set of string names and a number of target layers, generate
a list of ensemble architectures with those activations
Args:
num_layers: int
the number of hidden layers in the neural network, and also
the number of activation functions
activations: list of str
a list of strings that indicates the candidate activation
functions for every layer
Returns:
ensemble: list of list of str
a list of architectures, where an architecture is given by a
list of activation function names
"""
if num_layers == 0:
return []
if num_layers == 1:
return [[act] for act in activations]
return [[act, *o] for act in activations
for o in generate_ensemble(num_layers - 1, *activations)]
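# Illustrative usage (not part of the original snippet): two layers with two
# candidate activations enumerate every combination.
assert generate_ensemble(2, "relu", "tanh") == [["relu", "relu"], ["relu", "tanh"], ["tanh", "relu"], ["tanh", "tanh"]]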
|
42bce2fc861122938d2e5da54e4538b0e6a285bc
| 11,627
|
def regular_graphing_method():
"""
This function was created to determine whether the user wants to graph the data on top of each other or on separate graphs.
:return boolean regular_ontop: regular_ontop is returned to determine how the data will graph
Note: if regular_ontop is true, the non-derivative data will all graph on one graph
"""
regular_ontop = input("Do you want to graph the originally selected data on top of each other? (y / n)")
try:
if (regular_ontop[0] == "y"
or regular_ontop[0] == "Y"
or regular_ontop[0] == "t"
or regular_ontop[0] == "T"):
regular_ontop = True
return regular_ontop
elif (regular_ontop[0] == "n"
or regular_ontop[0] == "N"
or regular_ontop[0] == "f"
or regular_ontop[0] == "F"):
regular_ontop = False
return regular_ontop
else:
print("Invalid input, try again!")
return regular_graphing_method()
except IndexError:
print("Invalid input, try again!")
return regular_graphing_method()
|
75cb8de45df76cb6facf91ac0ae6e0ca9f37cf31
| 11,629
|
def div_to_int(int_numerator, int_denominator, round_away=False):
"""Integer division with truncation or rounding away.
If round_away evaluates as True, the quotient is rounded away from
zero, otherwise the quotient is truncated.
"""
is_numerator_negative = False
if int_numerator < 0:
int_numerator = -int_numerator
is_numerator_negative = True
is_denominator_negative = False
if int_denominator < 0:
int_denominator = - int_denominator
is_denominator_negative = True
quotient, remainder = divmod(int_numerator, int_denominator)
if round_away and remainder > 0:
quotient += 1
if is_numerator_negative != is_denominator_negative:
quotient = -quotient
return quotient
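# Illustrative usage (not part of the original snippet): truncation vs. rounding
# away from zero, including negative operands.
assert div_to_int(7, 2) == 3 and div_to_int(7, 2, round_away=True) == 4
assert div_to_int(-7, 2) == -3 and div_to_int(-7, 2, round_away=True) == -4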
|
3fc3b307cf691a9bea2a33a0bf468398c77db645
| 11,630
|
import os
def dict_to_dir(data, path=str()):
"""dict_to_dir expects data to be a dictionary with one top-level key."""
dest = os.path.join(os.getcwd(), path)
if isinstance(data, dict):
for k, v in data.items():
os.makedirs(os.path.join(dest, k))
dict_to_dir(v, os.path.join(path, str(k)))
elif isinstance(data, list):
for i in data:
if isinstance(i, dict):
dict_to_dir(i, path)
else:
filename = i.split(' > ')[0]
content = i.split(' > ')[1]
with open(os.path.join(dest, filename), "a"):
os.utime(os.path.join(dest, filename), None)
with open(os.path.join(dest, filename), "wb") as outFile:
outFile.write(content.encode())
if isinstance(data, dict):
return list(data.keys())[0]
|
afb173d4963691033fe95fa7e5711a56ca90e139
| 11,631
|
import re
def get_pids(linesl):
"""
Requires the layout lines as input.
Returns dictionary of pid:meaning
"""
pids = {}
foundstartpids = False
for line in linesl:
# After table
if "These PIDs are superseded by PIDs 194, 195, and 196." in line:
break
# Before table
if not foundstartpids and " TABLE 2 - PARAMETER IDENTIFICATION" in line:
foundstartpids = True
elif foundstartpids:
if re.match(r' +[0-9]+[^\.]', line):
garbage,pid,meaning = re.split(" {9,}",line)
pids[pid.strip()] = meaning.strip()
return pids
|
1216b337283851ceb0e6721abd124b2a83a0157c
| 11,632
|
def match_server_args_factory(tick_rate: int, realtime: bool, observations_only: bool, env_config_string: str):
""" Helper factory to make a argument dictionary for servers with varying ports """
def match_server_args(port):
arg_dict = {
"tick_rate": tick_rate,
"port": port,
"realtime": realtime,
"observations_only": observations_only,
"config": env_config_string
}
return arg_dict
return match_server_args
|
5059d0a4224067f485455a54f4e6d1f83ba68531
| 11,633
|
from typing import Iterable
from typing import Any
def parametrize(arg_names: Iterable[str], arg_values: Iterable[Iterable[Any]]):
"""
Decorator to create parameterized tests.
# Parameters
arg_names : `Iterable[str]`, required.
Argument names to pass to the test function.
arg_values : `Iterable[Iterable[Any]]`, required.
Iterable of values to pass to each of the args.
The decorated test will be run for each inner iterable.
"""
def decorator(func):
def wrapper(*args, **kwargs):
for arg_value in arg_values:
kwargs_extra = {name: value for name, value in zip(arg_names, arg_value)}
func(*args, **kwargs, **kwargs_extra)
return wrapper
return decorator
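# Illustrative usage (not part of the original snippet), with a hypothetical test function:
@parametrize(["a", "b"], [[1, 2], [3, 4]])
def _test_sum(a, b):
    assert a + b in (3, 7)
_test_sum()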
|
e385d2449a2572d89a055d7cbdcc2475b00b2942
| 11,634
|
def default_colors_to_resets(s):
"""Hack to make sphinxcontrib.ansi recognized sequences"""
return s.replace(b"[39m", b"[0m").replace(b"[49m", b"[0m")
|
0f025233ac63416fef47e85ebb258bb543a4c115
| 11,635
|
from typing import List
import argparse
def get_input() -> List[int]:
"""
Parse arguments passed to script.
Return:
List[int]
the comma-separated numbers passed on the command line.
"""
parser = argparse.ArgumentParser()
msg = "comma separated numbers, i.e '0,3,6'"
parser.add_argument("sequence", help=msg)
args = parser.parse_args()
return list(map(int, args.sequence.split(",")))
|
dba07be45f6b111cd89f7791769461864185ca9d
| 11,636
|
def mask_maxipix(mydata, mymask):
"""Mask the Maxipix frames."""
mydata[:, 255:261] = 0
mydata[255:261, :] = 0
mymask[:, 255:261] = 1
mymask[255:261, :] = 1
return mydata, mymask
|
0c508c6740569e99cdb745b75b399dac8021e9bf
| 11,637
|