content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import torch
def D_power_X(graph, X, power):
    """Compute Y = D^{power} X.

    Scales each row of the feature matrix X by the corresponding node
    degree (read from graph.ndata["deg"]) raised to `power`.
    """
    scale = torch.pow(graph.ndata["deg"], power)
    return X * scale.view(X.size(0), 1)
def cumulttfrom(calendarMoments=['Sowing'],
                calendarCumuls=[0.0],
                switchMaize=0,
                cumulTT=8.0):
    """
    CumulTTFrom Model
    Author: Pierre Martre
    Reference: Modeling development phase in the
        Wheat Simulation Model SiriusQuality.
        See documentation at http://www1.clermont.inra.fr/siriusquality/?page_id=427
    Institution: INRA Montpellier
    Abstract: Calculate CumulTT since anthesis (ZC 65), flag-leaf ligule
    just visible (ZC 39) and end of grain filling (ZC 91).

    Returns (cumulTTFromZC_65, cumulTTFromZC_39, cumulTTFromZC_91); each
    value is 0 when the corresponding stage has not been reached or when
    switchMaize != 0.  The default arguments are never mutated.
    """
    cumulTTFromZC_65 = 0
    cumulTTFromZC_39 = 0
    cumulTTFromZC_91 = 0
    if "Anthesis" in calendarMoments:
        if (switchMaize == 0): cumulTTFromZC_65 = cumulTT-calendarCumuls[calendarMoments.index("Anthesis")]
    if "FlagLeafLiguleJustVisible" in calendarMoments:
        if (switchMaize == 0): cumulTTFromZC_39 = cumulTT-calendarCumuls[calendarMoments.index("FlagLeafLiguleJustVisible")]
    if "EndGrainFilling" in calendarMoments:
        # BUG FIX: the original looked up the index of
        # "FlagLeafLiguleJustVisible" here (copy-paste error), so ZC 91 was
        # computed from the wrong calendar entry.
        if (switchMaize == 0): cumulTTFromZC_91 = cumulTT-calendarCumuls[calendarMoments.index("EndGrainFilling")]
    return cumulTTFromZC_65, cumulTTFromZC_39, cumulTTFromZC_91
def generate_omim_list(omimfile):
    """
    Generate dictionary of genotype id-name key-value pairs.

    Lines are expected to be tab separated with the MIM number in the
    second field and the titles in the third; only the first title (before
    the first ';') is kept.  Later occurrences of the same MIM number
    overwrite earlier ones.

    :omimfile: path to mimTitles.txt
    """
    omimlist = {}
    with open(omimfile) as file:
        for line in file:
            # mimTitles.txt contains '#'-prefixed header/comment lines;
            # the original crashed on them with an IndexError.
            if line.startswith('#'):
                continue
            # Clean up lines and pick first occurrence of titles
            lineparts = line.strip().split('\t')
            if len(lineparts) < 3:
                continue  # blank or malformed line
            genotype_id = 'OMIM:' + lineparts[1]
            genotype_name = lineparts[2].split(';')[0]
            omimlist[genotype_id] = genotype_name
    return omimlist
def _find_only_cwl_commit(client, path, revision='HEAD'):
    """Find the most recent isolated commit of a cwl file.

    Walks the history of `path` (newest first) and returns the first commit
    that touches exactly one ``.cwl`` file under the client's cwl prefix.
    Commits with multiple cwl files are disregarded.

    :raises ValueError: when no such commit exists for `path`.
    """
    file_commits = list(
        client.repo.iter_commits(revision, paths=path, full_history=True)
    )
    for commit in file_commits:
        cwl_files = [
            f for f in commit.stats.files.keys()
            # BUG FIX: the original tested `path.endswith('.cwl')`, a
            # loop-invariant condition, so the count never actually
            # filtered by the file extension of each changed file.
            if f.startswith(client.cwl_prefix) and f.endswith('.cwl')
        ]
        if len(cwl_files) == 1:
            return commit
    raise ValueError(
        "Couldn't find a previous commit for path {}".format(path)
    )
def data_cleaning(post: dict):
    """Normalise a raw reddit post and select the fields stored in the db.

    :param post: dict content and metadata of a reddit post
    :return: tuple of db fields in column order
        (author, author_flair_text, post_flair_text, created_utc, reddit_id,
        num_comments, nsfw, score, text, subreddit, title,
        total_awards_received), or None when the post should be skipped
        (deleted/removed/bot author, or a personal u/ subreddit).
    """
    # skip posts from undesirable authors or posts to personal subreddits
    if (post['author'] in ('[deleted]', '[removed]', 'automoderator')
            or post['subreddit_name_prefixed'].startswith('u/')):
        return None
    # posts with no body get a placeholder (mutates the input dict, as the
    # original implementation did)
    if post['selftext'] == '':
        post['selftext'] = "[NO TEXT]"
    author_flair = post['author_flair_text']
    author_flair_text = author_flair.lower().strip() if author_flair else "none"
    link_flair = post['link_flair_text']
    post_flair_text = link_flair.lower().strip() if link_flair else "none"
    # only a subset of the fields are being saved to the db;
    # adjust the table schema and add fields here if desired
    return (
        post['author'].lower().strip(),
        author_flair_text,
        post_flair_text,
        post['created_utc'],
        f"t3_{post['id']}",
        post['num_comments'],
        post['over_18'],
        post['score'],
        post['selftext'],
        post['subreddit'].lower().strip(),
        post['title'],
        post['total_awards_received'],
    )
def coords_to_simbad(ra, dec, search_radius):
    """
    Get SIMBAD search url for objects within search_radius of ra, dec coordinates.
    Args:
        ra (float): right ascension in degrees
        dec (float): declination in degrees
        search_radius (float): search radius around ra, dec in arcminutes
            (DOC FIX: the query hard-codes Radius.unit=arcmin, so the
            original docstring's "arcseconds" was wrong)
    Returns:
        (str): SIMBAD database search url for objects at ra, dec
    """
    return 'http://simbad.u-strasbg.fr/simbad/sim-coo?Coord={0}+%09{1}&CooFrame=FK5&CooEpoch=2000&CooEqui=2000&CooDefinedFrames=none&Radius={2}&Radius.unit=arcmin&submit=submit+query&CoordList='.format(ra, dec, search_radius)
def rgb2psychorgb(rgbVal):
    """ Takes a tuple rgbVal on scale
    from 0 to 255 and returns a tuple
    along the scale of -1 to 1
    (with 0 being gray)
    :type rgbVal: tuple
    :param rgbVal: tuple of r,g,b values
    :raises: N/A
    :rtype: tuple
    """
    # the original used enumerate() but never used the index
    return tuple((x - 127.5) / 127.5 for x in rgbVal)
def ex_data_err():
    """Return the canned SPAMD data-format-error response (EX_DATAERR, code 65)."""
    response = b"SPAMD/1.5 65 EX_DATAERR\r\n\r\n"
    return response
from typing import Optional
def binstr(num: int, width: Optional[int] = None) -> str:
    """ Returns the binary representation of an integer.
    Parameters
    ----------
    num : int
        The number to convert.
    width: int, optional
        Minimum number of digits used (zero-padded). The default is 0,
        i.e. no padding.  (DOC FIX: the original docstring claimed the
        default was a global `BITS`, which the code never consults.)
    Returns
    -------
    binstr : str
    """
    fill = width or 0
    return f"{num:0{fill}b}"
def usub(x):
    """Return the arithmetic negation of x (unary minus, `usub`).

    DOC FIX: the original docstring said "Implement `isub`", but the body
    is unary subtraction, not in-place subtraction.
    """
    return -x
def get_blank_acls():
    """
    Return a blank set of ACLs, with nothing permitted.

    Both the 'v4' and 'v6' families default to deny in both directions and
    carry empty inbound/outbound rule lists.
    """
    def _closed_family():
        # each address family starts fully closed
        return {
            'inbound_default': "deny",
            'outbound_default': "deny",
            'inbound': [],
            'outbound': [],
        }
    return {'v4': _closed_family(), 'v6': _closed_family()}
def _simplify_method_name(method):
"""Simplifies a gRPC method name.
When gRPC invokes the channel to create a callable, it gives a full
method name like "/google.pubsub.v1.Publisher/CreateTopic". This
returns just the name of the method, in this case "CreateTopic".
Args:
method (str): The name of the method.
Returns:
str: The simplified name of the method.
"""
return method.rsplit('/', 1).pop() | 44b037bcfb7549fe25a5314d1a90750e2dfed52e | 43,417 |
def _compare_metadata_dicts(first_dict, second_dict):
"""Compares dictionaries created by `file_name_to_metadata`.
:param first_dict: First dictionary.
:param second_dict: Second dictionary.
:return: are_dicts_equal: Boolean flag.
"""
first_keys = list(first_dict.keys())
second_keys = list(second_dict.keys())
if set(first_keys) != set(second_keys):
return False
for this_key in first_keys:
if first_dict[this_key] is None and second_dict[this_key] is not None:
return False
if second_dict[this_key] is None and first_dict[this_key] is not None:
return False
if first_dict[this_key] is None:
continue
if first_dict[this_key] != second_dict[this_key]:
return False
return True | c96a63b5c84c3173c43b6c8062854f313f3b9037 | 43,418 |
import os
def generateNameS(name, sample):
    """Return a filename based on the name and sample number passed in.

    The resulting name contains a directory and a filename part: the sample
    number becomes the directory name and `name` the filename.
    The sample number normally ranges from [1, nrSamples].
    See also: generateNameT(), generateNameST()
    """
    directory = "%d" % (sample)
    return os.path.join(directory, name)
def decode(scores, geometry, confidenceThreshold):
    """
    Decodes found rects according to confidence threshold.

    The layout matches an EAST-style text detector output -- TODO confirm
    against the producing network: `scores[0, 0, y, x]` is the text
    confidence for feature-map cell (x, y), `geometry[0, 0..3, y, x]` are
    the distances from the cell to the top/right/bottom/left box edges,
    and `geometry[0, 4, y, x]` is a rotation angle.

    Returns (rects, confidences, baggage): axis-aligned [x, y, w, h] boxes,
    their confidences, and the raw per-box geometry used to build each rect.
    """
    (numRows, numCols) = scores.shape[2:4]
    confidences = []
    rects = []
    baggage = []
    for y in range(0, numRows):
        # slice out this row's scores and the five geometry channels
        scoresData = scores[0, 0, y]
        dTop = geometry[0, 0, y]
        dRight = geometry[0, 1, y]
        dBottom = geometry[0, 2, y]
        dLeft = geometry[0, 3, y]
        anglesData = geometry[0, 4, y]
        for x in range(0, numCols):
            # skip low-confidence cells
            if scoresData[x] < confidenceThreshold:
                continue
            confidences.append(float(scoresData[x]))
            # feature-map cell -> image coordinates using a stride of 4
            # (presumably the network downsamples by 4x -- confirm)
            (offsetX, offsetY) = (x * 4.0, y * 4.0)
            angle = anglesData[x]
            # box corners from the per-edge distances; note the angle is
            # recorded in baggage but NOT applied to the rect here
            upperRight = (offsetX + dRight[x], offsetY - dTop[x])
            lowerRight = (offsetX + dRight[x], offsetY + dBottom[x])
            upperLeft = (offsetX - dLeft[x], offsetY - dTop[x])
            lowerLeft = (offsetX - dLeft[x], offsetY + dBottom[x])
            rects.append([
                int(upperLeft[0]),  # x
                int(upperLeft[1]),  # y
                int(lowerRight[0] - upperLeft[0]),  # w
                int(lowerRight[1] - upperLeft[1])  # h
            ])
            baggage.append({
                "offset": (offsetX, offsetY),
                "angle": angle,
                "upperRight": upperRight,
                "lowerRight": lowerRight,
                "upperLeft": upperLeft,
                "lowerLeft": lowerLeft,
                "dTop": dTop[x],
                "dRight": dRight[x],
                "dBottom": dBottom[x],
                "dLeft": dLeft[x]
            })
    return (rects, confidences, baggage)
def tag_to_table(html_tag):
    """
    turns a table of html tags to a table of data

    :param html_tag: bs4.element.Tag for a <table>; each <tr> is expected to
        contain a <td> whose text is "date time" (or just a time) and an <a>
        carrying the headline text and href.  NOTE(review): rows lacking a
        <td> or <a> would raise AttributeError -- presumably the scraped
        page guarantees them; confirm with the caller.
    :return: table: nested list of [date, time, header, link] rows,
        preceded by a header row.
    """
    html_tag = html_tag.findAll('tr')
    # table format: date, time, header, link
    table = [['date', 'time', 'header', 'link']]
    # rows that omit the date inherit it from the last row that carried one
    pivot_date = ''
    for i, table_row in enumerate(html_tag):
        # Read the text of the element 'td' into 'data_text'; date/time
        td_text = table_row.td.text
        # Read the text of the element 'a' into 'link_text'; text
        a_text = table_row.a.text
        # Read link from a; link
        link_text = table_row.a['href']
        # td text is either "date time" (2 tokens) or just "time" (1 token)
        temp = td_text.split()
        time = ''
        if len(temp) == 2:
            pivot_date = temp[0]
            time = temp[1]
        else:
            time = temp[0]
        table.append([pivot_date, time, a_text, link_text])
    return table
def get_radosgw_username(r_id):
    """Generate a username based on a relation id, e.g. "mon:1" -> "juju-mon-1"."""
    sanitized = r_id.replace(":", "-")
    return 'juju-' + sanitized
def read_corpus(corpus_file, binary):
    """Read input document and return the textual reviews and the sentiment or genre.

    Each line carries the genre label, the sentiment label, a document id,
    and then the review tokens.

    :param corpus_file: newline delimited file with a review on each line
    :type corpus_file: .txt file
    :param binary: if True use the sentiment label (2-class problem:
        positive vs negative); otherwise the genre label (6-class problem:
        books, camera, dvd, health, music, software)
    :type binary: bool
    :rtype: (list, list)
    :return: reviews, classes
    """
    documents = []
    labels = []
    label_index = 1 if binary else 0
    with open(corpus_file, 'r', encoding='utf-8') as f:
        for line in f:
            tokens = line.strip().split()
            documents.append(tokens[3:])
            labels.append(tokens[label_index])
    return documents, labels
def path_in_tree(root, sum):
    """Find all root-to-leaf paths whose node values add up to `sum`.

    :type root: TreeNode
    :type sum: int
    :rtype: List[List[int]]
    """
    def recursion_core(node, sum):
        """
        :param node: subtree root
        :param sum: remaining sum to reach along this subtree
        :return: [path(list)] if has path else []
        """
        if not node.left and not node.right:
            # leaf: the path matches iff the remaining sum equals this value
            subpath = [[]] if sum == node.val else []
        else:
            subpath_l = recursion_core(node.left, sum - node.val) if node.left else []
            subpath_r = recursion_core(node.right, sum - node.val) if node.right else []
            subpath = subpath_l + subpath_r
        return [[node.val] + path for path in subpath]
    if not root: return []
    # BUG FIX: the original returned `print(recursion_core(...))`, which
    # always evaluates to None; return the computed paths instead.
    return recursion_core(root, sum)
import argparse
import sys
def parse_args():
    """
    Parsing the input parameters.

    Builds the argparse parser for the lexical-analyzer trainer and returns
    the parsed argparse.Namespace.  Exits the process with status -1 when
    --corpus_proportion_list and --corpus_type_list have different lengths.
    """
    parser = argparse.ArgumentParser("Training for lexical analyzer.")
    # --- data/model locations and training schedule ---
    parser.add_argument(
        "--traindata_dir",
        type=str,
        default="data/train_data",
        help="The folder where the training data is located.")
    parser.add_argument(
        "--testdata_dir",
        type=str,
        default="data/test_data",
        help="The folder where the training data is located.")
    parser.add_argument(
        "--model_save_dir",
        type=str,
        default="./models",
        help="The model will be saved in this path.")
    parser.add_argument(
        "--save_model_per_batchs",
        type=int,
        default=1000,
        help="Save the model once per xxxx batch of training")
    parser.add_argument(
        "--eval_window",
        type=int,
        default=20,
        help="Training will be suspended when the evaluation indicators on the validation set" \
             " no longer increase. The eval_window specifies the scope of the evaluation.")
    parser.add_argument(
        "--batch_size",
        type=int,
        default=32,
        help="The number of sequences contained in a mini-batch, or the maximum" \
             "number of tokens (include paddings) contained in a mini-batch.")
    # --- corpus mixing: the two lists below must have equal lengths ---
    parser.add_argument(
        "--corpus_type_list",
        type=str,
        default=["human", "feed", "query", "title", "news"],
        nargs='+',
        help="The pattern list of different types of corpus used in training.")
    parser.add_argument(
        "--corpus_proportion_list",
        type=float,
        default=[0.2, 0.2, 0.2, 0.2, 0.2],
        nargs='+',
        help="The proportion list of different types of corpus used in training."
    )
    # NOTE(review): type=int with default=False means the parsed value is
    # int when given on the CLI but bool False when omitted.
    parser.add_argument(
        "--use_gpu",
        type=int,
        default=False,
        help="Whether or not to use GPU. 0-->CPU 1-->GPU")
    parser.add_argument(
        "--traindata_shuffle_buffer",
        type=int,
        default=200000,
        help="The buffer size used in shuffle the training data.")
    # --- network hyper-parameters ---
    parser.add_argument(
        "--word_emb_dim",
        type=int,
        default=128,
        help="The dimension in which a word is embedded.")
    parser.add_argument(
        "--grnn_hidden_dim",
        type=int,
        default=256,
        help="The number of hidden nodes in the GRNN layer.")
    parser.add_argument(
        "--bigru_num",
        type=int,
        default=2,
        help="The number of bi_gru layers in the network.")
    # --- learning rates (emb/crf rates are multipliers on the base rate) ---
    parser.add_argument(
        "--base_learning_rate",
        type=float,
        default=1e-3,
        help="The basic learning rate that affects the entire network.")
    parser.add_argument(
        "--emb_learning_rate",
        type=float,
        default=5,
        help="The real learning rate of the embedding layer will be" \
             " (emb_learning_rate * base_learning_rate)."
    )
    parser.add_argument(
        "--crf_learning_rate",
        type=float,
        default=0.2,
        help="The real learning rate of the embedding layer will be" \
             " (crf_learning_rate * base_learning_rate)."
    )
    # --- dictionaries ---
    parser.add_argument(
        "--word_dict_path",
        type=str,
        default="../data/vocabulary_min5k.txt",
        help="The path of the word dictionary.")
    parser.add_argument(
        "--label_dict_path",
        type=str,
        default="data/tag.dic",
        help="The path of the label dictionary.")
    parser.add_argument(
        "--word_rep_dict_path",
        type=str,
        default="conf/q2b.dic",
        help="The path of the word replacement Dictionary.")
    parser.add_argument(
        "--num_iterations",
        type=int,
        default=40000,
        help="The maximum number of iterations. If set to 0 (default), do not limit the number."
    )
    #add elmo args
    parser.add_argument(
        "--elmo_l2_coef",
        type=float,
        default=0.001,
        help="Weight decay. (default: %(default)f)")
    parser.add_argument(
        "--elmo_dict_dir",
        default='data/vocabulary_min5k.txt',
        help="If set, load elmo dict.")
    parser.add_argument(
        '--pretrain_elmo_model_path',
        default="data/baike_elmo_checkpoint",
        help="If set, load elmo checkpoint.")
    args = parser.parse_args()
    # the corpus mixture must assign exactly one proportion per corpus type
    if len(args.corpus_proportion_list) != len(args.corpus_type_list):
        sys.stderr.write(
            "The length of corpus_proportion_list should be equal to the length of corpus_type_list.\n"
        )
        exit(-1)
    return args
def start_vm(client, resource_group_name, vm_name):
    """
    Start a VMware virtual machine.

    Thin wrapper that delegates to client.start() and returns whatever the
    client returns (presumably an Azure long-running-operation poller --
    confirm against the SDK in use).
    """
    return client.start(resource_group_name, vm_name)
def circuit_to_bench(c):
    """
    Generates a str of Bench code from a `CircuitGraph`.
    Parameters
    ----------
    c: Circuit
        the circuit to turn into Bench.
    Returns
    -------
    str
        Bench code.
    Raises
    ------
    ValueError
        if the circuit contains blackboxes or an unknown gate type.
    """
    inputs = []
    outputs = []
    insts = []
    if c.blackboxes:
        raise ValueError(f"Bench format does not support blackboxes: {c.name}")
    # gates
    # constants are synthesised as XOR/XNOR of an arbitrary input with
    # itself; note this raises KeyError if the circuit has no inputs
    const_inp = c.inputs().pop()
    for n in c.nodes():
        if c.type(n) in ["xor", "xnor", "buf", "not", "nor", "or", "and", "nand"]:
            fanin = ", ".join(c.fanin(n))
            insts.append(f"{n} = {c.type(n).upper()}({fanin})")
        elif c.type(n) in ["0"]:
            insts.append(f"{n} = XOR({const_inp}, {const_inp})")
        elif c.type(n) in ["1"]:
            insts.append(f"{n} = XNOR({const_inp}, {const_inp})")
        elif c.type(n) in ["input"]:
            inputs.append(n)
        elif c.type(n) in ["output"]:
            fanin = c.fanin(n).pop()
            insts.append(f"{n} = BUF({fanin})")
            outputs.append(n)
        else:
            raise ValueError(f"unknown gate type: {c.type(n)}")
    bench = f"# {c.name}\n"
    bench += "".join(f"INPUT({inp})\n" for inp in inputs)
    bench += "\n"
    # BUG FIX: the original emitted "OUTPUT({out};)\n" with a stray
    # semicolon inside the parentheses, which is not valid bench syntax.
    bench += "".join(f"OUTPUT({out})\n" for out in outputs)
    bench += "\n"
    bench += "\n".join(insts)
    return bench
def prefix_filt(d, prefix):
    """return all items in dictionary d with key with given prefix.

    Non-string keys are ignored.
    """
    return {k: v for k, v in d.items()
            if isinstance(k, str) and k.startswith(prefix)}
import argparse
def _parse_args():
    """Parse the CLI arguments for use by yades-smtp.

    Returns the parsed argparse.Namespace covering the listen address, the
    mongodb connection, and optional statistics/limit settings.
    """
    parser = argparse.ArgumentParser(description="")
    # --- SMTP listen address ---
    parser.add_argument(
        '--host', type=str, default='localhost',
        help='Host to listen on (defaults to localhost)'
    )
    parser.add_argument(
        '--port', type=int, default=25,
        help='Port to listen on (defaults to 25)'
    )
    # --- mongodb backend ---
    parser.add_argument(
        '--db-uri', type=str, default='mongodb://localhost:27017',
        help='Uri of mongodb server'
    )
    parser.add_argument(
        '--db-name', type=str, default='yades',
        help='Name of mongodb database'
    )
    # --- optional behaviour toggles ---
    parser.add_argument(
        '--collect-statistic', action='store_true',
        help='Save statistic about receiving emails'
    )
    # 0 means "no limit"
    parser.add_argument(
        '--emails-count-limit', type=int, default=0,
        help='Limit the number of emails for separate mailbox'
    )
    return parser.parse_args()
def convert_impressions_data_for_graph(data):
    """
    Converts the output of get_daily_impressions_data to a format useful
    in Highcharts

    :param data: list of rows, each [date, impressions, clicks, ctr, pins,
        blocks] with a datetime-like first element -- TODO confirm the
        exact producer format.
    :return: list of [series_name, points] pairs, where each point is
        ["Date.UTC(y, m, d)", value].
    """
    # data arrives as a list of lists, each sublist having 6 elements
    # way too much code repetition here
    column_names = ['Date', "Impressions", "Clicks", "CTR", "Pins", "Blocks"]
    # JavaScript Date.UTC months are 0-based, hence month-1
    list_of_dates = ["Date.UTC({0}, {1}, {2})".format(
        x[0].year, x[0].month-1, x[0].day) for x in data]
    js_data = [[x, []] for x in column_names if x != 'Date']
    for row_index, row in enumerate(data):
        # NOTE(review): row[1:-1] drops the final column as well as the
        # date, so the "Blocks" series is never populated -- confirm
        # whether that is intended.
        for n, cell in enumerate(row[1:-1]):  # ignore date
            # CTR cells arrive as percentage strings; strip the '%'
            if (type(cell) == str) and cell.endswith("%"):
                cell = cell[:-1]
            js_data[n][1].append([list_of_dates[row_index], cell])
    return js_data
def compose(*fns):
    """Left-to-right function composition.

    compose(f, g, h)(x) == h(g(f(x))): the first function receives the
    original arguments; each subsequent function is applied to the
    previous result.
    """
    def composed(*args, **kwargs):
        it = iter(fns)
        result = next(it)(*args, **kwargs)
        for fn in it:
            result = fn(result)
        return result
    return composed
def from_wsgi_header(header):
    """Convert a WSGI compliant HTTP header into the original header.

    See https://www.python.org/dev/peps/pep-3333/#environ-variables for
    information from the spec.  Returns None for environ keys that do not
    represent HTTP headers.
    """
    prefix = "HTTP_"
    # PEP 333 gives two headers which aren't prepended with HTTP_.
    bare_headers = ("CONTENT_TYPE", "CONTENT_LENGTH")
    if header.startswith(prefix):
        name = header[len(prefix):]
    elif header in bare_headers:
        name = header
    else:
        return None
    return name.replace("_", "-").title()
def unique(S, start, stop):
    """Return True if there are no duplicate elements in slice S[start: stop].

    Classic three-way recursion: the slice is duplicate-free iff
    S[start:stop-1] and S[start+1:stop] are duplicate-free and the first
    and last elements differ.  (Exponential time; the prints trace the
    recursion for teaching purposes.)
    """
    print("start = {}, stop = {}".format(start, stop))
    if stop - start <= 1:  # at most 1 item
        return True
    elif not unique(S, start, stop-1):  # first part has duplicate
        print("checking uniqueness in (S, {}, {})".format(start, stop-1))
        return False
    elif not unique(S, start+1, stop):  # second part has duplicate
        print("checking uniqueness in (S, {}, {})".format(start+1, stop))
        return False
    else:
        print("Check uniqueness of 1st and last element for start = {} and stop = {}"
              .format(start, stop))  # do first and last differ
        # BUG FIX: with the half-open slice S[start:stop] documented above,
        # the last element is S[stop-1]; the original compared S[stop],
        # which is out of range whenever stop == len(S).
        return S[start] != S[stop-1]  # do first and last differ
def split(string, keys):
    """Split `string` on any of the single-character delimiters in `keys`.

    :param string: the string to split
    :param keys: delimiter characters, e.g. keys = [' ', ',', '"', '.', '(', ')']
    :return: list of the non-empty segments between delimiters
    """
    out_strings = []
    cnt = 0
    for i in range(len(string)):
        if string[i] in keys:
            if cnt != i:
                out_strings.append(string[cnt:i])
            cnt = i+1
    # BUG FIX: the original silently dropped a trailing segment that was
    # not followed by a delimiter; emit it here.
    if cnt != len(string):
        out_strings.append(string[cnt:])
    return out_strings
def sort_gtf(giter, chrom_order=None):
    """ Sort GTF
    Sort GTF records by chromosome and start position
    Args:
        giter (iterable): List of GTFLine or GTFCluster objects
        chrom_order (list): Chromosome sort order. Default is alphabetical.
            Chromosomes absent from chrom_order sort after the listed
            ones, alphabetically.
    Returns:
        list: Sorted GTF records.
    """
    # sort by start first; the stable chromosome sort below preserves it
    ret = sorted(giter, key=lambda x: x.start)
    if chrom_order is None:
        ret.sort(key=lambda x: x.chrom)
    else:
        c_d = {k: i for i, k in enumerate(chrom_order)}
        # BUG FIX: the original used an int key for listed chromosomes and
        # a str key for unlisted ones, which raises TypeError in Python 3
        # when both kinds are present. Use a uniform (rank, name) tuple:
        # unlisted chromosomes share the sentinel rank len(c_d) and fall
        # back to alphabetical order.
        ret.sort(key=lambda x: (c_d.get(x.chrom, len(c_d)), x.chrom))
    return ret
def _buildElementNsmap(using_elements):
"""
build a namespace map for an ADMX element
"""
thisMap = {}
for e in using_elements:
thisMap[e.attrib["prefix"]] = e.attrib["namespace"]
return thisMap | dbb9b9c39c74217a97ebc09f15af1016f5c48102 | 43,438 |
def f_polinomio(coef):
    """ Build a single-variable polynomial function from its coefficients.

    `coef` lists coefficients from the highest degree down to the constant
    term, so f_polinomio([1, 0, 0]) is x**2.

    _Doctest
    >>> (f_polinomio([1,0,0]))(2)
    4
    >>> (f_polinomio([1,0]))(123)
    123
    """
    grado = len(coef) - 1
    def poly(x):
        total = 0
        for n, c in enumerate(coef):
            total += c * x ** (grado - n)
        return total
    return poly
def cap_str(line):
    """
    Return the string converted to upper case.

    DOC FIX: the original docstring said "capitalize a string", but the
    body uppercases the whole string via str.upper(), not just the first
    character.
    :param str line:
    :return str:
    """
    return line.upper()
import logging
import sys
import os
def check_profile_config(config):
    """
    Validation checks on the profile config. At least one profile must exist
    (otherwise exit) and the paths for each profile should exist, otherwise the
    profile entry is removed.
    Args:
        config (ConfigObj): The ConfigObj instance.
    Returns:
        ConfigObj: The (possibly pruned) config, after writing it back.
    """
    # Check that at least one profile exists
    if 'profiles' not in config:
        logging.error('There are currently no profiles. Use "sm_profile '
                      '-c <profile>" to create one.')
        sys.exit(1)
    # Check that the paths for each profile exist.
    # BUG FIX: iterate over a snapshot of the keys -- deleting entries
    # while iterating the live .keys() view raises RuntimeError in py3.
    for profile in list(config['profiles'].keys()):
        data_exists = os.path.isdir(config['profiles'][profile]['data_path'])
        delete_profile = False
        if not data_exists:
            # logging.warn is a deprecated alias; use logging.warning
            logging.warning('Data path for profile %s does not exist.' % profile)
            delete_profile = True
        install_exists = os.path.isdir(
            config['profiles'][profile]['install_path'])
        if not install_exists:
            logging.warning(
                'Install path for profile %s does not exist.' % profile)
            delete_profile = True
        if delete_profile:
            logging.warning(' Deleting profile %s.' % profile)
            del config['profiles'][profile]
    config.write()
    return config
def calculate_freq(idx):
    """
    Estimate the sampling frequency of a pandas datetime index

    Computes the mean spacing between consecutive timestamps, reports it,
    and returns it rounded to whole minutes as a frequency string such
    as "5min".
    """
    mean_step = (idx.max() - idx.min()) / (len(idx) - 1)
    cfreq = mean_step.seconds / 60
    print("Calculated frequency is " + "{:5.3f}".format(cfreq) + " minutes")
    print("Rounding to " + str(round(cfreq)) + 'min')
    return str(round(cfreq)) + "min"
def get_dx(grid, data, axis):
    """Figure out the correct hfac given array dimensions.

    Picks (or interpolates) the horizontal grid spacing from grid._ds that
    matches the staggering of `data`, based on which dims ('i'/'i_g',
    'j'/'j_g') the array carries.  `grid` is presumably an xgcm-style Grid
    exposing .interp and ._ds -- TODO confirm the expected type.

    Returns the matching spacing array, or None when nothing matched.
    """
    dx = None
    if axis == 'X':
        # tracer point: dxG lives on the v-cell, move it onto the tracer cell
        if 'i' in data.dims and 'j' in data.dims and 'dxG' in grid._ds:
            dx = grid.interp(grid._ds.dxG, 'Y')
        # Is this right or is there a different dxC for the vorticity cell?
        if 'i' in data.dims and 'j_g' in data.dims and 'dxG' in grid._ds:
            dx = grid._ds.dxG
        if 'i_g' in data.dims and 'j' in data.dims and 'dxC' in grid._ds:
            dx = grid._ds.dxC
        # Is this right or is there a different dxC for the vorticity cell?
        if 'i_g' in data.dims and 'j_g' in data.dims and 'dxC' in grid._ds:
            dx = grid.interp(grid._ds.dxC, 'Y')
    elif axis == 'Y':
        # mirror of the X-axis cases with dyG/dyC
        if 'i' in data.dims and 'j' in data.dims and 'dyG' in grid._ds:
            dx = grid.interp(grid._ds.dyG, 'X')
        # Is this right or is there a different dxC for the vorticity cell?
        if 'i_g' in data.dims and 'j' in data.dims and 'dyG' in grid._ds:
            dx = grid._ds.dyG
        if 'i' in data.dims and 'j_g' in data.dims and 'dyC' in grid._ds:
            dx = grid._ds.dyC
        # Is this right or is there a different dxC for the vorticity cell?
        if 'i_g' in data.dims and 'j_g' in data.dims and 'dyC' in grid._ds:
            dx = grid.interp(grid._ds.dyC, 'X')
    return dx
from io import StringIO
def ase_to_xyz(atoms, comment="", file=True):
    """Convert ASE to xyz

    Writes the standard XYZ layout (atom count, comment line, then one
    "symbol x y z" row per atom).  Returns a StringIO when file=True --
    this function is useful to save xyz to DataFrame -- otherwise the
    xyz text itself.
    """
    buf = StringIO()
    symbols = atoms.get_chemical_symbols()
    buf.write("%d\n%s\n" % (len(symbols), comment))
    for sym, (x, y, z) in zip(symbols, atoms.positions):
        buf.write("%-2s %22.15f %22.15f %22.15f\n" % (sym, x, y, z))
    return buf if file else buf.getvalue()
def config_remove_oif(existing, existing_oif_prefix_source):
    """Generates command list to remove a static-oif configuration
    for an Ansible module

    Args:
        existing (dict): existing igmp static-oif config; when its
            'routemap' key is set, a route-map removal command is built.
        existing_oif_prefix_source (list[dict]): existing prefix/source
            entries; each may carry 'prefix' and optionally 'source'.
    Returns:
        List[str]: 'no ip igmp static-oif ...' commands.

    NOTE(review): when 'routemap' is set but existing_oif_prefix_source is
    empty, the route-map command built below is never appended (the append
    only happens inside the loop) -- confirm whether that is intentional.
    """
    commands = []
    command = None
    if existing.get('routemap'):
        command = 'no ip igmp static-oif route-map {0}'.format(
            existing.get('routemap'))
    if existing_oif_prefix_source:
        for each in existing_oif_prefix_source:
            # prefer the prefix+source form; fall back to prefix-only
            if each.get('prefix') and each.get('source'):
                command = 'no ip igmp static-oif {0} source {1} '.format(
                    each.get('prefix'), each.get('source')
                )
            elif each.get('prefix'):
                command = 'no ip igmp static-oif {0}'.format(
                    each.get('prefix')
                )
            if command:
                commands.append(command)
            command = None
    return commands
import re
def read_gtf(file_name, query_gene_name):
    """Given a GTF file and a gene name to query for,
    return a dictionary where the keys are transcript IDs
    and the values are dicts with "exons", "CDSs" and "UTRs" arrays of
    (start, end) offset pairs for the features present in that
    transcript, e.g.,
    { 'tr1' : {'exons': [(10, 12), (17, 27)], 'CDSs': [], 'UTRs': []} }"""
    def read_gtf_keyvalues(keyvaluestr):
        # yields (key, value) pairs from the GTF attribute column, which
        # is formatted as: key "value"; key "value"; ...
        parts = keyvaluestr.split(";")
        for keyvalue in parts:
            m = re.match(r'\s*(\S+)\s*"(\S+)"', keyvalue)
            if m:
                yield (m.group(1), m.group(2))
    matching_transcripts = {}
    with open(file_name) as f:
        for line in f:
            # GTF columns: seqname source feature start end score strand
            # frame attributes; maxsplit=8 keeps the attribute column whole
            parts = re.split(r"\s", line, maxsplit=8)
            if parts[2] in ["exon", "CDS", "UTR"]:
                gene_name, transcript_id = "", ""
                for k, v in read_gtf_keyvalues(parts[8]):
                    if k == "gene_name":
                        gene_name = v
                    elif k == "transcript_id":
                        transcript_id = v
                if gene_name == query_gene_name:
                    if transcript_id not in matching_transcripts:
                        matching_transcripts[transcript_id] = {
                            "exons": [],
                            "CDSs": [],
                            "UTRs": [],
                        }
                    # start/end are 1-based coordinates in columns 4 and 5
                    start_and_end_offset = (int(parts[3]), int(parts[4]))
                    if parts[2] == "exon":
                        matching_transcripts[transcript_id]["exons"].append(
                            start_and_end_offset
                        )
                    elif parts[2] == "CDS":
                        matching_transcripts[transcript_id]["CDSs"].append(
                            start_and_end_offset
                        )
                    elif parts[2] == "UTR":
                        matching_transcripts[transcript_id]["UTRs"].append(
                            start_and_end_offset
                        )
    return matching_transcripts
def _clean_name(net, name):
""" Clear prefix and some suffixes for name """
# prefix = net._prefix
# name = name.replace(prefix, "")
if name.endswith("_fwd_output"):
name = name[:-len("_fwd_output")]
elif name.endswith("_fwd"):
name = name[:-len("_fwd")]
elif name.endswith("_output"):
name = name[:-len("_output")]
return name | 5be69e1b073f7da970dcf784cb1c37382231e418 | 43,449 |
import os
def _GetPackageDir(archive_path):
"""Get package directory."""
return os.path.join(archive_path, 'infra_go_packages') | 07f688dccb7b1925b453742b4f010705b2ae8a57 | 43,450 |
def stack_deck(deck_size, cards, *_):
    """Make new stack from deck.

    "Deal into new stack" shuffle step: a tracked card position p maps to
    (-p + change) % deck_size.  NOTE(review): zip with the pair (0, -1)
    truncates `cards` to its first two entries, each with a different
    offset -- presumably two tracked card positions; confirm with the
    caller before generalising.  Extra positional arguments are ignored.
    """
    return [(-card + change) % deck_size
            for card, change in zip(cards, (0, -1))]
import csv
def get_receivers():
    """
    Return a list of receivers here: the first column of each non-empty
    row of receivers.csv in the current directory.
    """
    # newline='' is the csv-module recommended way to open csv files
    with open("receivers.csv", newline='') as fin:
        reader = csv.reader(fin)
        # skip blank rows, which would otherwise raise IndexError on row[0]
        receivers = [row[0] for row in reader if row]
    return receivers
def zfp_expert_opts(minbits, maxbits, maxprec, minexp):
    """Create compression options for ZFP in "expert" mode
    See the ZFP docs for the meaning of the parameters.
    """
    ZFP_MODE_EXPERT = 4
    # layout: (mode, reserved, minbits, maxbits, maxprec, minexp)
    return (ZFP_MODE_EXPERT, 0, minbits, maxbits, maxprec, minexp)
def check_file(_3DVIEWER_FILE):
    """ This checks if file has already been fixed with this program.

    Returns True when the file contains no "OriginalDendrite=" marker,
    False when the marker is found or the file cannot be read.
    """
    try:
        with open(_3DVIEWER_FILE, 'r') as _3dfile:
            # stream line by line instead of reading the whole file
            for line in _3dfile:
                if "OriginalDendrite=" in line:
                    print(_3DVIEWER_FILE, "is not orginal file from neuromorpho 3Dviewer.")
                    return False
        return True
    except OSError:
        # narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and programming errors
        print("There was a problem opening 3Dviewer file.")
        return False
import json
def cleaneddb2vg(meta_file):
    """transfer corrupted-removed db indices (108073) to vg indexes

    Builds the forward map (db index -> vg image_id) in a single pass over
    the metadata, skipping the four known-corrupted images, then inverts
    it for the reverse lookup.
    """
    corrupted_ims = {'1592.jpg', '1722.jpg', '4616.jpg', '4617.jpg'}
    with open(meta_file) as fh:
        image_meta = json.load(fh)
    dbidx2vgidx = {}
    db_idx = 0
    for item in image_meta:
        basename = str(item['image_id']) + '.jpg'
        if basename in corrupted_ims:
            continue
        dbidx2vgidx[db_idx] = item['image_id']
        db_idx += 1
    vgidx2dbidx = {vg: db for db, vg in dbidx2vgidx.items()}
    return dbidx2vgidx, vgidx2dbidx
import numpy
def norm(x):
    """
    Calculate the Euclidean norm of a d-dimensional vector.

    Delegates to numpy.linalg.norm, i.e. sqrt(sum(|x_i|**2)) for a
    rank-one array.
    @param x: vector (i.e. rank one array)
    @return: length of vector
    """
    return numpy.linalg.norm(x)
import subprocess
def watcher_proc(pid=None):
    """
    Open an xterm running `watch pstree -cap <pid>` and block until it exits.

    :param pid: process id to watch (stringified into the command line)
    :return: 0 on success; subprocess.CalledProcessError is raised otherwise
    """
    # : this is a synchronous call that waits for completion.
    # Pass an argv list with shell=False (the default) instead of the
    # original shell=True string, so `pid` is never shell-interpolated.
    return subprocess.check_call(
        ["xterm", "-e", "watch", "pstree", "-cap", str(pid)])
def combinations(n, buttons):
    """List every letter string the key-press sequence `n` can spell.

    The order of presses is known but repeats are ignored; `buttons` maps
    each pressed key (a character of `n`) to its letters, and every
    combination of one letter per key is produced.
    """
    if n == '':
        return ['']
    prefixes = combinations(n[:-1], buttons)
    last_key_letters = buttons[n[-1]]
    return [prefix + letter
            for prefix in prefixes
            for letter in last_key_letters]
import math
def check_is_number(arg_value, arg_type, arg_name=None, prim_name=None):
    """
    Checks input value is float type or not.
    Usage:
        - number = check_is_number(number, int)
        - number = check_is_number(number, int, "bias")
        - number = check_is_number(number, int, "bias", "bias_class")
    Raises TypeError when arg_value is not of arg_type (bool is rejected
    explicitly), and ValueError for inf/nan values.
    """
    prim_name = f'in \'{prim_name}\'' if prim_name else ''
    # BUG FIX: the original interpolated prim_name here, so the reported
    # argument name was wrong whenever arg_name was supplied.
    arg_name = f'\'{arg_name}\'' if arg_name else 'Input value'
    if isinstance(arg_value, arg_type) and not isinstance(arg_value, bool):
        if math.isinf(arg_value) or math.isnan(arg_value):
            raise ValueError(f'{arg_name} {prim_name} must be legal float, but got `{arg_value}`.')
        return arg_value
    raise TypeError(f'{arg_name} {prim_name} must be float, but got `{type(arg_value).__name__}`')
def cygwin_to_win_path(path):
    """
    Converts a Cygwin path to a Windows path.
    Only paths starting with "/cygdrive/" can be converted.
    :param path: Cygwin path to convert.
        Must be an absolute path.
    :type path: str
    :returns: Windows path.
    :rtype: str
    :raises ValueError: Cannot convert the path.
    """
    if not path.startswith("/cygdrive/"):
        raise ValueError(
            "Only paths starting with \"/cygdrive/\" can be converted.")
    # character 10 is the single drive letter right after "/cygdrive/"
    drive = path[10].upper()
    # the rest of the path after "/cygdrive/X"
    path = path[11:]
    i = 0
    r = []
    while i < len(path):
        c = path[i]
        if c == "\\":
            # a backslash escapes the next character: emit it literally
            # (the slice is safe at end-of-string, yielding "")
            r.append( path[i+1:i+2] )
            i += 2
            continue
        if c == "/":
            # unescaped forward slashes become Windows separators
            c = "\\"
        r.append(c)
        i += 1
    path = "".join(r)
    return "%s:%s" % (drive, path)
import os
def generate_signature(value):
    """
    generate _signature parameter
    :param value: share_url id
    :return: signature string

    Shells out to ``node signture.js <value>`` in this file's directory and
    returns the first line of its stdout (raises IndexError on no output).
    """
    cwd = os.path.dirname(__file__)
    # NOTE(review): `value` is interpolated into a shell command without
    # quoting -- shell-injection risk if it can come from untrusted input;
    # confirm callers pass sanitised ids (or switch to subprocess with an
    # argv list).
    p = os.popen('cd %s && node signture.js %s' % (cwd, value))
    return p.readlines()[0]
import re
def rmDigit_string(text):
    """Remove every whitespace-delimited token that contains a digit.

    :param text: input string
    :return: clean string with digit-containing tokens removed, then
        stripped of leading/trailing whitespace
    :rtype: string
    """
    # Raw string fixes the invalid escape sequences (\S, \d) the original
    # relied on in a plain string literal.
    return re.sub(r"\S*\d\S*", "", text).strip()
import math
def extract_source_at_pos(MAP, x_pos, y_pos, PSF_npix_x, PSF_npix_y):
    """
    Extract a sub-MAP around the position (x_pos, y_pos).

    The shape of the extracted window matches the PSF map.

    Parameters
    ----------
    MAP : numpy masked array
        The original MAP in which the source has to be extracted
    x_pos : float
        The x (first dimension) position of the source
    y_pos : float
        The y (second dimension) position of the source
    PSF_npix_x : integer
        The x (first dimension) size of the Point Spread Map
    PSF_npix_y : integer
        The y (second dimension) size of the Point Spread Map

    Returns
    -------
    SRC_MAP : numpy masked array
        The SouRCe MAP: a window of shape (PSF_npix_x, PSF_npix_y)
        centred on the rounded source position.
    """
    # Half-width offsets locate the window's lower corner from its centre.
    half_x = int(math.floor(PSF_npix_x / 2))
    half_y = int(math.floor(PSF_npix_y / 2))
    # Corner pixels around the rounded source position.
    x_start = int(round(x_pos)) - half_x
    y_start = int(round(y_pos)) - half_y
    return MAP[x_start:x_start + PSF_npix_x, y_start:y_start + PSF_npix_y]
def FoP_update(L, R, t0, t1, u, v):
    """
    Update 'L' (and 'R') for the link (t0, t1, u, v) under FoP properties.

    If v has not been reached yet, record t0 as its arrival time in L
    (and in R if absent there too) and report success; otherwise leave
    both dictionaries untouched.

    :param L: dict of node -> earliest arrival time
    :param R: dict of node -> earliest arrival time
    :param t0: link departure time
    :param t1: link arrival time (unused here)
    :param u: link source node (unused here)
    :param v: link target node
    :return: True if v was newly recorded, False otherwise
    """
    if v in L:
        # v was already reached at some t <= t0: nothing to update.
        return False
    L[v] = t0
    R.setdefault(v, t0)
    return True
def get_terminal_branch_lengths(Z, max_label):
    """Get terminal branch lengths of a linkage matrix.

    A branch is terminal when the child label in column 0 (then column 1)
    of Z is below max_label; its length is read from column 2.
    """
    lengths = []
    for child_col in (0, 1):
        for row in Z:
            if row[child_col] < max_label:
                lengths.append(row[2])
    return lengths
def applyLatticeLimit(lattice, bounds):
    """Remove lattice points outside the data bounds. For 2D and 3D data.

    Parameters
    ---------
    lattice : ndarray; (N, 2) or (N, 3)
        From lattice2D
    bounds : tuple,
        Minimum and maximum for axes 0 and 1
        (min0, max0, min1, max1) or axes 0, 1 and 2
        (min0, max0, min1, max1, min2, max2)

    Returns
    -------
    : ndarray; (M, 2) or (M, 3)
        Same as lattice input except only containing
        points within the bounds specified. M <= N
    """
    if len(bounds) not in (4, 6):
        print("Bounds needs be be either 4 or 6 value tuple.")
        return None
    # Strict inequalities: points exactly on a bound are discarded.
    keep = (lattice[:, 0] > bounds[0]) & (lattice[:, 0] < bounds[1])
    for axis in range(1, len(bounds) // 2):
        keep &= (lattice[:, axis] > bounds[2 * axis]) & (lattice[:, axis] < bounds[2 * axis + 1])
    return lattice[keep, :]
def _get_popinfo(popinfo_file):
    """
    Helper function for make_data_dict_vcf. Takes an open file that contains
    information on the population designations of each sample within a VCF file,
    and returns a dictionary containing {"SAMPLE_NAME" : "POP_NAME"} pairs.
    The file should be formatted as a table, with columns delimited by
    whitespace, and rows delimited by new lines. Lines beginning with '#' are
    considered comments and will be ignored. Each sample must appear on its own
    line. If no header information is provided, the first column will be assumed
    to be the SAMPLE_NAME column, while the second column will be assumed to be
    the POP_NAME column. If a header is present, it must be the first
    non-comment line of the file. The column positions of the words "SAMPLE" and
    "POP" (ignoring case) in this header will be used to determine proper
    positions of the SAMPLE_NAME and POP_NAME columns in the table.
    popinfo_file : An open text file of the format described above.
    """
    popinfo_dict = {}
    # Default column positions when no header row is present.
    sample_col = 0
    pop_col = 1
    header = False
    # check for header info
    # Only the first non-comment line is inspected: the loop body ends in
    # an unconditional break, so this is a single-line header probe.
    for line in popinfo_file:
        if line.startswith('#'):
            continue
        cols = [col.lower() for col in line.split()]
        if 'sample' in cols:
            header = True
            sample_col = cols.index('sample')
        if 'pop' in cols:
            header = True
            pop_col = cols.index('pop')
        break
    # read in population information for each sample
    # Rewind so the data pass re-reads the whole file from the start.
    popinfo_file.seek(0)
    for line in popinfo_file:
        if line.startswith('#'):
            continue
        cols = line.split()
        sample = cols[sample_col]
        pop = cols[pop_col]
        # avoid adding header to dict
        # The flag is cleared after the first match, so only one header
        # row is skipped even if later rows contain the same words.
        if (sample.lower() == 'sample' or pop.lower() == 'pop') and header:
            header = False
            continue
        popinfo_dict[sample] = pop
    return popinfo_dict | 01b1ce7d45bbaace42ab3cc13e6ab9c29353e8bf | 43,469
def decimalDegrees2DMS(value, type):
    """
    Converts a Decimal Degree Value into
    Degrees Minute Seconds Notation.

    Pass value as double
    type = {Latitude or Longitude} as string

    returns a string as D:M:S:Direction
    created by: anothergisblog.blogspot.com
    """
    whole_degrees = int(value)
    minutes_float = abs((value - int(value)) * 60)
    whole_minutes = int(minutes_float)
    seconds = abs((minutes_float - int(minutes_float)) * 60)

    # Hemisphere letter; empty for zero degrees or unknown `type`.
    direction = ""
    if type == "Longitude" and whole_degrees < 0:
        direction = "W"
    elif type == "Longitude" and whole_degrees > 0:
        direction = "E"
    elif type == "Latitude" and whole_degrees < 0:
        direction = "S"
    elif type == "Latitude" and whole_degrees > 0:
        direction = "N"

    # Seconds are truncated (not rounded) to at most five characters.
    return str(whole_degrees) + "º " + str(whole_minutes) + "' " + \
        str(seconds)[0:5] + "'' " + direction
from io import StringIO
import csv
def parse_csv_line(line: str) -> list:
    """Split a single CSV-formatted line into its list of fields.

    Returns an empty list for an empty line.
    """
    reader = csv.reader(StringIO(line))
    return next(reader, [])
def diff(it1, it2):
    """Return the elements of it1 that do not appear in it2, in order."""
    # Set membership makes each lookup O(1).
    excluded = set(it2)
    return list(filter(lambda item: item not in excluded, it1))
def S_from_Ainv(Ainv):
    """See footnote in notes.pdf.

    Computes S = Ainv[1:, 1:] - (1/k) * b b^T with k = Ainv[0, 0] and
    b = Ainv[0, 1:].

    NOTE(review): S is a view into Ainv and the subtraction is in-place,
    so the lower-right block of Ainv itself is modified.
    """
    scale = Ainv[0, 0]
    border = Ainv[0, 1:].unsqueeze(0)
    S = Ainv[1:, 1:]
    # In-place update; `border * border.t()` broadcasts to the outer product.
    S -= (1 / scale) * (border * border.t())
    return S
def getSameSpeed(drives):
    """ Return a drives list of same capacity if attribute
        'spindle_speed' exists in each drive information.
        Return an empty list if 'spindle_speed' exists only in
        some drives, otherwise, return the drives list without
        the attribute.
    """
    try:
        # Speed of the first drive is the reference value.
        speed = drives[0]['spindle_speed(RPM)']
        # Materialize as a list: Python 3 `filter` is a lazy iterator,
        # which contradicts the documented list return type and would
        # defer a KeyError outside this try block.
        return [d for d in drives if d['spindle_speed(RPM)'] == speed]
    except KeyError:
        # Differing key counts suggest the attribute exists only in some
        # drives; treat that as inconsistent input.
        if len(set(map(len, drives))) > 1:
            return []
        return drives
def is_file_of_extension(entry, *args):
    """
    Checks if the given directory entry is a file
    and ends in one of the given extensions.

    Extensions are passed in the args parameter and
    must be prefixed by a dot.  With no extensions given,
    any file matches.
    """
    if not entry.is_file():
        return False
    # str.endswith accepts a tuple of suffixes; no filter means any file.
    return not args or entry.name.endswith(args)
def _tablename_to_class(base, tablename):
""" Return class of tablename
"""
for c in base._decl_class_registry.values():
if hasattr(c, '__tablename__') and c.__tablename__ == tablename:
return c | cd1bd98a3bcc3646f02a13c9dd36c0f604ca2a36 | 43,477 |
def backward(my_string):
    """Return the reverse of the string 'my_string'.

    Examples:
    >>> backward("python")
    'nohtyp'
    >>> backward("ipython")
    'nohtypi'
    """
    return "".join(reversed(my_string))
def n_i_j(pixel_index, offset):
    """Return pixel_index shifted left by offset, clamped at zero.

    Args:
        pixel_index (int): Current pixel index.
        offset (int): Offset subtracted from the pixel index.
            (Docstring fix: the original documented a nonexistent
            `block_size` parameter.)

    Returns:
        int
    """
    return max(pixel_index - offset, 0)
def ask_move():
    """Ask user for move to make.

    Returns
    -------
    str
        User movement input.
    """
    return input('Make a move(ex. b2 b3) or Q to quit: ')
def miscs_update_idxs_vals(miscs, idxs, vals, assert_all_vals_used=True, idxs_map=None):
    """
    Unpack the idxs-vals format into the list of dictionaries that is `misc`.

    idxs_map: a dictionary of id->id mappings so that the misc['idxs'] can
    contain different numbers than the idxs argument. XXX CLARIFY
    """
    if idxs_map is None:
        idxs_map = {}
    assert set(idxs.keys()) == set(vals.keys())

    by_tid = {doc["tid"]: doc for doc in miscs}

    # Reset every misc to empty idxs/vals lists before filling.
    for doc in miscs:
        doc["idxs"] = {node_id: [] for node_id in idxs}
        doc["vals"] = {node_id: [] for node_id in idxs}

    for node_id in idxs:
        assert len(idxs[node_id]) == len(vals[node_id])
        for tid, val in zip(idxs[node_id], vals[node_id]):
            tid = idxs_map.get(tid, tid)
            # With assert_all_vals_used, a missing tid raises KeyError here.
            if assert_all_vals_used or tid in by_tid:
                by_tid[tid]["idxs"][node_id] = [tid]
                by_tid[tid]["vals"][node_id] = [val]

    return miscs
import warnings
def unpackedList(obj):
    """
    Adapted from PyCOMPS module exaqute.ExaquteTask.

    Flatten a list of lists into one list, inserting the marker "##"
    between consecutive sublists.  A list containing no list elements
    is returned unchanged.
    """
    warnings.warn(('unpackedList is deprecated. '
                   'Use COLLECTION type in task parameters with COMPSs version ≥ 2.6. '
                   'Retro-compatibility is ensured only until 2020-08.'),
                  DeprecationWarning)
    if all(not isinstance(element, list) for element in obj):
        return obj
    flattened = list(obj[0])
    for sublist in obj[1:]:
        flattened.append("##")
        flattened.extend(sublist)
    return flattened
import click
def command_option(func):
    """Add a command option."""
    # Build the decorator first, then apply it to the wrapped function.
    decorator = click.option('-c', '--command', metavar='COMMAND',
                             help='Command to run remotely after operation is complete.')
    return decorator(func)
def Teff2Tirr(Teff, Tint):
    """Tirr from effective temperature and intrinsic temperature

    Args:
        Teff: effective temperature
        Tint: intrinsic temperature

    Return:
        Tirr: irradiation temperature

    Note:
        Here we assume A=0 (albedo) and beta=1 (fully-energy distributed)
    """
    quartic_balance = 4.0 * Teff ** 4 - Tint ** 4
    return quartic_balance ** 0.25
def get_seq_lengths_from_seqs_dic(seqs_dic):
    """
    Given a dictionary of sequences, return dictionary of sequence lengths.

    Mapping is sequence ID -> sequence length.
    """
    assert seqs_dic, "sequence dictionary seems to be empty"
    return {seq_id: len(seq) for seq_id, seq in seqs_dic.items()}
import ast
def parse(path):
    """parse a file at path and returns an AST tree structure

    Args:
        path (str): filepath of the file to parse

    Returns:
        ast.Module: ast tree of the parsed file
    """
    with open(path) as source_file:
        source_text = source_file.read()
    # The path is passed as filename so error messages point at the file.
    return ast.parse(source_text, path)
def get_format(path):
    """
    Get the file's format by path.

    :param path: (str) The path of the file.
    :return: (str) The format of the file (substring after the last dot;
        the whole path if it contains no dot).
    """
    return path.rsplit('.', 1)[-1]
def l_to_q(liter):
    """
    Convert liters to quarts US
    """
    US_QUARTS_PER_LITER = 1.056688
    return liter * US_QUARTS_PER_LITER
def check_position_detection(bounds):
    """Check whether the specified range of 5 intervals has the right
    proportions to correspond to a slice through a position detection pattern.

    An ideal slice through a position detection pattern consists of
    5 intervals colored B,W,B,W,B with lengths proportional to 1,1,3,1,1.

    Returns:
        (center_coord, pixels_per_module) if this could be a position
        detection pattern, otherwise (0, 0).
    """
    # Expected relative positions of black/white boundaries
    # within the position detection pattern.
    ideal_positions = [-3.5, -2.5, -1.5, 1.5, 2.5, 3.5]

    if len(bounds) != 6 or bounds[4] >= bounds[5]:
        return (0, 0)

    total_width = float(bounds[5] - bounds[0])
    core_width = float(bounds[3] - bounds[2])
    if total_width < 7 or core_width < 3:
        return (0, 0)

    center = float(sum(bounds)) / 6.0
    pitch = (total_width + core_width) / 10.0

    # Every boundary must fall within half a module of its ideal position.
    for expected, bound in zip(ideal_positions, bounds):
        if abs((bound - center) / pitch - expected) >= 0.5:
            return (0, 0)
    return (center, pitch)
import numpy
def _format_float(n, length=20, digits=8):
"""
Return a float with a certain length for readability
:param n: A float value
:return: The float value padded with spaces
"""
rounded = str(numpy.round(n, digits))
return f"{rounded: >{length}}" | fa14e9a66d201b3f78a0b4c25c76fceaf946ab45 | 43,495 |
import os
import logging
import re
def get_task_type(archive_file_name, archive_member_names):
    """
    Determine the submission task type from the archive name and contents.

    :param archive_file_name: file name of the submission archive; the number
        of periods in its base name encodes the task type
    :param archive_member_names: a list of members/names in the archive
    :return: string corresponding to the task type, which could be any of the
        following: '1a', '1b', '2', or '3'; None when it cannot be determined
    :rtype: string
    """
    # count the number of .'s in the archive file name to determine the task type. Hope the user has the proper naming convention
    basename = os.path.basename(archive_file_name)
    dot_count = basename.count('.')
    # get rid of extra . count if the file extention is .tar.gz
    if basename.endswith('.tar.gz'):
        dot_count -= 1
    if dot_count == 1:
        # if theres only 1 dot_count, we need to check to see if it is 1a or 1b.
        logging.info("Based on the number of periods in the file name, this is either submission task type 1a OR 1b...")
        member_names_str = " ".join(archive_member_names)
        # Raw strings fix the invalid escape sequences (e.g. "\S") the
        # original non-raw patterns relied on.
        # NOTE(review): the unescaped '.' before 'ttl' in the first pattern
        # matches any character; kept as-is to preserve behavior.
        # if .ttl file in "/NIST/" directory then ttls_under_nist_dir is not none (possible 1a)
        ttls_under_nist_dir = re.compile(r".*\/NIST\/[^\/]+.ttl").search(member_names_str)
        # if ".ttl file in a subdirectory of /NIST/" then ttls_under_nist_subdir is not none (possible 1b)
        ttls_under_nist_subdir = re.compile(r".*\/NIST\/\S*\/\S*\.ttl").search(member_names_str)
        detected = 0
        if ttls_under_nist_dir is not None:
            detected += 1
        if ttls_under_nist_subdir is not None:
            detected += 2
        '''
        detected values:
        0 -- nothing detected continue as 1a
        1 -- only 1a detected continue as 1a
        2 -- only 1b detected continue as 1b
        3 -- 1a and 1b detected continue as 1b
        '''
        if detected == 0:
            logging.warning("No .ttl files found in a .../NIST/ directory, continuing to check as submission type 1a.")
            return '1a'
        if detected == 1:
            logging.info("Found .ttl files in a .../NIST/ directory, continuing to check as submission type 1a.")
            return '1a'
        elif detected == 2:
            logging.info("Found .ttl files in a subdirectory of ../NIST/, continuing to check as submission type 1b.")
            return '1b'
        elif detected == 3:
            logging.warning("Found .ttl files in a .../NIST/ directory and in a subdirectory of ../NIST/, continuing to check as submission type 1b.")
            return '1b'
    elif dot_count == 2:
        logging.info("Based on the number of periods in the file name, this archive is assumed to be a Task {0} submission.".format('2'))
        return '2'
    elif dot_count == 3:
        logging.info("Based on the number of periods in the file name, this archive is assumed to be a Task {0} submission.".format('3'))
        return '3'
    else:
        logging.error("Based on the number of periods in the file name, this is neither a Task 1a, 1b, 2, or 3 submission. Please name your submission file based on the rules defined in section 9 of the NIST AIDA 2019 Evaluation Plan.")
        return None
def artf_in_scan(scan, width, img_x_min, img_x_max, verbose=False):
    """return precise artefact angle and distance for lidar & camera combination"""
    if scan is None:
        return 0, 0
    # The scan is already in mm; the returned angle is int deg*100, ready to send.
    angular_resolution = len(scan) / 270
    mid_index = len(scan) // 2
    camera_fov_deg = 60
    # Small image x corresponds to a positive angle (left side of view).
    deg_max = camera_fov_deg * (width / 2 - img_x_min) / width
    deg_min = camera_fov_deg * (width / 2 - img_x_max) / width
    # Widen the window; in particular the valve is detected with offset.
    tolerance = int(5 * angular_resolution)
    left_index = mid_index + int(deg_min * angular_resolution) - tolerance
    right_index = mid_index + int(deg_max * angular_resolution) + tolerance
    # Non-positive readings are invalid; mask them with a huge distance.
    cleaned = [reading if reading > 0 else 100000 for reading in scan]
    window = cleaned[left_index:right_index]
    dist_mm = min(window)
    index = left_index + window.index(dist_mm)
    deg_100th = int(((index / angular_resolution) - 135) * 100)
    return deg_100th, dist_mm
def select_login_form(forms):
    """
    Select form having highest probability for login class.

    :param dict forms: Nested dict containing label probabilities for each
                       form.
    :returns: (login form, login meta)
    :rtype: tuple
    """
    best_form, best_meta, best_prob = None, None, 0
    for form, meta in forms:
        # Only the "login" label matters; ties keep the earlier form.
        prob = meta["form"].get("login", 0)
        if prob > best_prob:
            best_form, best_meta, best_prob = form, meta, prob
    return best_form, best_meta
from typing import Dict
from typing import Any
from typing import Optional
def find_nested_field_path(
    field_name: str, mapping_definition: Dict[str, Any]
) -> Optional[str]:
    """
    Given a field name, find the nested path if any related to field name
    definition in provided mapping definition

    Parameters
    ----------
    field_name:
        The field name
    mapping_definition:
        A mapping definition where field name is defined

    Returns
    -------
    The found nested path if any, None otherwise
    """

    def _flatten(properties: Dict[str, Any], prefix: str = "") -> Dict[str, Any]:
        # Map dotted property path -> declared type, depth first.
        flat: Dict[str, Any] = {}
        for name, definition in properties.items():
            path = f"{prefix}.{name}" if prefix else name
            if "type" in definition:
                flat[path] = definition["type"]
            if "properties" in definition:
                flat.update(_flatten(definition["properties"], prefix=path))
        return flat

    for path, declared_type in _flatten(mapping_definition).items():
        # NOTE(review): plain startswith prefix match, so "user" would also
        # match "username"; kept as-is to preserve original behavior.
        if declared_type == "nested" and field_name.startswith(path):
            return path
    return None
import re
def findDirectives(code):
    """Find testing directives.

    Scans VB source for comment lines of the form
    "' VB2PY-Test: <name> = <value>" and returns a list of
    (name, value) tuples.
    """
    # Raw string fixes the invalid escape sequences (\s) the original
    # relied on in a plain string literal.
    finder = re.compile(r"^'\s*VB2PY-Test\s*:\s*(\w+)\s*=\s*(.*)$", re.MULTILINE)
    return finder.findall(code)
def get_fileno(file):
    """Get the os-level fileno of a file-like object.

    This function decodes several common file wrapper structures in an attempt
    to determine the underlying OS-level fileno for an object.
    """
    # Unwrap well-known wrapper attributes until a fileno() appears.
    wrapper_attrs = ("file", "_file", "_fileobj")
    while not hasattr(file, "fileno"):
        for attr in wrapper_attrs:
            if hasattr(file, attr):
                file = getattr(file, attr)
                break
        else:
            raise AttributeError
    return file.fileno()
import pandas
import json
def load_json(path: str) -> dict:
    """Load object from .json file.

    json.load(object_hook) is used to construct pandas.Timestamp from
    object like {type: datetime, value: 2021-04-01}.
    """

    def _revive(obj):
        # Plain objects (no "type" tag) pass through unchanged.
        if "type" not in obj:
            return obj
        if obj["type"] == "datetime":
            return pandas.Timestamp(obj["value"])
        raise TypeError(f"type `{obj['type']}` is not recognized")

    with open(path, "r", encoding="utf_8") as handle:
        return json.load(handle, object_hook=_revive)
def derive_from_datetime(dataset):
    """
    Usage: [arg1]:[pandas dataframe]
    Prerequisite: Type for datetime columns to be defined correctly
    Description: Derives the hour, weekday, year and month from each
    datetime column as new columns on the dataframe
    Returns: tuple of (dataframe with the new derived columns,
    list of datetime column names that were processed)
    """
    datetime_columns = []
    for column_name, column_dtype in dataset.dtypes.items():
        if 'datetime' not in str(column_dtype):
            continue
        datetime_columns.append(column_name)
        source = dataset[column_name]
        dataset['hour_of_' + column_name] = source.apply(lambda ts: ts.hour)
        dataset['weekday_of_' + column_name] = source.apply(lambda ts: ts.weekday())
        dataset['year_of_' + column_name] = source.apply(lambda ts: ts.year)
        dataset['month_of_' + column_name] = source.apply(lambda ts: ts.month)
    return dataset, datetime_columns
def convert_percentage_string_to_float(percentage_string):
    """Converts a string of the form 'xx.xx%' to its equivalent decimal value.

    :param percentage_string: A string in percentage form to be converted.
    :returns: A floating-point number rounded to 4 decimal places (2 decimals in percentage form).
    """
    numeric_part = percentage_string.replace('%', '')
    return round(float(numeric_part) / 100, 4)
import re
def get_platform(source = '<PLT_1>'):
    """A function to extract the platform from a source string.

    Args:
        source (str, optional): source string that is usually contains the platform that is used to post the tweet. Defaults to '<PLT_1>'.

    Returns:
        str: the platform if found, otherwise the stamp PLT_1. This stamp is used for any further updates.
    """
    platform = 'PLT_1'
    try:
        # The platform text sits between the 2nd and 3rd angle-bracket
        # delimiters, e.g. '<a ...>Twitter for iPhone</a>'.
        platform = re.sub('[<>]', '\t', source).split('\t')[2]
        platform = platform.replace('Twitter for', '').replace('Twitter', '')
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; malformed input falls back to the stamp.
        platform = 'PLT_1'
    return platform.strip()
def read_raw_message(raw_data: bytes) -> tuple:
    """Splits the message header from the data bytes of the message.

    :param raw_data: Full UDP packet
    :type raw_data: bytes
    :raises ValueError: When the header line is not present.
    :return: (Header, Data bytes)
    :rtype: Tuple
    """
    # bytes.find runs in C, replacing the original byte-by-byte Python scan.
    newline_index = raw_data.find(b"\n")
    if newline_index == -1:
        raise ValueError("Unable to find the end of line")
    header = raw_data[:newline_index].decode("utf-8")
    return header, raw_data[newline_index + 1:]
import os
def choose_avaliable_path(path):
    """choose a avaliable path based on given path

    Appends "-1", "-2", ... before the extension until a candidate
    path that does not exist yet is found.

    @type path: str
    """
    stem, extension = os.path.splitext(path)
    suffix = 0
    while True:
        suffix += 1
        candidate = "{}-{}{}".format(stem, suffix, extension)
        if not os.path.exists(candidate):
            return candidate
def convert_snake_case(target):
    """Convert a string to snake case.

    :param target: string to convert
    :type target: str
    :return: lower-cased string with spaces replaced by underscores
    :rtype: str
    """
    return target.replace(' ', '_').lower()
def is_feature_extractor_model(model_config):
    """
    If the model is a feature extractor model:
        - evaluation model is on
        - trunk is frozen
        - number of features specified for features extraction > 0
    """
    settings = model_config.FEATURE_EVAL_SETTINGS
    # Short-circuit order matches the original: eval mode, then frozen
    # trunk, then a non-empty feature-extraction map.
    return (
        settings.EVAL_MODE_ON
        and (settings.FREEZE_TRUNK_ONLY or settings.FREEZE_TRUNK_AND_HEAD)
        and len(settings.LINEAR_EVAL_FEAT_POOL_OPS_MAP) > 0
    )
from typing import Set
import os
def get_allowed_origins() -> Set[str]:
    """Read in comma-separated list of allowed origins from environment."""
    raw = os.getenv("ALLOWED_ORIGINS", default=None)
    if raw is None:
        return set()
    # Whitespace around each origin is stripped.
    return {origin.strip() for origin in raw.split(",")}
import collections
import numpy
def create_batches(dataset, max_batch_size):
    """
    Sort sentences by length and organise them into batches

    Each dataset entry is indexed as (word_ids, per-word char_ids,
    label_ids); all sentences in a batch share the same length, and
    char arrays are zero-padded to the longest word in the batch.
    Returns a list of (word_ids, char_ids, char_mask, label_ids,
    sentence_ids_in_batch) tuples.
    """
    # Group sentence indices by sentence length, preserving encounter order.
    sentence_ids_by_length = collections.OrderedDict()
    for i in range(len(dataset)):
        length = len(dataset[i][0])
        if length not in sentence_ids_by_length:
            sentence_ids_by_length[length] = []
        sentence_ids_by_length[length].append(i)
    batches = []
    for sentence_length in sentence_ids_by_length:
        # Slice each length group into chunks of at most max_batch_size.
        for i in range(0, len(sentence_ids_by_length[sentence_length]), max_batch_size):
            sentence_ids_in_batch = sentence_ids_by_length[sentence_length][i:i + max_batch_size]
            # Longest word (in characters) across the whole batch.
            max_word_length = numpy.array([[len(char_ids) for char_ids in dataset[sentence_id][1]] for sentence_id in sentence_ids_in_batch]).max()
            word_ids = numpy.zeros((len(sentence_ids_in_batch), sentence_length), dtype=numpy.int32)
            char_ids = numpy.zeros((len(sentence_ids_in_batch), sentence_length, max_word_length), dtype=numpy.int32)
            char_mask = numpy.zeros((len(sentence_ids_in_batch), sentence_length, max_word_length), dtype=numpy.int32)
            # Labels have two fewer entries than tokens — presumably the
            # sentence boundary markers carry no label; confirm with caller.
            label_ids = numpy.zeros((len(sentence_ids_in_batch), sentence_length-2), dtype=numpy.int32)
            # NOTE(review): this inner `i` shadows the chunk-start `i` above;
            # harmless because the outer value is not used again afterwards.
            for i in range(len(sentence_ids_in_batch)):
                for j in range(sentence_length):
                    word_ids[i][j] = dataset[sentence_ids_in_batch[i]][0][j]
                for j in range(sentence_length):
                    for k in range(len(dataset[sentence_ids_in_batch[i]][1][j])):
                        char_ids[i][j][k] = dataset[sentence_ids_in_batch[i]][1][j][k]
                        # Mask marks real characters (1) vs padding (0).
                        char_mask[i][j][k] = 1
                for j in range(sentence_length-2):
                    label_ids[i][j] = dataset[sentence_ids_in_batch[i]][2][j]
            batches.append((word_ids, char_ids, char_mask, label_ids, sentence_ids_in_batch))
    return batches | a7ff6bfd9406d6dea0495f2b241eaaeddc2082f8 | 43,517
def baseBart(article_to_summarize, model, tokenizer):
    """
    runs BART summarization

    params: model - from load_bart()
            tokenizer - from load_bart()
            article_to_summarize - text (string)
    return: generated abstractive summary (string)
    """
    encoded = tokenizer([article_to_summarize], max_length=1024,
                        truncation='do_not_truncate', return_tensors='pt')
    summary_ids = model.generate(encoded['input_ids'], num_beams=4,
                                 max_length=25, early_stopping=True)
    decoded = [tokenizer.decode(ids, skip_special_tokens=True,
                                clean_up_tokenization_spaces=False)
               for ids in summary_ids]
    return decoded[0]
def assign_bins_jit(number_agents, bin_locations, bins, bins_help, max_agents):
    """ This just-in-time compiled method performs the actual
        calculations for the assign_bins() method.
    """
    for agent_index in range(number_agents):
        # bin coordinates for this agent
        x, y, z = bin_locations[agent_index]
        # next free slot in that bin
        slot = bins_help[x][y][z]
        # only record the agent while the bin still has capacity
        if slot < max_agents:
            bins[x][y][z][slot] = agent_index
        # count the agent regardless of whether it was actually placed
        bins_help[x][y][z] += 1
    return bins, bins_help
import argparse
def parse_args():
    """
    Parse command line arguments.

    Returns:
        argparse.Namespace: parsed command-line options
    """
    p = argparse.ArgumentParser("Knockoff")
    p.add_argument("--batch_size", type=int, default=128,
                   help="The batch size of training and predict.")
    p.add_argument("--epochs", type=int, default=2,
                   help="The iterations of training for victim and adversary.")
    p.add_argument("--learning_rate", type=float, default=0.01,
                   help="The learning rate of training for victim and adversary.")
    p.add_argument("--num_queries", type=int, default=2000,
                   help="The number of queries allowed for adversary.")
    p.add_argument("--knockoff_net", type=str, default="linear",
                   choices=["linear", "resnet"],
                   help="The newwork for knockoff model, can be chosen from 'linear' and 'resnet'.")
    p.add_argument("--knockoff_dataset", type=str, default="mnist",
                   choices=["mnist", "fmnist"],
                   help="The dataset for training knockoff model, "
                        "can be chosen from 'mnist' (100% labels overlap) and 'fmnist' (0% labels overlap).")
    p.add_argument("--policy", type=str, default="random",
                   choices=["random", "adaptive"],
                   help="The policy for data sampling, can be chosen from 'random' and 'adaptive'.")
    p.add_argument("--reward", type=str, default="all",
                   choices=["certainty", "diversity", "loss", "all"],
                   help="The reward strategy for adaptive policy.")
    return p.parse_args()
def validate_database_uri(database_uri: str) -> bool:
    """Ensures that the database passed to it is not one of the untouchables.

    NOTE(review): currently a stub that accepts every URI unconditionally;
    the actual untouchable-database check still needs to be implemented.
    """
    # TODO: implement the untouchable-database check; always True for now.
    return True | e24e57c756bc427e2d3c6047640b9d7ec2f5b1a0 | 43,521
def create_friedman_line(point0, point1):
    """
    Determines the second point needed to form the Friedman line.

    :param point0: First point on glenoid line, anatomically defined as a
        point on the anterior margin of glenoid
    :param point1: Second point on glenoid line anatomically defined as a
        point on the posterior margin of glenoid
    :raises: Value Error if the z values of point0 and point1 are not equal
    :returns: The midpoint of the glenoid line, which is the second point of
        the Friedman line
    """
    if point0[2] != point1[2]:
        raise ValueError("For Friedman method points must have equal z values")
    # Midpoint of x and y; z is shared by both endpoints.
    return [(point0[0] + point1[0]) / 2,
            (point0[1] + point1[1]) / 2,
            point0[2]]
def int_to_signed_byte(x):
    """
    Converts the signed integer to a 2s-complement byte.
    """
    if x == 0:
        return 0
    if x > 0:
        return x & 0xff
    # Negative: flip the magnitude's low eight bits and add one
    # (two's complement).
    return ((-x) ^ 0xff) + 1
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.