| content (string, lengths 35–416k) | sha1 (string, length 40) | id (int64, 0–710k) |
|---|---|---|
import shutil
def get_archive_name_and_format_for_shutil(path):
    """Split an archive path into (base_name, format) for shutil.make_archive().

    e.g. returns ('/path/to/boot-img', 'gztar') for '/path/to/boot-img.tar.gz'.

    Raises:
        ValueError: if no registered unpack format matches the path's suffix.
    """
    for format_name, format_extensions, _ in shutil.get_unpack_formats():
        matches = [ext for ext in format_extensions if path.endswith(ext)]
        if matches:
            suffix = matches[0]
            return path[:-len(suffix)], format_name
    raise ValueError(f"Unsupported archive format: '{path}'")
|
152d68ea9613d7253f78c37ce85758a2c8bc67f9
| 700,162
|
def label_candidates_db(labeler, cids_query, label_functions, apply_existing=False):
    """
    Label candidates and place the annotations inside a database.

    Will be rarely used since snorkel metal doesn't use a database for
    annotations. Important to keep in case of a return towards snorkel's
    original database version.

    labeler - the labeler object
    cids_query - the query made for extracting candidate objects
    label_functions - a list of label functions used to generate annotations
    apply_existing - if True, re-apply the labeler's already-registered
        label functions instead of applying fresh ones
    """
    if apply_existing:
        # Bug fix: the keyword was misspelled 'parllelistm', which would raise
        # a TypeError (or vanish into **kwargs) instead of setting parallelism.
        return labeler.apply_existing(cids_query=cids_query, parallelism=5, clear=False)
    return labeler.apply(cids_query=cids_query, parallelism=5)
|
e62389370f377697446a72be5896157b3cd03ee7
| 700,163
|
import sys
def ToBytes(string):
    """Convert a str type into a bytes type.

    Args:
        string: string to convert
    Returns:
        Python 3: a bytes object (UTF-8 encoded)
        Python 2: the string unchanged
    """
    running_py3 = sys.version_info[0] >= 3
    return string.encode('utf-8') if running_py3 else string
|
3d84c928e91140b56a22d6e54b966a626b18d640
| 700,164
|
def getSampleType(name):
    """Given a sample name return the sample type.

    Returns one of 'data', 'signal', 'background', or '' (unknown).
    Background names are read from share/sampleNamesShort.txt, one per line.
    """
    # Use a context manager: the original leaked the open file handle.
    with open("share/sampleNamesShort.txt") as f:
        backgrounds = [line.rstrip("\n") for line in f]
    signal = ['TTS', 'BBS', 'TTD', 'BBD', 'XX', 'YY', 'zprime']
    if name == 'data':
        return 'data'
    if any(name.startswith(prefix) for prefix in signal):
        return 'signal'
    if name in backgrounds:
        return 'background'
    return ''
|
82055b2df095e1771f9bd616d6a04759ef16c7ef
| 700,165
|
def ped_liposarcoma():
    """Create pediatric liposarcoma fixture."""
    alias_names = [
        "Liposarcoma",
        "Pediatric Liposarcoma",
        "childhood liposarcoma",
        "liposarcoma",
        "pediatric liposarcoma",
    ]
    return dict(
        label_and_type="ncit:c8091##merger",
        concept_id="ncit:C8091",
        xrefs=["mondo:0003587", "DOID:5695"],
        label="Childhood Liposarcoma",
        aliases=alias_names,
        associated_with=["umls:C0279984"],
        pediatric_disease=True,
        item_type="merger",
    )
|
f69e480a449c9ecb8ceda3d8f14ec777afc769f3
| 700,166
|
def _prepare_float(data: str) -> str:
    """Normalize a numeric string by replacing decimal commas with dots.

    e.g. '1,5' -> '1.5'. (The previous docstring said "integer", but the
    comma-to-dot substitution targets float notation.)
    """
    return data.replace(",", ".")
|
ef786b391b6b510b95b52958d73efd7890d05b10
| 700,167
|
import hashlib
def generate_ext(content_type, body):
    """Implements the notion of the ext as described in
    http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-02#section-3.1

    Returns the SHA-256 hex digest of content_type + body when both are
    non-empty, otherwise the empty string.
    """
    if content_type is not None and body is not None and len(content_type) > 0 and len(body) > 0:
        content_type_plus_body = content_type + body
        # hashlib requires bytes: the original passed a str straight into
        # sha256(), which raises TypeError on Python 3.
        if isinstance(content_type_plus_body, str):
            content_type_plus_body = content_type_plus_body.encode('utf-8')
        return hashlib.sha256(content_type_plus_body).hexdigest()
    return ""
|
f4ee3845c68333b51c05ba2bba48e31ac4c989bd
| 700,168
|
def _format_headers(tabs, current_tab_number, line_length):
    """Formats just the tab portion if the config specifies a multi-tab menu

    Called from format_menu()

    Args:
        tabs (list of tab.Tab): list of Tab objects
        current_tab_number (int): number of currently selected tab (always 0 for single-tabbed menus)
        line_length (int): value from config

    Returns:
        (list of str) individual lines to be sent to stdout representing headers, and
        indicating the currently selected header

    NOTE(review): tabs elements are accessed as mappings (["tab_header_input"],
    .get) — presumably tab.Tab supports dict-style access; confirm.
    """
    current_line_length = 0
    # the text identifying all tabs
    top_text = []
    # the text indicating which tab is currently selected
    bottom_text = []
    # build the list of strings tab by tab
    for i, tab in enumerate(tabs):
        abbreviation = tab["tab_header_input"]
        description = tab.get("tab_header_description", None)
        if description is None:
            description = ""
        # use = for currently selected tab, - for other tabs
        if i == current_tab_number:
            bottom_char = "="
        else:
            bottom_char = "-"
        # spacer is only required between abbreviation and description if there is a description
        if description:
            spacer = ":"
        else:
            spacer = ""
        # [ to start first tab, | between tabs and ] to end last tab
        if i == 0:
            start = "["
        else:
            start = "|"
        if i == len(tabs) - 1:
            end = "]"
        else:
            end = ""
        new_top_entry = "{0}{1}{2}{3}{4}".format(start, abbreviation, spacer, description, end)
        # wrap: add a line return if the current line plus this entry would
        # exceed the maximum line length
        if current_line_length + len(new_top_entry) > line_length - 1:
            top_text.append("\n")
            bottom_text.append("\n")
            current_line_length = 0
        top_text.append(new_top_entry)
        # space below brackets or pipes in line above, - or = below text
        bottom_text.append(" " + bottom_char * (len(new_top_entry) - 1))
        current_line_length += len(new_top_entry)
    # take lists with individual line-break members and turn into list with splits at the line breaks
    top_text = "".join(top_text).split("\n")
    bottom_text = "".join(bottom_text).split("\n")
    # interleave top (labels) and bottom (selection indicator) lines
    total_text = []
    for top, bottom in zip(top_text, bottom_text):
        total_text.append(top)
        total_text.append(bottom)
    return total_text
|
d99d17790ff9580c2a13dd39876270a1b8b6d9d9
| 700,169
|
def counting_sort(values, max_value):
    """Sorts integers using the Counting Sort algorithm.

    Args:
        values: iterable of integers, each between 0 and max_value
        max_value: maximum value the numbers can take
    Returns:
        a new sorted list of the numbers
    """
    counts = [0] * (max_value + 1)
    for v in values:
        counts[v] += 1
    result = []
    for v, count in enumerate(counts):
        result.extend([v] * count)
    return result
|
fccf1b91bb2c300d22e316057b11dab3bb0ee86f
| 700,170
|
from typing import Optional
def convert_OBvalue(
    byte_string: bytes,
    is_little_endian: bool,
    struct_format: Optional[str] = None
) -> bytes:
    """Return an encoded 'OB' value as :class:`bytes`.

    OB (Other Byte) data is endian-independent, so the value passes
    through unchanged; is_little_endian and struct_format are ignored.
    """
    return byte_string
|
aebd92207aedbac0dc0c4cfac3e92ac099c5d640
| 700,172
|
def find_missing_integer(lst):
    """Returns the first missing integer in an ordered list.

    If no integer is missing, returns the next integer after the maximum.
    """
    try:
        return sorted(set(range(lst[0], lst[-1])) - set(lst))[0]
    except IndexError:
        # Narrowed from a bare `except`: only the empty-difference case
        # (no gap) should fall through to max+1; other errors now propagate.
        return max(lst) + 1
|
1e8f25f1670933cf57ae042742c175aac7d905fb
| 700,173
|
from typing import Dict
from typing import Any
def is_deprecated(property_dict: Dict[str, Any]) -> bool:
    """Test stub: report whether a property is deprecated (always False).

    Deliberately does not inspect the description field.
    """
    deprecated = False
    return deprecated
|
2c65c4ead0ba216d26257b3622b074e840f107c8
| 700,174
|
import logging
def parse_csv_data(csv_filename: str) -> list:
    """Takes in a csv filename and returns a list with each item being a new line of the file.

    :param csv_filename: The name of a csv filename, '.csv' appendix is optional
    :type csv_filename: str
    :return: A list of strings with each item being a single line of the csv file
    :rtype: list
    """
    logging.info("starting parse_csv_data function")
    filename = str(csv_filename)  # Makes sure input is a string
    # Append the '.csv' appendix if it is missing.
    if filename[-4:] != ".csv":
        filename += ".csv"
    with open(filename, "r", encoding='utf-8') as data:
        rows = data.read().splitlines()
        logging.info("parse_csv_data function finished")
        # The first whitespace-delimited token of each row.
        return [row.split()[0] for row in rows]
|
ef388507534b6e7e1b82cf5e5f0036e9dd5819dd
| 700,175
|
import os
def _is_win() -> bool:
"""
実行環境がWindowsかどうかを判定します
:return: True Windows, False Windows以外
"""
return os.name == 'nt'
|
f46e13641004cccea4ae4692ca0f8fbcb797cf32
| 700,176
|
def sort_return_tuples(response, **options):
    """
    If ``groups`` is specified, return the response as a list of
    n-element tuples with n being the value found in options['groups'];
    otherwise return the response unchanged.
    """
    group_size = options.get("groups")
    if not response or not group_size:
        return response
    columns = (response[offset::group_size] for offset in range(group_size))
    return list(zip(*columns))
|
14b49449d8fda6050bf4223365ba0f93918fe58a
| 700,177
|
def _common_prefix(string_list):
"""
Given a list of pathnames, returns the longest common leading component
"""
if not string_list:
return ""
min_str = min(string_list)
max_str = max(string_list)
for i, c in enumerate(min_str):
if c != max_str[i]:
return min_str[:i]
return min_str
|
4360e712c6c4d3d650a226c1fe7f3a4941861513
| 700,178
|
def qualify(func: object) -> str:
    """Return the fully qualified dotted name of *func* (module.qualname)."""
    return f"{func.__module__}.{func.__qualname__}"
|
bfda7050ff94f407a2a0d4b00b87ecb0370e9110
| 700,179
|
import glob
import os
import re
def get_fit_output_files(models_dir, model_pattern):
    """Get fit output files matching pattern.

    Scans models_dir, keeps paths matching model_pattern, and groups them
    by the pattern's first capture group; each group's list is sorted.
    """
    regex = re.compile(model_pattern)
    grouped = {}
    for path in sorted(glob.glob(os.path.join(models_dir, '*'))):
        hit = regex.search(path)
        if hit is None:
            continue
        grouped.setdefault(hit[1], []).append(path)
    return {outcome: sorted(paths) for outcome, paths in grouped.items()}
|
5832eb5d050eae3fad3e8f710f481181c34017a8
| 700,180
|
def dimension(dim: float, tol: int = 0, step: float = 0.4) -> float:
    """
    Round *dim* down to the nearest multiple of *step*, then add *tol*
    additional steps as a tolerance to accommodate for shrinking.
    """
    # Nudge upward slightly so values sitting exactly on a multiple of
    # step are not pushed down by floating-point remainder noise.
    nudged = dim + 1e-10
    whole_steps = nudged // step
    return whole_steps * step + tol * step
|
a63b84bbc73d25da1c86c9919f61bd32071d92f9
| 700,181
|
import json
def get_chosen_file_path(file_path='dataset/json/directory.json', key=None):
    """
    This function returns the name of file which is selected from list of
    default methods.

    :param file_path: path to file from where the filename should be read
        against a given key.
    :param key: key for which the data must be read.
    :return: path to the file (relative to app directory tree), or None if
        the file is missing, unparsable, or the key/field is absent.
    """
    try:
        with open(file_path) as data_file:
            data = json.load(data_file)
        return data[key]['filePath']
    # Narrowed from a bare `except Exception`: only the expected failure
    # modes (missing file, bad JSON, missing key, non-dict entry) map to
    # None; genuine programming errors now propagate.
    except (OSError, ValueError, KeyError, TypeError):
        return None
|
54336abd282f63e717c17c19b0d7270b28d29ecf
| 700,182
|
def connect_db(path: str) -> str:
    """Build the sqlite3 connection URL for *path*."""
    return f'sqlite://{path}'
|
ace41ad5609d927a987a9ed2d18f7abd64439bb3
| 700,183
|
import json
def read_prediction_tokens(pred_file):
    """
    Read the tokens from the tagger's JSON-lines output file.

    Returns: a list of strings (the concatenated "words" of every line).
    """
    with open(pred_file, encoding="utf-8") as f:
        records = [json.loads(line) for line in f]
    tokens = []
    for record in records:
        tokens += record["words"]
    return tokens
|
a8da9ff58a83db4df628f1f98b33133ff94805e0
| 700,184
|
def cli(ctx, name="", deleted=False, slug=""):
    """Get all published histories (by any user), or select a subset by specifying optional arguments for filtering (e.g. a history name).

    Output:
        List of history dicts.
    """
    histories = ctx.gi.histories
    return histories.get_published_histories(name=name, deleted=deleted, slug=slug)
|
3a0eca578e105f005d80380043296c9a47ace6a8
| 700,185
|
import os
def parse_error(output_dir):
    """Add contents of eplusout.err and put it in the exception message.

    :param output_dir: str, directory that may contain eplusout.err
    :return: str message embedding the error-file path and its contents
        (or '<File not found>' when the file does not exist)
    """
    err_file = os.path.join(output_dir, "eplusout.err")
    if os.path.isfile(err_file):
        with open(err_file, "r") as f:
            ep_err = f.read()
    else:
        ep_err = "<File not found>"
    # Pass values explicitly rather than via **locals(): the locals() form
    # silently depends on variable names and breaks under refactoring.
    return "\r\nContents of EnergyPlus error file at {err_file}\r\n{ep_err}".format(
        err_file=err_file, ep_err=ep_err
    )
|
3ed08db4162d0a64909e65ce7942110406aad808
| 700,186
|
import numpy
def get_coverage_from_cost(spending, inflection_cost, saturation, unit_cost, popsize, alpha=1.):
    """
    Estimate the coverage associated with a spending in a program via a
    logistic cost-coverage curve.

    Args:
        spending: The amount of money allocated to a program (absolute value, not a proportion of all funding)
        inflection_cost: Cost at which inflection occurs on the curve (also the point of maximal efficiency)
        saturation: Maximal possible coverage, i.e. upper asymptote of the logistic curve
        unit_cost: Unit cost of the intervention
        popsize: Size of the population targeted by the intervention
        alpha: Steepness parameter determining the curve's shape
    Returns:
        The proportional coverage of the intervention given the spending
    """
    # Below the inflection cost the start-up spending needed for any coverage
    # has not been reached; degenerate inputs also yield zero coverage.
    no_coverage = (popsize == 0. or unit_cost == 0. or spending == 0.
                   or spending <= inflection_cost)
    if no_coverage:
        return 0.
    a = saturation / (1. - 2. ** alpha)
    b = 2. ** (alpha + 1.) / (alpha * (saturation - a) * unit_cost * popsize)
    return a + (saturation - a) / ((1. + numpy.exp((-b) * (spending - inflection_cost))) ** alpha)
|
81ea2c2e2067be8237de3f5f6a134c6c5f1aafee
| 700,187
|
def shortid(obsid):
    """
    Compact format for the observation id, like QPT.

    Parameters
    ----------
    obsid : string
        Program id string

    Returns
    -------
    shortid : string
        Compact format
    """
    parts = obsid.split('-')
    return '{}{}-{}-{}[{}]'.format(parts[0][-1], parts[1][2:], parts[2], parts[3], parts[4])
|
cb163886e7612fa46d016f2037b110526967c61f
| 700,190
|
from typing import Counter
def find_children(dancing_brigade):
    """
    Group equal letters case-insensitively, in sorted order, with the first
    letter of each group capitalized: "aAbaBb" => "AaaBbb".
    """
    counts = Counter(sorted(ch.lower() for ch in dancing_brigade))
    pieces = [(letter * total).title() for letter, total in counts.items()]
    return ''.join(pieces)
|
81d7dd9661fc73e6fcb24bc0e5318e298c41c3e5
| 700,191
|
import struct
def read_fmt(fmt, fp):
    """
    Reads data from ``fp`` according to ``fmt`` (big-endian).

    On a short read the stream is rewound past the bytes consumed and
    AssertionError is raised — same contract as before, but no longer via
    an ``assert`` statement, which is stripped under ``python -O``.
    """
    fmt = str(">" + fmt)
    fmt_size = struct.calcsize(fmt)
    data = fp.read(fmt_size)
    if len(data) != fmt_size:
        # Rewind so the caller can retry or inspect the stream position.
        fp.seek(-len(data), 1)
        raise AssertionError('read=%d, expected=%d' % (len(data), fmt_size))
    return struct.unpack(fmt, data)
|
3bcee164ca137d64f0a97621f6b3917b7ad60899
| 700,192
|
def fib_g(n):
    """
    Return the nth Fibonacci number using Binet's golden-ratio formula:
    fibonacci = (phi**n) / 5**0.5, rounded to the nearest integer.

    Uses the exact golden ratio (1 + sqrt(5)) / 2 rather than the truncated
    constant 1.618033: the truncation error compounds as phi**n and starts
    rounding to the wrong Fibonacci number around n = 28.
    """
    phi = (1 + 5 ** 0.5) / 2
    return round((phi ** n) / (5 ** 0.5))
|
062cd16968eb34cc5135cd79120ea16fd81f486a
| 700,193
|
import re
def columns_to_add(column_names):
    """Subsets the list for elements that contain 'mean' and renames them
    so they aren't called mean; these will be the new names for columns
    to be simulated probabilistically.

    Inputs:
        column_names - a list of parameter names / df indexes
    Returns:
        new_columns - a list of names of columns to be created
    """
    with_mean = (name for name in column_names if re.search('mean', name))
    return [re.sub('_mean', '', name) for name in with_mean]
|
227dc85dab3fe35f999e416a44208a37d7e77665
| 700,194
|
def get_email_password():
    """Helper function returning the (default email, password) tuple."""
    return 'test@greatsoft.uz', 'password1234'
|
80fe300fa0e7c316dc484b2d8ed2cb031d0215f1
| 700,195
|
from typing import OrderedDict
def job_prep_release_status_list_table_format(result):
    """Format job prep-release-status list as a table (list of row dicts)."""
    rows = []
    for item in result:
        prep_info = item['jobPreparationTaskExecutionInfo']
        release_info = item['jobReleaseTaskExecutionInfo']
        row = OrderedDict()
        row['Pool Id'] = item['poolId']
        row['Node Id'] = item['nodeId']
        # Missing execution info renders as an empty cell.
        row['Job Prep State'] = prep_info['state'] if prep_info else ""
        row['Job Release State'] = release_info['state'] if release_info else ""
        rows.append(row)
    return rows
|
5b93ade505b166cd45539bfde49e8f51f2ffb9fb
| 700,198
|
def rinko_p_prime(N, t, A, B, C, D, E, F, G, H):
    """
    Per RinkoIII manual: 'The film sensing the water is affect by environment
    temperature and pressure at the depth where it is deployed. Based on experiments,
    an empirical algorithm as following is used to correct data dissolved oxygen.'

    Parameters
    ----------
    N : array-like
        Raw instrument output
    t : array-like
        Temperature [degC]
    A-H : float
        Calibration parameters (E, G and H are unused by this correction)
    """
    # Shared temperature-compensation factor.
    temp_term = 1 + D * (t - 25)
    return A / temp_term + B / ((N - F) * temp_term + C + F)
|
482f2286819af3d147cde4dd258c36c04624a6e8
| 700,199
|
def _index(i, size, Cartesian=True):
"""If Cartesian=True, index 0 is swapped with index 1."""
if Cartesian:
if i == 1:
return 0
if i == 0:
if size >= 2:
return 1
return i
|
ace0ff4431b64b545f2857eddce85d555a0cf5f3
| 700,200
|
def get_element(dict, key, default=None):
    """Return dict[key] if the key is present, otherwise the default value.

    Note: the parameter shadows the builtin ``dict``; the name is kept so
    keyword callers are not broken. Assumes a standard mapping with .get
    (the original used `key in dict` + subscript, i.e. two lookups).
    """
    return dict.get(key, default)
|
ac3cc2ea1ff38a42c0e674fee56a43bcb2b71660
| 700,201
|
def nmap(value, fr=(0, 1), to=(0, 1)):
    """
    Map a value from a two-value interval into another two-value interval.

    Both intervals are `(0, 1)` by default. Values outside the `fr` interval
    are still mapped proportionately (linear extrapolation).
    """
    fraction = (value - fr[0]) / (fr[1] - fr[0])
    return to[0] + fraction * (to[1] - to[0])
|
d7968d7661c2535f5c820087b79b7a8e3667e8e8
| 700,202
|
def get_best_outputs(problem_dir, problem, user):
    """
    Gets outputs of best submission.

    :param problem_dir: main directory of submissions
    :param problem: id of problem
    :param user: user who wants to see submission for problem
    :return: -1 if no file found, otherwise array of best outputs
    """
    outputs = []
    try:
        for i in range(10):
            outputs.append('')
            path = '{0}/best_out/out_{1}_{2}_{3}'.format(problem_dir, problem, user, i)
            # with-statement: the original leaked the handle if reading raised.
            with open(path) as f:
                for line in f:
                    outputs[i] += line.strip()
    except IOError:
        return -1
    return outputs
|
a012b849ec76056067a75a71d80ea2911b2b91fc
| 700,203
|
import torch
def bmv(mat, vec):
    """Batched matrix-vector product: (B, i, j) x (B, j) -> (B, i)."""
    return torch.bmm(mat, vec.unsqueeze(-1)).squeeze(-1)
|
e6f2d95a0aec5239eaa96fe0a6dd371216eb741b
| 700,205
|
import time
def generate_hash():
    """
    Generate the attendance token: an integer in [0, 100000000).

    The original derived the token from hash(time.time()), which is
    predictable from the clock; for a security-relevant token use the
    secrets module instead. The return range is unchanged.
    """
    import secrets  # local import keeps this snippet self-contained
    return secrets.randbelow(100000000)
|
2825549e551e574de5292be1f7b73bc4e180f748
| 700,206
|
def computeFraction(feature_1, feature_2):
    """
    Parameters:
        Two numeric feature values for which we want to compute a ratio
    Output:
        Return fraction or ratio of feature_1 divided by feature_2.
        Returns 0.0 when either feature is the string "NaN" or when the
        denominator is zero (previously raised ZeroDivisionError).
    """
    if feature_1 == "NaN" or feature_2 == "NaN":
        return 0.0
    # Guard the division: treat a zero denominator like a missing value.
    if float(feature_2) == 0.0:
        return 0.0
    # Note: feature_1 is truncated via int(), matching the original behavior.
    return int(feature_1) / float(feature_2)
|
fbce06ab1fea604a3c0e4f0e427dd6560acf80fe
| 700,208
|
def isLNM(filename):
    """
    Checks whether a file is ASCII Laser-Niederschlags-Monitor file (Thies).

    Returns True iff the file is readable as text and its first line starts
    with '# LNM '. Unreadable or binary files simply return False.
    """
    try:
        # with-statement: the original never closed the file handle, and
        # used bare excepts that could mask real errors.
        with open(filename, 'rt') as fh:
            first_line = fh.readline()
    except (OSError, UnicodeDecodeError, ValueError):
        return False
    return first_line.startswith('# LNM ')
|
a94bc44910ed8924f7e0332ef28155634b2eb7ce
| 700,209
|
def _update(dOld, dNew, dType):
    """Function to update the 'prevState' in dOld with the 'prevState' in dNew.

    Unfortunately this is really similiar to '_rotate', but I didn't want to
    jam a square peg in a round hole.

    Expects dOld and dNew to be nested dicts shaped like
    {element: {'phases': {phase: {'prevState': ..., 'newState': ...}}}}
    sharing the same element/phase keys (assumed — TODO confirm with callers).

    dType: 'reg' accumulates |prevState - newState| per phase (tap distance);
           'cap' counts phases whose state changed.
    Returns (dOld, c): dOld mutated in place, plus the accumulated count.
    """
    c = 0
    # Loop through dictionary
    for _d in dOld:
        # Loop through phases of each element
        for p in dOld[_d]['phases']:
            # Update dOld with value from dNew
            dOld[_d]['phases'][p]['prevState'] = \
                dNew[_d]['phases'][p]['prevState']
            # Update count
            if dType == 'reg':
                # previous position minus new position
                c += abs(dOld[_d]['phases'][p]['prevState']
                         - dOld[_d]['phases'][p]['newState'])
            elif dType == 'cap':
                if (dOld[_d]['phases'][p]['prevState']
                        != dOld[_d]['phases'][p]['newState']):
                    c += 1
            else:
                assert False, 'dType must be reg or cap'
    return dOld, c
|
7be61238e7338c591b094dd9e4e346f9dc836f44
| 700,210
|
def publication_email_article_do_not_send_list():
    """
    Return the list of article DOI ids (zero-padded 5-digit strings) for
    which a publication email must NOT be sent.

    Pure data; kept as a hard-coded literal.
    """
    do_not_send_list = [
        "00003", "00005", "00007", "00011", "00012", "00013", "00031", "00036", "00047", "00048",
        "00049", "00051", "00065", "00067", "00068", "00070", "00078", "00090", "00093", "00102",
        "00105", "00109", "00116", "00117", "00133", "00160", "00170", "00171", "00173", "00178",
        "00181", "00183", "00184", "00190", "00205", "00218", "00220", "00230", "00231", "00240",
        "00242", "00243", "00247", "00248", "00260", "00269", "00270", "00278", "00281", "00286",
        "00288", "00290", "00291", "00299", "00301", "00302", "00306", "00308", "00311", "00312",
        "00321", "00324", "00326", "00327", "00329", "00333", "00334", "00336", "00337", "00340",
        "00347", "00348", "00351", "00352", "00353", "00354", "00358", "00362", "00365", "00367",
        "00378", "00380", "00385", "00386", "00387", "00400", "00411", "00415", "00421", "00422",
        "00425", "00426", "00429", "00435", "00444", "00450", "00452", "00458", "00459", "00461",
        "00467", "00471", "00473", "00475", "00476", "00477", "00481", "00482", "00488", "00491",
        "00498", "00499", "00505", "00508", "00515", "00518", "00522", "00523", "00533", "00534",
        "00537", "00542", "00558", "00563", "00565", "00569", "00571", "00572", "00573", "00577",
        "00590", "00592", "00593", "00594", "00603", "00605", "00615", "00625", "00626", "00631",
        "00632", "00633", "00638", "00639", "00640", "00641", "00642", "00646", "00647", "00648",
        "00654", "00655", "00658", "00659", "00662", "00663", "00666", "00668", "00669", "00672",
        "00675", "00676", "00683", "00691", "00692", "00699", "00704", "00708", "00710", "00712",
        "00723", "00726", "00729", "00731", "00736", "00744", "00745", "00747", "00750", "00757",
        "00759", "00762", "00767", "00768", "00772", "00776", "00778", "00780", "00782", "00785",
        "00790", "00791", "00792", "00799", "00800", "00801", "00802", "00804", "00806", "00808",
        "00813", "00822", "00824", "00825", "00828", "00829", "00842", "00844", "00845", "00855",
        "00856", "00857", "00861", "00862", "00863", "00866", "00868", "00873", "00882", "00884",
        "00886", "00895", "00899", "00903", "00905", "00914", "00924", "00926", "00932", "00933",
        "00940", "00943", "00947", "00948", "00951", "00953", "00954", "00958", "00960", "00961",
        "00963", "00966", "00967", "00969", "00971", "00983", "00992", "00994", "00996", "00999",
        "01004", "01008", "01009", "01020", "01029", "01030", "01042", "01045", "01061", "01064",
        "01067", "01071", "01074", "01084", "01085", "01086", "01089", "01096", "01098", "01102",
        "01104", "01108", "01114", "01115", "01119", "01120", "01123", "01127", "01133", "01135",
        "01136", "01138", "01139", "01140", "01149", "01157", "01159", "01160", "01169", "01179",
        "01180", "01197", "01201", "01202", "01206", "01211", "01213", "01214", "01221", "01222",
        "01228", "01229", "01233", "01234", "01236", "01239", "01252", "01256", "01257", "01267",
        "01270", "01273", "01279", "01287", "01289", "01291", "01293", "01294", "01295", "01296",
        "01298", "01299", "01305", "01308", "01310", "01311", "01312", "01319", "01322", "01323",
        "01326", "01328", "01339", "01340", "01341", "01345", "01350", "01355", "01369", "01370",
        "01374", "01381", "01385", "01386", "01387", "01388", "01402", "01403", "01412", "01414",
        "01426", "01428", "01433", "01434", "01438", "01439", "01440", "01456", "01457", "01460",
        "01462", "01465", "01469", "01473", "01479", "01481", "01482", "01483", "01488", "01489",
        "01494", "01496", "01498", "01501", "01503", "01514", "01515", "01516", "01519", "01524",
        "01530", "01535", "01539", "01541", "01557", "01561", "01566", "01567", "01569", "01574",
        "01579", "01581", "01584", "01587", "01596", "01597", "01599", "01603", "01604", "01605",
        "01607", "01608", "01610", "01612", "01621", "01623", "01630", "01632", "01633", "01637",
        "01641", "01658", "01659", "01662", "01663", "01671", "01680", "01681", "01684", "01694",
        "01695", "01699", "01700", "01710", "01715", "01724", "01730", "01738", "01739", "01741",
        "01749", "01751", "01754", "01760", "01763", "01775", "01776", "01779", "01808", "01809",
        "01812", "01816", "01817", "01820", "01828", "01831", "01832", "01833", "01834", "01839",
        "01845", "01846", "01849", "01856", "01857", "01861", "01867", "01873", "01879", "01883",
        "01888", "01892", "01893", "01901", "01906", "01911", "01913", "01914", "01916", "01917",
        "01926", "01928", "01936", "01939", "01944", "01948", "01949", "01958", "01963", "01964",
        "01967", "01968", "01977", "01979", "01982", "01990", "01993", "01998", "02001", "02008",
        "02009", "02020", "02024", "02025", "02028", "02030", "02040", "02041", "02042", "02043",
        "02046", "02053", "02057", "02061", "02062", "02069", "02076", "02077", "02078", "02087",
        "02088", "02094", "02104", "02105", "02109", "02112", "02115", "02130", "02131", "02137",
        "02148", "02151", "02152", "02164", "02171", "02172", "02181", "02184", "02189", "02190",
        "02196", "02199", "02200", "02203", "02206", "02208", "02217", "02218", "02224", "02230",
        "02236", "02238", "02242", "02245", "02252", "02257", "02260", "02265", "02270", "02272",
        "02273", "02277", "02283", "02286", "02289", "02304", "02313", "02322", "02324", "02349",
        "02362", "02365", "02369", "02370", "02372", "02375", "02384", "02386", "02387", "02391",
        "02394", "02395", "02397", "02403", "02407", "02409", "02419", "02439", "02440", "02443",
        "02444", "02445", "02450", "02451", "02475", "02478", "02481", "02482", "02490", "02501",
        "02504", "02510", "02511", "02515", "02516", "02517", "02523", "02525", "02531", "02535",
        "02536", "02555", "02557", "02559", "02564", "02565", "02576", "02583", "02589", "02590",
        "02598", "02615", "02618", "02619", "02626", "02630", "02634", "02637", "02641", "02653",
        "02658", "02663", "02667", "02669", "02670", "02671", "02674", "02676", "02678", "02687",
        "02715", "02725", "02726", "02730", "02734", "02736", "02740", "02743", "02747", "02750",
        "02755", "02758", "02763", "02772", "02777", "02780", "02784", "02786", "02791", "02792",
        "02798", "02805", "02809", "02811", "02812", "02813", "02833", "02839", "02840", "02844",
        "02848", "02851", "02854", "02860", "02862", "02863", "02866", "02872", "02875", "02882",
        "02893", "02897", "02904", "02907", "02910", "02917", "02923", "02935", "02938", "02945",
        "02949", "02950", "02951", "02956", "02963", "02964", "02975", "02978", "02981", "02993",
        "02996", "02999", "03005", "03007", "03011", "03023", "03025", "03031", "03032", "03035",
        "03043", "03058", "03061", "03068", "03069", "03075", "03077", "03080", "03083", "03091",
        "03100", "03104", "03110", "03115", "03116", "03125", "03126", "03128", "03145", "03146",
        "03159", "03164", "03176", "03178", "03180", "03185", "03191", "03197", "03198", "03205",
        "03206", "03222", "03229", "03233", "03235", "03239", "03245", "03251", "03254", "03255",
        "03271", "03273", "03275", "03282", "03285", "03293", "03297", "03300", "03307", "03311",
        "03318", "03342", "03346", "03348", "03351", "03357", "03363", "03371", "03372", "03374",
        "03375", "03383", "03385", "03397", "03398", "03399", "03401", "03405", "03406", "03416",
        "03421", "03422", "03427", "03430", "03433", "03435", "03440", "03443", "03464", "03467",
        "03468", "03473", "03475", "03476", "03487", "03496", "03497", "03498", "03502", "03504",
        "03521", "03522", "03523", "03526", "03528", "03532", "03542", "03545", "03549", "03553",
        "03558", "03563", "03564", "03568", "03573", "03574", "03575", "03579", "03581", "03582",
        "03583", "03587", "03596", "03600", "03602", "03604", "03606", "03609", "03613", "03626",
        "03635", "03638", "03640", "03641", "03648", "03650", "03653", "03656", "03658", "03663",
        "03665", "03671", "03674", "03676", "03678", "03679", "03680", "03683", "03695", "03696",
        "03697", "03701", "03702", "03703", "03706", "03711", "03714", "03720", "03722", "03724",
        "03726", "03727", "03728", "03735", "03737", "03743", "03751", "03753", "03754", "03756",
        "03764", "03765", "03766", "03772", "03778", "03779", "03781", "03785", "03790", "03804",
        "03811", "03819", "03821", "03830", "03842", "03848", "03851", "03868", "03881", "03883",
        "03891", "03892", "03895", "03896", "03908", "03915", "03925", "03939", "03941", "03943",
        "03949", "03952", "03962", "03970", "03971", "03977", "03978", "03980", "03981", "03997",
        "04000", "04006", "04008", "04014", "04024", "04034", "04037", "04040", "04046", "04047",
        "04057", "04059", "04066", "04069", "04070", "04094", "04105", "04106", "04111", "04114",
        "04120", "04121", "04123", "04126", "04132", "04135", "04137", "04147", "04158", "04165",
        "04168", "04177", "04180", "04187", "04193", "04205", "04207", "04220", "04234", "04235",
        "04236", "04246", "04247", "04249", "04251", "04263", "04265", "04266", "04273", "04279",
        "04287", "04288", "04300", "04316", "04333", "04353", "04363", "04366", "04371", "04378",
        "04380", "04387", "04389", "04390", "04395", "04402", "04406", "04415", "04418", "04433",
        "04437", "04449", "04476", "04478", "04489", "04491", "04494", "04499", "04501", "04506",
        "04517", "04525", "04530", "04531", "04534", "04543", "04551", "04553", "04563", "04565",
        "04577", "04580", "04581", "04586", "04591", "04600", "04601", "04603", "04605", "04617",
        "04629", "04630", "04631", "04645", "04660", "04664", "04686", "04692", "04693", "04711",
        "04729", "04741", "04742", "04766", "04775", "04779", "04785", "04801", "04806", "04811",
        "04851", "04854", "04869", "04875", "04876", "04878", "04885", "04889", "04901", "04902",
        "04909", "04919", "04969", "04970", "04986", "04995", "04996", "04997", "04998", "05000",
        "05007", "05025", "05031", "05033", "05041", "05048", "05055", "05060", "05075", "05087",
        "05105", "05115", "05116", "05125", "05151", "05161", "05169", "05178", "05179", "05198",
        "05216", "05218", "05244", "05256", "05259", "05269", "05289", "05290", "05334", "05352",
        "05375", "05377", "05394", "05401", "05418", "05419", "05422", "05427", "05438", "05490",
        "05504", "05508", "05553", "05558", "05564", "05570", "05580", "05597", "05614", "05657",
        "05663", "05720", "05770", "05787", "05789", "05816", "05846", "05896", "05983", "06156",
        "06193", "06200", "06235", "06303", "06306", "06351", "06424", "06430", "06453", "06494",
        "06656", "06720", "06740", "06900", "06986"]
    # More do not send circa July 2015
    # Do not send email if they are revised, since the duplicate check will not
    # trigger since they were not sent in the first place
    do_not_send_list = do_not_send_list + ["04186", "06416", "06847", "06938", "06959", "07072"]
    return do_not_send_list
|
30b813490c3ccd076fc59dd2090c9861f3ab495f
| 700,211
|
import base64
import json
def encode_transaction(value):
    """Encode a transaction (dict) to a Base64 string of its JSON form."""
    as_json = json.dumps(value).encode('utf8')
    return base64.b64encode(as_json).decode('utf8')
|
066fa737b9c2d474be500bf2006ce43adea8d4f8
| 700,212
|
def residual_transformation(
    model,
    blob_in,
    dim_in,
    dim_out,
    stride,
    prefix,
    dim_inner,
    dilation=1,
    group=1,
):
    """Add a bottleneck transformation to the model.

    NOTE(review): despite the docstring, this stacks two 3x3 convs (a
    "basic" residual transform, not a bottleneck), and dim_inner is never
    used — confirm intended. `model` is presumably a Caffe2/Detectron-style
    network builder exposing ConvAffine/Relu; verify against callers.
    Returns the output blob of the second conv branch.
    """
    # weight_init = None
    # weight_init = ('XavierFill', {})
    weight_init = ("MSRAFill", {})
    # conv 3x3 -> BN -> ReLU (stride applied here, on the first conv)
    cur = model.ConvAffine(
        blob_in,
        prefix + '_branch2a',
        dim_in,
        dim_out,
        kernel=3,
        stride=stride,
        pad=1 * dilation,
        dilation=dilation,
        group=group,
        inplace=True,
        weight_init=weight_init,
    )
    cur = model.Relu(cur, cur)
    # conv 3x3 -> BN -> ReLU (no activation here; the residual add follows)
    cur = model.ConvAffine(
        cur,
        prefix + '_branch2b',
        dim_out,
        dim_out,
        kernel=3,
        stride=1,
        pad=1 * dilation,
        dilation=dilation,
        group=group,
        inplace=False,
        weight_init=weight_init,
    )
    return cur
|
2fc21653e4787b7732910d466005ff67c6c61622
| 700,213
|
import random
def shuffle_sequence(sequence):
    """Shuffle sequence.

    Parameters
    ----------
    sequence : str
        Sequence to shuffle.

    Returns
    -------
    str
        Shuffled sequence (same characters, random order).
    """
    chars = list(sequence)
    random.shuffle(chars)
    return "".join(chars)
|
1acb94516a6ed491359538f2016a22fc6d613499
| 700,214
|
def enable_cloud_admin_access(session, confirm, return_type=None, **kwargs):
    """
    Enables storage-cloud-administrator access to this VPSA's GUI for
    troubleshooting.  No volume data is exposed.  Enabled by default.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object.  Required.

    :type confirm: bool
    :param confirm: Must be True for the call to proceed; a safeguard
        since the function needs no other arguments.

    :type return_type: str
    :param return_type: If 'json', a JSON string is returned; otherwise
        a Python dictionary.  Optional.

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    if not confirm:
        raise ValueError('The confirm parameter is not set to True - '
                         'cloud admin access will not be enabled.')

    return session.post_api(path='/api/users/admin_access/enable.json',
                            return_type=return_type, **kwargs)
|
5444ba9f4c917a72c666908bcd4db3b8527d596c
| 700,215
|
import hashlib
def get_icon_hash(fp, fp_name):
    """Return the SHA-1 hex digest of a file's contents.

    Parameters
    ----------
    fp : str or bytes
        The file contents.  str input is UTF-8 encoded first; the
        original implementation raised TypeError on str because
        hashlib requires a bytes-like object.
    fp_name : str
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    str
        40-character hexadecimal SHA-1 digest.
    """
    if isinstance(fp, str):
        fp = fp.encode('utf-8')
    return hashlib.sha1(fp).hexdigest()
|
89ecd2292384cf255ecd41bd80006eafbbd13bd9
| 700,216
|
import math
def get_angle(p1, a1, p2) -> float:
    """
    Compute the relative angle (degrees, in (-180, 180]) the robot at
    point *p1* with heading *a1* must turn to face point *p2*.
    """
    heading_to_target = math.degrees(math.atan2(p2.y - p1.y, p2.x - p1.x))
    rel = heading_to_target - a1
    # Wrap into (-180, 180] so the robot always turns the short way.
    if rel > 180:
        rel -= 360
    elif rel < -180:
        rel += 360
    return rel
|
93ad2d8b1a8a98e1669f2b9c1553e507850f0384
| 700,217
|
from typing import Iterable
from typing import Mapping
from typing import Set
def is_ordered_sequence(caster):
    """Return True when *caster* is an ordered sequence of other casters.

    "Ordered" here means any iterable that is neither a mapping nor a
    set (those have no meaningful element order for this purpose).
    """
    if not isinstance(caster, Iterable):
        return False
    return not isinstance(caster, (Mapping, Set))
|
cbefae7359f112b5ab13f377b3c9552d286d14d0
| 700,218
|
import re
def parseQuestion(sentence, translate_language='Translate Here...'):
"""
parse sentence and return correct answer for each gap
"""
sentence = sentence.replace("{", "[").replace("}", "]")
gaps = re.findall(r"[^[]*\[([^]]*)\]", sentence)
first_option = gaps[0].split('|')[0]
# textbox = '<input type="text" id="txtTranslation" onchange="handleGapInput();" placeholder="{}">'.format(first_option.strip())
textbox = '<input type="text" class="task-form-control" id="txtTranslation" onchange="handleGapInput();" placeholder="{}" style="text-align:center;">'.format(translate_language)
mark_field = '<mark style="background-color: #337ab7; color:white;"> {} </mark>'.format(first_option.strip())
updated_field = mark_field + '-' + textbox
sentence=sentence.replace(gaps[0],updated_field).replace('[','').replace(']','')
return sentence
|
982a560bd15c00f49322242e970a8c366f0ec3fd
| 700,219
|
import torch
def align(src_tokens, tgt_tokens):
    """
    Build a byte mask marking positions where the two token sequences
    hold the same token.

    Returns:
        mask: ByteTensor of shape (src_len, tgt_len); mask[i][j] == 1
        iff src_tokens[i] == tgt_tokens[j].
    """
    mask = torch.ByteTensor(len(src_tokens), len(tgt_tokens)).fill_(0)
    for i, src_tok in enumerate(src_tokens):
        for j, tgt_tok in enumerate(tgt_tokens):
            if src_tok == tgt_tok:
                mask[i][j] = 1
    return mask
|
0408ae7148c4bed9e3c24b71acdbd1a182dd6e69
| 700,220
|
from typing import Any
def is_listy(x: Any) -> bool:
    """
    Return True when *x* is a list or tuple (idea grabbed from fast.ai).
    """
    listy_types = (tuple, list)
    return isinstance(x, listy_types)
|
331791b4d1e1f4047ab99b54a58441805bc71311
| 700,221
|
from pathlib import Path
from typing import Dict
import yaml
def _load_yaml_doc(path: Path) -> Dict:
    """Read and parse a single YAML document from *path*."""
    with open(path, "r") as src:
        return yaml.load(src, Loader=yaml.FullLoader)
|
4b049909c5e6eac6e7772b3311f928ccd6cf528c
| 700,222
|
def getFloat (Float):
    """
    Prompt repeatedly until the user enters a valid float.

    usage: x = getFloat ('mensage to display ')
    """
    while True:
        raw = input(Float)
        try:
            return float(raw)
        except ValueError:
            print('Use only numbers and separete decimals with point')
|
27d9128441cadd00627d88bbfdb45144bf5a55f3
| 700,224
|
import sys
def find_closest_frame_index(color_time, other_timestamps):
    """Find the (depth or NIR) frame closest in time to the current color frame.

    Parameters
    ----------
    color_time : int
        Timestamp [ms] of the current color frame
    other_timestamps : dict
        Maps frame index -> timestamp [ms] for the other stream

    Returns
    -------
    int
        Index of the closest frame, or -1 when the dict is empty.
    """
    if not other_timestamps:
        # Preserve the original sentinel for an empty timestamp dict.
        return -1
    # min() keeps the first index on ties, matching the original
    # strict-< scan over dict insertion order.
    return min(other_timestamps,
               key=lambda idx: abs(other_timestamps[idx] - color_time))
|
67cc6f8d4a56057105e820546d5e8720ea2d8786
| 700,225
|
import warnings
def weight_list(spam, weights, warn=True):
    """Multiply each element of *spam* (in place) by the matching weight.

    Args:
        spam (list): list to multiply with weights
        weights (list): weights to multiply the respective items with
        warn (bool): if True, emit a RuntimeWarning on a length
            mismatch instead of raising an error

    Returns:
        (list): the weighted list (the same, mutated, *spam* object)
    """
    if warn:
        if len(weights) > len(spam):
            warnings.warn("The list of weights is longer than the list, last weights are not used!!", RuntimeWarning)
        # BUG FIX: the original repeated the '>' comparison here, so the
        # "shorter" warning below could never fire.
        if len(weights) < len(spam):
            warnings.warn("The list of weights is shorter than the list, last items are not weighted!!", RuntimeWarning)
    try:
        for index in range(len(spam)):
            spam[index] = float(spam[index]) * float(weights[index])
    except IndexError:
        # Fewer weights than items: remaining items stay unweighted.
        pass
    return spam
|
6b2258675a5c346c50ecc8f7d8aba466a7b216ef
| 700,226
|
from typing import Callable
import click
def test_env_run_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    Decorator adding a --test-env/-te flag that makes the wrapped
    command run inside the integration-test environment.
    """
    decorator = click.option(
        '--test-env',
        '-te',
        is_flag=True,
        help=(
            'With this flag set, environment variables are set and the '
            'command is run in the integration test directory. '
            'This means that "pytest" will run the integration tests.'
        ),
    )
    decorated = decorator(command)  # type: Callable[..., None]
    return decorated
|
33de24e435aa258f8bd3474ee2ca3a3358651584
| 700,227
|
def bout_boundaries_ts(ts, bname):
    """Return bout boundaries for one behavior of the male in a
    FixedCourtshipTrackingSummary.

    Parameters
    ----------
    ts : FixedCourtshipTrackingSummary
        Should contain ts.male and ts.female attributes.
    bname : string
        Name of the behavior whose bout boundaries are extracted.

    Returns
    -------
    bout_boundaries : arr of shape [N, 2]
        Column 0 holds bout start indices, column 1 bout stop indices.
    """
    behavior = ts.male.get_behavior(bname)
    return behavior.ixs()
|
c9c4351e18ff3e089cc7a925101ad77116b1f571
| 700,228
|
def __extract_digits__(string):
    """
    Extract the run of digits at the start of *string* as an int.

    Parameters
    -----------------
    string : string
        Measurement string containing leading digits followed by units.

    Returns
    -----------------
    digits : int
        Integer value of the leading digits.

    Raises
    -----------------
    ValueError
        If the string does not start with a digit (int('') fails),
        matching the original behavior.
    """
    i = 0
    # Bounds check fixes the IndexError the original raised when the
    # whole string consisted of digits (no unit suffix).
    while i < len(string) and string[i].isdigit():
        i += 1
    return int(string[0:i])
|
6613e56dc33c88d9196c2ec95412155ad0aaf382
| 700,229
|
def array_rotation():
    """Solution to exercise R-11.10.

    Explain why performing a rotation in an n-node binary tree stored
    in the array-based representation of Section 8.3.2 takes Omega(n)
    time.
    ---------------------------------------------------------------------------
    Solution:
    ---------------------------------------------------------------------------
    The array representation addresses each position p by its level
    number f(p).  A rotation swaps two nodes (x and y) and changes the
    levels of every node in the three subtrees rooted at their
    children: two of those subtrees move up or down a level and the
    middle one shifts sideways, so all of their nodes must be
    renumbered and moved to new array indices.

    The number of affected nodes is proportional to n in general: a
    rotation at the root renumbers all n nodes, and rotations deeper in
    the tree still touch a fraction of n.  Even assuming each node's
    renumber-and-move is O(1), the operation's lower bound is linear,
    i.e. Omega(n).
    """
    return True
|
f448fede21496701509e2399f1ebc1b3fbf50954
| 700,230
|
def update_sulfuras(item):
    """
    Sulfuras is legendary: its quality never changes and it never has
    to be sold, so both values are returned untouched.
    """
    sell_in, quality = item.sell_in, item.quality
    return sell_in, quality
|
7ed10720aa7543719383f73923f2f64bf1439021
| 700,231
|
from typing import Counter
def filter_adjoined_candidates(candidates, min_freq):
    """
    Keep only the adjoined candidates that occur at least *min_freq*
    times in *candidates* (order and duplicates preserved).
    """
    candidates_freq = Counter(candidates)
    # Single-pass comprehension replaces the manual append loop.
    return [candidate for candidate in candidates
            if candidates_freq[candidate] >= min_freq]
|
be9c8618dc9e8efd086cc3c8cc51d5ffdb5d8254
| 700,232
|
def create_tdm_tree():
    """Create tdm tree info.

    Returns a hard-coded table with one row per tree node.  From the
    values, each row appears to be
    [item_id, layer, group_id, first_child, second_child] with 0 as a
    null index -- TODO confirm the column meanings against the TDM
    consumer of this table.
    """
    # Internal nodes (non-zero children) followed by leaf rows.
    tree_info = [
        [0, 0, 0, 1, 2],
        [0, 1, 0, 3, 4],
        [0, 1, 0, 5, 6],
        [0, 2, 1, 7, 8],
        [0, 2, 1, 9, 10],
        [0, 2, 2, 11, 12],
        [0, 2, 2, 13, 0],
        [0, 3, 3, 14, 15],
        [0, 3, 3, 16, 17],
        [0, 3, 4, 18, 19],
        [0, 3, 4, 20, 21],
        [0, 3, 5, 22, 23],
        [0, 3, 5, 24, 25],
        [12, 3, 6, 0, 0],
        [0, 4, 7, 0, 0],
        [1, 4, 7, 0, 0],
        [2, 4, 8, 0, 0],
        [3, 4, 8, 0, 0],
        [4, 4, 9, 0, 0],
        [5, 4, 9, 0, 0],
        [6, 4, 10, 0, 0],
        [7, 4, 10, 0, 0],
        [8, 4, 11, 0, 0],
        [9, 4, 11, 0, 0],
        [10, 4, 12, 0, 0],
        [11, 4, 12, 0, 0],
    ]
    return tree_info
|
03a44baa724135b07b88f8f5bec4834262498320
| 700,234
|
def conv_F2C(value):
    """Convert degrees Fahrenheit to degrees Celsius.

    Accepts a scalar or array-like; when the result carries a 'units'
    attribute (e.g. an xarray object) it is updated to 'degC'.
    """
    celsius = (value - 32) * (5 / 9)
    if hasattr(celsius, 'units'):
        celsius.attrs.update(units='degC')
    return celsius
|
1f3ac169942c51b363a7ae7977890be237b95390
| 700,235
|
import os
import re
def mdfile_in_dir(dire):
    """Judge if there is a Markdown file in the directory tree.

    i: input directory
    o: True if a .md or .markdown file is found anywhere below it.
    """
    for root, dirs, files in os.walk(dire):
        for filename in files:
            # endswith() replaces the old regex '.md$|.markdown$': its
            # unescaped dot also matched names like 'run.cmd'.
            if filename.endswith(('.md', '.markdown')):
                return True
    return False
|
880cd7a823fa4b42d8492882c48f7c5d70f4dc52
| 700,236
|
def status_in_range(value: int, lower: int = 100,
                    upper: int = 600) -> bool:
    """
    Validate that the status code of a HTTP call lies within the given
    boundary, inclusive of both ends.
    """
    valid_codes = range(lower, upper + 1)
    return value in valid_codes
|
43bb6f4824b7e42b6620e3ddff853aa65713f145
| 700,237
|
def nodeOrdering(criteria = "const FieldList<%(Dimension)s, %(DataType)s>&"):
    """Return the return-type template string for the nodeOrdering binding.

    nodeOrdering computes the order a set of nodes should be stepped
    through, given a FieldList of sort keys; the generated FieldList
    holds the 1..N ranks corresponding to sorting the input in
    increasing order.
    """
    return_type = "FieldList<%(Dimension)s, int>"
    return return_type
|
70af2ca0549a03e3e6bf9cfd08b6d831770c4a31
| 700,238
|
def _float_to_str(x):
    """
    Convert a float to str.  Ordinary magnitudes come out as a decimal
    representation (for xs:decimal), while very large or very small
    values come out in exponential notation, suitable for xs:float and
    xs:double.
    """
    return str(x)
|
cb795b9c4778b9a3fda7398024166d86d8458bd3
| 700,239
|
import argparse
def parse_args():
    """Argparser for command line input.

    Enforces that the caller supplies either -o (overwrite INFILE) or
    an explicit OUTFILE, but not both.  With -o, OUTFILE is set to
    INFILE.
    """
    parser = argparse.ArgumentParser(
        description=
        "Utility for turning files into JavaScript-embeddable strings.",
        epilog=("Before using the program you have to insert "
                "stuff2str(\"/path/to/file\") "
                "tag into your INFILE."))
    parser.add_argument(
        '-o',
        '--ow',
        dest='ow',
        action="store_true",
        help="overwrites input file")
    parser.add_argument("input", metavar='INFILE', type=str, help="input file")
    parser.add_argument(
        "output", metavar='OUTFILE', nargs="?", type=str, help="output file")
    args = parser.parse_args()
    # NOTE(review): 'input' is a required positional, so argparse exits
    # before this check could ever trigger -- dead code in practice.
    if not args.input:
        parser.error("You must specify at least one argument to go.")
    # -o together with an explicit OUTFILE is contradictory: refuse it.
    if args.ow and args.output is not None:
        parser.error(
            "Utility takes only one additional argument if -o option is specified."
        )
    # NOTE(review): this fires when neither -o nor OUTFILE was given,
    # but the message talks about overwriting -- wording and condition
    # look swapped; confirm the intended UX before changing either.
    if not args.ow and args.input is not None and args.output is None:
        parser.error(
            "If you want to overwrite your input file, please specify -o option."
        )
    # With -o, write back to the input file.
    if args.ow:
        args.output = args.input
    return args
|
6a0de612dc43baeb5f4eaefc654166de54a7326b
| 700,240
|
def unique_cluster_indices(cluster_indx):
    """
    Return a unique list of cluster indices, in first-seen order.

    :param cluster_indx: Cluster index list of ClusterExpansionSetting:
        symmetry groups -> size groups -> clusters (may be None) ->
        subclusters -> indices.
    """
    seen = set()
    unique_indx = []
    for symmgroup in cluster_indx:
        for sizegroup in symmgroup:
            for cluster in sizegroup:
                if cluster is None:
                    continue
                for subcluster in cluster:
                    for indx in subcluster:
                        # O(1) set membership replaces the original
                        # O(n) 'in list' scan; output order unchanged.
                        if indx not in seen:
                            seen.add(indx)
                            unique_indx.append(indx)
    return unique_indx
|
36bc5a287d49c6abbd552b9edc0e72675ba82eca
| 700,241
|
def _calculate_atr(atr_length, highs, lows, closes):
    """Calculate the average true range over the last *atr_length* bars.

    atr_length : period to average over (1 <= atr_length < len(closes))
    highs / lows / closes : parallel price lists

    Raises ValueError when atr_length is out of range.
    """
    if atr_length < 1:
        raise ValueError("Specified atr_length may not be less than 1")
    elif atr_length >= len(closes):
        raise ValueError("Specified atr_length is larger than the length of the dataset: " + str(len(closes)))

    start = len(highs) - atr_length
    true_ranges = []
    for i in range(start, len(highs)):
        prev_close = closes[i - 1]
        true_ranges.append(max(
            abs(highs[i] - lows[i]),
            abs(highs[i] - prev_close),
            abs(lows[i] - prev_close),
        ))
    return sum(true_ranges) / atr_length
|
f5878eda22c09fa8c428122bd013f9c6088ea0f8
| 700,242
|
def copy_weights(model, source, target, num_layers, shared=False):
    """Copies the weight values of mixture weights and head from source to target
    domain.

    Arguments:
        model: a tf.keras.Model object.
        source: source domain name.
        target: target domain name.
        shared: whether the model is shared.
        num_layers: number of resblock layers in the model.

    Returns:
        The same model, with target-domain head/mixture layer weights
        overwritten by the source-domain ones where those layers exist.
    """
    # A "shared_out" layer means the head is common to all domains, so
    # there is nothing to copy.  get_layer raises ValueError when the
    # layer is absent, which is how we fall through to the per-domain
    # head copy below.
    try:
        model.get_layer("shared_out")
    except ValueError:
        try:
            source_out = model.get_layer("%s_out" % source)
            target_out = model.get_layer("%s_out" % target)
            target_out.set_weights(source_out.get_weights())
            print("copied head weights.")
        except ValueError:
            # Neither a shared nor a per-domain head exists.
            print("No head to copy.")
    if shared:
        # Copy each per-layer mixture block from source to target.
        for idx in range(num_layers):
            source_mix = model.get_layer("%s_mix_%d" % (source, idx))
            target_mix = model.get_layer("%s_mix_%d" % (target, idx))
            target_mix.set_weights(source_mix.get_weights())
            print("copied weights from %s_mix_%d to %s_mix_%d" %
                  (source, idx, target, idx))
    return model
|
227ea9a45a3c9af2086ded56ce5f9a6c6780d2c9
| 700,243
|
import subprocess
def bash4(text):
    """
    Run *text* as a bash command line and return its stdout (bytes),
    with stderr folded into stdout.

    Deprecated: use the sh lib instead (kept because sh does not work
    under eclipse).
    """
    # BUG FIX: the original split the command into a list but still
    # used shell=True; with a sequence, the shell receives only the
    # first word and silently drops the arguments.  Pass the raw
    # string to the shell instead.
    pipe = subprocess.Popen(text, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            shell=True, executable='/bin/bash')
    stdout, stderr = pipe.communicate()
    return stdout
|
14024bf5d1f353ab58fcd06b7c0c145aa6bf47b5
| 700,244
|
import torch
from typing import Tuple
def get_tot_objf_and_finite_mask(tot_scores: torch.Tensor, reduction: str) -> Tuple[torch.Tensor, torch.Tensor]:
    """Reduce total scores (log-probs) over the successful supervision
    segments, i.e. those whose total score is neither NaN nor -infinity.

    Args:
        tot_scores: Torch tensor of shape (num_segments,) containing
            total forward-backward scores
        reduction: reduction type: 'mean', 'sum' or 'none'
    Returns:
        A tuple (tot_score, finite_mask) where finite_mask marks the
        successful segments; with reduction 'none' the scores are
        returned unreduced.
    Based on get_tot_objf_and_num_frames
    from https://github.com/k2-fsa/snowfall/blob/master/snowfall/objectives/common.py
    """
    is_nan = torch.isnan(tot_scores)
    is_neg_inf = tot_scores == -float("inf")
    finite_mask = ~(is_nan | is_neg_inf)
    if reduction == "mean":
        return tot_scores[finite_mask].mean(), finite_mask
    if reduction == "sum":
        return tot_scores[finite_mask].sum(), finite_mask
    return tot_scores, finite_mask
|
d09b95056c2635be22a7db1baaa64b6667955007
| 700,245
|
def PolyExpansion(n_range, s):
    """Polynomial expansion: return the one-element basis [s ** n_range]."""
    term = s ** n_range
    return [term]
|
fc76a3b3c76f7f5e12673b9e19a9ca35c3a3853c
| 700,247
|
import requests
def get_weather(furl):
    """
    Get the weather of a city given an API call.

    :param furl: URL of the API call.
    :return: JSON response with weather data, parsed into Python objects.
    """
    # Blocking HTTP GET; raises requests exceptions on network failure.
    req = requests.get(furl)
    return req.json()
|
7aa9c484c368fb61b9b6bf360dffa732e661b037
| 700,248
|
import os
def mimic_path_relativity(path, other, default_dir):
    """Mirror the relativity of 'other' onto 'path'.

    If 'other' is absolute, anchor 'path' under default_dir; otherwise,
    if 'path' is absolute, make it relative to default_dir.  A relative
    'path' with a relative 'other' is returned unchanged.
    """
    if os.path.isabs(other):
        return os.path.join(default_dir, path)
    if os.path.isabs(path):
        return os.path.relpath(path, default_dir)
    return path
|
5a8243446db11cf13f8fa8f47bac4f05f032c30c
| 700,249
|
def string_set_intersection(set_a, set_b, ignore_case=True, sep=","):
    """
    Intersect two separator-delimited string sets.

    :type set_a str
    :type set_b str
    :type ignore_case bool
    :type sep str
    :rtype set
    """
    if set_a is None or set_b is None:
        return set()
    if ignore_case:
        set_a, set_b = set_a.lower(), set_b.lower()
    return set(set_a.split(sep)).intersection(set_b.split(sep))
|
bd444273c17dc747f6856531c12d0b7280fb5aa3
| 700,250
|
def Main(a, b, c, d):
    """
    Classify the ordering of (a, b) and (c, d), with b vs c as a
    tie-breaker, into one of six codes: 3, 8, 10, 1, 11 or 22.

    :param a:
    :param b:
    :param c:
    :param d:
    :return: one of {1, 3, 8, 10, 11, 22}
    """
    if a > b:
        if c > d:
            return 3
        return 8 if b > c else 10
    if c > d:
        return 1
    return 11 if b < c else 22
|
d3e12e98bda3109f31e9286b8e95c92bb3e416c0
| 700,251
|
def traverse_file_structure(current, function, **inner_function_args):
    """Recursively walk *current* and apply *function* to every file found.

    :param current: Source folder
    :type current: stibnite.file_operations.FolderType
    :param function: Applied to each stored file value; its return
        value replaces the stored entry.
    :type function: function
    :param inner_function_args: Extra keyword arguments forwarded to
        *function*.
    :type inner_function_args: dictionary of string to object
    :return: The same source folder (mutated in place).
    :rtype: stibnite.file_operations.FolderType
    """
    for folder_name in current.folders.keys():
        traverse_file_structure(current.get_element(folder_name), function,
                                **inner_function_args)
    for file_name in current.files.keys():
        current.files[file_name] = function(current.files[file_name],
                                            **inner_function_args)
    return current
|
93312cb2792a27c1441a3794ca261bc67c22171a
| 700,252
|
def extract_text(dom, name, wrapper=None):
    """
    Extract the text of the first *name* element in *dom*, optionally
    passing it through *wrapper* (a function / class).  When no such
    element exists, the empty string is used (wrapped as well, if a
    wrapper is given).
    """
    elements = dom.getElementsByTagName(name)
    text = elements[0].lastChild.data if elements else ""
    if wrapper:
        return wrapper(text)
    return text
|
19edcf8daad4438fc2096cc19dc7091df46bcdc0
| 700,253
|
import argparse
def get_args():
    """
    Parse the command line arguments of the video splitter.

    :return: parsed argparse.Namespace
    """
    parser = argparse.ArgumentParser("Split a video in several images")
    parser.add_argument("--path_video", type=str, help="Path to the input video", default="")
    parser.add_argument("--path_images", type=str, help="Folder path where to stored output images", default="./images")
    parser.add_argument("--basename", type=str, help="Basename for the output images", default="image")
    parser.add_argument("--step", type=int, help="Step in ms between each image to dump (default 500ms)", default=500)
    parser.add_argument("--size", type=int, nargs="+", help="None, max_dimension or width height, to which the images must be resized (default None)", default=None)
    return parser.parse_args()
|
59c8e728930e534b95e07d0c03fa9c8932fb31de
| 700,255
|
def dedup_whoopsies(sortedWhoopsies):
    """Take whoopsies sorted first by qid, then magnitude, and return
    the worst whoopsie for each query.

    Because the input is sorted with the worst whoopsie first within
    each qid, keeping the first element of every qid run implements
    "worst per query".  The original iterator-based loop appended every
    element it read, so it never actually deduplicated.
    """
    worst_per_query = []
    last_qid = object()  # sentinel distinct from any real qid
    for whoopsie in sortedWhoopsies:
        if whoopsie.qid != last_qid:
            worst_per_query.append(whoopsie)
            last_qid = whoopsie.qid
    return worst_per_query
|
dfac3eda077e544df39539c4ea10d0a0eb7c6b7e
| 700,257
|
def prioritize_nodes(graph, where_conditions):
    """Assign a priority to every node in *graph*.

    NOTE(review): despite the docstring's original claim, the
    where_conditions argument is never consulted -- every node simply
    gets priority 1.  Kept as-is to preserve behavior; confirm whether
    boosting mentioned nodes was implemented elsewhere.
    """
    return {node: 1 for node in graph.keys()}
|
60ef01d7bba9b03925a35e9fae602118207a208a
| 700,258
|
def no_of_passwords(k=0):
    """
    Count possible all-lowercase-alphabet passwords of length *k*.

    Each position has 26 possibilities, so the count is 26**k.

    The original ignored *k* entirely and always returned 26**5; the
    default of 0 is kept for signature compatibility and still maps to
    the documented length of 5.
    """
    n = 26
    if k <= 0:
        k = 5  # documented default password length
    return n ** k
|
01898ae8eda0234858d2be1ff767a13ddb466a31
| 700,259
|
def env_start():
    """Reset the agent to the maze's fixed start cell and return it as [x, y]."""
    global maze, current_position
    current_position = [0, 3]  # start cell [x, y]
    return current_position
|
a12f2ea46c8ea41923842fd79e9ae91f0441a8ea
| 700,260
|
def rdist(x, y):
    """Reduced (squared) Euclidean distance.

    Parameters
    ----------
    x: array of shape (embedding_dim,)
    y: array of shape (embedding_dim,)

    Returns
    -------
    The squared euclidean distance between x and y
    """
    total = 0.0
    for idx in range(x.shape[0]):
        diff = x[idx] - y[idx]
        total += diff * diff
    return total
|
3caa871145b8ca68c0cd2bd23a183967b7b5b7cc
| 700,261
|
import glob
def find_file(path):
    """
    Search for files matching a glob pattern.

    Parameters
    ----------
    path : str
        Path and pattern to find files.

    Returns
    -------
    list of str
        Sorted matches, with backslashes normalized to forward slashes.

    Raises
    ------
    ValueError
        If nothing matches the pattern.
    """
    # Comprehension replaces the in-place index loop of the original.
    matches = [p.replace("\\", "/") for p in glob.glob(path)]
    if not matches:
        raise ValueError("!!! No files found in: {}".format(path))
    return sorted(matches)
|
a5176d5caa5cef6ca2724c79e3f920cfc96aea0c
| 700,262
|
import numpy
def compute_ivectors(gmm_stats, ivector_machine):
    """
    Project each :py:class:`bob.learn.em.GMMStats` through the T-matrix
    (ivector machine) and stack the resulting iVectors into an array.
    """
    return numpy.array([ivector_machine(stats) for stats in gmm_stats])
|
26d25ebd1acfd65571aa91a292c05ec70c35fc58
| 700,263
|
import os
def get_html_theme_path():
    """Return the list of HTML theme paths (this package's directory)."""
    theme_dir = os.path.abspath(os.path.dirname(__file__))
    return [theme_dir]
|
9041e859b786d33257ac3e972c223474261bceca
| 700,264
|
import hashlib
import json
def _hasher(obj):
    """Non-cryptographic MD5 hash of a JSON-serializable object."""
    serialized = json.dumps(obj).encode()
    return hashlib.md5(serialized).hexdigest()
|
14262878ac53af8f49f7fef4c028be36e4370725
| 700,265
|
def binh_korn(x, y):  # pylint:disable=invalid-name
    """Negated Binh-Korn bi-objective test function.
    https://en.wikipedia.org/wiki/Test_functions_for_optimization
    """
    first = 4 * x ** 2 + 4 * y ** 2
    second = (x - 5) ** 2 + (y - 5) ** 2
    return (-first, -second)
|
b15db03f14a21bbbf974c5465d20717efb27837b
| 700,266
|
def match_comment_type(file_name):
    """
    Map a file name to the character that starts a comment in that
    file type:
      # -> python and textfile
      / -> Java
      @ -> assembly (.s)
    Unknown extensions yield the string "null".
    """
    comment_chars = {
        ".txt": "#",
        ".py": "#",
        ".java": "/",
        ".s": "@",
    }
    for suffix, char in comment_chars.items():
        if file_name.endswith(suffix):
            return char
    return "null"
|
20c4c04e8e656862443ea5ce7584e383d6c72842
| 700,267
|
def getDirName():
    """() -> str or None

    Read the repo directory name from the 'dirname' file in the
    current working directory; return None when the file cannot be
    opened.
    """
    try:
        # 'with' guarantees the handle is closed; the original leaked it.
        with open('dirname', 'r') as file:
            return file.read()
    except IOError:
        return None
|
ca05bbd8da05dd5f06f95bc457e31df9c9b9e45a
| 700,268
|
def get_plannings(client, page, per_page):
    """
    List the plannings in the database, paginated.

    :param client: the client to make the request
    :param page: the page to be shown
    :param per_page: the amount of plannings per page
    :return: the client's response for GET /api/plannings/
    """
    return client.get('/api/plannings/',
                      query_string=dict(page=page, page_size=per_page))
|
2ebfd016e4e32ec819f4bc5319cb980a1d4b2d6a
| 700,270
|
import socket
def get_hostname():
    """Return this machine's fully-qualified domain name."""
    local_name = socket.gethostname()
    return socket.getfqdn(local_name)
|
42a3ee2304e73c6858553c7fe00edd49c0747826
| 700,271
|
def length_str(msec: float) -> str:
    """
    Format a track length given in milliseconds as a human-readable
    M:SS or H:MM:SS string.  None (or 0) is treated as zero length.
    """
    total_seconds = (msec or 0) / 1000
    seconds = total_seconds % 60
    minutes = (total_seconds - seconds) / 60
    if minutes < 60:
        return '%i:%02d' % (minutes, seconds)
    rem_minutes = minutes % 60
    hours = (minutes - rem_minutes) / 60
    return '%i:%02d:%02d' % (hours, rem_minutes, seconds)
|
7cf6674d68d118c78a2953b3fef873633673bbf0
| 700,272
|
def get_E_E_CG_gen_d_t(E_E_gen_PU_d_t, E_E_TU_aux_d_t):
    """Hourly electricity generated by the cogeneration system (kWh/h), eq. (2).

    Args:
        E_E_gen_PU_d_t(ndarray): hourly power-unit generation (kWh/h)
        E_E_TU_aux_d_t(ndarray): hourly tank-unit auxiliary consumption (kWh/h)

    Returns:
        ndarray: hourly net cogeneration output (kWh/h)
    """
    net_generation = E_E_gen_PU_d_t - E_E_TU_aux_d_t
    return net_generation
|
46870e6ca7739d34027fa8ac3c2f57b2f27e9a6c
| 700,274
|
def row_sum(lst):
    """Sum of the non-missing items in *lst* (missing encoded as values
    <= -1); each kept item is truncated to int before summing."""
    total = 0
    for value in lst:
        if value > -1:
            total += int(value)
    return total
|
5fabe4d3487e502dcb82dd452854de3777e1a5a8
| 700,276
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.