content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def _read_indices_names(f, n):
    """Read indices or names.

    Reads from *f*, assumed to expose a ``next(comments="#")`` method
    returning the next non-comment line (NOTE(review): reader type not
    visible here -- confirm against callers).

    Behaviour by sign of *n*:
    - n > 0: one line of whitespace-separated integers; the first n are
      returned as a list of ints.
    - n < 0: one name per line (first 20 characters, stripped), read
      until the first empty line; all collected names are returned.
    - n == 0: a single line is consumed and discarded; implicitly
      returns None.
    """
    if n > 0:
        line = f.next(comments="#").strip()
        data = [int(x) for x in line.split()]
        out = data
        # Only the first n integers on the line are meaningful.
        return out[:n]
    elif n < 0:
        line = f.next(comments="#").strip()
        # Names are fixed-width fields: only the first 20 chars count.
        out = [line[:20].strip()]
        while True:
            line = f.next().strip()
            if line:
                out.append(line[:20].strip())
            else:
                # Blank line terminates the name list.
                break
        return out
    else:
        # n == 0: skip one line; nothing is returned.
        _ = f.next(comments="#")
|
3b05d36685e39ea1f8e3fd4085cfee4d8a803292
| 26,101
|
def find_childs(graph, parent, found_elements):
    """
    Recursively collect every element reachable from *parent* by
    following the ``parent -> child`` mapping in *graph*; each visited
    child is appended to *found_elements* (mutated in place).
    >>> linked_list = {"thirdEntry": "fourthEntry", "firstEntry": "secondEntry", "root": "firstEntry", "secondEntry": "thirdEntry"}
    >>> results = []
    >>> find_childs(linked_list, "root", results)
    ['firstEntry', 'secondEntry', 'thirdEntry', 'fourthEntry']
    """
    if parent in graph:
        child = graph[parent]
        found_elements.append(child)
        find_childs(graph, child, found_elements)
    return found_elements
|
bb5e66fd998b0dc9ab08c185a63cca8b3c3937d4
| 26,102
|
import csv
def read_csv(file):
    """
    Load the CSV file at path *file* and return its rows as a list;
    each row is a list of string fields.
    """
    with open(file, 'r') as handle:
        rows = [row for row in csv.reader(handle, delimiter=',')]
    return rows
|
31fc6f489cbebdb5e83a05bbb8ea3e09c41df6f1
| 26,104
|
def _fibonacci_memo(n, T):
"""Fibonacci series by top-down memoization.
Time complexity: O(n).
Space complexity: O(n).
"""
if T[n]:
return T[n]
if n <= 1:
T[n] = n
else:
T[n] = _fibonacci_memo(n - 1, T) + _fibonacci_memo(n - 2, T)
return T[n]
|
50b88135dd22ca62ee9468f7e20dd7601059b650
| 26,106
|
def clean_no_brac(ustring):
    """Clean a string by dropping parentheses and everything between them.

    Characters between ``(`` and ``)`` (inclusive) are removed; the
    remaining characters are joined and returned.
    """
    kept = []
    inside = False
    for ch in ustring:
        if ch == u'(':
            inside = True
        elif ch == u')':
            inside = False
        elif not inside:
            kept.append(ch)
    return u''.join(kept)
|
59c562326719c757d5f3a84d26dea7fe0c61794e
| 26,109
|
def isurl(value):
    """Determine if the parsed string is a url.

    Returns True only for ``str`` instances whose text begins with
    'http' or 'www'; any other type or prefix yields False.
    """
    # str.startswith accepts a tuple of prefixes -- equivalent to the
    # old slice comparisons but idiomatic and single-pass.
    return isinstance(value, str) and value.startswith(('http', 'www'))
|
58ef164aca40c3b6dd379ea20b798759dfd5e0e0
| 26,110
|
import threading
def unserialize_event(is_set: threading.Event) -> threading.Event:
    """
    Set the internal flag to true. All threads waiting for it to become true are awakened.
    Return a threading event set to True
    :param is_set: threading event
    :return: return a threading event set to true.
    >>> event_ = threading.Event()
    >>> u_event = unserialize_event(event_)
    >>> assert isinstance(u_event, threading.Event)
    >>> event_.set()
    >>> event_.isSet()
    True
    >>> u_event = unserialize_event(event_)
    >>> u_event.isSet()
    True
    >>> event_.clear()
    >>> u_event = unserialize_event(event_)
    >>> u_event.isSet()
    True
    """
    # Fixed: the assert's message used to be `print(...)`, which evaluates
    # to None, so a failing assert carried no message at all.
    assert isinstance(is_set, threading.Event), (
        "Positional argument <is_set> is type %s , expecting threading.Event." % type(is_set)
    )
    event_ = threading.Event()
    # NOTE: an Event instance is always truthy, so the returned event is
    # set unconditionally -- this matches the doctests above, which pin
    # "set even after clear()" as the intended behaviour.
    if is_set:
        event_.set()
    return event_
|
328a383c82aa9ef8e3ee8c804de1aafbe4d16215
| 26,111
|
def ueGas(ub, umf, emf, delta, fw):
    """
    Velocity of the gas in the emulsion phase, K/L eqs. 6.39-6.40.
    Parameters
    ----------
    ub : float
        Bubble rise velocity [m/s]
    umf : float
        Minimum fluidization velocity [m/s]
    emf : float
        Void fraction at minimum fluidization [-]
    delta : float
        Fraction of bed volume in bubbles [-]
    fw : float
        Ratio of wake volume to bubble volume [-]
    Returns
    -------
    ue : float
        Emulsion gas velocity [m/s]
    """
    # Wake contribution subtracted from the interstitial velocity.
    wake_term = fw * delta * ub / (1 - delta - fw * delta)
    return umf / emf - wake_term
|
c3756c353a7f317cf249db74f8e8f32503b00ea6
| 26,113
|
def input_bias_weighting(bias_strat):
    """
    Receives input from experimenter to define the weighting of the bias for biased
    strategies b and c. It also computes the respective weighting for the opposite side,
    thus returning a list with two values. The first value is the bias towards the side
    of interest.
    This function is embedded in input_comp_str()
    Parameters
    ----------
    bias_strat : STR
        Meant to take the variable strategy when 'b' or 'c' have been entered.
        Raises error when this argument is not 'b' or 'c'
    Returns
    -------
    bias_weight : LIST with two FLOATs
        The first value is the bias towards the side of interest, the second is the remaining
        probability for the opposite side
    Raises
    -------
    ValueError when bias_strat is not 'b' or 'c'
    """
    # 'b' biases towards heads, 'c' towards tails; anything else is a
    # caller error.
    if bias_strat == 'b':
        txt = 'heads'
    elif bias_strat == 'c':
        txt = 'tails'
    else:
        error_msg = "bias_strat entered was {}, but input_bias_weighting() only takes 'b' or 'c' as strategies"
        raise ValueError(error_msg.format(bias_strat))
    #Ask experimenter for input
    bias_weight = input("Enter the bias towards {} as a proportion of 1.0, (e.g. 0.7 means {} will be selected, on average, 70% of the trials): ".format(txt, txt))
    #The following loops make sure that the experimenter inputs digits that are <= 1.0
    # First loop: keep prompting until the raw input parses as a float.
    while True:
        try:
            bias_weight = float(bias_weight)
            break
        except:
            bias_weight = input("Please enter the value in digits (characters are not allowed): ")
    # Second loop: keep prompting until the value lies in [0, 1]; each
    # re-prompt is itself re-parsed by the nested loop below.
    while True:
        if 0 <= bias_weight <= 1:
            break
        else:
            bias_weight = input("Wrong value. Please enter a number between 0 and 1: ")
            while True:
                try:
                    bias_weight = float(bias_weight)
                    break
                except:
                    bias_weight = input("Please enter the value in digits (characters are not allowed): ")
    # Pair the bias with the complementary probability for the other side.
    bias_weight = [bias_weight, 1 - bias_weight]
    return bias_weight
|
154caa1146df9f1b43bf404f0fb51e591f9de737
| 26,114
|
def track_objs(frame, frame_id, objects):
    """Perform tracking on every object in the frame.
    Parameters
    ----------
    frame : numpy array
        The input image to track the objects within
    frame_id : int
        The order of the frame in the video
    objects : list
        list of the current traffic objects under tracking
    Returns
    -------
    list
        the same list of traffic objects, after each object's ``track``
        method has updated its position for this frame.
    """
    for tracked in objects:
        tracked.track(frame, frame_id)
    return objects
|
1b65e8b50efbcd6d31b81208cf7da0f84b588bba
| 26,115
|
def delete_invalid_values(dct):
    """Delete entries whose values are dictionaries or sets (in place)
    and return the mutated dict."""
    invalid_keys = [key for key, value in dct.items()
                    if isinstance(value, (dict, set))]
    for key in invalid_keys:
        del dct[key]
    return dct
|
eeadaeea809e7e38fcac7837981c3710b26817ae
| 26,116
|
def get_unique(list_):
    """Return a list in which every value occurs only once.

    Order is not preserved (set-based deduplication).
    """
    deduplicated = set(list_)
    return list(deduplicated)
|
b1380522fa407157d03b4d5b87daed73eed70b8d
| 26,117
|
def parse_sacct_output(string):
    """Parse the output of the slurm sacct command into a dict.
    The sacct command needs to be printed with -P flag enabled
    (pipe-delimited).

    Returns a mapping from each row's second column to a dict of
    {header_field: value}; empty or header-only input yields {}.
    """
    delim = '|'
    lines = string.split("\n")
    parsed = {}
    if len(lines) <= 1:
        return parsed
    header = lines[0].split(delim)
    for row in lines[1:]:
        if not row:
            continue
        fields = row.split(delim)
        record = {name: str(fields[i]) for i, name in enumerate(header)}
        parsed[str(fields[1])] = record
    return parsed
|
b58faf56700672f8cc5fe601639e5f587f77c40c
| 26,118
|
def if_file_is_amis_json(file_path):
    """
    Decide whether *file_path* refers to an amis JSON file, based purely
    on the path text: it must end in 'json', contain 'amis_json', and
    live under a 'static' or 'templates' directory.
    :param file_path:
    :return: bool
    """
    return (
        file_path[-4:] == 'json'
        and 'amis_json' in file_path
        and ('static' in file_path or 'templates' in file_path)
    )
|
6c6e1cc9871f0a5dd37b0f056af94dce7927d7dc
| 26,120
|
import glob
import os
def get_files_in_dir(path):
    """
    Get the base names of all PDF files matching ``path + "*.pdf"``.
    (require python 3.5*).
    :param path: directory prefix; include a trailing separator to list
        a directory's contents.
    :return: list of file names (no directory component)
    """
    matches = glob.glob(path + "*.pdf")
    return [os.path.basename(match) for match in matches]
|
a12c2643df06b9e7b0fb18f79b8e36f098af5452
| 26,122
|
def part_two():
    """Part two.

    Spinlock puzzle: simulate 50,000,000 insertions with step size 386
    without materialising the circular buffer, tracking only the value
    that currently sits immediately after 0.
    Returns the value after 0 once all insertions are done.
    """
    step = 386
    rounds = 50000000
    length = 0
    zero_position = current_position = 0
    target = 0
    for i in range(rounds):
        length += 1
        current_position = ((current_position + step) % length) + 1
        # Inserting at 0's slot pushes 0 one position to the right.
        if current_position == zero_position:
            zero_position += 1
        elif current_position == (zero_position + 1) % length:
            # The new value (i + 1) now sits directly after 0.
            target = i + 1
    return target
|
21fe6f83731dce698ba8f93dba7a68834b20c60a
| 26,123
|
def ingestion_manifest_directory(
    populated_manifest_dir, populated_nested_manifest_dir, request
):
    """
    Allows for parameterization of manifests to ingest by returning
    the corresponding fixture.

    ``request.param`` selects which of the two directory fixtures is
    returned; an unknown param raises KeyError.
    """
    fixtures = {
        "populated_manifest_dir": populated_manifest_dir,
        "populated_nested_manifest_dir": populated_nested_manifest_dir,
    }
    return fixtures[request.param]
|
2dc15890a38ff4df0f59b22bb581eb5e7fdd3695
| 26,124
|
def get_ranking10():
    """
    Return the ranking with ID 10 as (alternative, score) pairs,
    ordered by descending score.
    """
    ranking = [
        ("a2", 0.677366),
        ("a5", 0.675493),
        ("a3", 0.658395),
        ("a6", 0.652317),
        ("a4", 0.622630),
        ("a1", 0.456501),
    ]
    return ranking
|
18a3a757182cdce310ba1d1dc2004d2a23cb5962
| 26,125
|
def make_function_listener(events):
    """
    Return a simple non-method extension point listener.
    The listener appends events to the ``events`` list.
    """
    def _listener(registry, event):
        # registry is accepted for interface compatibility but unused.
        events.append(event)
    return _listener
|
b385d25eb98ee50f90d428b59cacb356144b2bb1
| 26,126
|
def check_connect_4(board: list, x: int, y: int) -> int:
    """
    Check whether the tile at (x, y) belongs to a run of 4 or more
    identical tiles vertically, horizontally, or on either diagonal.

    Fixes over the previous version: diagonal cells were counted twice
    (duplicated `count +=` lines), the anti-diagonal was never checked
    on its own, and the straight-line passes counted non-contiguous
    matching tiles.

    @param board: the board data (list of rows).
    @param x: column index.
    @param y: row index.
    @return: length of the first winning run found (>= 4), else 0.
    """
    rows = len(board)
    cols = len(board[0])
    tile = board[y][x]

    def _run_length(dx: int, dy: int) -> int:
        # Contiguous matching tiles through (x, y) along +/-(dx, dy).
        count = 1
        i, j = x + dx, y + dy
        while 0 <= i < cols and 0 <= j < rows and board[j][i] == tile:
            count += 1
            i, j = i + dx, j + dy
        i, j = x - dx, y - dy
        while 0 <= i < cols and 0 <= j < rows and board[j][i] == tile:
            count += 1
            i, j = i - dx, j - dy
        return count

    # vertical, horizontal, main diagonal, anti-diagonal
    for dx, dy in ((0, 1), (1, 0), (1, 1), (1, -1)):
        run = _run_length(dx, dy)
        if run >= 4:
            return run
    return 0
|
cbed46c7790af0684f6f283db2f2c84b5e3f6a34
| 26,128
|
import os
def path_exists(path):
    """
    Return True when *path* exists on the filesystem, False otherwise.
    Thin wrapper around ``os.path.exists``.
    :param path: Path to test existence of
    :return: bool
    """
    exists = os.path.exists(path)
    return exists
|
36bb56a317b73ab048345f2f0d5885b47a7a22cb
| 26,129
|
def generate_allocation_piechart(risk_prof):
    """
    Build the ticker-weight data for the allocation pie chart.

    Iterates ``risk_prof.allocation`` (profile/ticker weighting records)
    and groups each ticker's weight under "Stocks" or "Bonds", keyed by
    the ticker's display name.

    Returns a dict of dicts: ``{"Stocks": {name: weight, ...},
    "Bonds": {name: weight, ...}}``; a category key is present only when
    at least one ticker of that category was seen.
    """
    chart_ticker_data = {}
    stocks = {}
    bonds = {}
    for allocation_entry in risk_prof.allocation:
        # Each entry links a ticker record to its weight in the profile.
        ticker = allocation_entry.ticker
        weight = allocation_entry.ticker_weight_percent
        if ticker.category == "Stocks":
            stocks[ticker.name] = weight
            chart_ticker_data["Stocks"] = stocks
        else:
            bonds[ticker.name] = weight
            chart_ticker_data["Bonds"] = bonds
    return chart_ticker_data
|
bbbb5b9dff82683fee508e5ac46fbd7e8971f426
| 26,131
|
import requests
def get_target_chemblid(target_upid):
    """Get ChEMBL ID from UniProt upid via the ChEMBL REST API.
    Parameters
    ----------
    target_upid : str
        UniProt accession.
    Returns
    -------
    target_chemblid : str
        The first matching target's ChEMBL identifier.
    """
    response = requests.get(
        'https://www.ebi.ac.uk/chembl/api/data/target.json',
        params={'target_components__accession': target_upid},
    )
    # Surface HTTP failures to the caller instead of parsing bad JSON.
    response.raise_for_status()
    payload = response.json()
    return payload['targets'][0]['target_chembl_id']
|
5f1283ea1814e9c4be5c57af98e10c6cf238414e
| 26,132
|
import sys
import os
import shlex
def generate_scons_cache_expansions():
    """Generate scons cache expansions from some files and environment variables.

    Returns a dict that may contain:
      - "scons_cache_path": cache dir derived from the host's build-system id
        (only when the system-id file exists)
      - "scons_cache_args": scons cache flags, when USE_SCONS_CACHE is enabled

    Environment:
      SCONS_CACHE_MODE -- cache mode, defaults to "nolinked" when unset/empty
      USE_SCONS_CACHE  -- anything except None/False/"false"/"" enables caching
    """
    expansions = {}

    if sys.platform.startswith("win"):
        system_id_path = r"c:\mongodb-build-system-id"
        default_cache_path_base = r"z:\data\scons-cache"
    else:
        system_id_path = "/etc/mongodb-build-system-id"
        default_cache_path_base = "/data/scons-cache"

    # Fix: default to the base path so scons_cache_args below never
    # references an undefined name (previously a NameError when
    # USE_SCONS_CACHE was set but the system-id file was missing).
    default_cache_path = default_cache_path_base
    if os.path.isfile(system_id_path):
        with open(system_id_path, "r") as fh:
            default_cache_path = os.path.join(default_cache_path_base,
                                              fh.readline().strip())
        expansions["scons_cache_path"] = default_cache_path

    scons_cache_mode = os.getenv("SCONS_CACHE_MODE")
    if scons_cache_mode in (None, ""):
        scons_cache_mode = "nolinked"

    if os.getenv("USE_SCONS_CACHE") not in (None, False, "false", ""):
        expansions[
            "scons_cache_args"] = "--cache={0} --cache-signature-mode=validate --cache-dir={1}".format(
                scons_cache_mode, shlex.quote(default_cache_path))
    return expansions
|
915694f432263470b7e5bbbb76f25e62954a4f08
| 26,133
|
def load_model(Model, params, checkpoint_path=''):
    """Instantiate *Model* from scratch, or restore it from a checkpoint.

    An empty ``checkpoint_path`` builds a fresh model from
    ``params['model_params']`` and ``params['data_params']``; otherwise
    the model is restored via ``Model.load_from_checkpoint``.
    """
    if checkpoint_path != '':
        print(f'-> Loading model checkpoint: {checkpoint_path}')
        return Model.load_from_checkpoint(checkpoint_path)
    print('-> model from scratch!')
    return Model(params['model_params'], **params['data_params'])
|
aacccd0adeb06cf1034f9503b5258bf556554911
| 26,134
|
def lcore_core_ids(lcore_mask_host):
    """Convert a hex CPU ID mask string ('lcore_mask') to a list of CPU IDs."""
    mask = int(lcore_mask_host, 16)
    cores = []
    bit = 0
    while mask:
        if mask & 1:
            cores.append(bit)
        mask >>= 1
        bit += 1
    return cores
|
fe16d49b04a4a08fe6c9a0aacca91f5191738aca
| 26,135
|
def nd_normalize(x, mu, sigma):
    """Standardize *x*: subtract the mean and divide by the standard
    deviation of its distribution.
    :param x: value (or array) to normalize
    :param mu: distribution mean
    :param sigma: distribution standard deviation
    """
    centered = x - mu
    return centered / sigma
|
8e664b1bbd976a353bb10cddd22d040bb7c03672
| 26,136
|
def get_item_by_id(test_item, item_id):
    """Return the requested test item by its identifier.
    Goes over the test item's sub tests recursively and returns
    the one that matches the requested identifier.
    The search algorithm assumes that the identifiers assignment was the
    default one (use of default indexer), which identifies the tests in a DFS
    recursion.
    The algorithm continues the search in the sub-branch which root has the
    highest identifier that is still smaller or equal to the searched
    identifier.
    Args:
        test_item (object): test instance object.
        item_id (number): requested test identifier.
    Returns:
        TestCase / TestSuite. test item object.
        Implicitly returns None when *test_item* is a non-complex leaf
        whose identifier does not match.
    """
    if test_item.identifier == item_id:
        return test_item
    if test_item.IS_COMPLEX:
        # With DFS id assignment the target must live under the child
        # holding the largest identifier that is still <= item_id.
        # NOTE(review): max() raises ValueError if no child id is
        # <= item_id -- assumed unreachable under default indexing;
        # confirm with callers.
        sub_test = max([sub_test for sub_test in test_item
                        if sub_test.identifier <= item_id],
                       key=lambda test: test.identifier)
        return get_item_by_id(sub_test, item_id)
|
6c27b735cae3ae2f70650a14e9a8483fef7bd05d
| 26,137
|
import re
def remove_nterm_mod(sequence):
    """Remove the nterminal modification.
    Meant to be used for "ac" modifications in front of the sequence:
    lowercase letters at the start of a line, immediately followed by an
    uppercase residue, are stripped (multiline-aware).
    :param sequence: str, peptide sequence
    :return: str, sequence without the leading modification
    """
    pattern = r'^([a-z]+)([A-Z])'
    return re.sub(pattern, r'\2', sequence, flags=re.MULTILINE)
|
4fc50af78a3b9a2843fae6c217e7b5412eed8a0c
| 26,138
|
import base64
def _encode(data):
"""Safely encode data for consumption of the gateway."""
return base64.b64encode(data).decode("ascii")
|
1b0403a46f65ac90e536c5fd700dfa26fb54865e
| 26,139
|
import typing
def dcpr_request_create_auth(
    context: typing.Dict, data_dict: typing.Optional[typing.Dict] = None
) -> typing.Dict:
    """Authorize DCPR request creation.
    Creation of DCPR requests is reserved for logged in users that have been granted
    membership of an organization.
    NOTE: The implementation does not need to check if the user is logged in because
    CKAN already does that for us, as per:
    https://docs.ckan.org/en/2.9/extensions/plugin-interfaces.html#ckan.plugins.interfaces.IAuthFunctions
    """
    current_user = context["auth_user_obj"]
    is_org_member = len(current_user.get_groups()) > 0
    return {"success": is_org_member}
|
840df6cceec69efdf013964107c4b050a0319a67
| 26,141
|
def getNode(rootNode, nameList):
    """Get node from tree: walk from *rootNode* following the child
    names in *nameList* and return the node reached."""
    node = rootNode
    for childName in nameList:
        node = node.getChild(childName)
    return node
|
1a559b7e00b4f680ea31c9c78a7c274af67474e5
| 26,142
|
def __make_threephase_node_name(node_name_prefix, phase):
"""
Returns a node name with "phase" sufix and trailing whitespaces (6
characters).
"""
new_name = node_name_prefix + phase
whitespaces = 6 - len(new_name)
new_name = new_name + (" " * whitespaces)
return new_name
|
580c838dace828c4fe2d35d148ddf2da6aa57a0a
| 26,143
|
import numpy as np
def calcStationEpoch(data, epoch):
    """
    Calculate epoch differences: for each station (row of *data*),
    the NaN-ignoring mean of the last *epoch* samples minus the mean
    of the first *epoch* samples.
    """
    diff = np.empty((len(data)))
    for idx in range(len(data)):
        early_mean = np.nanmean(data[idx, :epoch])
        late_mean = np.nanmean(data[idx, -epoch:])
        diff[idx] = late_mean - early_mean
    return diff
|
5fe5aba269732e403f46cc3009f7341565655878
| 26,145
|
def _compare_trigrams(trig1: set, trig2: set) -> float:
"""
Checks how many trigrams from the first set are present in the second and
returns that value divided by the length of the second set.
"""
count = 0
for i in trig1:
if i in trig2:
count += 1
return count / len(trig2)
|
f6887c1288abb22ca3caac444f20bbc4e51d4541
| 26,146
|
def get_epoch_from_header(sig_header: str) -> str:
    """Extracts epoch timestamp from the X-Telnyx-Signature header value.

    The header is a comma-separated list of key=value pairs; the value
    of the "t" key is returned (KeyError when absent).
    """
    pairs = (param.split("=", 1) for param in sig_header.split(","))
    return dict(pairs)["t"]
|
1829807121802d149547ad90c4427a8550e46d85
| 26,147
|
def test_agent(env, agent, num_rollouts=20):
    """
    Evaluate the current agent's policy by playing `num_rollouts` episodes.
    :param env: environment to test the agent in (gym.Env)
    :param agent: Agent to predict actions (DQNAgent)
    :param num_rollouts: number of episodes to play (int)
    :return: average_reward: average reward from all the roll-outs
    """
    total_reward = 0.0
    for episode in range(num_rollouts):
        state = env.reset()
        episode_reward = 0.0
        done = False
        while not done:
            action, _ = agent([state])
            state, reward, done, _ = env.step(action)
            total_reward += reward
            episode_reward += reward
    return 1.0 * total_reward / num_rollouts
|
ba2f65d6220ac225df5189c1d10d55dbe9d608b6
| 26,148
|
import pandas
def dataframe_equals(expected: pandas.DataFrame, actual: pandas.DataFrame) -> bool:
    """DataFrames can't be simply compared for equality so we need a custom matcher.

    Prints a diagnostic and returns False on mismatch, True otherwise.
    """
    if actual.equals(expected):
        return True
    print(f'Dataframe mismatch: {expected} vs {actual}')
    return False
|
8710a84de82674f754147fdb5ee6907df44ce2be
| 26,149
|
import yaml
def get_dependency_graph(path="configs/dependency-graph.yaml"):
    """Load dependency_graph file
    Parameters
    ----------
    path : str, optional
        dependency-graph.yaml path, by default 'configs/dependency-graph.yaml'
    Returns
    -------
    dict
        variables from dependency-graph.yaml
    """
    # Use a context manager so the file handle is closed promptly
    # (previously the handle returned by open() was never closed).
    with open(path) as config_file:
        return yaml.load(config_file, Loader=yaml.Loader)
|
e1bac88e50e0d7bdf24e2a64d7ae8d2e9d2bd830
| 26,151
|
import sys
def _get_libs():
""" Ontain the list of libraries to link against based on platform.
Returns:
list: List of libraries.
"""
libs = ['ingenialink', 'sercomm', 'xml2']
if sys.platform.startswith('linux'):
libs.extend(['udev', 'rt', 'pthread'])
elif sys.platform == 'darwin':
libs.extend(['pthread'])
elif sys.platform == 'win32':
libs.extend(['user32', 'setupapi', 'advapi32', 'wpcap', 'ws2_32', 'winmm', 'gdi32', 'dxguid'])
return libs
|
eacdc52ccbfa16dc5fd64384453c718b58b25038
| 26,152
|
def stats_to_list(stats_dict):
    """
    Parse the output of ``SESConnection.get_send_statistics()`` in to an ordered
    list of 15-minute summaries, sorted by their 'Timestamp' field.
    """
    result = stats_dict['GetSendStatisticsResponse']['GetSendStatisticsResult']
    return sorted(result['SendDataPoints'], key=lambda point: point['Timestamp'])
|
9a0b1709eedec66edca722d60578a2ba2385cc9d
| 26,153
|
import numpy
def nextpow2(x):
    """Return the first integer N such that 2**N >= abs(x)"""
    exponent = numpy.log2(numpy.abs(x))
    return int(numpy.ceil(exponent))
|
34afe44ad72e62e67b873c5c1c538abfd78d058f
| 26,155
|
def indexToSymbol(index):
    """
    returns the nucleotide symbol for an index in the alphabetic order
    ('A', 'C', 'G', 'T' for 0-3); prints a warning and returns 'X'
    for any other index.
    """
    symbols = {0: 'A', 1: 'C', 2: 'G', 3: 'T'}
    if index in symbols:
        return symbols[index]
    print('number should be 0 to 3, it is ' + str(index))
    return 'X'
|
80f693372919abf3090d54c99d394b05afc5351e
| 26,156
|
def to_byte_string(value, count=2, signed=False, byteorder='little'):
    """Take bytes and return string of integers.
    Example: to_byte_string(123456, count=4) = '64 226 1 0'
    """
    raw = value.to_bytes(count, byteorder=byteorder, signed=signed)
    return ' '.join(str(byte) for byte in raw)
|
d1b2cc12000958a3858f73271b300ebe15480a43
| 26,157
|
def search_error_for_adams(adams_results: list, adams_results_less: list, m: int) -> list:
    """
    Runge-style error estimates for the Adams method.
    :param adams_results: results from this method
    :param adams_results_less: results from this method with divided step
    :param m: value for the fault
    :return: list with errors
    """
    denom = 2 ** m - 1
    return [
        (coarse[1] - adams_results_less[idx * 2][1]) / denom
        for idx, coarse in enumerate(adams_results)
    ]
|
d63cffa19c840ca8bf3fe3d7b236e999b214317c
| 26,158
|
def count_increases(depths: list):
    """
    Count the number of times a value is greater than the previous value.
    :param depths: list of depths (values coercible to int)
    :return: number of times depth increased
    :rtype: int
    """
    increases = 0
    previous = None
    for depth in depths:
        depth = int(depth)
        # Compare against None explicitly: a previous depth of 0 is a
        # valid baseline (the old truthiness test silently skipped an
        # increase that followed a 0).
        if previous is not None and depth > previous:
            increases += 1
        previous = depth
    return increases
|
43dc82d403747aaa36f2e0bd794e1d3f840bc5da
| 26,159
|
def get_ha1_file_htdigest(filename):
    """Returns a get_ha1 function which obtains a HA1 password hash from a
    flat file with lines of the same format as that produced by the Apache
    htdigest utility. For example, for realm 'wonderland', username 'alice',
    and password '4x5istwelve', the htdigest line would be::
        alice:wonderland:3238cdfe91a8b2ed8e39646921a02d4c
    If you want to use an Apache htdigest file as the credentials store,
    then use get_ha1_file_htdigest(my_htdigest_file) as the value for the
    get_ha1 argument to digest_auth(). It is recommended that the filename
    argument be an absolute path, to avoid problems.
    """
    def get_ha1(realm, username):
        """Return the HA1 hash for (realm, username), or None if absent."""
        result = None
        # `with` guarantees the handle is closed even if a malformed line
        # raises during unpacking (the old explicit close() leaked then).
        with open(filename, 'r') as f:
            for line in f:
                u, r, ha1 = line.rstrip().split(':')
                if u == username and r == realm:
                    result = ha1
                    break
        return result
    return get_ha1
|
bd79458b89556b570338f3365d0a4ee6582f8aa0
| 26,160
|
def daysInMonth(month: int, year: int) -> int:
    """
    Return the number of days in *month* (1-12) of *year*.
    Raises KeyError for an out-of-range month.
    """
    def _is_leap(y: int) -> bool:
        # Gregorian rule: divisible by 4, except centuries not
        # divisible by 400.
        return y % 4 == 0 and (y % 100 != 0 or y % 400 == 0)

    month_lengths = {
        1: 31, 2: 29 if _is_leap(year) else 28, 3: 31, 4: 30,
        5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31,
    }
    return month_lengths[month]
|
b4004dbddc23abdf1840683acd6d77c7c6bfc0d5
| 26,161
|
import six
def ExtractSnapshotTtlDuration(args):
    """Extract the Duration string for the Snapshot ttl.
    Args:
        args: The command line arguments.
    Returns:
        A duration string for the snapshot ttl, e.g. "300s".
    """
    ttl_text = six.text_type(args.snapshot_ttl)
    return ttl_text + 's'
|
0816e2e24ccdeceeeb3154c55288dc6733c042ff
| 26,163
|
def shuffle(n_cards, actions):
    """ Return (a, b) such that for any card,
    shuffled_position = (a * initial_position + b) % n_cards """
    a, b = 1, 0
    for action in actions:
        if action == "deal into new stack":
            # Reversal maps position p to -p - 1.
            a, b = -a, -b - 1
        elif action.startswith("deal with increment"):
            increment = int(action.split()[-1])
            a, b = a * increment, b * increment
        elif action.startswith("cut"):
            b -= int(action.split()[-1])
        else:
            raise ValueError("Unknown action:", action)
    # Reduce both coefficients modulo the deck size.
    return a % n_cards, b % n_cards
|
2e3e159ac44b32742015914360d0d6774fc4fddb
| 26,164
|
def init_parser(parser):
    """
    Register the taxi-pipeline command-line options on *parser*.
    :param parser: argparse-style parser
    :return: the same parser, with arguments added
    """
    for flag, text in (
        ('--taxi-trips-path', "Taxi trips input path."),
        ('--taxi-zones-path', "Taxi zones input path."),
        ('--output-path', "Output input path."),
    ):
        parser.add_argument(flag, required=True, help=text)
    date_formats = "[YYYY, YYYY-MM, YYYY-MM-DD, YYYY-MM-DD:YYYY-MM-DD]"
    parser.add_argument('--filter-pickup', required=False,
                        help="Filter by pickup date. Available formats :" + date_formats)
    parser.add_argument('--filter-dropoff', required=False,
                        help="Filter by dropoff date. Available formats :" + date_formats)
    for flag, text in (
        ('--azure-tenant-id', "Azure Tenant id"),
        ('--azure-client-id', "Azure Client id"),
        ('--azure-client-secret', "Azure Client Secret"),
        ('--azure-storage-account-name', "Azure Storage Account Name"),
    ):
        parser.add_argument(flag, required=False, help=text, default="")
    return parser
|
39fe1fc4270f5dc73d5d84347ee8084eac3106bd
| 26,165
|
from datetime import datetime
import os
def write_file_header(filep, comment_start='#', name=None, length=50):
    """Write header to text files.
    Args:
        filep: File pointer for opened output text file
        comment_start: Characters used to denote the start of a comment
        name: Optional string with text description of data set (default None)
        length (int): Number of dashes in header lines
    """
    import getpass

    today = datetime.now()
    # os.getlogin() raises OSError when there is no controlling terminal
    # (cron, CI, daemons); getpass.getuser() is the documented fallback
    # that also consults the environment.
    try:
        user = os.getlogin()
    except OSError:
        user = getpass.getuser()
    print(comment_start, length*('-'), file=filep)
    if name is not None:
        print(comment_start, name, file=filep)
    print(comment_start, today.strftime("%d. %b %Y %H:%M:%S"), 'by user', user, file=filep)
    print(comment_start, length*('-'), '\n', file=filep)
    return None
|
d4f51fb0b13dca3da2bd9b5e9595571ccaf9b85a
| 26,166
|
import socket
import struct
def int2ip(addr: int) -> str:
    """convert an IP int to a dotted-quad string
    """
    packed = struct.pack("!I", addr)
    return socket.inet_ntoa(packed)
|
57de2a840a1f1b25ce9de835d29aa88f9955f8ed
| 26,167
|
import operator
def single_valued(iterable, equality_pred=operator.eq):
    """Return the first entry of *iterable*; Assert that other entries
    are the same with the first entry of *iterable*.
    """
    it = iter(iterable)
    try:
        first = next(it)
    except StopIteration:
        raise ValueError("empty iterable passed to 'single_valued()'")
    # Assertion semantics kept deliberately: mismatches raise
    # AssertionError, and the check disappears under -O, as before.
    assert all(equality_pred(item, first) for item in it)
    return first
|
001247512cb8a6e84df00a987f26925eb0b38925
| 26,168
|
def phase_LogLinear(phase, slope=0.04):
    """A logLinear phase function, roughly appropriate for cometary nuclei.
    An H-G phase function is likely a better approximation.
    Parameters
    ----------
    phase : float or array
        Phase angle (degrees)
    slope : float, optional
        The slope for the phase function. Default 0.04.
    Returns
    -------
    phi : float or array
        Phase function evaluated at phase
    """
    exponent = -0.4 * slope * phase
    return 10 ** exponent
|
1c5cdbf4a41387244d38a0fde368af3ecf224f52
| 26,169
|
from itertools import permutations
def blute(N, M):
    """
    Brute-force count of ordered pairs of N-permutations of range(M)
    that differ in every position.
    >>> blute(2, 2)
    2
    >>> blute(2, 3)
    18
    """
    nums = range(M)
    return sum(
        1
        for first in permutations(nums, N)
        for second in permutations(nums, N)
        if all(x != y for x, y in zip(first, second))
    )
|
d33a412a1bc9f8ccb0ef7451e8a59845bb3feb67
| 26,170
|
def get_jinja_variables(pipeline):
    """Gets a dictionary of variables from a SpinnakerPipeline that can
    be exposed to Jinja templates."""
    pipeline_settings = pipeline.settings['pipeline']
    notifications = pipeline_settings['notifications']
    variables = {
        # Deprecated variables: Use the app block instead
        "trigger_job": pipeline.trigger_job,
        "group_name": pipeline.group_name,
        "app_name": pipeline.app_name,
        "repo_name": pipeline.repo_name,
        # Replaces the deprecated top-level variables above; the app
        # block matches non-manual pipeline types like ec2, lambda, etc
        # for consistency.
        "data": {
            'app': {
                'appname': pipeline.app_name,
                'group_name': pipeline.group_name,
                'repo_name': pipeline.repo_name,
                'deploy_type': pipeline_settings['type'],
                'environment': 'packaging',
                'triggerjob': pipeline.trigger_job,
                'email': notifications['email'],
                'slack': notifications['slack'],
                'pipeline': pipeline_settings,
            }
        },
    }
    return variables
|
9c84a42ec1b65e443f5a7fd9a8b8b142aa6b7b7b
| 26,171
|
def _is_section(tag):
"""Check if `tag` is a sphinx section (linkeable header)."""
return (
tag.tag == 'div' and
'section' in tag.attributes.get('class', [])
)
|
8605d8d95e9344c80e91dd1735bec79072507f7b
| 26,172
|
def is_none(string):
    """
    Template filter: substitute '---' for None, pass everything else
    through unchanged.
    Usage::
    {{ string|is_none}}
    """
    if string is None:
        return '---'
    return string
|
5192f40c1936db7a215c46411789d336b6212919
| 26,173
|
def event_asn_org(event):
    """Get asn org from event; "UNKOWN" when absent or empty."""
    asn_org = event.get("details", {}).get("asn_org")
    # NOTE: "UNKOWN" (sic) kept byte-for-byte -- downstream consumers
    # may match this exact string.
    return asn_org if asn_org else "UNKOWN"
|
61ad3a5ee61a9ce0a088724c199fe0c432ec900b
| 26,174
|
import platform
def is_windows() -> bool:
    """Return True when the script is running on a Windows system."""
    current_os = platform.system()
    return current_os == 'Windows'
|
a8469c16b4942ec07b8e5c6072327d7eac66ed79
| 26,175
|
from typing import Callable
def exp_decay_with_warmup(warmup: int, gamma: float, min_val: float = 1e-8) -> Callable:
    """Returns exponential decay with warmup function.
    The function increases linearly from ``min_val`` to 1.0 until ``step`` is equal
    to warmup. For a ``step`` larger than ``warmup``, the function decays with a
    given ``gamma`` (last_val * gamma).
    Parameters
    ----------
    warmup
        The number of steps until which the function increases linearly.
    gamma
        The parameter of decay in (0, 1). Large numbers for slow decay.
    min_val
        The minimum lr factor that is used for the 0-th step, a small number > 0.
    Returns
    ----------
    A function taking the current step as single argument.
    """
    def schedule(step):
        if step < warmup:
            # Linear ramp from min_val to 1.0.
            return min_val + step * (1.0 - min_val) / warmup
        # Exponential decay after the warmup boundary.
        return gamma ** (step - warmup)
    return schedule
|
b26488aab2844521c4fdc9377bb6c90454531984
| 26,176
|
def name_matches(name, pattern):
    """Simple wildcard matching of project and sample names
    Matching options are:
    - exact match of a single name e.g. pattern 'PJB' matches 'PJB'
    - match start of a name using trailing '*' e.g. pattern 'PJ*' matches
      'PJB','PJBriggs' etc
    - match using multiple patterns by separating with comma e.g. pattern
      'PJB,IJD' matches 'PJB' or 'IJD'. Subpatterns can include trailing
      '*' character to match more names.
    Arguments
      name: text to match against pattern
      pattern: simple 'glob'-like pattern to match against
    Returns
      True if name matches pattern; False otherwise.
    """
    for subpattern in pattern.split(','):
        if not subpattern.endswith('*'):
            # Exact match required
            if name == subpattern:
                return True
        # Fix: a failed wildcard no longer aborts the whole loop --
        # previously the function returned False here without trying the
        # remaining comma-separated subpatterns.
        elif name.startswith(subpattern.rstrip('*')):
            return True
    # Fix: explicit False (the old fall-through returned None).
    return False
|
eb8ceead45cc0766af0aec92ca02b37f387c3311
| 26,177
|
import numpy
def zeroAreas(vertex, triangle_raw, Area_null):
    """
    Looks for "zero-areas": triangles whose area is almost zero
    (< 1e-10). Their indices are appended to *Area_null*, which is
    returned.
    """
    for idx in range(len(triangle_raw)):
        edge0 = vertex[triangle_raw[idx, 1]] - vertex[triangle_raw[idx, 0]]
        edge2 = vertex[triangle_raw[idx, 0]] - vertex[triangle_raw[idx, 2]]
        # Triangle area is half the cross-product magnitude.
        area = numpy.linalg.norm(numpy.cross(edge0, edge2)) / 2
        if area < 1e-10:
            Area_null.append(idx)
    return Area_null
|
dc027a1b8e00e3afedba6da693277661c836ade8
| 26,178
|
def _parse_song_number_arg(args: list, default_value: int):
""" Parse command args and get number of songs arg. """
for arg in args:
try:
return int(arg)
except ValueError:
continue
return default_value
|
fc60355c0585fdbf6e14cc0c955b381c93768a85
| 26,179
|
def path_in_cc(path, cc):
    """Determines whether all vertices of a given path belong to a given
    connected component.
    :param path:
    :param cc: list of vertices representing a connected component in a graph
    :return: True if the path vertices are found in the connected component,
    False otherwise
    """
    return all(node in cc for node in path)
|
9d549234f2a1294380a3e416fea93cdf7944d8b2
| 26,181
|
def filter_to_other_object(row, object_gdf, max_distance):
    """Filter HyDAMO-class-objects within distance to another object-class.

    Returns True when no object of the other class lies within
    ``max_distance`` of ``row`` on the same branch.
    """
    # Objects of the other class close enough to this row's geometry.
    nearby = object_gdf.loc[
        object_gdf["geometry"].centroid.distance(row["geometry"]) < max_distance
    ]
    if nearby.empty:
        return True
    # Restrict to objects on the same branch; row passes if none remain.
    return nearby.loc[nearby["branch_id"] == row["branch_id"]].empty
|
949bb6e9d590d91c3c87c827f96ec02bf49c9ffb
| 26,182
|
def suma(a, b):
    """Return the sum of two integers.

    :param a: any integer
    :param b: any integer
    :returns: the sum of a and b
    """
    return a + b
|
0426b5eb61a45791801758e5d57b0a9b600ade78
| 26,183
|
import urllib.request
import urllib.parse
import re
def search(query, results: int = 10) -> list:
    """
    Search `youtube.com` and return up to ``results`` video links (default 10).

    If fewer matches exist than requested, all available links are returned;
    list slicing never raises, so no fallback handling is needed (the old
    ``while``/``except IndexError`` loop was unreachable dead code).

    Arguments:
        query: free-text search string.
        results: maximum number of links to return.

    Returns:
        A list of ``https://www.youtube.com/watch?v=...`` URLs.
    """
    video_id_pattern = r"watch\?v=(\S{11})"
    print()
    encoded_query = urllib.parse.quote(query)
    html = urllib.request.urlopen(
        f"https://www.youtube.com/results?search_query={encoded_query}")
    video_ids = re.findall(video_id_pattern, html.read().decode())
    links = [f'https://www.youtube.com/watch?v={vid}' for vid in video_ids]
    # Slicing past the end of a list simply returns what is available.
    return links[:results]
|
8350e9b0fb4b545a5e51df256cb6cd95a8cd355c
| 26,186
|
def find_point(coords, point_list):
    """
    Coordinates represent either the source or destination [x,y] coordinates of a line.
    Given a list of unique points, map one to the given coordinates.
    Returns the matching point's index, or None (after printing a warning).
    """
    match = next((p for p in point_list if p["coordinates"] == coords), None)
    if match is not None:
        return match["idx"]
    print("Couldn't find the point -- {}!\n".format(str(point_list)))
|
2d6cb286b630dc49e86532111bba7d0a00f385cc
| 26,187
|
import re
def all_caps(text: str) -> str:
    """Annotate all-caps runs in *text* with the word 'scream'.

    One multi-letter caps run: prefix that run with 'scream' and lowercase
    the whole text. Multiple runs: prefix the whole lowercased text once.
    No runs longer than one letter: return the text unchanged.
    """
    caps_runs = re.findall(
        r'(\b(?:[A-Z]+[A-Z]*|[A-Z]*[A-Z]+)\b(?:\s+(?:[A-Z]+[A-Z]*|[A-Z]*[A-Z]+)\b)*)', text
    )
    # Single capital letters (e.g. "I") do not count as shouting.
    shouted = [run for run in caps_runs if len(run) > 1]
    if not shouted:
        return text
    if len(shouted) == 1:
        return text.replace(shouted[0], 'scream ' + shouted[0]).lower().strip()
    return 'scream ' + text.lower().strip()
|
1ea4b81a07ce3191d7f059ba1ad599b175348705
| 26,188
|
import argparse
def get_args():
    """
    Get command line arguments.
    Arguments set the default values of command line arguments.

    Returns:
        argparse.Namespace with training configuration.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--context', '-c', type=str,
                        default="cudnn", help="Extension path. ex) cpu, cudnn.")
    parser.add_argument("--device-id", "-d", type=str, default='0',
                        help='Device ID the training run on. This is only valid if you specify `-c cudnn`.')
    parser.add_argument("--type-config", "-t", type=str, default='float',
                        help='Type of computation. e.g. "float", "half".')
    parser.add_argument("--work-dir", "-w", type=str,
                        default="tmp.result/")
    parser.add_argument("--save-dir", "-s", type=str,
                        default="params/")
    parser.add_argument("--batch-size", "-b", type=int, default=20)
    parser.add_argument("--learning-rate", "-l", type=float, default=20)
    parser.add_argument("--max-epoch", "-e", type=int, default=40)
    parser.add_argument("--monitor-interval", "-m", type=int, default=1000)
    parser.add_argument("--num-steps", "-n", type=int, default=35)
    parser.add_argument("--state-size", "-S", type=int, default=650)
    parser.add_argument("--num-layers", "-a", type=int, default=2)
    # Bug fix: this option had type=int with a float default (0.25), so any
    # fractional value passed on the command line crashed argparse.
    parser.add_argument("--gradient-clipping-max-norm",
                        "-g", type=float, default=0.25)
    parser.add_argument("--checkpoint", type=str, default=None,
                        help='path to checkpoint file')
    args = parser.parse_args()
    return args
|
dcbff9194f17b7dfde06f2d9b99425c2c2f61985
| 26,189
|
def twolens_efl(efl1, efl2, separation):
    """Use thick lens equations to compute the focal length for two elements separated by some distance.

    Parameters
    ----------
    efl1 : `float`
        EFL of the first lens
    efl2 : `float`
        EFL of the second lens
    separation : `float`
        separation of the two lenses

    Returns
    -------
    `float`
        focal length of the two lens system
    """
    power1 = 1 / efl1
    power2 = 1 / efl2
    # Gullstrand's equation for the combined optical power.
    combined_power = power1 + power2 - separation * power1 * power2
    return 1 / combined_power
|
dfcaff2e30f2b6249ec5dc9ec1a4c443fdccd2f5
| 26,191
|
def _match_filter(obj, flt):
"""Match the object *obj* with filter *flt*."""
if isinstance(obj, list):
return [ob for ob in obj if _match_filter(ob, flt)]
if callable(flt):
return flt(obj)
elif not isinstance(flt, dict):
raise TypeError('expecting a callable or a dict')
for fkey, fval in flt.items():
obval = obj.get(fkey)
if obval is None:
return False
elif isinstance(fval, dict):
if not isinstance(obval, dict) or not _match_filter(obval, fval):
return False
elif callable(fval):
return fval(obval)
elif fval != obval:
return False
return True
|
2f9771386293adb616a6d446fbc4e51e2510150d
| 26,194
|
def read_xyz_charge_mult(file):
    """
    Reads charge and multiplicity from XYZ files. These parameters should be defined
    in the title lines as charge=X and mult=Y (i.e. FILENAME charge=1 mult=1 Eopt -129384.564).
    Defaults to charge 0 and multiplicity 1 when not found.
    """
    charge_xyz, mult_xyz = None, None
    with open(file, "r") as handle:
        content = handle.readlines()
    for line in content:
        for token in line.strip().split():
            lowered = token.lower()
            if lowered.find('charge') > -1:
                charge_xyz = int(token.split('=')[1])
            elif lowered.find('mult') > -1:
                mult_xyz = int(token.split('=')[1])
            elif charge_xyz is not None and mult_xyz is not None:
                # Both values found: stop scanning this line's tokens.
                break
    return (charge_xyz if charge_xyz is not None else 0,
            mult_xyz if mult_xyz is not None else 1)
|
9048ff64f5385206ccb8d926776ed5d1ff1c4dc6
| 26,195
|
import base64
def convert_image_file_to_b64_string(filename):
    """Convert an image file into its base64 string representation.

    Reads the file in binary mode (JPEG and PNG both work), base64-encodes
    the bytes, and decodes the result to a UTF-8 string suitable for
    storage or transmission.

    Args:
        filename (str): The name (path) of the image you want to process.

    Returns:
        str: The generated b64 string.
    """
    with open(filename, "rb") as handle:
        raw_bytes = handle.read()
    return base64.b64encode(raw_bytes).decode("utf-8")
|
3036abf52a38ea7ef3534de4929334a221176fe8
| 26,196
|
import uuid
def validate_uuid_string(uuid_obj, uuid_version=4):
    """Check whether the provided object is a valid UUID string.

    :param uuid_obj: A string or stringable object containing the UUID
    :param uuid_version: The UUID version to be used
    :return: True when the string parses as a UUID, False otherwise
    """
    candidate = str(uuid_obj).lower()
    try:
        uuid.UUID(candidate, version=uuid_version)
    except ValueError:
        # Not a valid hex representation of a UUID.
        return False
    else:
        return True
|
c83ca62527e05473973d034e03c3eb3b4abec3ad
| 26,197
|
def arrangeByType(service_list, preferred_types):
    """Return *service_list* reordered so services whose type appears earliest
    in *preferred_types* come first; ties keep their original order."""
    def priority(service):
        """Index of the first preferred type the service supports,
        or len(preferred_types) when none match (sorts last)."""
        for rank, type_uri in enumerate(preferred_types):
            if type_uri in service.type_uris:
                return rank
        return len(preferred_types)
    # Decorate with (priority, original index) so the sort is stable and
    # deterministic, then strip the keys off.
    decorated = sorted(
        (priority(svc), idx, svc) for idx, svc in enumerate(service_list))
    return [entry[2] for entry in decorated]
|
b65999d4fbaa4e6839482c018b1233bbeb301bf1
| 26,199
|
def read_byte(bus, i2caddr, adr):
    """Return one byte read from I2C device *i2caddr* at register *adr* via *bus*."""
    value = bus.read_byte_data(i2caddr, adr)
    return value
|
2205d26977a92fb6cf6db6226cbf748addb71f5f
| 26,201
|
import subprocess
def checkForBinary(binary):
    """
    Look for *binary* on the PATH using `which`.

    Returns:
        bool: True when `which` finds the binary, False when it exits non-zero.
    """
    try:
        # Output is discarded; only the exit status matters.  The unused
        # `fullPath` and exception locals from the original are removed.
        subprocess.check_output(['which', binary])
        return True
    except subprocess.CalledProcessError:
        return False
|
c987fee513523dd4d33e0afc5e197855e7441eb4
| 26,202
|
def represents_int(s):
    """Return True if *s* can be converted to int, False otherwise.

    Also returns False (instead of propagating TypeError) for values such
    as None or lists that int() cannot accept at all.
    """
    try:
        int(s)
    except (ValueError, TypeError):
        return False
    return True
|
356ddc018e5d605e4219cb6356aa8c11437d275b
| 26,204
|
def _store_dir_and_cache(tmpdir_factory):
"""Returns the directory where to build the mock database and
where to cache it.
"""
store = tmpdir_factory.mktemp('mock_store')
cache = tmpdir_factory.mktemp('mock_store_cache')
return store, cache
|
01ef0527e63cb62003c6f0ccb0e63c629c2f1e42
| 26,205
|
import argparse
def parse_args():
    """Commandline argument parser for configuration.

    Returns: Configuration namespace object.
    """
    parser = argparse.ArgumentParser(description='Inference config.')
    # Both options are optional strings defaulting to the empty string.
    for flag, help_text in (
            ('--cfg_path', 'Path to YAML config file.'),
            ('--file_storage_path', 'FileStorageObserver path.')):
        parser.add_argument(flag, type=str, required=False, default='',
                            help=help_text)
    return parser.parse_args()
|
ac90f6e045b2f2a85a1e4e9172f6f6b19dcda02d
| 26,206
|
def load_calib_dict(dir_path):
    """Read ``calib.txt`` inside *dir_path* and return it as a dict.

    Each line has the form ``key=value``.  Only the first '=' is treated
    as the separator, so values may themselves contain '=' characters
    (the original ``split("=")`` raised ValueError on such lines).

    Args:
        dir_path (str): directory path, expected to end with a separator.

    Returns:
        dict: mapping of keys to their (newline-stripped) string values.
    """
    calib = {}
    with open(dir_path + "calib.txt") as f:
        for line in f:
            key, val = line.split("=", 1)
            calib[key] = val.replace("\n", "")
    return calib
|
405b3ad0f9faef1f3398d0077c4788d978ec6e2d
| 26,207
|
def merge_sort(array):
    """Sort *array* in place using merge sort.

    Args:
        array (list): list of comparable items; modified in place.

    Returns:
        None (the input list itself is sorted).
    """
    # Base case: zero or one element is already sorted.
    if len(array) <= 1:
        return None
    mid = len(array) // 2
    left_half = array[:mid]
    right_half = array[mid:]
    merge_sort(left_half)
    merge_sort(right_half)
    # Merge the two sorted halves back into `array`.
    i = j = k = 0
    while i < len(left_half) and j < len(right_half):
        if left_half[i] < right_half[j]:
            array[k] = left_half[i]
            i += 1
        else:
            array[k] = right_half[j]
            j += 1
        k += 1
    # One of the halves may still have remaining elements; copy them over.
    while i < len(left_half):
        array[k] = left_half[i]
        i += 1
        k += 1
    while j < len(right_half):
        array[k] = right_half[j]
        j += 1
        k += 1
    return None
|
1f9ad31805161aa0e7e2ea2397a36133d84ec380
| 26,208
|
def resize_keypoint(keypoint, in_size, out_size):
    """Rescale keypoint coordinates to match a resized image.

    Args:
        keypoint (~numpy.ndarray): Array of shape :math:`(K, 2)` holding
            :math:`(y, x)` coordinates of :math:`K` keypoints.
        in_size (tuple): (height, width) of the image before resizing.
        out_size (tuple): (height, width) of the image after resizing.

    Returns:
        ~numpy.ndarray: A new array with coordinates scaled accordingly;
        the input array is not modified.
    """
    scaled = keypoint.copy()
    scale_y = float(out_size[0]) / in_size[0]
    scale_x = float(out_size[1]) / in_size[1]
    scaled[:, 0] = scale_y * scaled[:, 0]
    scaled[:, 1] = scale_x * scaled[:, 1]
    return scaled
|
b299a1e2e0031e6ae9111d2261dd98b9d6ce0660
| 26,209
|
def to_bytes(value):
    """Encode *value* as an unsigned 64-bit big-endian byte string."""
    return value.to_bytes(length=8, byteorder='big', signed=False)
|
d32fb88c24274d3bdf3fbd8b8c5917e4a0f3bcca
| 26,211
|
def is_timestamp_ms(timestamp):
    """Validate that *timestamp* represents a millisecond epoch (13 digits).

    :param timestamp: int or int-convertible value
    :return: True when the integer form has exactly 13 digits
    :raises TypeError: otherwise
    """
    as_int = int(timestamp)
    digit_count = len(str(as_int))
    if digit_count == 13:
        return True
    # NOTE: the reported type is always int because of the conversion above
    # (same as the original behavior).
    raise TypeError('timestamp:({}) is not int or len({}) < 13'.format(
        type(as_int), digit_count))
|
2399486c2a026f466a6add1de60e283738f94734
| 26,212
|
def abs_fold_change(row, fold_change_column):
    """Absolute fold change for a two-group comparison.

    # Arguments:
        row: a row of the peak table (pandas dataframe).
        fold_change_column: name of the fold-change column in the peak table.

    # Returns:
        Absolute fold change value.
    """
    fold_change = row[fold_change_column]
    return abs(fold_change)
|
1189b7411af6c37bede3c601d9325f7a10efa528
| 26,213
|
def without_fixed_prefix(form, prefix_length):
    """ Return a new form with ``prefix_length`` chars removed from left """
    word, tag, normal_form, score, methods_stack = form
    trimmed_word = word[prefix_length:]
    trimmed_normal = normal_form[prefix_length:]
    return (trimmed_word, tag, trimmed_normal, score, methods_stack)
|
b34736da1ce95daeac09a865b6750aea24b52dcd
| 26,214
|
def convert_dms_to_dd(dms):
    """
    Convert a DMS (degrees, minutes, seconds) string to decimal degrees.

    Parameters
    ----------
    dms: string
        Space-separated "degrees minutes seconds hemisphere" value,
        e.g. "10 30 0 N".

    Returns
    -------
    dd: float
        Decimal degree value; negative for the S and W hemispheres.
    """
    parts = dms.split(' ')
    # Magnitude from degrees + fractional minutes and seconds.
    dd = int(parts[0]) + int(parts[1]) / 60 + int(parts[2]) / 3600
    hemisphere = parts[3]
    # Sign is determined by the hemisphere letter.
    if hemisphere in ('N', 'E'):
        dd = abs(dd)
    if hemisphere in ('S', 'W'):
        dd = -abs(dd)
    return dd
|
4cc84d572ec445ac2ae586f3151edd10f2d3d611
| 26,215
|
def process_small_clause(graph, cycle, cut=True):
    """
    Match a cycle if there is a small clause relationship: verb with outgoing edge ARG3/H to a preposition node, the
    preposition node has an outgoing edge ARG1/NEQ, and
    1) an outgoing edge ARG2/NEQ, or
    2) an outgoing edge ARG2/EQ to a noun;
    ARG2/NEQ or ARG2/EQ edge is removed if cut is set to True.
    :param graph: DmrsGraph object
    :param cycle: Set of Node objects in the cycle
    :param cut: If True and cycle is matched, the cycle is broken by removing a target edge
    :return: True if cycle is matched, otherwise False
    """
    # Candidate verbs: only verb nodes inside the cycle can anchor the pattern.
    verb_nodes = [node for node in cycle if node.pos == 'v']
    if len(verb_nodes) == 0:
        return False
    for verb_node in verb_nodes:
        # Only consider edges that stay within the cycle.
        outgoing_edges = [edge for edge in graph.get_outgoing_node_edges(verb_node) if edge.to_node in cycle]
        outgoing_labels = dict((edge.label, edge.to_node) for edge in outgoing_edges)
        # The verb must point at a preposition via ARG3/H.
        if 'ARG3_H' not in outgoing_labels:
            continue
        prep_node = outgoing_labels['ARG3_H']
        if prep_node.pos != 'p':
            continue
        # The preposition must have an ARG1/NEQ edge back into the cycle.
        prep_outgoing_labels = [edge.label for edge in graph.get_outgoing_node_edges(prep_node) if edge.to_node in cycle]
        if 'ARG1_NEQ' not in prep_outgoing_labels:
            continue
        # Case 1: verb has ARG2/NEQ — remove that edge to break the cycle.
        if 'ARG2_NEQ' in outgoing_labels:
            if cut:
                arg2_neq_edge = [edge for edge in outgoing_edges if edge.label == 'ARG2_NEQ'][0]
                graph.edges.remove(arg2_neq_edge)
            return True
        # Case 2: verb has ARG2/EQ to a noun — remove that edge instead.
        if 'ARG2_EQ' in outgoing_labels and outgoing_labels['ARG2_EQ'].pos == 'n':
            if cut:
                arg2_eq_edge = [edge for edge in outgoing_edges if edge.label == 'ARG2_EQ'][0]
                graph.edges.remove(arg2_eq_edge)
            return True
    return False
|
4cc714838efc47b2de8c35821763249286039299
| 26,216
|
def localhost_url(url, local_hostname):
    """Return a version of *url* optimized for local development.

    Every occurrence of the string ``localhost`` in the url is replaced
    by *local_hostname*.

    Parameters
    ----------
    url : str
        The url to check.

    Returns
    -------
    str : The url, possibly converted to use a different local hostname.
    """
    return local_hostname.join(url.split('localhost'))
|
dc2cba2acc89fe4ad7da30da9e6a3a1c16465731
| 26,217
|
def fix_taxon_ids(ids):
    """Fixes list of taxonomy ids by adding [taxid] to each.
    Need to add taxid field restriction to each id because NCBI broke taxon
    id search around 3/07 and has no plans to fix it.
    """
    suffix = '[taxid]'
    if isinstance(ids, str):
        # Single id: just make sure the suffix is present.
        return ids if ids.endswith(suffix) else ids + suffix
    # Sequence of ids: suffix each, then join into one OR query.
    fixed = []
    for taxon_id in ids:
        if not taxon_id.endswith(suffix):
            taxon_id = taxon_id.strip() + suffix
        fixed.append(taxon_id)
    return ' OR '.join(fixed)
|
03488dffa5dc19ecfad42591d3aad38fccc2de28
| 26,218
|
def create_window_filebases(wins, filebase):
    """Create list of filebases for each window.

    Assumes MWUS simulation conventions for output filenames: each window
    (mins, maxs) pair produces ``<filebase>_win-<min>...--<max>...`` (note
    the double dash separating the min and max groups).
    """
    filebases = []
    for win_mins, win_maxs in wins:
        pieces = ['_win']
        pieces.extend('-' + str(m) for m in win_mins)
        pieces.append('-')
        pieces.extend('-' + str(m) for m in win_maxs)
        filebases.append(filebase + ''.join(pieces))
    return filebases
|
95734e41cfae2fb5f2d05ca5f0d2d6f1946664b1
| 26,221
|
import math
def euclidean(vector1, vector2):
    """
    Compute the Euclidean distance between two equal-length vectors.

    :param vector1: first vector
    :param vector2: second vector
    :return: the Euclidean distance as a float
    """
    squared_sum = sum((a - b) ** 2 for a, b in zip(vector1, vector2))
    return math.sqrt(squared_sum)
|
a686aba125a42a4425d8cd4822203d8468102da7
| 26,222
|
def weighted_average(cols):
    """Given tuples of (weight, value),
    return weighted average.

    Iterates the input only once, so one-shot iterables (generators) work
    too; the original summed the input twice, which exhausted a generator
    and crashed with ZeroDivisionError.

    >>> weighted_average(((100, 1), (200, 2), (100, 5)))
    2.5
    """
    total_weight = 0
    weighted_sum = 0
    for weight, value in cols:
        total_weight += weight
        weighted_sum += weight * value
    return weighted_sum / total_weight
|
ef1607dc4e12fc23558dbd642e8fcc6e1692e31f
| 26,224
|
def validate_user_input(user_input):
    """
    Validates user input against the primary menu options.

    :param user_input: Input entered by user, in response to primary menu item
    :return: (bool) True if user input is one of '1'..'10', False otherwise
    """
    valid_choices = {str(option) for option in range(1, 11)}
    if user_input in valid_choices:
        return True
    print('\nERROR : Invalid Input!\nEnter from the given options above (ranging 1-10).')
    print('Starting Over...')
    return False
|
fd1411d23ec09c1132b041e4e407b0999a43999d
| 26,225
|
import json
def get_best_hyperparams(hyperparams_dict, fit_params_dict, best, file_name=None):
    """
    Extract the numerical values of the best hyperparameters from hyperopt
    into a more easily usable format.

    :param hyperparams_dict: Dictionary of hyperparameter search spaces; a
        'choice' entry maps the hyperopt index back to the concrete value.
    :param fit_params_dict: Dictionary of fit parameter search spaces; must
        contain a 'choice' entry for 'num_epochs'.
    :param best: The best hyperparameters as returned by hyperopt.
    :param file_name: Optional path ending in .json to save the best
        parameters to.
    :return: Parameter dictionary with model hyperparameters plus the
        'num_epochs' fit parameter.
    """
    best_params = {}
    for key, val in best.items():
        if key in hyperparams_dict:
            input_ = hyperparams_dict[key]
            if input_[0] == 'choice':
                # 'choice' spaces return an index; map it to the real value.
                best_params[key] = input_[1][val]
            else:
                best_params[key] = val
    # num_epochs lives in the fit-parameter space, keyed by hyperopt's index.
    best_params['num_epochs'] = fit_params_dict['num_epochs'][1][best['num_epochs']]
    if file_name is not None:
        # Context manager guarantees the handle is closed even if the dump
        # fails (the original used explicit open/close, leaking on error).
        with open(file_name, "w") as f:
            json.dump(best_params, f)
    return best_params
|
1bac0d463ba5cf5a69912673d1235794d9a448ff
| 26,226
|
def line_interpolate_y(line, x):
    """Interpolates y values on a line when given x.

    To avoid duplicate hits for nodes level with the joints between lines,
    the right-hand end of a line is not considered an intersection; returns
    None when x falls outside the (half-open) interval.
    """
    start, end = line[0], line[1]
    if start[0] == x:
        return start[1]
    # Exclude the right endpoint (half-open interval) and vertical segments.
    if start[0] < x < end[0] and (end[0] - start[0]) != 0:
        return start[1] + (x - start[0]) * (end[1] - start[1]) / (end[0] - start[0])
    return None
|
c696218a55b08c19c515c5560741ccf2b9057253
| 26,227
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.