content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def get_wind_power(agent):
    """Return the power produced by the agent's wind generator, or 0 if inactive."""
    generator = agent.wind_generator
    if not generator.is_active():
        return 0
    return generator.erogate()
async def get_blueprint_params(request, left: int, right: int) -> str:
    """
    API Description: Multiply, left * right. This will show in the swagger page (localhost:8000/api/v1/).
    """
    product = left * right
    return f"{left}*{right}={product}"
from datetime import datetime
def get_current_time():
    """Return the current local time formatted as %H:%M."""
    now = datetime.now()
    return now.strftime("%H:%M")
import mpmath
def cdf(x):
    """
    Cumulative distribution function (CDF) of the raised cosine distribution.

    The CDF of the raised cosine distribution is

        F(x) = (pi + x + sin(x))/(2*pi)

    for x in [-pi, pi]; it is 0 below and 1 above that interval.
    """
    # Work with 5 extra decimal digits so the sum below does not lose
    # precision at the caller's working precision.
    with mpmath.extradps(5):
        x = mpmath.mpf(x)
        if x <= -mpmath.pi:
            return mpmath.mp.zero
        if x >= mpmath.pi:
            return mpmath.mp.one
        # 1/2 + (x + sin x)/(2*pi) is algebraically (pi + x + sin x)/(2*pi).
        return mpmath.mpf('1/2') + (x + mpmath.sin(x))/(2*mpmath.pi)
def get_config_doc_section_hdr():
    """
    Returns the header dictionary for linking modules in smrf to the
    documentation generated by inicheck auto doc functions
    """
    dist_modules = ['air_temp', 'vapor_pressure', 'precip', 'wind', 'albedo',
                    'thermal', 'solar', 'soil_temp']
    hdr_dict = {}
    for module in dist_modules:
        # The precip module documents itself under the 'precipitation' section.
        section = 'precipitation' if module == 'precip' else module
        # If distributed module link api
        hdr_dict[module] = (
            "The {0} section controls all the available parameters that"
            " effect the distribution of the {0} module, espcially the"
            " associated models. For more detailed information please see"
            " :mod:`smrf.distribute.{0}`").format(section)
    return hdr_dict
def dummyfunc(word: str):
    """I do things"""
    greeting = f'Hello {word}!'
    print(greeting)
    return 0
import base64
import os
def rand_unique_id():
    """Creates a random unique id made of characters safe for urls.

    Returns:
        A random id as a string.
    """
    # For maximum base64 encoding efficiency, the number of bytes should be a
    # multiple of 3. 18 bytes should be enough for anyone.
    # urlsafe_b64encode returns bytes on Python 3; decode so the documented
    # "string" contract actually holds.
    return base64.urlsafe_b64encode(os.urandom(18)).decode('ascii')
def factorial(n):
    """Return the factorial of n

    A factorial is a number multiplied by all the numbers before it until 1.
    It's written as the number followed by an exclamation mark: 5!
    So 5! = 5 * 4 * 3 * 2 * 1 = 120
    eg factorial(4) should return:
        24
    """
    # Direct running product replaces the original obfuscated
    # `result = result + (i * result)` accumulation (same value).
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
import sys
import os
def get_network_stats(nodes_list,over_slurm=0,v=0,file_log=sys.stdout):
    """
    Obtain network statistics from nodes.

    Parameters
    ----------
    nodes_list : list of str
        nodes names.
    over_slurm : int
        used to select network interface (0 -> eth0, otherwise ib0).
    v : int
        verbose if 1.
    file_log : file handler
        handler to log file.

    Returns
    -------
    stats_params
        list of parameters for each node ("TX bytes", "RX bytes").
    stats_values
        list of lists with values associated to stats_params
        ([0, 0] recorded when a node could not be read).
    ping_times
        lists of lists with average ping results among all pairs of nodes.

    Notes
    -----
    |
    | **TO DO:**
    |
    | Change over_slurm for explicit selection of the interface.
    """
    if v==1:
        print("\nGetting network statistics...",file=file_log)
    stats_params = [ "TX bytes", "RX bytes"]
    stats_values = []
    stats_values_node =[]
    # Interface selection is hard-wired to the cluster setup; see TO DO above.
    if over_slurm==0:
        interface="eth0"
    else:
        interface="ib0"
    # TX and RX bytes
    for node in nodes_list:
        #txrx="TX" # txrx="RX"
        print(" "+node + " " + interface)
        # Run ifconfig on the node over ssh and scrape the byte counters.
        command_ssh="ssh "+node+" ifconfig "+interface #+"|grep \""+txrx +"bytes\"|cut -d\"(\" -f1|cut -d: -f2"
        #print(command_ssh)
        with os.popen(command_ssh,'r',10) as f_out:
            # -1 if failed
            value_read="-1"
            for line in f_out:
                for match in stats_params:
                    if match in line:
                        # Counter appears as "<param>:<value> ..." in ifconfig output.
                        value_read=line.split(match+":",1)[1].split(' ')[0]
                        stats_values_node+=[int(value_read)]
        if stats_values_node!=[]:
            stats_values+=[stats_values_node]
        else:
            # Nothing parsed for this node; record zeros as a placeholder.
            stats_values+=[[0,0]]
        stats_values_node = []
    # ping (average for x times) [ms]
    do_ping=5
    ping_times_node=[]
    ping_times=[]
    for node_from in nodes_list:
        for node_to in nodes_list:
            command_ssh="ssh "+node_from+" ping "+node_to + " -c " + str(do_ping)
            with os.popen(command_ssh,'r',10) as f_out:
                for line in f_out:
                    if "rtt " in line:
                        # Field 4 of "rtt min/avg/max/mdev = a/b/c/d ms" is the average.
                        ping_times_node+=[float(line.split('/')[4])]
        ping_times+=[ping_times_node]
        ping_times_node=[]
    return([stats_params,stats_values,ping_times])
import warnings
def _quicksearch(assessment, subject_info, session=False):
"""
quick pass on BIDS compatable assesment file to find subject and session
matched in our list.
assessement: pd.DataFrame
assessment data including "participant_id", "ses" in headers
subject_info:
dictonanry containing:
{"<subject-id>": ["<session-name1>", "<session-name2>"]}
"""
info_header = ["participant_id"]
if session:
info_header.append("ses")
try:
df = assessment.loc[:, info_header]
except KeyError:
raise (KeyError)
match_index = []
for sub in subject_info:
sessions = subject_info[sub]
search_val = [sub] + sessions
data_exist = df.isin(search_val).sum(axis=1) == df.shape[1]
valid_idx = df.index[data_exist].tolist()
if len(valid_idx) == len(sessions):
match_index += valid_idx
elif len(valid_idx) > 1:
warnings.warn(
f"Duplicated entry: {search_val}, please check if your raw data is dirty"
)
return match_index | 379fb9cabdcd4fbed2e384051ea84770a392d5b7 | 37,580 |
import numpy as np
def timing(output_filename):
    """Return the median wall time per simulation timestep.

    Parameters
    ----------
    output_filename : str
        Path to a log file containing lines of the form
        ``... Coarse TimeStep time: <seconds>``.

    Returns
    -------
    float
        Median of the per-timestep wall times found in the file.
    """
    # Context manager guarantees the handle is closed even if parsing raises
    # (the original leaked the handle in that case).
    with open(output_filename, 'r') as output:
        coarse_times = [float(line.split('Coarse TimeStep time:')[1])
                        for line in output if "Coarse" in line]
    return np.median(coarse_times)
import pathlib
import json
def load_sparameters(filepath_json):
    """Return a dict with grating coupler S-parameters loaded from JSON.

    A ``.dat`` path is accepted too; the sibling ``.json`` file is loaded
    instead in that case.
    """
    filepath_json = pathlib.Path(filepath_json)
    if filepath_json.suffix == ".dat":
        filepath_json = filepath_json.with_suffix(".json")
    assert filepath_json.exists(), f"{filepath_json} does not exist"
    # read_text() closes the file; the original `open(...).read()` leaked
    # the handle.
    return json.loads(filepath_json.read_text())
def total_penup_travel(gs):
    """
    Compute total distance traveled in a given ordering
    """
    def pairwise_distances(items):
        # Yield the distance between each consecutive pair.
        it = iter(items)
        previous = next(it)
        for current in it:
            yield previous.distance_to(current)
            previous = current
    return sum(pairwise_distances(gs))
import re
def getProteinSequences(path_to_data: str) -> dict:
    """
    Read protein sequence data file and extract protein sequences
    """
    with open(path_to_data) as file:
        data = file.read()
    # A sequence starts 12 characters after the 'CRC64' marker and runs to
    # the terminating '\n//' record delimiter.
    starts = [m.start() for m in re.finditer('CRC64', data)]
    ends = [m.start() for m in re.finditer("\n//", data)]
    proteins = {}
    for idx in range(len(starts)):
        raw = data[starts[idx] + 12:ends[idx]]
        proteins[idx] = raw.replace('\n', '').replace(' ', '')
    return proteins
def apply_function_on_array(f, input_data):
    """Apply a function on input data.

    This method will apply a function on the input data. If the input data
    is 1-d, it will expand the data to 2-d before feeding into the function,
    and then squeeze the output data back to 1-d if possible.

    Parameters
    ----------
    f : (np.ndarray) -> np.ndarray
        The function that will be applied to input data.
    input_data : np.ndarray
        The input data.

    Returns
    -------
    np.ndarray
    """
    if len(input_data.shape) == 1:
        # expand the input data to 2-d before applying f
        input_data = input_data.reshape([-1, 1])
        ret = f(input_data)
        # revert back to 1-d if possible
        if len(ret.shape) == 2 and ret.shape[1] == 1:
            ret = ret.reshape([-1])
    else:
        # 2-d (or higher) input goes straight through
        ret = f(input_data)
    return ret
from typing import Dict
from typing import Any
def query_checks(query: Dict[str, Any],
                 result: Dict[str, Any], test_check_method) -> bool:
    """
    Tests the checks specified in the query.

    Every check is evaluated (no short-circuit, so each check method is
    always invoked); returns True only when all of them pass. A query
    without a 'checks' key trivially passes.
    """
    # `'checks' not in query` replaces the non-idiomatic `not 'checks' in`.
    if 'checks' not in query:
        return True
    passed = True
    for check in query['checks']:
        if not test_check_method(check, result):
            passed = False
    return passed
def maybe_mod(val: str, base=0):
    """
    Takes an argument, which is a string that may start with + or -, and returns the value.
    If *val* starts with + or -, it returns *base + val*.
    Otherwise, it returns *val*.
    On a non-numeric string, *base* is returned unchanged.
    """
    base = base or 0
    try:
        if val.startswith(('+', '-')):
            return base + int(val)
        return int(val)
    except (ValueError, TypeError):
        return base
import time
def get_node_time():
    """Returns the current node time which may be useful for times when the Pi clock might be out of sync"""
    current = time.time()
    return current
import argparse
def create_parser():
    """
    Creates the argparse parser with all the arguments.

    Returns:
        argparse.ArgumentParser: parser for the ofproto/trace CLI wrapper.
    """
    parser = argparse.ArgumentParser(
        description="CLI wrapper around ovs-appctl ofproto/trace.\n"
        "To display the Datapath actions of the supplied IMSI",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("-i", "--imsi", required=True, help="IMSI of the subscriber")
    parser.add_argument(
        "-d",
        "--direction",
        required=True,
        choices=["DL", "UL"],
        help="Direction - DL/UL",
    )
    parser.add_argument(
        "-I", "--ip", nargs="?", const="8.8.8.8", default="8.8.8.8", help="External IP"
    )
    parser.add_argument(
        "-P", "--port", nargs="?", const="80", default="80", help="External Port"
    )
    parser.add_argument(
        "-UP", "--ue_port", nargs="?", const="3372", default="3372", help="UE Port"
    )
    parser.add_argument(
        "-p",
        "--protocol",
        choices=["tcp", "udp", "icmp"],
        nargs="?",
        const="tcp",
        default="tcp",
        # Fixed user-facing typo: "Portocol" -> "Protocol".
        help="Protocol (i.e. tcp, udp, icmp)",
    )
    return parser
import tempfile
import os
def _testfile():
"""Return platform-appropriate lock file name.
Helper for doctests.
"""
return os.path.join(tempfile.gettempdir(), 'trash-%s' % os.getpid()) | b49e324f472e20d1a8e1440e9f898c81e118513f | 37,593 |
import os
def _find_tcl_tk_darwin_frameworks(binaries):
"""
Get an OS X-specific 2-tuple of the absolute paths of the top-level
external data directories for both Tcl and Tk, respectively.
Under OS X, Tcl and Tk are installed as Frameworks requiring special care.
Returns
-------
list
2-tuple whose first element is the value of `${TCL_LIBRARY}` and whose
second element is the value of `${TK_LIBRARY}`.
"""
tcl_root = tk_root = None
for nm, fnm in binaries:
if nm == 'Tcl':
tcl_root = os.path.join(os.path.dirname(fnm), 'Resources/Scripts')
elif nm == 'Tk':
tk_root = os.path.join(os.path.dirname(fnm), 'Resources/Scripts')
return tcl_root, tk_root | 0eac665e71e363519d6baddc88ef41a563cd6163 | 37,594 |
import requests
def get_seeds():
    """Download, parse and return IDs of seed videos.

    Fetches the CC_WEB_VIDEO seed list over HTTP and returns a mapping from
    seed id (with any '*' markers stripped) to video id.

    Returns:
        dict[str, str]: seed id -> video id
    """
    resp = requests.get("http://vireo.cs.cityu.edu.hk/webvideo/Info/Seed.txt")
    seeds = {}
    for line in resp.text.splitlines():
        # Each line is "<seed_id> <video_id>"; seed ids may carry '*' marks.
        seed_id, video_id = line.split()
        seed_id = seed_id.strip("*")
        seeds[seed_id] = video_id
    return seeds
import requests
from bs4 import BeautifulSoup
def __extract_weather_local(url, SEQ_NUM=17) -> list:
    """
    Scrape the KMA (Korea Meteorological Administration) neighborhood
    forecast RSS feed and return the parsed values.

    Example feed URL:
    https://www.weather.go.kr/weather/lifenindustry/sevice_rss.jsp?sido=2800000000&gugun=2818500000&dong=2818582000&x=21&y=3
    Reference tutorial: https://kocoafab.cc/tutorial/view/595
    Mid-term forecasts occasionally mangle the markup, so use with care.

    Tags
    -----
    `<day>`: day (today: 0, tomorrow: 1, day after tomorrow: 2)
    `<temp>`: temperature
    `<tmx>`: daily maximum temperature
    `<tmn>`: daily minimum temperature
    `<sky>`: sky condition
    `<pty>`: precipitation type
    `<pop>`: precipitation probability
    `<ws>`: wind speed
    `<wd>`: wind direction
    `<reh>`: humidity
    `<r12>`: 12-hour rainfall
    `<s12>`: 12-hour snowfall
    `<r06>`: 6-hour rainfall
    `<s06>`: 6-hour snowfall

    Return
    -------
    weather_data (list of lists): one inner list per forecast record,
        holding [day, hour, temp, reh] as strings.
    """
    weather_result = requests.get(url)
    weather_soup = BeautifulSoup(weather_result.text, "html.parser")
    data_list = []
    # Collect the <data seq="i"> forecast records in chronological order.
    for i in range(SEQ_NUM):
        temp = weather_soup.find("data", {"seq": i})
        if temp is not None:
            data_list.append(temp)
    data_length = len(data_list)
    weather_data = [[] for i in range(len(data_list))]
    for n in range(data_length):
        # For each record append day, hour, temperature and humidity.
        weather_data[n].append(data_list[n].find("day").string)
        weather_data[n].append(data_list[n].find("hour").string)
        weather_data[n].append(data_list[n].find("temp").string)
        weather_data[n].append(data_list[n].find("reh").string)
    return weather_data
def next_biggest(target, in_list):
    """
    Returns the next highest number in the in_list.

    If target is greater than every number in in_list, returns the last
    item in the list.
    """
    candidate = next((item for item in in_list if item > target), None)
    if candidate is None:
        candidate = in_list[-1]
    return candidate
def order_by_dependence(parameters):
    """
    Takes a list of parameters from a dynamoDB table and organize them by dependence.
    The output is a list of lists; for each sub-list there is a root parameter
    (that do not depend on anything) and the parameters that do depend
    on it.
    """
    # Selects all table items that does not depend on others:
    roots = [leaf for leaf in parameters if 'dependence' not in leaf.keys() or leaf['dependence']==None]
    # Selects all table items that does depend on others:
    leafs = [leaf for leaf in parameters if 'dependence' in leaf.keys() and leaf['dependence']!=None]
    graphs = []
    for root in roots:
        # A graph starts with a root:
        graph = [root]
        branches = [root['name']]
        for leaf in leafs:
            # If a leaf depends on any parameter present in that graph, add it to that graph:
            # NOTE(review): this single pass only attaches a leaf if its
            # parent was seen earlier in `leafs`; chains listed out of order
            # are silently dropped — confirm input ordering guarantees this.
            if leaf['dependence']['name'] in branches:
                graph.append(leaf)
                branches.append(leaf['name'])
        # Put this graph (that starts with a certain root) in the list of graphs:
        graphs.append(graph)
    return graphs
def _keep_running():
"""Patchable version of True"""
return True | d35c5f6758f894e31daacd117ad9c6932529f37d | 37,601 |
import torch
def KMeansRepeatX(X, repeat, train=True):
    """
    Flatten X, optionally tile it `repeat` times along the batch axis, and
    append a constant bias column of ones.

    :param X: raw data in R^{batch_size x n_dim}
    :param repeat: number of repetitions / resamples (used when train=True)
    :return: tensor of shape [batch_size * repeat, n_dim + 1] when training,
        otherwise [batch_size, n_dim + 1]
    """
    flat = X.reshape(len(X), -1)
    if not train:
        bias = torch.ones(tuple(flat.shape[:-1]) + (1,), dtype=torch.float)
        return torch.cat([flat, bias], dim=-1)
    tiled = torch.cat([flat] * repeat, dim=0)
    bias_shape = tuple(tiled.shape[:-1]) + (1,)
    bias = torch.ones(size=bias_shape, dtype=torch.float)
    return torch.cat([tiled, bias], dim=-1)
def log_request_entries(log_file='fastemplate.log'):
    """
    Retrieves the amount of log entries.

    :param str log_file: name of the log file
    :return: int number of lines in the file
    """
    # Context manager closes the handle (the original `open(...).readlines()`
    # leaked it).
    with open(log_file, mode='r') as f:
        return len(f.readlines())
def _make_errorbar_params(arrays, errorbars):
"""
Determine whether error bars should be plotted or not.
"""
if errorbars is None:
params = {}
else:
if isinstance(errorbars, bool):
params = {name: errorbars for name in arrays}
elif isinstance(errorbars, dict):
params = errorbars
else:
raise TypeError("Unsupported type for argument "
"'errorbars': {}".format(type(errorbars)))
for name, array in arrays.items():
has_variances = array.variances is not None
if name in params:
params[name] &= has_variances
else:
params[name] = has_variances
return params | 15a2de32873ebb4769eab48b5db215bc4c226ce2 | 37,607 |
def find_operation(api, key_op):
    """
    Find an operation in api that matches key_op. First attempts to match
    by operation name (nickname); failing that, matches by HTTP method.

    Args:
        api - A Swagger API description (dictionary)
        key_op - A Swagger operation description (dictionary)
    Returns:
        An operation that matches key_op, or None if nothing found
    """
    operations = api['operations']
    by_nickname = next(
        (op for op in operations if op['nickname'] == key_op['nickname']), None)
    if by_nickname is not None:
        return by_nickname
    return next(
        (op for op in operations if op['method'] == key_op['method']), None)
import math
def volumen_cilindro(radio: float, altura: float) -> float:
    """Return the volume of a cylinder, rounded to one decimal place.

    Parameters:
        radio (float): radius of the cylinder base
        altura (float): height of the cylinder
    """
    volumen = math.pi * radio ** 2 * altura
    return round(volumen, 1)
import os
import sys
def init(i):
    """
    Not to be called directly. Sets the path to the vqe_plugin.
    """
    plugin_dir = os.path.join(os.path.dirname(__file__), '..', '..', 'env', 'vqe_utils')
    # Allow this module to import vqe_utils.
    sys.path.append(plugin_dir)
    return {'return': 0}
def convert_dB_to_W(dB_value):
    """Convert a dB value into Watts (linear scale)."""
    watts = 10 ** (dB_value / 10)
    return watts
def signed_number(number, precision=2):
    """
    Return the given number as a string with a sign in front of it, ie. `+`
    if the number is positive, `-` otherwise (zero gets no sign).
    """
    sign = '+' if number > 0 else ''
    return '{}{:.{precision}f}'.format(sign, number, precision=precision)
def _filter_vocab(vocab, min_fs):
"""Filter down the vocab based on rules in the vectorizers.
:param vocab: `dict[Counter]`: A dict of vocabs.
:param min_fs: `dict[int]: A dict of cutoffs.
Note:
Any key in the min_fs dict should appear in the vocab dict.
:returns: `dict[dict]`: A dict of new filtered vocabs.
"""
for k, min_f in min_fs.items():
# If we don't filter then skip to save an iteration through the vocab
if min_f == -1:
continue
vocab[k] = dict(filter(lambda x: x[1] >= min_f, vocab[k].items()))
return vocab | 0854c8a6bbf0c9c3805cc4b733589d36209bff58 | 37,615 |
def cal_recom_result(user_click, user_sim):
    """
    recom by usercf algo

    Args:
        user_click: dict, key userid , value [itemid1, itemid2]
        user_sim: key:userid value:[(useridj, score1),(ueridk, score2)]
    Return:
        dict, key userid value:dict value_key:itemid , value_value:recom_score
    """
    recom_result = {}
    topk_user = 3
    item_num = 5
    # (Removed the dead tmp_dict built from the user's own clicks; it was
    # never read.)
    for user in user_click:
        recom_result.setdefault(user, {})
        # Recommend items clicked by the top-k most similar users.
        for userid_j, sim_score in user_sim[user][:topk_user]:
            if userid_j not in user_click:
                continue
            for itemid_j in user_click[userid_j][:item_num]:
                # setdefault keeps the first (highest) similarity per item.
                recom_result[user].setdefault(itemid_j, sim_score)
    return recom_result
def prompt(choices, label='choice'):
    """
    Prompt the user to choose an item from the list. Options should be a list
    of 2-tuples, where the first item is the value to be returned when the
    option is selected, and the second is the label that will be displayed to the
    user.

    Raises ValueError if `choices` is empty. NOTE(review): a non-numeric
    reply makes `int(index)` raise ValueError instead of re-prompting —
    confirm whether that is acceptable for callers.
    """
    if len(choices) == 0:
        raise ValueError('The list of choices is empty')
    # Render a 1-based menu of the option labels.
    lines = ['%d) %s' % (i + 1, item[1]) for i, item in enumerate(choices)]
    index = input('\n'.join(lines + ['', 'Please select a %s: ' % label]))
    # Keep asking until the reply is a number within range.
    while len(index) < 1 or int(index) < 1 or int(index) > len(choices):
        index = input('Please enter a valid choice: ')
    # Return the value (first element) of the selected 2-tuple.
    return choices[int(index) - 1][0]
def get_length(packet):
    """
    Gets the total length of the packet.

    The length field is the 4 hex characters at offset 32, parsed as a
    big-endian hexadecimal number.

    :param packet: hex-string representation of the packet
    :return: int total length
    :raises ValueError: if the length field contains non-hex characters
    """
    # int(..., 16) replaces the hand-rolled digit-by-digit conversion and
    # additionally accepts upper-case hex digits.
    return int(packet[32:36], 16)
import time
def mqtt_time():
    """Return the current UTC time string for mqtt messages (ISO-8601 with 'Z')."""
    now_utc = time.gmtime()
    return time.strftime("%Y-%m-%dT%H:%M:%SZ", now_utc)
def U_0(phi_0, k, l, om, f):
    """Zonal velocity amplitude. Wavenumber and frequency should be in angular
    units."""
    numerator = k * om + 1j * l * f
    denominator = om ** 2 - f ** 2
    return (numerator / denominator) * phi_0
def create_object_detected_msg(position):
    """Create an xyz message dict holding the target location under 'coords'."""
    message = {'coords': position}
    return message
def attack(decrypt_oracle, iv, c, t):
    """
    Uses a chosen-ciphertext attack to decrypt the ciphertext.

    :param decrypt_oracle: the decryption oracle
    :param iv: the initialization vector
    :param c: the ciphertext
    :param t: the tag corresponding to the ciphertext
    :return: the plaintext
    """
    # Prepend the IV so it is decrypted as the first block, then discard it.
    shifted = iv + c
    recovered = decrypt_oracle(bytes(16), shifted, c[-16:])
    return recovered[16:]
import threading
def start_timer(timeout, callback):
    """Start a timer using the threading library (in seconds).

    Returns the started threading.Timer so callers can cancel it.
    """
    timer = threading.Timer(timeout, callback)
    timer.start()
    return timer
def trim_string(text: str, max_length: int = 200, ellipsis: str = "...") -> str:
    """
    If text is longer than max_length then return a trimmed string
    (ending in `ellipsis`, total length exactly max_length).
    """
    assert max_length >= len(ellipsis)
    if len(text) <= max_length:
        return text
    keep = max_length - len(ellipsis)
    return text[:keep] + ellipsis
def leaf_edge_dict(edg_dic, network):
    """
    Return the subset of edg_dic whose edges touch a leaf vertex of network.
    """
    leaves = network.leaves()
    return {ind: edg for ind, edg in edg_dic.items()
            if edg[0] in leaves or edg[1] in leaves}
def prettify(text):
    """Clean up the text to make it more suitable for display.

    Unescapes the HTML entities `&quot;` and `&amp;`. `&quot;` is replaced
    first so that an escaped sequence like `&amp;quot;` is not
    double-unescaped (the original replaced `&amp;` first, which turned
    `&amp;quot;` into a bare quote).
    """
    return text.replace("&quot;", "\"").replace("&amp;", "&")
def _file_col(col):
"""
Converts a given column in a maze to the corresponding actual line
number in the maze.
Args:
col (int): The column of the block the maze object.
Returns:
int: The column number in the file corresponding to the given
column in the maze.
"""
return 2 * col + 1 | 85c3ab8c9f2a0608cae61af22b162ddba3d03b5a | 37,632 |
def clean_astrometry(ruwe, ipd_gof_harmonic_amplitude, visibility_periods_used, astrometric_excess_noise_sig, astrometric_params_solved, use_5p=False):
    """
    Select stars with good astrometry in Gaia.

    All criteria must hold: RUWE <= 1.4; ipd_gof_harmonic_amplitude <= 0.2
    (rejects blended transits, Fabricius et al. 2020); at least 9 visibility
    periods and astrometric_excess_noise_sig <= 2.0 (Lindegren et al. 2020).
    With use_5p, additionally require a 5-parameter solution
    (astrometric_params_solved == 31, Brown et al. 2020).
    """
    good = (ruwe <= 1.4)
    good = good & (ipd_gof_harmonic_amplitude <= 0.2)
    good = good & (visibility_periods_used >= 9)
    good = good & (astrometric_excess_noise_sig <= 2.0)
    if use_5p:
        good = good & (astrometric_params_solved == 31)
    return good
import json
def print_data_info(data_path):
    """
    Print up to 5 records from the file to inspect the data format.

    :param data_path: path to a JSON-lines file (one JSON object per line)
    :return: an (always empty) list, kept for interface compatibility
    """
    triples = []
    with open(data_path, 'r', encoding='utf8') as f:
        # Iterate the file lazily instead of readlines(); stop after 5 rows.
        for i, line in enumerate(f):
            if i >= 5:
                break
            data = json.loads(line)
            print(json.dumps(data, sort_keys=True, indent=4, separators=(', ', ': '),ensure_ascii=False))
    return triples
def merge_adjacent_variants(variants):
    """
    Merge adjacent variant sequences so that final correction
    doesn't fail during iterative mapping. Assumes variants are
    ordered by leftmost unchanged nuc from 5-prime to 3-prime.

    NOTE(review): consumes the input list (pop(0)) and mutates the variant
    objects it merges into — callers must not reuse `variants` or the
    merged objects afterwards; confirm this is intended.
    """
    if len(variants) < 1:
        return variants
    new_variants = [variants.pop(0)]
    for v in variants:
        # Adjacent when the previous right edge abuts this variant's left edge.
        if new_variants[-1].right - 1 == v.left:
            # Extend the previous variant in place instead of appending.
            new_variants[-1].right = v.right
            new_variants[-1].seq += v.seq
        else:
            new_variants.append(v)
    return new_variants
from typing import List
def render_include_source_code(
    col_offset: int, include_path: str, include_code: str
) -> List[str]:
    """Annotate included source code with additional information about
    the source path of the included source.

    Args:
        col_offset: a col offset of the whole source code which is included
        include_path: a path to the module which is included
        include_code: a content of the module at path `include_path`

    Returns:
        formatted source code: the included lines indented by `col_offset`
        and wrapped in banner docstring blocks marking the inclusion
    """
    print(f"PyParams: including module source: {include_path}")
    s_col_offset = " " * col_offset
    comment_line = f"{s_col_offset}PyParams: auto include source of `{include_path}`"
    # Opening banner: a triple-quoted block with a horizontal rule.
    # NOTE(review): comment_line already carries the offset, so the banner
    # text ends up double-indented — confirm that is intended.
    header_lines = [
        f'{s_col_offset}"""\n{s_col_offset}' + "-" * (80 - col_offset),
        f"{s_col_offset}{comment_line}",
        s_col_offset + "-" * (80 - col_offset) + f'\n{s_col_offset}"""',
    ]
    # Indent every included line by the same column offset.
    include_lines = header_lines + [s_col_offset + l for l in include_code.split("\n")]
    comment_line = f"{s_col_offset}INCLUDE END OF `{include_path}`"
    # Closing banner mirrors the opening one.
    include_lines += [
        f'{s_col_offset}"""\n{s_col_offset}' + "-" * (80 - col_offset),
        f"{s_col_offset}{comment_line}",
        s_col_offset + "-" * (80 - col_offset) + f'\n{s_col_offset}"""',
    ]
    return include_lines
def _extend_pads(pads, rank):
"""Extends a padding list to match the necessary rank.
Args:
pads ([int] or None): The explicitly-provided padding list.
rank (int): The rank of the operation.
Returns:
None: If pads is None
[int]: The extended padding list.
"""
if pads is None:
return pads
pads = list(pads)
if len(pads) < rank:
pads.extend([0] * (rank - len(pads)))
if len(pads) < (2 * rank):
pads.extend(pads[len(pads) - rank:rank])
return pads | ce1342f3973b852259ea97d710c733ba2d90cace | 37,639 |
def get_row_index(preDict, usrDict):
    """
    Get the row positions for all words in user dictionary from pre-trained dictionary.
    return: a list of row positions
    Example: preDict='a\\nb\\nc\\n', usrDict='a\\nc\\n', then return [0,2]
    """
    # Map each word in the pre-trained dictionary to its line number
    # (a repeated word keeps its last position, as before).
    with open(preDict, "r") as f:
        index = {line.strip().split()[0]: i for i, line in enumerate(f)}
    with open(usrDict, "r") as f:
        return [index[line.strip().split()[0]] for line in f]
def weekday(dt):
    """:yaql:property weekday

    Returns the day of the week as an integer, Monday is 0 and Sunday is 6.

    :signature: datetime.weekday
    :returnType: integer

    .. code::

        yaql> datetime(2006, 11, 21, 16, 30).weekday
        1
    """
    day_index = dt.weekday()
    return day_index
import re
def strip_namespace(path):
    """Removes namespace prefixes from elements of the supplied path.

    Args:
        path: A YANG path string

    Returns:
        A YANG path string with the namespaces removed.
    """
    # Greedy match: everything up to the last ':' of a component is treated
    # as the namespace prefix.
    pattern = re.compile(r"^.+:")
    stripped = (pattern.sub("", component) for component in path.split("/"))
    return "/".join(stripped)
def like_prefix(value, start='%'):
    """
    Return a copy of the string with `%` (or a run of `_` placeholders)
    attached to the beginning, for use with the SQL LIKE operator.

    :param str value: value to be processed.
    :param str start: placeholder to prefix; `%`, or `_` repeated for
        exact-length matching. defaults to `%`.

    :rtype: str
    """
    if value is None:
        return None
    return f'{start}{value}'
import platform
def is_bsd():
    """Return True on BSD-based systems (including Darwin/macOS)."""
    system = platform.system()
    return system == 'Darwin' or 'bsd' in system.lower()
import os
def filestat(filename):
    """ Get file stats and convert it to a dict.

    :params filename: a filename comprehensive of path if necessary
    :returns: a dictionary of the ``st_*`` attributes; see [1] for details

    [1] https://docs.python.org/3.6/library/os.html#os.stat_result
    """
    stat_result = os.stat(filename)
    fields = [attr for attr in dir(stat_result) if attr.startswith('st_')]
    return {attr: getattr(stat_result, attr) for attr in fields}
import re
def Reorder_AtomMapNum(rsmiles:str,
                       psmiles:str,
                       start_num: int = 1):
    """
    This function is to reorder the atom map number.

    :param rsmiles: The reactant(s) smiles
    :param psmiles: The product(s) smiles
    :param start_num: The start atom number, default = 1
    :return: (rsmiles, psmiles) with atom-map numbers renumbered
        consecutively from start_num, in the order the numbers appear
        in rsmiles.

    NOTE(review): re.findall(r"\\d+", ...) matches every digit run in the
    SMILES (ring closures, isotopes, charges), not only ':n]' map numbers —
    confirm inputs only carry digits inside map annotations.
    """
    r_nums = re.findall(r"\d+", rsmiles)
    # Pair each found number with its new sequential value.
    replace_nums = [[str(i+start_num), num] for i, num in enumerate(r_nums)]
    # Rewrite to a temporary ':r<new>]' marker first so freshly written
    # numbers are not matched again by later replacements.
    for replace_num in replace_nums:
        rsmiles = rsmiles.replace(":"+replace_num[1] + ']', ":r" + replace_num[0] + ']')
    rsmiles = rsmiles.replace(":r", ":")
    # Apply the same mapping to the product side so atoms stay aligned.
    for replace_num in replace_nums:
        psmiles = psmiles.replace(":"+replace_num[1] + ']', ":r" + replace_num[0] + ']')
    psmiles= psmiles.replace(":r", ":")
    return rsmiles, psmiles
def check_link_status(host1, host2):
    """
    Method used to ensure the link between two hosts does not have defined
    loss. Testing bandwidth with defined loss leads to error due to packet
    loss.

    author: Miles Stanley

    :param host1: The first host name
    :param host2: The second host name
    :return: True if the link has no defined loss, False otherwise
    """
    # Removed the leftover debugging `return True` (explicitly marked DELETE)
    # that short-circuited the real check below.
    for link in graph_nodes['links']:
        if link.get_first() == host1 and link.get_second() == host2:
            if link.get_loss() is None:
                return True
    return False
def safe_unichr(intval):
    """Create a unicode character from its integer value. In case `chr` fails, render the character
    as an escaped `\\U<8-byte hex value of intval>` string.

    Parameters
    ----------
    intval : int
        Integer code of character

    Returns
    -------
    str
        Unicode string of character
    """
    try:
        return chr(intval)
    except ValueError:
        # ValueError: chr() arg not in range(0x10000) (narrow Python build)
        s = "\\U%08x" % intval
        # Return the UTF-16 surrogate pair. The original called `s.decode`,
        # which does not exist on Python 3 str; round-trip through bytes.
        return s.encode('ascii').decode('unicode-escape')
def filter_drifting(ds):
    """Remove all records with excessive low-frequency components.

    Returns a boolean mask that keeps records whose relative 30-minute
    sea-state energy in frequency band 1 exceeds 0.1.

    NOTE(review): `ds` is presumably an xarray Dataset (uses `.sel`) with a
    'sea_state_30m_rel_energy_in_frequency_interval' variable indexed by
    'meta_frequency_band' — confirm against callers.
    """
    return ds['sea_state_30m_rel_energy_in_frequency_interval'].sel(meta_frequency_band=1) > 0.1
import os
import re
def make_counted_dir(base):
    """Make a fresh subdirectory of "base", using incrementing numbers.

    base -- string -- directory to create the path in
    returns -- string -- new, empty subdirectory
    """
    if not os.path.isdir(base):
        os.makedirs(base)
    # Find the highest purely-numeric subdirectory name so far (-1 if none).
    numbered = [int(name) for name in os.listdir(base) if re.match(r'^\d+$', name)]
    next_index = max(numbered, default=-1) + 1
    path = os.path.join(base, '{:03d}'.format(next_index))
    os.makedirs(path)
    return path
def training_step(x, model, estimator, optimizers, **config):
    """Perform one optimization step of the `model` given the mini-batch of
    observations `x`, the gradient estimator/evaluator `estimator` and the
    list of optimizers `optimizers`."""
    loss, diagnostics, output = estimator(model, x, backward=True, **config)
    # Step every optimizer, then clear all gradients for the next batch.
    for optimizer in optimizers:
        optimizer.step()
    for optimizer in optimizers:
        optimizer.zero_grad()
    return diagnostics
import subprocess
import sys
def run_cmd(cmd, input=None, exit=False):
    """Run a shell command and return its decoded output.

    Parameters:
        cmd -- command string, executed through the shell.
        input -- optional bytes to feed to the process's stdin.
        exit -- if True, log stderr and terminate the program when the
                command returns a non-zero exit code.

    Returns (stdout, returncode, stderr); stdout/stderr decoded as UTF-8.
    """
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    out, err = p.communicate(input)
    if p.returncode and exit:
        # BUG FIX: the docstring promised an error message on failure, but
        # none was emitted before exiting; write stderr so failures are
        # diagnosable.
        sys.stderr.write(err.decode('utf-8'))
        sys.exit(1)
    return out.decode('utf-8'), p.returncode, err.decode('utf-8')
def create_variant_to_alt_read_names_dict(isovar_results):
    """
    Create dictionary from variant to names of alt reads supporting
    that variant in an IsovarResult.

    Parameters
    ----------
    isovar_results : list of IsovarResult

    Returns
    -------
    Dictionary from varcode.Variant to set(str) of read names
    """
    variant_to_read_names = {}
    for result in isovar_results:
        variant_to_read_names[result.variant] = set(result.alt_read_names)
    return variant_to_read_names
def collapse_topology(topology):
    """
    Collapse a topology into a compact representation.

    Returns a list of (domain, run_length) pairs where consecutive entries
    are grouped by object identity (`is`), matching the original semantics.
    """
    # Work on a copy so the caller's list is untouched.
    remaining = list(topology)
    runs = []
    current = remaining.pop(0)
    run_length = 1
    for candidate in remaining:
        if candidate is current:
            run_length += 1
        else:
            runs.append((current, run_length))
            current = candidate
            run_length = 1
    # Flush the final run.
    runs.append((current, run_length))
    return runs
def _get_is_negative(offset_string: str) -> bool:
    """Check if a string has a negative sign.

    Recognises three markers of negativity: exactly one minus-like
    character, exactly one trailing 'S' (southern-hemisphere style), or
    an accounting-style fully parenthesised value like "(12.5)".

    NOTE(review): the literals 'โ', 'โ' and 'โฌ' below look like mojibake
    of dash/minus/currency characters (perhaps '−', '–', '€') — verify
    against the original source encoding before changing them.
    """
    is_negative = False
    # One minus-like character marks the value negative; more than one
    # (e.g. a range) does not.  The second count() relies on int truthiness.
    if offset_string.count('-') > 0 or offset_string.count('โ'):
        if offset_string.count('-') == 1 or offset_string.count('โ') == 1:
            is_negative = True
    # A single trailing 'S' also marks negative, unless a minus was found.
    if offset_string.count('S') > 0:
        if offset_string.count('S') == 1 and offset_string[-1] == "S" and is_negative is False:
            is_negative = True
    # Strip spacing, quotes and sign/currency characters before checking
    # for a parenthesised (accounting-notation) value.
    offset_string_negative_check = (
        offset_string.replace(' ', '')
        .replace('"', '')
        .replace('โ', '')
        .replace('+', '')
        .replace('-', '')
        .replace('โ', '')
        .replace('โฌ', '')
    )
    if len(offset_string_negative_check) > 2:
        if offset_string_negative_check[0] == '(' and offset_string_negative_check[-1] == ')':
            is_negative = True
    return is_negative
import math
def get_bearing(my_location, tgt_location):
    """
    Approximate bearing (radians) from my_location to tgt_location.

    Valid for medium latitudes and short distances, where latitude and
    longitude deltas can be treated as planar offsets.
    """
    delta_lat = tgt_location.lat - my_location.lat
    delta_lon = tgt_location.lon - my_location.lon
    return math.atan2(delta_lon, delta_lat)
def xy_split(df):
    """Split a DataFrame into features and the MSRP target.

    :param pd.DataFrame df:
    :return: (features DataFrame without MSRP, 1-D numpy array of MSRP values)
    """
    features = df.drop(columns=['MSRP'])
    # .values yields the ndarray; ravel() flattens (n, 1) -> (n,)
    target = df[['MSRP']].values.ravel()
    return features, target
from datetime import datetime
def generate_birthdays(birthdays: list, year_to_generate: int):
    """
    Generate [birthday, date] event pairs for each birthday.

    :param birthdays: birthday objects exposing an ``in_year(year)`` method
    :param year_to_generate: how many years from this year to generate
    :return: list of [birthday, date] pairs, one per birthday per year
    """
    first_year = datetime.now().year
    return [
        [birthday, birthday.in_year(year)]
        for birthday in birthdays
        for year in range(first_year, first_year + year_to_generate)
    ]
def popup_element(value, title=None, format=None):
    """Helper function for quickly adding a popup element to a layer.

    Args:
        value (str): Column name to display the value for each feature.
        title (str, optional): Title for the given value. By default, it's the name of the value.
        format (str, optional): Format to apply to number values in the widget, based on d3-format
            specifier (https://github.com/d3/d3-format#locale_format).

    Example:
        >>> popup_element('column_name', title='Popup title', format='.2~s')
    """
    return dict(value=value, title=title, format=format)
def write56(lines: list[tuple[int, float]]) -> str:
    """Converts a list of Abundances to a string storable in a .56 file.

    Raises ValueError for duplicate atomic numbers, atomic numbers outside
    1..118, or abundances outside [0, 1].
    """
    atomic_numbers = [z for z, _ in lines]
    if len(set(atomic_numbers)) != len(lines):
        raise ValueError("atomic numbers must be unique")
    rows = [f"{len(lines)}\n"]
    for z, abundance in lines:
        if not 1 <= z <= 118:
            raise ValueError(f"atomic number {z} out of range")
        if not 0 <= abundance <= 1:
            raise ValueError(f"abundance {abundance} out of range (0 to 1)")
        rows.append(f"{z} {abundance:.6e}\n")
    return "".join(rows)
import builtins
def missing_cv2(monkeypatch):
    """Monkey patch ``__import__`` so importing cv2 raises ModuleNotFoundError.

    Used to exercise code paths that must cope with a missing cv2 dependency;
    all other imports are delegated to the real ``__import__``.
    """
    import_og = builtins.__import__

    def mocked_import(name, globals=None, locals=None, fromlist=(), level=0):
        if name == 'cv2':
            raise ModuleNotFoundError()
        # BUG FIX: the original dropped `globals` and passed the remaining
        # arguments shifted one position to the left, breaking every
        # non-cv2 import made while the patch was active.
        return import_og(name, globals, locals, fromlist, level)

    monkeypatch.setattr(builtins, '__import__', mocked_import)
def get_headers():
    """Return HTTP request headers mimicking a desktop Chrome browser,
    so urllib requests are not rejected by simple user-agent filtering.
    """
    user_agent = ('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 '
                  '(KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11')
    return {
        'User-Agent': user_agent,
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
        'Accept-Encoding': 'none',
        'Accept-Language': 'en-US,en;q=0.8',
        'Connection': 'keep-alive',
    }
import requests
def get_object(
    token: str,
    url: str = "https://dev-api.aioneers.tech/v1/",
    object: str = "dotTypes",
) -> list:
    """Get JSON object.

    Parameters
    ----------
    token : str
        Token which was returned from the user login.
    url : str = "https://dev-api.aioneers.tech/v1/"
        Url of the API.
    object : str = "dotTypes"
        Object to be extracted from the API.

    Returns
    -------
    list
        List of JSON objects.

    Raises
    ------
    ValueError
        Raises ValueError when the input is not correct.
    """
    # Map each accepted (case-insensitive) object name to its API endpoint.
    endpoints = {
        "metrictypes": "metricTypes",
        "metrics": "metrics",
        "dottypes": "trackingObjectTypes",
        "trackingobjecttypes": "trackingObjectTypes",
        "dots": "trackingObjects",
        "trackingobjects": "trackingObjects",
        "actions": "actions",
        "actiontemplates": "actionTemplates",
        "measuretemplates": "measureTemplates",
        "measures": "measures",
        "initiativetemplates": "initiativeTemplates",
        "initiatives": "initiatives",
    }
    key = object.lower()
    if key not in endpoints:
        raise ValueError
    request_url = url.strip("/") + "/" + endpoints[key]
    response = requests.request(
        "GET", request_url, headers={"Authorization": f"Bearer {token}"}
    )
    response.raise_for_status()
    return response.json()["data"]["payload"]
def get_endpoint(svc):
    """
    Build the URL of a service from its mapping.

    Expects keys 'protocol' and 'host'; 'port' defaults to '80' and
    'path' defaults to '/'.
    """
    protocol = svc.get('protocol')
    host = svc.get('host')
    port = svc.get('port', '80')
    path = svc.get('path', '/')
    return f"{protocol}://{host}:{port}{path}"
import torch
def create_length_mask(data, lengths):
    """
    Build a (n_sequences, max_length) 0/1 mask from per-sequence lengths.

    For sequence i, positions 0..lengths[i] (inclusive) are set to 1.
    NOTE(review): the inclusive `length + 1` bound is preserved from the
    original — confirm whether lengths are meant to be 0-based last indices.
    """
    n_sequences, max_length, _ = data.shape
    mask = torch.zeros(n_sequences, max_length)
    for row, length in enumerate(lengths):
        mask[row, :length + 1] = 1
    return mask
import torch
def laplacian_1d(window_size):
    """
    Build a 1-D discrete Laplacian filter of the given size: all ones,
    with the centre coefficient set to 1 - window_size so it sums to zero.
    One could also use the Laplacian of Gaussian formula to design the filter.
    """
    kernel = torch.ones(window_size)
    kernel[window_size // 2] = 1 - window_size
    return kernel
def no():
    """Trace helper that always answers negatively.

    :return: False
    """
    print(' no()')
    return False
def is_supermodular(dswin_u, dswin_uv):
    """
    Supermodularity becomes relevant only for k >= 3.

    NOTE(review): this is a stub — it ignores both arguments and
    unconditionally returns True; presumably a real check is intended
    for the k >= 3 case.
    """
    return True
def get_size(img):
    """Return the number of pixels (height * width) in the image."""
    height, width = img.shape[:2]
    return height * width
def is_pat(s):
    """Return True if `s` is a "pat".

    A single element is a pat.  A longer sequence is a pat if it can be
    split into left and right parts where every element of the left is
    greater than every element of the right, and both reversed parts are
    themselves pats.
    """
    if len(s) == 1:
        return True
    for split_point in range(1, len(s)):
        l, r = s[:split_point], s[split_point:]
        # BUG FIX: min(*l)/max(*r) crash with TypeError when the slice has a
        # single non-iterable element (e.g. a list of ints); min(l)/max(r)
        # behave identically for len >= 2 and work for len == 1.
        if min(l) > max(r):
            if is_pat(list(reversed(l))) and is_pat(list(reversed(r))):
                return True
    return False
def cycle_sort(array: list) -> list:
    """
    Sort `array` in place using cycle sort and return it.

    >>> cycle_sort([4, 3, 2, 1])
    [1, 2, 3, 4]
    >>> cycle_sort([-4, 20, 0, -50, 100, -1])
    [-50, -4, -1, 0, 20, 100]
    >>> cycle_sort([-.1, -.2, 1.3, -.8])
    [-0.8, -0.2, -0.1, 1.3]
    >>> cycle_sort([])
    []
    """
    n = len(array)
    for start in range(n - 1):
        value = array[start]
        # Find where `value` belongs by counting smaller elements after it.
        target = start
        for idx in range(start + 1, n):
            if array[idx] < value:
                target += 1
        if target == start:
            continue  # already in its final position
        # Skip duplicates, then drop `value` into its slot.
        while value == array[target]:
            target += 1
        array[target], value = value, array[target]
        # Rotate the rest of the cycle until it closes back at `start`.
        while target != start:
            target = start
            for idx in range(start + 1, n):
                if array[idx] < value:
                    target += 1
            while value == array[target]:
                target += 1
            array[target], value = value, array[target]
    return array
def zip1(L1, L2):
    """
    Pair up the elements of two iterables, truncating to the shorter one.

    >>> zip1(range(2), range(5, 8))
    [(0, 5), (1, 6)]
    """
    return [pair for pair in zip(L1, L2)]
import os
def get_bound_driver(addr):
    """Retrieve the name of the driver (if any) bound to a PCIe address.

    addr - canonical PCIe address.

    Returns the driver name or None if addr is not bound.
    """
    link = '/sys/bus/pci/devices/{}/driver'.format(addr)
    if not os.path.islink(link):
        return None
    # The symlink target's last path component is the driver name.
    return os.path.basename(os.readlink(link))
def find_section_ranges(transition_points, t2m_fn):
    """
    Build {start_ts, end_ts, mode} dicts from consecutive transition points.

    Incoming motion activity should be range or trip. NOT section. The related
    ranges should be matched with the sections later as part of `find_ranges`.
    """
    records = transition_points.to_dict(orient='records')
    ranges = []
    # Each consecutive pair of transitions delimits one range; the mode is
    # derived from the transition that opens the range.
    for opening, closing in zip(records, records[1:]):
        ranges.append({
            "start_ts": opening["ts"],
            "end_ts": closing["ts"],
            "mode": t2m_fn(opening),
        })
    return ranges
def total_accessibility(in_rsa, path=True):
    """Parses rsa file for the total surface accessibility data.

    Parameters
    ----------
    in_rsa : str
        Path to naccess rsa file, or its contents.
    path : bool
        Indicates if in_rsa is a path or a string.

    Returns
    -------
    dssp_residues : 5-tuple(float)
        Total accessibility values for:
        [0] all atoms
        [1] all side-chain atoms
        [2] all main-chain atoms
        [3] all non-polar atoms
        [4] all polar atoms
    """
    if path:
        with open(in_rsa, 'r') as inf:
            rsa = inf.read()
    else:
        rsa = in_rsa[:]
    # The totals live on the last line; skip the leading record label.
    totals = tuple(float(field) for field in rsa.splitlines()[-1].split()[1:])
    all_atoms, side_chains, main_chain, non_polar, polar = totals
    return all_atoms, side_chains, main_chain, non_polar, polar
def get_sleepiest_guard(sleep_record):
    """Finds guard in sleep_record who spent the most total minutes asleep.

    returns: ('guard', total_minutes_slept)
    """
    sleepiest = '', 0
    for guard, minutes in sleep_record.items():
        total = sum(minutes.values())
        # Strict > keeps the earlier guard on ties, as before.
        if total > sleepiest[1]:
            sleepiest = guard, total
    print(sleepiest)
    return sleepiest
def int_to_binary(d, length=8):
    """
    Binarize a non-negative integer d into a list of 0/1 ints,
    left-padded with zeros to `length` bits (longer values are not truncated).
    """
    bits = format(d, 'b').rjust(length, '0')
    return [int(bit) for bit in bits]
import jinja2
def render(data, template):
    """Render a jinja2 template file with the given data.

    Args:
        data(obj): dict with data to pass to jinja2 template
        template(str): path to the jinja2 template, relative to the CWD

    Returns: string, rendered result; raises jinja2 errors (e.g.
    TemplateNotFound) on failure.
    """
    # BUG FIX: the original opened the template file and immediately
    # discarded the handle (and shadowed the `template` parameter); the
    # jinja2 FileSystemLoader does all the reading itself.
    template_loader = jinja2.FileSystemLoader(searchpath="./")
    template_env = jinja2.Environment(loader=template_loader)
    compiled = template_env.get_template(template)
    return compiled.render(data=data)
def findTimeSpaceIndexs(a, timeToSearch):
    """
    Return the pair of adjacent indices in `a` bracketing timeToSearch,
    clamped to the ends of the sequence.

    >>> findTimeSpaceIndexs([1,2,3], 3.6)
    (2, 2)
    >>> findTimeSpaceIndexs([1,2,3], 2.6)
    (1, 2)
    >>> findTimeSpaceIndexs([1,2,3], 0.6)
    (0, 0)
    """
    # Index and value of the element closest to the search time.
    idx, val = min(enumerate(a), key=lambda pair: abs(pair[1] - timeToSearch))
    if val > timeToSearch:
        return (idx - 1, idx) if idx > 0 else (idx, idx)
    return (idx, idx + 1) if idx < len(a) - 1 else (idx, idx)
def get_options_from_json(conf_json, ack, csr, acmd, crtf, chnf, ca):
    """Parse key-value options from config json and return the values sequentially.

    Parameter values take priority: a truthy parameter is preserved and its
    counterpart in the config json is ignored; a falsy parameter falls back
    to the json value.  The placeholder values '', '.' and '..' are
    normalised to None (json-sourced values are left as-is).
    """
    opt = {'AccountKey': ack, 'CSR': csr, 'AcmeDir': acmd,
           'CertFile': crtf, 'ChainFile': chnf, 'CA': ca}
    for key, value in opt.items():
        if not value and conf_json.get(key):
            opt[key] = conf_json[key]
        elif value in ('', '.', '..'):
            opt[key] = None
    return (opt['AccountKey'], opt['CSR'], opt['AcmeDir'],
            opt['CertFile'], opt['ChainFile'], opt['CA'])
def check_parallel_results(results, op):
    """Count (and report to stdout) failed results from run_parallel.

    NOTE: This function was originally located in the shell module of
    swift_build_support and should eventually be replaced with a better
    parallel implementation.
    """
    if results is None:
        return 0
    failures = 0
    for result in results:
        if result is None:
            continue
        # Print the banner only once, before the first failure.
        if failures == 0:
            print("======%s FAILURES======" % op)
        print("%s failed (ret=%d): %s" % (result.repo_path, result.ret, result))
        failures += 1
        if result.stderr:
            print(result.stderr)
    return failures
def current_url_name(context):
    """
    Returns the name of the current URL, namespaced, or False.

    Example usage:

        {% current_url_name as url_name %}
        <a href="#"{% if url_name == 'myapp:home' %} class="active"{% endif %}">Home</a>
    """
    match = context.request.resolver_match
    if not match:
        return False
    return "{}:{}".format(match.namespace, match.url_name)
import hashlib
def HashFile(filename):
    """Returns SHA-256 hash of a given file.

    Accepts either a path string or a list whose first element is the path.
    Returns the placeholder string "UNKNOWN FILE HASH" if the file cannot
    be read.
    """
    if isinstance(filename, list):
        filename = filename[0]
    try:
        # BUG FIX: use a context manager so the file handle is always closed
        # (the original leaked the handle from open(...).read()).
        with open(filename, "rb") as f:
            return hashlib.sha256(f.read()).hexdigest()
    except IOError:
        return "UNKNOWN FILE HASH"
def identity(test_item):
    """No-op decorator: returns its argument unchanged."""
    return test_item
def _tensor_max(*args):
"""Elementwise maximum of a sequence of tensors"""
maximum, *rest = args
for arg in rest:
maximum = maximum.max(arg)
return maximum | 3046b6ae14368a7275f74ade42a1b179ae38b95e | 37,710 |
def doing_pcomp(row_trigger_value: str) -> bool:
    """Return True when the row trigger selects position compare."""
    expected = "Position Compare"
    return row_trigger_value == expected
def gen_bwt_array(s: str, suf_tab: list[int]) -> list[str]:
    """
    Compute the Burrows-Wheeler transform of `s` (terminated with '$')
    from its suffix array.

    :param s: text for which the BWT array is being computed
    :param suf_tab: suffix array for the input text
    :return: the BWT array for the text
    """
    terminated = s + '$'
    # Each BWT entry is the character preceding the suffix start (index -1
    # wraps to the terminator for suffix 0).
    return [terminated[suffix - 1] for suffix in suf_tab]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.