content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
from typing import Tuple
def unit_center(
    indices: Tuple[int, int], image_size: Tuple[int, int], stride: int
) -> Tuple[float, float]:
    """Compute the normalized center coordinates of one prior unit.

    :param indices: (row, col) indices of the unit on the feature map
    :param image_size: (height, width) of the input image
    :param stride: stride of the feature map
    :return: (y, x) center coordinates, normalized by the feature-map size
    """
    row, col = indices
    cells_y = image_size[0] / stride
    cells_x = image_size[1] / stride
    # Center of a cell sits half a cell past its index.
    return (row + 0.5) / cells_y, (col + 0.5) / cells_x
import numpy
def filter_dup_points(points):
    """Return a polyline with no duplicate or near-duplicate points.

    Consecutive points that are numerically close (per ``numpy.allclose``)
    are collapsed into one.

    :param points: sequence of point coordinates (each array-like)
    :return: numpy array of filtered points; empty array for empty input
    """
    # Guard against empty input: seeding with points[0] would raise IndexError.
    if len(points) == 0:
        return numpy.array([])
    points_out = [points[0]]
    for point in points[1:]:
        # Keep only points that differ measurably from the last kept point.
        if not numpy.allclose(point, points_out[-1]):
            points_out.append(point)
    return numpy.array(points_out)
def __get_int_ordinals(string):
    """
    Return the integer ordinals of a string as a comma-separated string.

    Each ordinal is right-justified to a width of 3 with spaces.
    """
    return ", ".join(str(ord(char)).rjust(3, " ") for char in string)
def drastic_t_norm(a, b):
    """
    Drastic t-norm function: b where a == 1, a where b == 1, 0 elsewhere.

    Parameters
    ----------
    a:
        numpy (n,) shaped array
    b:
        numpy (n,) shaped array

    Returns
    -------
    Returns drastic t-norm of a and b

    Examples
    --------
    >>> a = random.random(10,)
    >>> b = random.random(10,)
    >>> c = drastic_t_norm(a, b)
    """
    # The boolean masks select elements where the other operand survives.
    a_is_one = a == 1
    b_is_one = b == 1
    return a_is_one * b + b_is_one * a
def modulo(a, b, c):
    """
    Calculate (a ** b) % c by fast modular exponentiation.

    :param a: base (coerced to int)
    :param b: non-negative exponent (coerced to int)
    :param c: modulus (coerced to int)
    :return: a**b mod c
    """
    # pow(a, b, c) runs in O(log b) multiplications instead of first
    # materializing the (potentially enormous) integer a**b.
    return pow(int(a), int(b), int(c))
def get_distribution_schedules_payload():
    """Return the distribution schedule payload (one default fee schedule)."""
    schedule = {'feeScheduleId': 1}
    return [schedule]
import subprocess
def rpm_installed(rpm_name):
    """
    Check whether the given rpm(s) are installed.

    Arguments:
        rpm_name - a string with an rpm name to check, or an iterable of
                   rpm names that should all be checked
    Returns:
        True if all rpms are installed, False otherwise
    """
    def _query(name):
        # `rpm -q` exits with 0 exactly when the package is installed.
        return subprocess.call(["rpm", "-q", name], stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL) == 0

    if isinstance(rpm_name, str):
        return _query(rpm_name)
    # Iterable case: every package must be present (short-circuits on miss).
    return all(_query(name) for name in rpm_name)
def check_question(question: str, excluding_words: tuple) -> bool:
    """Return True when the question contains none of the excluding words.

    A False result signals the caller to stop the search, since the
    question no longer makes sense."""
    return not any(word in question for word in excluding_words)
def items_list(mapping, items):
    """Return the values of `mapping` for each key in `items`, in order."""
    return [mapping[key] for key in items]
def prep_net_config(config, spec_len):
    """Prepare the argument dictionary for the SVD network.

    :param config: experiment configuration mapping
    :param spec_len: spectrogram length passed through to the network
    :return: dict of network constructor arguments
    """
    return {
        # parameters for the Mel transform
        'n_mels': config['mel_bands'],
        'sample_rate': config['sample_rate'],
        'f_min': config['mel_min'],
        'f_max': config['mel_max'],
        'n_stft': int(config['frame_len'] // 2 + 1),
        'frame_len': config['frame_len'],
        'filterbank': config['filterbank'],
        'spec_len': spec_len,
        # parameter for magnitude scaling
        'magscale': config['magscale'],
        # architecture parameters
        'arch.batch_norm': config['arch.batch_norm'],
        'arch.convdrop': config['arch.convdrop'],
        'arch': config['arch'],
        'arch.firstconv_zeromean': config.get('arch.firstconv_zeromean', 'std'),
    }
import re
def _clean_text(text):
    """Strip a cell, dropping anything trailing a leading M/D/YYYY date."""
    starts_with_date = re.match(r"^[0-9]{1,2}/[0-9]{1,2}/[0-9]{4}", text)
    if starts_with_date:
        # Remove trailing characters after LayoffBeginDate's 4-digit year.
        text = re.sub(r"(?<=[0-9]{4}).*", "", text)
    return text.strip()
def naics_filter(opps):
    """Filter out opps without desired naics codes.

    Arguments:
        opps {list} -- a list of sam opportunity api results

    Returns:
        [list] -- the subset of opps having at least one naics code that
                  starts with a desired prefix
    """
    # Desired NAICS prefixes (the duplicate '54169' from the original
    # tuple was removed; membership semantics are unchanged).
    naics = ('334111', '334118', '3343', '33451', '334516', '334614',
             '5112', '518', '54169', '54121', '5415', '61142')
    filtered_opps = []
    for opp in opps:
        naics_array = opp.get('naics', {})
        if not naics_array:
            continue
        # Each entry looks like {'code': '334111', ...}; the original built
        # this list twice through no-op comprehensions.
        opp_naics = [d.get('code', []) for d in naics_array]
        for code in opp_naics:
            if any(code.startswith(prefix) for prefix in naics):
                filtered_opps.append(opp)
                break  # one match is enough; avoid duplicate appends
    return filtered_opps
def time_to_str(seconds):
    """
    Convert a duration in seconds to an "MM:SS" string.

    :param seconds: duration in seconds
    :return: zero-padded "MM:SS" string
    """
    minutes, secs = divmod(seconds, 60)
    return "%02d:%02d" % (minutes, secs)
def get_file_options_string(args):
    """ Create a string containing the relevant flags and options based on
    the file options from the arguments.

    Parameters
    ----------
    args: argparse.Namespace
        The parsed arguments

    Returns
    -------
    str: "<overwrite> <keep-intermediate>" with either part empty when the
    corresponding option is off (the separating space is always present).
    """
    overwrite_str = "--overwrite" if args.overwrite else ""
    keep_str = ("--keep-intermediate-files"
                if args.keep_intermediate_files else "")
    return "{} {}".format(overwrite_str, keep_str)
def sum_of_ints(n):
    """Return the sum 1 + 2 + ... + n via the closed-form formula."""
    return n * (n + 1) // 2
import requests
import re
def zipfile_by_bitsize(binaries_url, headers, zipfile_regex, bitsize):
    """Return the url linking to the correct zipfile.

    Used by ccx and gmsh: scrapes `binaries_url` for hrefs, keeps the last
    two links matching `zipfile_regex`, and picks the 32- or 64-bit one.
    """
    # this is used by ccx and gmsh
    response = requests.get(binaries_url, headers=headers)
    hrefs = re.findall(r'href=[\'"]?([^\'" >]+)', response.text)
    pattern = re.compile(zipfile_regex)
    matches = [link for link in hrefs if pattern.match(link)][-2:]
    # Default assumption is [32-bit, 64-bit]; swap when the second link is
    # clearly the 32-bit Windows build.
    url_choices = {32: matches[0], 64: matches[1]}
    if 'win32' in matches[1] or 'Windows32' in matches[1]:
        url_choices = {32: matches[1], 64: matches[0]}
    return url_choices[bitsize]
import requests
def get_from_lyrics(url):
    """Fetch a lyrics page from www.lyrics.com.

    NOTE: lyrics.com only embeds a truncated lyric preview in the page
    metadata unless you buy a pass, so the raw HTML is returned for the
    caller to parse.

    :param url: full lyrics.com URL for a song
    :return: tuple of (raw response HTML, empty list placeholder)
    """
    # The large body of commented-out BeautifulSoup experiments that used
    # to live here was dead code and has been removed; the live behavior
    # was always just the GET below.
    response = requests.get(url)
    return response.text, []
import os
import json
def read_json_file(directory, filename):
    """Read and deserialize a JSON file.

    :param directory: directory containing the file
    :param filename: name of the JSON file
    :return: the deserialized JSON data
    """
    file_path = os.path.join(directory, filename)
    # The with-statement closes the file; the explicit close() that used to
    # follow json.load was redundant.
    with open(file_path) as _file:
        return json.load(_file)
import os
import json
def getJSONFile(filepath):
    """ Get data from a cached JSON file.

    Returns the parsed data, or False when the file is missing or does
    not contain valid JSON.
    """
    if not os.path.exists(filepath):
        return False
    with open(filepath) as stream:
        try:
            return json.load(stream)
        except ValueError:
            # Corrupt / non-JSON cache file.
            return False
import random
def generate_answers(question_3, question_5, range_3, range_5, sample_size=32):
    """
    Helper that generates answers with response times in a certain range.

    :param question_3: The question in block 3
    :param question_5: The question in block 5
    :param range_3: Range of response times in block 3
    :param range_5: Range of response times in block 5
    :param sample_size: Amount of images to associate
    :return: List of answer dicts (block-3 answers first, then block-5)
    """
    answers = []
    # Blocks are drawn in order (3 then 5) so the random stream is
    # consumed exactly as before.
    for block_nr, question, time_range in ((2, question_3, range_3),
                                           (4, question_5, range_5)):
        times = random.choices(time_range, k=sample_size)
        answers.extend({"block_nr": block_nr,
                        "response_time": t,
                        "question_id": question.id} for t in times)
    return answers
def get_date(isoformattime: str) -> str:
    """Extract the 'YYYY-MM-DD' date part of an ISO 8601 time string.

    :param isoformattime: an ISO 8601 formatted time string
    :return: the date portion of the original time string
    :raises ValueError: if the 'T' prefacing the time part is missing
    """
    date, sep, _time = isoformattime.partition('T')
    if not sep:
        # No 'T' separator: not a full ISO 8601 timestamp.
        raise ValueError('Improperly formatted time string given.')
    return date
def to_camel_case(text):
    """Convert a snake_case string to camelCase.

    :param str text:
    :rtype: str
    :return: the camelCase form of `text`
    """
    first, *rest = text.split('_')
    return first + "".join(part.title() for part in rest)
import ast
def ParseTryjobBuildbucketId(msg):
    """Find the buildbucket-id in the messages from `cros tryjob`.

    Args:
      msg: messages from `cros tryjob` (a Python-literal list of dicts)

    Returns:
      buildbucket-id to pass to `cros buildresult`, or None when absent.
    """
    first_build = ast.literal_eval(msg)[0]
    # .get returns None when the key is missing, matching the old
    # explicit membership check.
    return first_build.get('buildbucket_id')
def to_string(cls):
    """
    Return the string representation of a syntax class or a syntax class
    instance: its `typed_name` attribute, defaulting to 'string'.
    """
    default = 'string'
    return getattr(cls, 'typed_name', default)
import os
def download_file_single_session(my_session, file_url_breezy):
    """
    Download a file from breezy.hr using an existing session.

    :param my_session: request session which stores the cookies generated
        by signing in
    :param file_url_breezy: URL of the file to be downloaded
    :return: [file name, True] on success, or [error message, False]
    """
    doc_file_name = str(file_url_breezy).split("/")[-1]
    response = my_session.get(url=file_url_breezy)
    if response.status_code == 401:
        return ["Unauthorised", False]
    elif response.status_code != 200:
        return ["Unknown Error", False]
    if not os.path.exists("resumes"):
        os.mkdir("resumes")
    save_file_path = os.path.join("resumes", doc_file_name)
    # with-statement guarantees the handle is closed even if the write
    # fails (the original left it open in that case).
    with open(save_file_path, 'wb') as pdf:
        pdf.write(response.content)
    return [doc_file_name, True]
import re
def _format_kv_name(setting_path):
    """Return a normalized name usable as a KeyVault secret name.

    Every character outside [0-9a-zA-Z-] is replaced with '-'.
    """
    invalid_chars = "[^0-9a-zA-Z-]"
    return re.sub(invalid_chars, "-", setting_path)
import sys
def IsWindows():
    """Return True when the current platform is Windows."""
    # sys.platform is 'win32' on Windows; startswith matches the old
    # platform[:3] == 'win' slice check.
    return sys.platform.startswith('win')
def created_by(date, user=None, prefix="updated"):
    """
    Render the template context for a created-by link.

    :param date: timestamp to display
    :param user: user who made the change (optional)
    :param prefix: verb shown before the date
    :return: context dict
    """
    return {"date": date, "user": user, "prefix": prefix}
from typing import Dict
import argparse
from typing import Any
from typing import List
def update_config_from_args(config: Dict, args: argparse.Namespace, *, verbose: bool = True) -> Dict[str, Any]:
    """
    Modify config and paths given script arguments.

    Mutates `config` in place and returns it. Supports inline overrides of
    the form --config "a.b.c=value,x.y=2" (dot-separated nested keys), plus
    the dedicated --workers, --seed, --no_cuda and --single_gpu flags,
    which are applied after (and therefore win over) the inline overrides.

    Args:
        config: Config dictionary.
        args: Arguments.
        verbose: Print message when updating the config.

    Returns:
        Updated config dict.
    """
    # parse the --config inline modifier
    if args.config is not None:
        # get all fields to update from the argument and loop them
        update_fields: List[str] = args.config.split(",")
        for field_value in update_fields:
            # get field and value
            fields_str, value = field_value.strip().split("=")
            # convert value if necessary: numeric first (demoted to int when
            # integral), then booleans; otherwise the raw string is kept
            try:
                value = float(value)
                if round(value) == value:
                    value = int(value)
            except ValueError:
                pass
            if str(value).lower() == "true":
                value = True
            elif str(value).lower() == "false":
                value = False
            # update the correct nested dictionary field
            fields = fields_str.split(".")
            current_dict = config
            for i, field in enumerate(fields):
                if i == len(fields) - 1:
                    # update field
                    if field not in current_dict:
                        # "same_as" marks sections that inherit fields from
                        # another section, so a missing key is tolerated there
                        assert "same_as" in current_dict, (
                            f"Field {fields_str} not found in config {list(current_dict.keys())}. "
                            f"Typo or field missing in config.")
                    current_dict[field] = value
                    if verbose:
                        print(f"    Change config: Set {fields_str} = {value}")
                    break
                # go one nesting level deeper
                current_dict = current_dict[field]
    if args.workers is not None:
        config["dataset_train"]["num_workers"] = int(args.workers)
        config["dataset_val"]["num_workers"] = int(args.workers)
        if verbose:
            print(f"    Change config: Set dataloader workers to {args.workers} for train and val.")
    if args.seed is not None:
        # the strings "none"/"null" explicitly disable seeding
        if str(args.seed).lower() in ["none", "null"]:
            config["random_seed"] = None
        else:
            config["random_seed"] = int(args.seed)
        if verbose:
            print(f"    Change config: Set seed to {args.seed}. Deterministic")
    if args.no_cuda:
        config["use_cuda"] = False
        if verbose:
            print(f"    Change config: Set use_cuda to False.")
    if args.single_gpu:
        config["use_multi_gpu"] = False
        if verbose:
            print(f"    Change config: Set use_multi_gpu to False.")
    return config
import re
def ellipsicate(message: str, max_length: int = 40, strip: bool = True) -> str:
    """Return a shortened version of a string if it exceeds max_length.

    Whitespace runs are collapsed to single spaces; over-long strings keep
    their head and tail around ' ... ' ('bizbazfrobnicator' -> 'biz ... tor').
    """
    text = re.sub(r'\s+', ' ', str(message))  # only allow ' ' for whitespace
    if strip:
        text = text.strip()
    if len(text) <= max_length:
        return text
    keep = int((max_length - 5) / 2)  # ellipsis padded with spaces
    return f"{text[:keep]} ... {text[-keep:]}"
def createStats():
    """
    Build a fresh statistics dict for one gap-filling attempt.

    Separated out just to keep the caller a little cleaner. Fields are
    grouped into: spanning-read stats, the two flank-extension seeds
    (F1/F2), and the final fill result.
    """
    # "support" holds three parallel flag lists: span, seed1, seed2
    return {"support": [[], [], []],  # keep all the flags I have
            # --- spanning-read stats ---
            "spanCount": 0,
            "spanSeedName": None,
            "spanSeedScore": 0,
            "spanSeedStart": None,
            "spanSeedEnd": None,
            "spanSeedStrand1": None,
            "spanSeedStrand2": None,
            "avgSpanBases": 0,
            "seed1": None,
            "seed2": None,
            "predictedGapSize": None,
            "sameStrand": None,
            # --- flank-1 extension stats ---
            "extendF1Count": 0,
            "avgExtF1Bases": 0,
            "extendF1SeedName": 0,
            "extendF1SeedScore": 0,
            "extendF1SeedStart": None,
            "extendF1SeedEnd": None,
            "extendF1SeedStrand": None,
            # --- flank-2 extension stats ---
            "extendF2Count": 0,
            "avgExtF2Bases": 0,
            "extendF2SeedName": 0,
            "extendF2SeedScore": 0,
            "extendF2SeedStart": None,
            "extendF2SeedEnd": None,
            "extendF2SeedStrand": None,
            # --- fill result ---
            "extendSeq1": None,
            "extendSeq2": None,
            "fillSeq": None,
            "contribSeqs": 0,
            "contribBases": 0,
            "fillBases": 0,
            "seed1Trim": 0,
            "seed2Trim": 0}
import ipaddress
def first_subnet(subnets, version=4):
    """
    Return the first subnetwork of a list, filtered by IP version.

    :param subnets: iterable of subnet strings in CIDR notation
    :param version: IP version to look for (4 or 6)
    :return: the first matching subnet string, or "" when none match
    """
    matching = (subnet for subnet in subnets
                if ipaddress.ip_network(subnet).version == version)
    return next(matching, "")
def tribonacci_1(s, n):
    """Extend the 3-element seed `s` with tribonacci terms, return first n.

    Mutates `s` in place. Starts from index 3, so the seed is assumed to
    have three elements; n < 3 simply truncates the seed."""
    for i in range(3, n):
        # Each new term is the sum of the three preceding terms.
        s.append(sum(s[i - 3:i]))
    return s[:n]
def create_record_set(model, X, y):
    """Create a record set for AWS model training.

    :param model: SageMaker estimator exposing `record_set`
    :param X: feature matrix (cast to float32)
    :param y: label vector (cast to float32), or None for unlabeled data
    :return: the model's record set
    """
    X_float = X.astype("float32")
    # Compare against None explicitly: `if y:` is ambiguous for numpy
    # arrays (raises on multi-element arrays) and would wrongly skip an
    # all-zero label vector.
    if y is not None:
        return model.record_set(X_float, labels=y.astype("float32"))
    return model.record_set(X_float)
def get_comb_index(bi_1, bi_2):
    """
    Combine two indices into a single integer key.

    :param bi_1: first index (occupies the low two decimal digits)
    :param bi_2: second index (shifted into the hundreds, offset by one)
    :return: combined integer key
    """
    shifted = 100 * (bi_2 + 1)
    return bi_1 + shifted
import requests
def make_api_call(csv_target):
    """Make an http request to the IEX Trading API and return a useful dataset.

    :param csv_target: iterable of rows each having a 'symbol' key
    :return: list of (symbol, price, size, time) tuples
    """
    symbols = ','.join(str(row['symbol']) for row in csv_target)
    url = "https://api.iextrading.com/1.0/tops/last?symbols={0}".format(symbols)
    data = requests.get(url).json()
    return [(item['symbol'], item['price'], item['size'], item['time'])
            for item in data]
def parse_synsetfile(synsetfname):
    """Read an ImageNet 2012 synset file.

    Each line is "<wordnet-id> <category name...>".

    :param synsetfname: path to the synset file
    :return: dict mapping the stringified line index ("0".."999") to
        [wnetid, categoryname]
    :raises AssertionError: if the file does not hold exactly 1000 classes
    """
    # Read via a context manager so the handle is closed (the original
    # open(...).readlines() leaked it).
    with open(synsetfname, 'r') as synset_file:
        categorylist = synset_file.readlines()
    imageNetIDs = {}
    for count, categoryinfo in enumerate(categorylist):
        wnetid = categoryinfo.split(' ')[0]
        categoryname = ' '.join(categoryinfo.split(' ')[1:])
        imageNetIDs[str(count)] = [wnetid, categoryname]
    assert len(imageNetIDs.keys()) == 1000
    return imageNetIDs
import re
def concat_image_name_with_tag(inp_dict, value):
    """
    Concatenate image name with default tag(s) if `value` has no ':' inside.

    :param inp_dict: dict that may define 'defaultImageVersion' and/or
        'defaultGpuImageVersion'
    :param text value: image name, possibly already tagged
    :return: list array of tagged image names
    """
    if ':' in value:
        # Image already carries a tag; pass it through unchanged.
        return [value]
    # CPU default first, then GPU default, matching the original order.
    return ['{0}:{1}'.format(value, inp_dict[key])
            for key in ('defaultImageVersion', 'defaultGpuImageVersion')
            if key in inp_dict]
def join_a_sentence(sentence_number, data):
    """
    Join the words of one sentence from the tagged-words dataframe.

    Args.:
        sentence_number: number of the sentence we want to join and return.
        data: dataframe with 'Sentence #' and 'Word' columns.

    Returns:
        The joined sentence as a single space-separated string.
    """
    label = 'Sentence: {}'.format(str(sentence_number))
    words = data[data['Sentence #'] == label]['Word']
    return ' '.join(list(words))
def buffer(geom, urban_rural):
    """Create DHS cluster buffers.

    Buffer size:
        - 2km for urban ("U")
        - 5km for rural ("R") (1% of rural have 10km displacement, but
          ignoring those)

    Metric units converted to decimal degrees by dividing by width of one
    decimal degree in km at equator. Not an ideal buffer created after
    reprojecting, but good enough for this application.
    """
    radii = {"U": 2000, "R": 5000}
    if urban_rural not in radii:
        raise ValueError("Invalid urban/rural identified ({})".format(urban_rural))
    return geom.buffer(radii[urban_rural])
import math
def calc_saturated_vapour_pressure_air_FAO(temp_air):
    """Saturated vapour pressure of air at `temp_air` (deg C), in kPa.

    From: http://www.fao.org/3/X0490E/x0490e0k.htm
    """
    exponent = (17.27 * temp_air) / (temp_air + 237.3)
    return 0.611 * math.exp(exponent)
def make_grid(n, m):
    """Return an (n+1) x (m+1) nested list whose cells hold their own
    (row, col) coordinate tuples, i.e. grid[i][j] == (i, j).

    :param n: largest row index (inclusive)
    :param m: largest column index (inclusive)
    :return: nested list of coordinate tuples
    """
    # The commented-out alternative implementations that used to follow
    # were dead code and have been removed.
    return [[(i, j) for j in range(m + 1)] for i in range(n + 1)]
from typing import Iterable
from typing import Tuple
def get_maximum_path(tree_level_strings: Iterable[str]) -> Tuple[int, Iterable[int]]:
    """Get maximum path value and maximum path for a given tree, represented as level strings.

    Solution idea: Compute maximum path from *bottom up*. Hence, the time complexity is linear.

    Each node of the bottom-up table is a (best_sum, direction) pair where
    direction is 0 (go down-left) or 1 (go down-right); -1 marks leaves.
    """
    # Parse each level string into a list of ints.
    tree_levels = [
        [int(number_string) for number_string in tree_level_string.split()] \
        for tree_level_string in tree_level_strings
    ]
    # Seed the DP table with the bottom row; -1 means "no child to follow".
    max_path_tree_levels = [[(value, -1) for value in tree_levels[-1]]]
    # Walk the rows from second-to-last up to the root.
    for level_idx in range(len(tree_levels) - 2, -1, -1):
        current_tree_level = tree_levels[level_idx]
        previous_path_tree_level = max_path_tree_levels[-1]
        new_path_tree_level = []
        for idx, value in enumerate(current_tree_level):
            # Children of node idx are idx (left) and idx + 1 (right).
            left_val = previous_path_tree_level[idx][0]
            right_val = previous_path_tree_level[idx + 1][0]
            if left_val >= right_val:
                new_path_tree_node = (value + left_val, 0)
            else:
                new_path_tree_node = (value + right_val, 1)
            new_path_tree_level.append(new_path_tree_node)
        max_path_tree_levels.append(new_path_tree_level)
    # Table was built bottom-up; flip it so index 0 is the root level.
    max_path_tree_levels.reverse()
    # Reconstruct the path by following the stored directions from the root:
    # node_idx stays for "left" (0) and shifts by one for "right" (1).
    max_path_tree = []
    node_idx = 0
    for level_idx, level in enumerate(max_path_tree_levels):
        max_path_tree.append(tree_levels[level_idx][node_idx])
        node_idx += level[node_idx][1]
    return max_path_tree_levels[0][0][0], max_path_tree
def computeAttackPro(harm):
    """
    Compute attack success probability for HARM using attack graph as upper layer and attack tree as lower layer.

    Prints every attack path (node names and per-node probabilities),
    buckets each path's probability into likelihood bands, and returns the
    maximum path probability together with the per-band path counts.
    """
    pro = []
    rare_paths = 0  # number of rare paths
    unlikely_paths = 0  # number of unlikely paths
    possible_paths = 0  # number of possible paths
    likely_paths = 0  # number of likely paths
    certain_paths = 0  # number of certain_paths
    harm.model.calcPro()
    print("=================================================")
    print("Print attack paths: \n")
    for path in harm.model.allpath:
        pathPro = 1
        for node in path:
            # s and e are the artificial start/end nodes of the model.
            if node is not harm.model.s and node is not harm.model.e:
                print(node.name, end =' ')
                #Exclude the attacker
                if node.val > 0:
                    print('(', node.val, ')', end = ' ')
                    pathPro *= node.val
        # NOTE(review): the bands below have gaps (e.g. 0.19 < p < 0.2);
        # values falling in a gap — and p == 0 — land in `certain_paths`
        # via the final else. Confirm this is intended.
        if pathPro > 0 and pathPro <= 0.19:
            rare_paths = rare_paths + 1
        elif pathPro >= 0.2 and pathPro <= 0.39:
            unlikely_paths = unlikely_paths + 1
        elif pathPro >= 0.4 and pathPro <= 0.59:
            possible_paths = possible_paths + 1
        elif pathPro >= 0.6 and pathPro <= 0.79:
            likely_paths = likely_paths + 1
        else:
            certain_paths = certain_paths + 1
        print('\n')
        pro.append(pathPro)
    value = max(pro)
    print("Maximum attack success probability is: ", value)
    return value, rare_paths, unlikely_paths, possible_paths, likely_paths, certain_paths
import six
def reverse_dict(d):
    """Reverse the dictionary ({value: key}) -- may lose data if values are not unique!"""
    # dict.items() replaces six.iteritems: this file already uses
    # Python-3-only features (f-strings), so the six shim is unnecessary.
    return {v: k for k, v in d.items()}
import os
def is_unversioned(name):
    """Check whether an unversioned python prefix is used in the name
    (e.g. python-foo).

    Return: (bool) True if used, False otherwise
    """
    # Guard clauses: executables, files with extensions, and explicitly
    # versioned names never count as unversioned python packages.
    if os.path.isabs(name):
        return False
    if os.path.splitext(name)[1]:
        return False
    if name.startswith(('python2-', 'python3-')):
        return False
    return (
        name.startswith('python-') or
        '-python-' in name or
        name.endswith('-python') or
        name == 'python')
def add_force_targets(cmd, changed_targets):
    """
    Add forced targets to command by appending to an existing
    --forcerun/-R argument or appending to the end of the command.

    cmd (list of strings): passed snakemake commands.
    changed_targets: list of targets that have parameter changes.
    """
    present_flags = {"--forcerun", "-R"} & set(cmd)
    if not present_flags:
        # add forced targets to end if arg not already used.
        return cmd + ["-R"] + changed_targets
    # splice the targets in right after the last of the force flags
    idx = max(cmd.index(flag) for flag in present_flags)
    return cmd[:idx + 1] + changed_targets + cmd[idx + 1:]
import os
import re
def get_filepaths(target_path, file_pattern=None, dir_pattern=None):
    """
    Get the complete list of full file paths within `target_path`.

    Can specify an optional 'file_pattern' regex to only match certain
    file names, and an optional 'dir_pattern' regex to match the name of
    the immediate parent directory.
    """
    matched = []
    for dirpath, _dirnames, filenames in os.walk(target_path):
        # Directory filter is per-directory, so check it once up front.
        if dir_pattern:
            parent = os.path.split(dirpath)[1]
            if not re.match(dir_pattern, parent):
                continue
        for fname in filenames:
            if file_pattern and not re.match(file_pattern, fname):
                continue
            matched.append(os.path.join(dirpath, fname))
    return matched
import os
import re
def find_joblog_file(joblogs_path: str, regexp: str) -> str:
    """
    Find the joblog file for a processed asset.

    :param joblogs_path: Path to a folder with joblogs files to search
    :param regexp: Python regexp matching the wanted file name; if multiple
        files match, the first occurrence wins
    :return: Full path to the joblog file, empty string if not found
    """
    matches = (name for name in os.listdir(joblogs_path)
               if re.match(regexp, name))
    found = next(matches, None)
    return os.path.join(joblogs_path, found) if found is not None else ""
def plot_zone_map(gdf, **kwargs):
    """Plot map of zones as choropleth. Has 20 colors; buckets zones in
    alphabetical order.

    :param geopandas.geodataframe.GeoDataFrame gdf: GeoDataFrame with
        index = zone names, columns = ['geometry']
    :param \\*\\*kwargs: arbitrary keyword arguments passed to matplotlib plotting function
    :return: (*matplotlib.axes._subplots.AxesSubplot) -- the plot object
    """
    gdf = gdf.copy()
    gdf["coords"] = gdf["geometry"].apply(lambda x: x.representative_point().coords[:])
    gdf["coords"] = [coords[0] for coords in gdf["coords"]]
    # kwargs are used for plot visuals
    default_kwargs = {
        "figsize": (50, 50),
        "linewidth": 1,
        "edgecolor": "white",
        "cmap": "tab20",
        "alpha": 0.66,
    }
    plt = gdf.plot(**{**default_kwargs, **kwargs})
    for idx, row in gdf.iterrows():
        # Pass the label positionally: the `s=` keyword of Axes.annotate
        # was renamed to `text` in matplotlib 3.3, so `s=idx` raises there.
        plt.annotate(idx, xy=row["coords"], horizontalalignment="center")
    return plt
import zipfile
import os
import errno
import io
def unzip(zip_file, extraction_path):
    """
    Extract all members of `zip_file` into `extraction_path`.

    :param zip_file: path to the zip archive
    :param extraction_path: directory to extract into
    :return: list of extracted member names, or None on error (printed)
    """
    print("[INFO] Unzipping")
    try:
        files = []
        with zipfile.ZipFile(zip_file, "r") as z:
            for fileinfo in z.infolist():
                filename = fileinfo.filename
                files.append(filename)
                outfile = os.path.join(extraction_path, filename)
                # Create parent directories as needed.
                if not os.path.exists(os.path.dirname(outfile)):
                    try:
                        os.makedirs(os.path.dirname(outfile))
                    except OSError as exc:  # Guard against race condition
                        if exc.errno != errno.EEXIST:
                            print("\n[WARN] OS Error: Race Condition")
                # Directory entries end with "/" and have no content.
                if not outfile.endswith("/"):
                    # with-blocks close both handles even if the write
                    # fails (the original leaked them on error).
                    with z.open(filename, "r") as member, \
                            io.open(outfile, mode='wb') as f:
                        f.write(member.read())
        return files
    except Exception as e:
        print("[ERROR] Unzipping Error" + str(e))
def standardize(data, mean, std):
    """Standardize datasets using the given statistics.

    Args:
        data (np.ndarray or list of np.ndarray): Dataset or list of
            datasets to standardize.
        mean (number): Mean statistic.
        std (number): Standard deviation statistic.

    Returns:
        np.ndarray or list of np.ndarray: The standardized dataset(s).
    """
    def _scale(x):
        # z-score normalization
        return (x - mean) / std

    if isinstance(data, list):
        return [_scale(x) for x in data]
    return _scale(data)
def timeslips_decoder(x: list, t: int) -> float:
    """
    The inverse function of to_timeslips: decode a list of timeslip counts
    into the total milliseconds of time they represent (t ms per slip).
    """
    total_slips = sum(x)
    return total_slips * t
from typing import List
def serie_recursiva(arr: List[int], a: int, b: int) -> List[int]:
    """Compute the series E_i = i^3 + 5 for i from `a` to `b`, recursively.

    :param arr: accumulator list (pass an empty list to start).
    :arr type: List[int]
    :param a: first index of the series.
    :a type: int
    :param b: last index of the series (inclusive).
    :b type: int
    :return: `arr` extended with the series terms.
    :rtype: List[int]
    """
    # Base case: the index range is exhausted.
    if a > b:
        return arr
    term = a ** 3 + 5
    arr.append(term)
    return serie_recursiva(arr, a + 1, b)
def api(uri):
    """
    Given a URI that uses the ConceptNet API, such as "/c/en/test",
    return its fully-qualified URL.
    """
    base = "http://api.conceptnet.io"
    return base + uri
def potential_reciprocity(s, G):
    """For authors (list input), check whether any acknowledged commenter
    is an author in G with coauthors; for commenters (dict input), check
    whether any acknowledged commenter has coauthors besides the authors
    of the paper she is acknowledged on.
    """
    if isinstance(s, list):
        for commenter in s:
            if commenter in G.nodes() and len(list(G.neighbors(commenter))) > 0:
                return True
        return False
    authors = set(s['auth'])
    for commenter in s['coms']:
        # Non-empty difference means coauthors outside the paper's authors.
        if commenter in G.nodes() and set(G.neighbors(commenter)) - authors:
            return True
    return False
def desc(x):
    """Transform-data helper: join the 'value' fields of `x` with spaces."""
    values = (item["value"] for item in x)
    return " ".join(values)
def meric_hit(h_rank, t_rank, N=50):
    """Evaluate the vector result by the hit-N method.

    N: ranks <= N count as hits (rate of true entities in the top-N rank).
    Returns the mean hit percentage over head and tail ranks.
    """
    print('start evaluating by Hit')
    rate_h = sum(1 for rank in h_rank if rank <= N) / len(h_rank) * 100
    rate_t = sum(1 for rank in t_rank if rank <= N) / len(t_rank) * 100
    return (rate_h + rate_t) / 2
import re
def recursive_attach_unit_strings(smirnoff_data, units_to_attach):
    """
    Recursively traverse a SMIRNOFF data structure, appending "* {unit}" to values in key:value pairs
    where "key_unit":"unit_string" is present at a higher level in the hierarchy.
    This function expects all items in smirnoff_data to be formatted as strings.

    Parameters
    ----------
    smirnoff_data : dict
        Any level of hierarchy that is part of a SMIRNOFF dict, with all data members
        formatted as string.
    units_to_attach : dict
        Dict of the form {key:unit_string}

    Returns
    -------
    unit_appended_smirnoff_data: dict
    """
    # Make a copy of units_to_attach so we don't modify the original (otherwise things like k_unit could
    # leak between sections)
    units_to_attach = units_to_attach.copy()
    # smirnoff_data = smirnoff_data.copy()

    # If we're working with a dict, see if there are any new unit entries and store them,
    # then operate recursively on the values in the dict.
    if isinstance(smirnoff_data, dict):
        # Go over all key:value pairs once to see if there are new units to attach.
        # Note that units to be attached can be defined in the same dict as the
        # key:value pair they will be attached to, so we need to complete this check
        # before we are able to check other items in the dict.
        # (list() snapshots the items so deleting "*_unit" keys while
        # iterating is safe.)
        for key, value in list(smirnoff_data.items()):
            if key[-5:] == "_unit":
                units_to_attach[key[:-5]] = value
                del smirnoff_data[key]
        # Go through once more to attach units as appropriate
        for key in smirnoff_data.keys():
            # We use regular expressions to catch possible indexed attributes
            # (e.g. "k1", "k2" all pick up the unit registered for "k")
            attach_unit = None
            for unit_key, unit_string in units_to_attach.items():
                if re.match(f"{unit_key}[0-9]*", key):
                    attach_unit = unit_string
            if attach_unit is not None:
                smirnoff_data[key] = str(smirnoff_data[key]) + " * " + attach_unit
            # And recursively act on value, in case it's a deeper level of hierarchy
            smirnoff_data[key] = recursive_attach_unit_strings(
                smirnoff_data[key], units_to_attach
            )
    # If it's a list, operate on each member of the list
    elif isinstance(smirnoff_data, list):
        for index, value in enumerate(smirnoff_data):
            smirnoff_data[index] = recursive_attach_unit_strings(value, units_to_attach)
    # Otherwise, just return smirnoff_data unchanged
    else:
        pass
    return smirnoff_data
import logging
def handle_depricated_arguments(args):
    """Warn about depricated arguments, rewriting each one onto its
    replacement attribute when that replacement exists on the namespace.

    :param args: parsed argument namespace; mutated in place
    :return: the same namespace with depricated attributes cleared
    """
    # --block was replaced by --coverage (a list of patterns).
    block = getattr(args, 'block', None)
    if block:
        if hasattr(args, 'coverage'):
            logging.warning("--block is depricated, using --coverage %s.",
                            block)
            args.coverage = [block]  # block is a string, coverage is a list
        else:
            logging.warning("--block is depricated, use --coverage instead.")
        args.block = None
    # --htmldir was replaced by --reportdir.
    htmldir = getattr(args, 'htmldir', None)
    if htmldir:
        if hasattr(args, 'reportdir'):
            logging.warning("--htmldir is depricated, using --reportdir %s.",
                            htmldir)
            args.reportdir = htmldir
        else:
            logging.warning("--htmldir is depricated, use --reportdir instead.")
        args.htmldir = None
    # --srcexclude was replaced by --exclude; only warn about the regex
    # dialect difference when we could not transfer the value.
    srcexclude = getattr(args, 'srcexclude', None)
    if srcexclude:
        if hasattr(args, 'exclude'):
            logging.warning("--srcexclude is depricated, using --exclude %s.",
                            srcexclude)
            args.exclude = srcexclude
        else:
            logging.warning("--srcexclude is depricated, "
                            "use --exclude instead.")
            logging.warning("--srcexclude and --exclude use slight different "
                            "regular expressions.")
        args.srcexclude = None
    # --blddir and --storm are simply ignored nowadays.
    if getattr(args, 'blddir', None):
        logging.warning("--blddir is depricated, ignoring --blddir.")
        args.blddir = None
    if getattr(args, 'storm', None):
        logging.warning("--storm is depricated, ignoring --storm.")
        args.storm = None
    return args
import inspect
import re
def option_descriptions(operation):
    """Extract per-parameter help text from the docstring of a command.

    :param operation: callable whose docstring uses ``:param name: text``
        reST-style entries (continuation lines are folded into the text)
    :return: dict mapping parameter name -> description string
    """
    doc = inspect.getdoc(operation)
    if not doc:
        return {}
    # Any of these prefixes terminates a parameter's description block.
    stop_prefixes = ("'''", '"""', ':param', ':type', ':return', ':rtype')
    param_re = re.compile(r'\s*(:param)\s+(.+?)\s*:(.*)')
    descriptions = {}
    doc_lines = doc.splitlines()
    pos = 0
    while pos < len(doc_lines):
        found = param_re.search(doc_lines[pos])
        if found is None:
            pos += 1
            continue
        # The name portion may carry type info ("int count"); keep the last word.
        name = found.group(2).split()[-1]
        description = found.group(3).strip()
        # Fold in continuation lines until the next docstring directive.
        pos += 1
        while pos < len(doc_lines):
            stripped = doc_lines[pos].strip()
            if stripped.startswith(stop_prefixes):
                break
            if stripped:
                description += ' ' + stripped
            pos += 1
        descriptions[name] = description
    return descriptions
def _safe_feedback_arc_set(g, method="ip"):
    """Compute the feedback arc set of the directed graph ``g``.

    The result is a set of edge ids that, once removed, break all cycles
    and turn ``g`` into a DAG.  To work around a potential igraph segfault
    (https://github.com/igraph/igraph/issues/858) the computation runs on
    a copy augmented with a terminal sink vertex, which does not change
    the feedback arc set.

    :param g: a directed igraph graph
    :param method: igraph feedback_arc_set method name (default "ip")
    :return: list of edge ids in ``g``
    """
    assert g.is_directed()
    # An empty graph trivially has an empty feedback arc set.
    if g.vcount() == 0:
        return []
    original = g
    work = g.copy()
    # Append one sink vertex with an incoming edge from every other vertex;
    # such edges can never be part of a cycle.
    sink = work.vcount()
    work.add_vertices(1)
    work.add_edges([(v, sink) for v in range(sink)])
    edge_ids = work.feedback_arc_set(method=method)
    # Edge ids should be identical between the copy (minus the sink) and the
    # original graph; sanity-check by comparing vertex degrees.
    work.delete_vertices([sink])
    probe = [work.es[e].source for e in edge_ids]
    d1 = original.degree(probe)
    d2 = work.degree(probe)
    assert d1 == d2, "{!r} vs {!r}".format(d1, d2)
    return edge_ids
def attribute_cell():
    """Returns string see usage"""
    # Each part ends with a trailing newline (the original joined with "").
    declarations = ("F1 is a function\n"
                    "Fun elem is a functional element\n")
    attributes = ("A is an attribute\n"
                  "B is an attribute. C is an attribute\n")
    assignments = ("The A of F1 is 4,2\n"
                   "The C of F1 is pink\n"
                   "The B of Fun elem is 8,5.\n"
                   "The A of Fun elem is 100\n")
    return declarations, attributes, assignments
def is_pft(df):
    """Check if df is a per-pft dataframe."""
    # A per-pft frame is identified by the presence of a 'Total' column.
    return 'Total' in df.columns.values
def drop(num, iterator):
    """Drop the first n elements on an iterator"""
    # Consume up to `num` items; stop early if the iterator runs dry.
    for _ in range(num):
        try:
            next(iterator)
        except StopIteration:
            break
    return iterator
def find_best_location(all_text, search_string, start_idx, end_idx):
    """When all_text[start_idx, end_idx] != search_string, we use this method
    to find the occurrence of search_string within all_text that is closest
    to the given (start_idx, end_idx).

    :param all_text: full document text
    :param search_string: the string to locate (surrounding whitespace ignored)
    :param start_idx: expected start offset of the string
    :param end_idx: expected end offset of the string (unused for matching)
    :return: (start, end) offsets of the closest occurrence
    :raises Exception: if search_string does not occur in all_text
    """
    needle = search_string.strip()
    # Scan every occurrence, keeping the one nearest to start_idx.
    # Matches farther than len(all_text) away are never accepted.
    closest_dist = len(all_text)
    closest_pos = None
    scan_from = 0
    while True:
        pos = all_text.find(needle, scan_from)
        if pos < 0:
            break
        dist = abs(start_idx - pos)
        if dist < closest_dist:
            closest_dist = dist
            closest_pos = pos
        scan_from = pos + 1
    if closest_pos is None:
        raise Exception(u'Search string ({0}) not in text.'.format(needle))
    return (closest_pos, closest_pos + len(needle))
def uniform(feature, bins):
    """Equal width bin, take a uniform distribution for the sample value range.
    Args:
        feature: pd.Series, model feature values.
        bins: int, split bins of feature.
    Returns:
        the list of split threshold of feature.
    """
    lo = feature.min()
    width = (feature.max() - lo) / bins
    # bins left edges plus the maximum as the final right edge
    edges = [width * i + lo for i in range(bins)]
    edges.append(feature.max())
    return edges
def _sane_narrow_connection( \
    arch, cnode, enode, narrow=0):
    """
    Recursively check that the architecture does not include more than
    one 'narrow' computation on any unbranched sub-path.

    :param arch: directed graph of the architecture; traversed via
        ``successors``.  Node names containing the substring 'narrow'
        count as narrow computations.
    :param cnode: current node to continue the search from
    :param enode: end node that terminates the search
    :param narrow: running count of narrow nodes on the current
        unbranched segment (reset when a branch is encountered)
    :return: True if no unbranched segment contains two narrow nodes
    """
    # base condition 1:
    # the narrow appear twice, then the arch. is messed-up
    if (narrow > 1): return False
    # base condition 2:
    # the search has reached its end...
    elif (cnode == enode): return True
    # recursive search, first, check the successors
    # one successor: do the recursive search
    # two successor: reset the counter
    else:
        successors = list(arch.successors(cnode))
        sane_iness = True
        if len(successors) > 1:
            for each_successor in successors:
                # : set to one since a branch starts
                # (counter restarts per branch; the current node's own
                # narrow-ness seeds the new segment)
                if 'narrow' in cnode: narrow = 1
                sane_iness = (sane_iness and \
                    _sane_narrow_connection( \
                        arch, each_successor, enode, narrow=narrow))
        else:
            for each_successor in successors:
                # : add one to the counter
                if 'narrow' in cnode: narrow += 1
                sane_iness = (sane_iness and \
                    _sane_narrow_connection( \
                        arch, each_successor, enode, narrow=narrow))
        return sane_iness
    # done. | 508ff408a7321ef979263450679b70cf92de28b5 | 43,230
def printdict(d: dict) -> str:
    """print a dict in a json-like format"""
    # Render each entry on its own tab-indented line, then strip any
    # trailing newlines (matching the original rstrip behaviour).
    rendered = "".join("\t" + key + ": " + str(d[key]) + "\n" for key in d)
    return rendered.rstrip("\n")
def identity(x, *_args, **_kwargs):
    """Identity layer: return the first input unchanged, ignoring the rest."""
    return x
def getBasePath(request):
    """ Get base path where page dirs for attachments are stored. """
    # NOTE(review): assumes a wiki-engine request object exposing
    # `rootpage.getPagePath` -- confirm against the framework in use.
    return request.rootpage.getPagePath('pages') | c640a83bea5109cfd8652cf3e2e6237f188a10e4 | 43,233
def HasAbstractFieldPath(abstract_path, store):
    """Whether a store contains abstract_path.

    Makes no provision for repeated fields. I suppose if we did we'd
    have it mean, that /any/ of the repeated subfields had such a
    subpath but, we happen to not need it.

    Args:
      abstract_path: dotted path to test (e.g. "a.b.c").
      store: nested dict to walk.
    Returns:
      Whether the store contains abstract_path.
    """
    node = store
    for part in abstract_path.split("."):
        if not isinstance(node, dict) or part not in node:
            return False
        node = node[part]
    # Reaching the end of abstract_path (even "prematurely") counts as found.
    return True
def render_dashboard(category, tabs, prefix):
    """Renders a dashboard config string.

    Follows this format:
    {
        name = 'dashboard_name'
        dashboard_tab = [
            tab('tab-name', 'test-group-name'),
            ...
        ]
    }

    Args:
        category: dashboard category, appended to prefix for the name.
        tabs: iterable of (tab, path) string pairs.
        prefix: dashboard name prefix.
    Returns:
        The rendered config string.
    Raises:
        ValueError: if prefix, category, or any tab/path contains a single
            quote, which would corrupt the generated config.
    """
    if '\'' in prefix:
        raise ValueError(prefix)
    if '\'' in category:
        raise ValueError(category)
    for tab, path in tabs:
        # BUG FIX: `tabs` holds (tab, path) tuples, so the old check
        # `'\'' in tab` tested tuple *membership* (element equality with
        # "'"), never substring presence -- quote-bearing names slipped
        # through validation. Check both strings explicitly.
        if '\'' in tab or '\'' in path:
            raise ValueError((tab, path), tabs)
    return """{
    name = '%(prefix)s-%(category)s'
    dashboard_tab = [
        %(tabs)s
    ]
},""" % dict(
    prefix=prefix,
    category=category,
    tabs='\n        '.join('tab(\'%s\', \'%s\'),' % (tab, path)
                           for (tab, path) in sorted(tabs)))
def validizeCompany(company):
    """
    Formats strings to allow for them to be included in the url of a search

    :param company: The string, in this case, generally the name of the company
    :return: The string, formatted to be in a query in the url.
    """
    # Spaces become '+', and the value is wrapped in encoded quotes (%27).
    encoded = company.replace(" ", "+")
    return "%27{}%27".format(encoded)
return "%27"+company.replace(" ", "+")+"%27" | a0e597bfa2a1e5ea6dfa558b3dc39921afa3b444 | 43,236 |
def append_cpu_req(req, n):
    """
    Construct a request for the CPU test.

    Args:
        req: the basic request dict (mutated in place and returned)
        n: the CPU test will calculate n! and record the time

    Returns:
        the same ``req`` dict with ``req["cmds"]["cpu"]`` populated
    """
    # Robustness: create the "cmds" section if the basic request lacks it
    # (previously a bare dict raised KeyError).
    req.setdefault("cmds", {})["cpu"] = dict(n=n)
    return req
def read_config():
    """
    Return the hard-coded topic options (platform types, regional
    associations, platforms, sensors, variables) used to build topics.

    NOTE(review): despite the original description, no JSON file is read --
    the options are embedded below.  Also note that
    'mass_concentration_of_chlorophyll_in_sea_water' appears twice in the
    "variable" list; confirm whether the duplicate is intentional.

    :return: dict mapping option-category name -> list of allowed values
    """
    topic_data = {
        "platform_type": ["buoy", "station", "glider"],
        "ra": [
            "aoos",
            "caricoos",
            "cencoos",
            "gcoos",
            "glos",
            "maracoos",
            "nanoos",
            "neracoos",
            "pacioos",
            "secoora",
            "sccoos",
        ],
        "platform": ["a", "b", "c", "d", "e", "f", "g", "h"],
        "sensor": ["met", "ctd", "adcp", "wave", "bio"],
        "variable": [
            "air_temperature",
            "air_pressure_at_sea_level",
            "sea_water_practical_salinity",
            "sea_water_temperature",
            "sea_surface_wave_significant_height",
            "mass_concentration_of_chlorophyll_in_sea_water",
            "eastward_sea_water_velocity",
            "northward_sea_water_velocity",
            "mass_concentration_of_chlorophyll_in_sea_water",
        ],
    }
    # json.dumps(topic_data, sort_keys=True, indent=4)
    return topic_data | fe2bed60ab8965f8a932bb5e36770813e9250953 | 43,239
import pickle
def unpickle(filename: str) -> object:
    """
    Unpickle a file and return the stored object.

    :param filename: path to the pickle file
    :return: the deserialized object
    :raises OSError: if the file cannot be opened
    :raises pickle.UnpicklingError: if the contents are not a valid pickle
    """
    # `with` guarantees the handle is closed even if pickle.load raises
    # (the original leaked the file object on error).
    with open(filename, "rb") as pickle_in:
        return pickle.load(pickle_in)
def get_loadings_for_created_order(language, parameters_from_text):
    """
    Creates string with loadings information.

    :param language: language from request ('pl' or 'en'); any other value
        yields an empty string
    :param parameters_from_text: order parameters gathered from previous
        contexts; loading keys look like ``my_date_loading0``,
        ``my_loading_street0``, ... each ending with an integer index
    :return: string with one formatted line per loading
    """
    def _trailing_index(key):
        # Extract the maximal run of digits at the end of the key ("" if none).
        digits = ""
        for ch in reversed(key):
            if ch.isdigit():
                digits = ch + digits
            else:
                break
        return digits

    loadings_keys = [key for key in parameters_from_text if "loading" in key]
    # One loading entry exists per *_date_loadingN key.
    number_of_dates = sum(1 for key in loadings_keys if "date" in key)
    # Group keys by their exact trailing index.
    # BUG FIX: the previous `str(i) in item` substring test mis-grouped keys
    # once indices reached 10 ("1" matched "...loading10" etc.); comparing
    # the full trailing digit run matches each index exactly.
    dicts_list = []
    for i in range(number_of_dates):
        suffix = str(i)
        dicts_list.append({key: parameters_from_text[key]
                           for key in loadings_keys
                           if _trailing_index(key) == suffix})
    output_string = ""
    for index, loading in enumerate(dicts_list):
        values = (index + 1,
                  loading["my_date_loading{}".format(index)][0:10],
                  loading["my_time_loading{}".format(index)][11:16],
                  loading["my_loading_street{}".format(index)],
                  loading["my_loading_post_code{}".format(index)],
                  loading["my_loading_post_place{}".format(index)],
                  loading["my_number_loading{}".format(index)])
        template = None
        if language == 'pl':
            template = "{}. {} godz. {}, ulica: {} {} {}, liczba palet: {}\n"
        elif language == 'en':
            template = "{}. {} at {}, {} street, {} {}, pallets: {}\n"
        if template is not None:
            output_string += template.format(*values)
    return output_string
def double_from_string(str):
    """Parse a floating point number from a string, accepting either a comma
    or a dot as the decimal separator.

    :param str: string to convert
    :return: the parsed float, or the string "not a double in string" when
        the input cannot be parsed
    """
    normalized = str.replace(",", ".")
    try:
        return float(normalized)
    except ValueError:
        return "not a double in string"
import numpy
def score_to_empirical_kl(score, count):
    """
    Convert total log score to KL( empirical || model ),
    where the empirical pdf is uniform over `count` datapoints.
    """
    n = float(count)
    return -(score / n) - numpy.log(n)
def custom_submit_line(context):
    """
    Displays a row of custom action buttons
    """
    result = {
        "custom_admin_actions": context.get("custom_admin_actions", []),
        "custom_admin_actions_prefix": context.get(
            "custom_admin_actions_prefix", "custom_admin_actions_"
        ),
    }
    # Only expose the object when it is truthy (add views pass a falsy one).
    original = context["original"]
    if original:
        result["original"] = original
    return result
def highcharts_plot_view(context):
    """
    Dependencies for highcharts_plot_view gizmo.
    """
    base = 'tethys_gizmos/vendor/highcharts/js/'
    return (base + 'highcharts.js',
            base + 'highcharts-more.js',
            base + 'modules/exporting.js')
import itertools
def fast_forward_to_length(sequences, length):
    """
    Return an itertools.dropwhile that starts from
    the first sequence that has the given length.

    >>> list(fast_forward_to_length([list(range(n)) for n in range(6)], 4))
    [[0, 1, 2, 3], [0, 1, 2, 3, 4]]
    """
    def _wrong_length(seq):
        return len(seq) != length
    return itertools.dropwhile(_wrong_length, sequences)
import re
def parse_text_annotations(ann_file):
    """ Parses BRAT annotations provided in the .ann file and converts them
        to annotation spans of (start_position, end_position, entity_class).

        Args:
            ann_file (str): full path to the BRAT .ann file.

        Returns:
            annotations (list((int, int, str))): list of annotation spans.
                Spans are triples of (start_offset, end_offset, entity_class)
                where offset is relative to the text.
    """
    annots = []
    # `with` guarantees the file is closed even if a line fails to parse
    # (the original leaked the handle on error).
    with open(ann_file, "r") as fann:
        for line in fann:
            cols = re.split(r"\s+", line.strip())
            # Only "T" rows carry text-bound annotations; skip relations etc.
            if not cols[0].startswith("T"):
                continue
            annots.append((int(cols[2]), int(cols[3]), cols[1]))
    return annots
def _evaluate_expression(frame, expression):
    """Helper function to evaluate expression in the context of input frame
    and throw error if evaluation failed. The evaluated SBValue is returned.
    """
    value = frame.EvaluateExpression(expression)
    failed = value is None or (value.GetError() and value.GetError().Fail())
    if failed:
        raise Exception(
            "Fail to evaluate {}: {}".format(
                expression, value.GetError().GetCString()
            )
        )
    return value
def from_uint16(traces, scalers):
    """Convert uint16-packed das data back into float format.

    :param traces: uint16 array of packed values; values < 1 mark invalid
        samples (flagged in the returned mask)
    :param scalers: (min_val, max_val) pair used when the data was packed
    :return: (float array of unpacked traces, boolean mask of invalid samples)
    """
    mask = traces < 1
    min_val, max_val = scalers
    # BUG FIX: the original `traces -= 1` mutated the caller's array in
    # place.  `traces - 1` allocates a new array with identical values
    # (uint16 wraparound for zero entries is preserved; those entries are
    # flagged by `mask` anyway).
    traces = traces - 1
    traces = traces.astype(float) * max_val / (65535 - 1)
    traces += min_val
    return traces, mask
import hashlib
def gravatar(email, size=48):
    """
    Simply gets the Gravatar for the commenter. There is no rating or
    custom "not found" icon yet. Used with the Django comments.
    If no size is given, the default is 48 pixels by 48 pixels.

    Template Syntax::

        {% gravatar comment.user_email [size] %}

    Example usage::

        {% gravatar comment.user_email 48 %}
    """
    # BUG FIX: hashlib.md5 requires bytes on Python 3; encode the address.
    # NOTE(review): the Gravatar spec also trims/lowercases the address and
    # https is available -- confirm before changing the emitted URL/hash.
    hash = hashlib.md5(email.encode('utf-8')).hexdigest()
    return """<img src="http://www.gravatar.com/avatar/%s?s=%s" width="%s"
    height="%s" alt="gravatar" class="gravatar" />""" % (hash, size, size, size)
import argparse
def init_args():
    """Initialize user input."""
    # Build the argument parser for geodetic -> cartesian conversion.
    parser = argparse.ArgumentParser(
        description="Convert geodetic coordinate system to cartesian system.")
    parser.add_argument('-v', '--version', action='version',
                        version='%(prog)s 0.0.1')
    # All three coordinates are required floats.
    for flag, meta, dest, helptext in (
            ('-lat', 'lat', 'b', 'the lattitude'),
            ('-lon', 'lon', 'l', 'the longitude'),
            ('-hgt', 'height', 'h', 'the height')):
        parser.add_argument(flag, metavar=meta, dest=dest, type=float,
                            required=True, help=helptext)
    # Parse (and validate) sys.argv.
    return parser.parse_args()
import subprocess
def auth_cluster(CID, ZID):
    """
    Given zone id and cluster id, authenticate against the cluster.

    Parameters
    ------------
    CID: string
        cluster ID
    ZID: string
        zone ID
    returns: integer
        return code from gcloud call
    """
    # SECURITY FIX: pass the command as an argument list with the default
    # shell=False so CID/ZID cannot be interpreted by a shell (command
    # injection via interpolated input).  Note: if the gcloud binary is
    # missing this now raises FileNotFoundError instead of returning 127.
    cmd = ["gcloud", "container", "clusters", "get-credentials", str(CID),
           "--zone", str(ZID)]
    rc = subprocess.call(cmd)
    return rc
def get_user_path(path, root):
    """
    Gets the path used as the key in the database,
    e.g. "/2017/2017 08-19 Yosemite"

    :param path: path on the local disk where the photo or dir
        is located
    :param root: path on the local disk that is the root of all
        photos and dirs
    :return: the path relative to root, always starting with "/"
    """
    # BUG FIX: str.lstrip(root) strips any leading characters that appear
    # anywhere in `root` (a character set), not the prefix string -- e.g.
    # "/photos/summer".lstrip("/photos") gives "ummer".  Remove the prefix
    # explicitly instead.
    if path.startswith(root):
        user_path = path[len(root):]
    else:
        user_path = path
    if not user_path.startswith('/'):
        user_path = "/{}".format(user_path)
    return user_path
import socket
def hostname(ip):
    """ Attempts a reverse lookup of the hostname.
    There does exist a GeoIP2 domain database but it's not free.
    """
    try:
        result = socket.gethostbyaddr(ip)
    except Exception:
        # Mirror gethostbyaddr's (hostname, aliaslist, ipaddrlist) shape.
        result = (None, None, None)
    return result
from typing import Any
def get_public_attributes(node: Any) -> Any:
    """Get the public attributes ('children') of the current node, accessible from this node.
    """
    public_names = (name for name in dir(node) if not name.startswith("_"))
    return [getattr(node, name) for name in public_names]
import math
def euc_dst(pnt0, pnt1):
    """return the distance between pnt0 and pnt1,
    using the euclidean formula.

    `pnts` are geographic and result is in meters.

    Args:
      pnt0 (list): an xyz data list
      pnt1 (list): an xyz data list

    Returns:
      float: the distance between pnt0 and pnt1
    """
    # NOTE(review): 637100 looks like a truncated Earth radius
    # (mean radius is ~6371000 m).  As written the factor matches neither
    # the radius nor meters-per-degree (~111195) -- confirm the intended
    # constant before relying on metric distances.
    rad_m = 637100
    # Euclidean distance in coordinate units, scaled by rad_m.
    distance = math.sqrt(sum([(a-b) ** 2 for a, b in zip(pnt0, pnt1)]))
    return(rad_m * distance) | 8bfb4cd2bb30e2c448e4ec9ea63e0cc7c655b50c | 43,258
def tuple_eq_empty_not_eq(t_1, t_2):
    """Equality where empty tuples never compare equal: True only when both
    tuples are equal AND non-empty ("those which are empty are in fact not
    equal").
    """
    if not t_1:
        return False
    return t_1 == t_2
def get_int_ip(ip):
    """get ip address from ip/mask info

    Args:
        ip (str): ip with mask, e.g. "10.0.0.1/24" (a bare ip is returned as-is)
    Returns:
        str: ip address without the mask suffix
    """
    address, _, _mask = ip.partition("/")
    return address
def getinterval():
    """
    This will load interval time from interval file

    :return: lst1: list of interval time that will be loaded to main function
    """
    # Read the whole file and split on newlines (readlines + join + split
    # in the original collapses to exactly this).
    with open("./res/interval/interval.txt") as interval_file:
        contents = interval_file.read()
    return contents.split("\n")
def get_stop_index(msa_seqs):
    """Get stop indices from MSA.

    For each sequence, the stop index is the 1-based position of the last
    non-gap ('-') character.  Sequences consisting only of gaps contribute
    no entry (matching the original behaviour).
    """
    stop_indices = []
    for sequence in msa_seqs:
        reverse_seq = sequence[::-1]
        for offset, ch in enumerate(reverse_seq):
            if ch != '-':
                stop_indices.append(len(reverse_seq) - offset)
                break
    return stop_indices
def convert_coordinates(coords, stac=False):
    """
    Converts footprint coordinates that have been retrieved from the metadata of source SLC scenes stored in an
    :class:`~pyroSAR.drivers.ID` object OR a product extent retrieved using :func:`spatialist.vector.Vector.extent` to
    either `center` and `envelop` for usage in the XML metadata files or `bbox` and `geometry` for usage in STAC
    metadata files. The latter is returned if the optional parameter `stac` is set to True, else the former is returned.

    Parameters
    ----------
    coords: list[tuple(float, float)] or dict
        List of coordinate tuple pairs as retrieved from an :class:`~pyroSAR.drivers.ID` objects of source SLC scenes
        OR the product extent retrieved using :func:`spatialist.vector.Vector.extent` in the form of a dictionary with
        keys: xmin, xmax, ymin, ymax

    stac: bool, optional
        If set to True, `bbox` and `geometry` are returned for usage in STAC metadata file. If set to False (default)
        `center` and `envelop` are returned for usage in XML metadata files.

    Returns
    -------
    center: str
        Acquisition center coordinates for the XML element 'eop:Footprint/centerOf'.
    envelop: str
        Acquisition footprint coordinates for the XML element 'eop:Footprint/multiExtentOf'.

    Notes
    -------
    If `stac=True` the following results are returned instead of `center` and `envelop`:

    bbox: list[float]
        Acquisition bounding box for usage in STAC Items. Formatted in accordance with RFC 7946, section 5:
        https://datatracker.ietf.org/doc/html/rfc7946#section-5
    geometry: dict
        Acquisition footprint geometry for usage in STAC Items. Formatted in accordance with RFC 7946, section 3.1.:
        https://datatracker.ietf.org/doc/html/rfc7946#section-3.1
    """
    if isinstance(coords, (list, tuple)) and len(coords) == 4:
        c = coords
        x = [c[0][0], c[1][0], c[2][0], c[3][0]]
        y = [c[0][1], c[1][1], c[2][1], c[3][1]]
        xmin = min(x)
        xmax = max(x)
        ymin = min(y)
        ymax = max(y)
    elif isinstance(coords, dict) and len(coords.keys()) == 4:
        xmin = coords['xmin']
        xmax = coords['xmax']
        ymin = coords['ymin']
        ymax = coords['ymax']
        x = [xmin, xmin, xmax, xmax]
        y = [ymin, ymax, ymax, ymin]
    else:
        raise RuntimeError('Coordinates must be provided as a list of coordinate tuples OR as a dictionary with '
                           'keys xmin, xmax, ymin, ymax')
    if stac:
        bbox = [xmin, ymin, xmax, ymax]
        geometry = {'type': 'Polygon', 'coordinates': (((x[0], y[0]), (x[1], y[1]), (x[2], y[2]), (x[3], y[3]),
                                                        (x[0], y[0])),)}
        return bbox, geometry
    else:
        x_c = (xmax + xmin) / 2
        y_c = (ymax + ymin) / 2
        center = '{} {}'.format(y_c, x_c)
        envelop = '{} {} {} {} {} {} {} {} {} {}'.format(y[0], x[0], y[1], x[1], y[2], x[2], y[3], x[3], y[0], x[0])
        return center, envelop | 361eeef4322fd1976f025e51835c049df23eafa7 | 43,265
import os
def file_upload_location(instance, filename):
    """
    Function which is used to get rename the file based on pet's slug field,
    and get the location to store the uploaded file.
    """
    _, extension = os.path.splitext(filename)
    new_name = instance.slug + extension  # e.g. henrys-cat.jpg
    return os.path.join(instance._meta.app_label, new_name).lower()
import math
def angle_between_2d_vectors(v1, v2):
    """
    return the angle from v1 to v2, with signs

    :param v1: 2d vector
    :param v2:
    :return:

    author: weiwei
    date: 20210530
    """
    cross = v2[1] * v1[0] - v2[0] * v1[1]
    dot = v2[0] * v1[0] + v2[1] * v1[1]
    return math.atan2(cross, dot)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.