# NOTE: dataset-extraction artifact removed (table header "content | sha1 | id").
import tempfile
import requests
import os
import zipfile
def download_dataset(url):
    """Download a zipped archive from *url* into a fresh temp dir and unzip it.

    Side effect: sets the module-global TEMPDIR to the new temp directory.

    :param url: URL of a zipped archive expected to contain a ``.tif`` file
    :return: absolute path of the extracted ``.tif`` file
    :raises requests.HTTPError: if the download fails
    """
    global TEMPDIR
    TEMPDIR = tempfile.mkdtemp(prefix="dronedeploy-")
    r = requests.get(url)
    r.raise_for_status()  # fail fast instead of unzipping an error page
    zip_file = os.path.join(TEMPDIR, "output.zip")
    with open(zip_file, "wb") as out_zip:
        out_zip.write(r.content)
    # context manager closes the archive handle (the original leaked it)
    with zipfile.ZipFile(zip_file) as zf:
        zf.extractall(TEMPDIR)
    file_name = None
    for f in os.listdir(TEMPDIR):
        if f.endswith(".tif"):
            file_name = os.path.abspath(os.path.join(TEMPDIR, f))
    assert file_name
    return file_name
import os
def check_file_existence(path_to_file):
    """Verify that *path_to_file* exists on disk.

    :param path_to_file: file path to check
    :type path_to_file: string
    :return: True when the path exists
    :raises RuntimeError: when it does not
    """
    if os.path.exists(path_to_file):
        return True
    raise RuntimeError("Cannot find file: " + path_to_file)
from typing import Union
def strint(value: Union[int, str, float]) -> str:
    """Render *value* as a string; numbers lose their decimal part.

    An exact type check is used (as in the original), so bools and
    int/float subclasses fall through to plain ``str()``.
    """
    is_plain_number = type(value) in (int, float)
    return str(int(value)) if is_plain_number else str(value)
def batch_data(data, batch_size):
    """Split *data* into consecutive chunks of *batch_size*.

    Args:
        data (List): list to be batched
        batch_size (int): size of each batch
    Returns:
        List[List]: chunks of size batch_size; the last may be shorter.
    """
    starts = range(0, len(data), batch_size)
    return [data[start:start + batch_size] for start in starts]
def get_current_system_from_log(entries_parsed: list, log_function=print, verbose=False) -> str:
    """Return the name of the last star system the commander visited.

    :param entries_parsed: parsed journal entries (dicts)
    :param log_function: callable used for progress output
    :param verbose: emit progress messages when True
    :return: last system name, or "" when no entry carries "StarSystem"
    """
    if verbose:
        log_function("Looking for current system")
    current_system = ""
    # Only the last StarSystem matters; tracking the dedup list the
    # original built was unnecessary.
    for entry in entries_parsed:
        if "StarSystem" in entry:
            current_system = entry["StarSystem"]
    if verbose:
        if current_system:
            log_function(f"Found system {current_system}")
        else:
            # bug fix: the original called print() here, ignoring log_function
            log_function("No current system found")
    return current_system
def _get_flags(flag_val, flags):
"""
Return a list of flag name strings.
`flags` is a list of `(flag_name, flag_value)` pairs. The list must be in sorted in order of the
highest `flag_value` first and the lowest last.
"""
if not flags:
return "Flags not parsed from source."
ret = []
for name, hex_val in flags:
dec_val = int(hex_val, 16)
if flag_val < dec_val:
continue
ret.append(name)
flag_val -= dec_val
return ret | 654a052e0840a4d2ae038bcde5070a59f3e41443 | 45,723 |
import argparse
def handle_arguments():
    """Build the command-line argument parser.

    Returns:
        argparse.ArgumentParser: parser with -i/--input, -o/--output and
        -l/--lineage options.
    """
    parser = argparse.ArgumentParser(
        description="Take 3MileBeach trace and convert to DAG")
    parser.add_argument("-i", "--input", dest="input_file", help="input json file")
    parser.add_argument("-o", "--output", dest="output_file", help="output DAG graph file")
    parser.add_argument("-l", "--lineage", dest="lineage", action="store_true")
    return parser
def compute_scores_on_distance_measure(first_data, memento_data, distance_function):
    """Score the difference between two documents with a distance function.

    When both inputs are empty the distance is 0 by definition; otherwise
    *distance_function* is applied (exactly as the original's branches did).
    """
    if not first_data and not memento_data:
        return 0
    return distance_function(first_data, memento_data)
import os
def get_corpus(data_dir):
    """Load a labelled corpus from every file in *data_dir*.

    Each line is ``<label>,<sentence>``; lines without a comma are skipped.

    :param data_dir: directory of utf-8 text files
    :return: (list of token lists, list of zero-based int labels)
    """
    words, labels = [], []
    for file_name in os.listdir(data_dir):
        path = os.path.join(data_dir, file_name)
        with open(path, mode='r', encoding='utf-8') as f:
            for line in f:
                parts = line.rstrip().split(',', 1)
                if len(parts) > 1:
                    # keras categorical labels start at 0, file labels at 1
                    labels.append(int(parts[0]) - 1)
                    words.append(parts[1].split())
    return words, labels
def parse_account(config, auth, account):
    """Break an ``[account:advertiser@profile]`` token into parts if supplied.

    Possible variants include:
      * [account:advertiser@profile]
      * [account:advertiser]
      * [account@profile]

    Args:
      * auth: (string) Either user or service.
      * account: (string) A string representing [account:advertiser@profile]
    Returns:
      * (network_id, advertiser_ids) after parsing the account token.
        NOTE(review): profile_id is parsed but never returned; the original
        docstring promised a 3-tuple. Return arity is kept at two so
        existing callers keep working — confirm intent upstream.
    """
    network_id = account
    advertiser_ids = None
    profile_id = None
    # profile id, if present, trails after '@'
    try:
        network_id, profile_id = network_id.split('@', 1)
    except (ValueError, AttributeError):
        # bug fix: narrowed from a bare except (which hid real errors);
        # ValueError = no '@', AttributeError = non-string account
        profile_id = None
    # advertiser list, if present, trails after ':'
    try:
        network_id, advertiser_ids = network_id.split(':', 1)
    except (ValueError, AttributeError):
        pass
    # if network or advertiser, convert to integer
    if network_id is not None:
        network_id = int(network_id)
    if advertiser_ids is not None:
        advertiser_ids = [
            int(advertiser_id.strip())
            for advertiser_id in advertiser_ids.split(',')
        ]
    return network_id, advertiser_ids
def get_intel_doc_label_item(intel_doc_label: dict) -> dict:
    """Extract the relevant fields from an intel doc label API object.

    :type intel_doc_label: ``dict``
    :param intel_doc_label: label dict obtained from the API call
    :return: dict containing only the relevant fields (missing keys -> None)
    :rtype: ``dict``
    """
    field_map = (
        ('ID', 'id'),
        ('Name', 'name'),
        ('Description', 'description'),
        ('IndicatorCount', 'indicatorCount'),
        ('SignalCount', 'signalCount'),
        ('CreatedAt', 'createdAt'),
        ('UpdatedAt', 'updatedAt'),
    )
    return {out_key: intel_doc_label.get(src_key) for out_key, src_key in field_map}
def removeExposures(plates, startDate):
    """Removes all real exposures taking after startDate.

    Mutates the plate objects in place (sets, exposures, dirty flags) and
    also returns the same list for convenience.
    """
    for plate in plates:
        for ss in plate.sets:
            # Every surviving set is marked as mock.
            ss.isMock = True
            # Keep only exposures observed strictly before startDate
            # (getJD()[0] is presumably the exposure's start JD — confirm).
            validExps = [exp for exp in ss.totoroExposures
                         if exp.getJD()[0] < startDate]
            if len(validExps) != len(ss.totoroExposures):
                # Something was dropped: flag the plate so completion is
                # recomputed instead of trusting cached values.
                plate._modified = True
                plate._useOnlyCompletion = True
            ss.totoroExposures = validExps
        # Drop sets that lost all of their exposures.
        plate.sets = [ss for ss in plate.sets if len(ss.totoroExposures) > 0]
    return plates
import pkg_resources
import os
def get_sql(fname, sql_dir='sql', pkg_name='alpaca2pg') -> str:
    """Read and return the SQL text stored at ``<pkg_name>/<sql_dir>/<fname>``."""
    relative = os.path.join(sql_dir, fname)
    file_path = pkg_resources.resource_filename(pkg_name, relative)
    with open(file_path, 'r', encoding='utf-8') as handle:
        return handle.read()
import os
def _pid():
"""
Return the current process pid
"""
return os.getpid() | ee7c0ace3f2d171eda9f0e322b5108c2c3f7a7b2 | 45,736 |
def Extensible(cls):
    """Return a subclass of *cls* that declares no __slots__.

    Instances of the result accept arbitrary attributes even when *cls*
    restricts them, which is handy for one-off test doubles.

    Args:
        cls: class to inherit from.
    Returns:
        A new class named ``Ext_<cls.__name__>`` derived from cls.
    Example:
        o = Extensible(object)()
        o.Foo = 5
    """
    return type('Ext_' + cls.__name__, (cls,), {})
def make_grid(gates):
    """Build the 3-D grid dict used by the A* search.

    Each key is an (x, y, z) coordinate; each value is ``[free, g]`` where
    the extra cost g grows toward lower layers so A* avoids them.
    """
    # 25-gate netlists run on a smaller board
    y_range = 13 if len(gates) == 25 else 17
    return {(x, y, z): [True, (8 - z) * 2]
            for x in range(18)
            for y in range(y_range)
            for z in range(8)}
async def mock_successful_connection(*args, **kwargs):
    """Pretend a connection attempt succeeded, ignoring all arguments."""
    return True
import os
from pathlib import Path
def get_image_path(image_name: str) -> str:
    """Return the path of a test image in the disambiguate data directory.

    Climbs four directories from this file (test -> object detection ->
    object recognition -> perception layer -> repo root), then descends
    into ``hri/disambiguate/disambiguate/data``.
    """
    location = Path(os.path.dirname(os.path.realpath(__file__)))
    # climb to the repository root, one parent per layer
    for _ in range(4):
        location = location.parent.absolute()
    data_directory = os.path.join(location, 'hri/disambiguate/disambiguate/data')
    return os.path.join(data_directory, image_name)
def filter_scopes(scopes, *args):
    """Keep only scopes containing at least one of *args*.

    :param scopes: iterable of scope strings
    :param args: substrings to match; with no args, *scopes* is returned as-is
    :return: filtered list (or the original object when args is empty)
    """
    if not args:
        return scopes
    matches = []
    for scope in scopes:
        if any(fragment in scope for fragment in args):
            matches.append(scope)
    return matches
import re
def validate_password(passw):
    """Validate a password: 6-12 chars of letters/digits/``!@#$%&*?.`` with
    at least one letter and at least one digit or special character.

    :param passw: candidate password string
    :return: True when valid, else False
    """
    # raw string avoids invalid-escape warnings for \d on newer Pythons
    pattern = r"^(?=.*[A-Za-z])(?=.*[\d!@#$%&*?.])[A-Za-z\d!@#$%&*?.]{6,12}$"
    return bool(re.match(pattern, passw))
def _norm(x, max_v, min_v):
""" we assume max_v > 0 & max_v > min_v """
return (x-min_v)/(max_v-min_v) | 6b1e010106fe16828106642a0d9bc6cae69b0642 | 45,745 |
def get_challenge():
    """Return the challenge input file as a list of stripped lines."""
    with open("./input", 'r') as hdl:
        return [line.strip() for line in hdl.readlines()]
def process(max_slices, no_of_types, slices_list):
    """
    The main program reads the input file, processes the calculation
    and writes the output file

    Greedy search with one level of backtracking: starting from each
    possible "largest pizza", pack slices from big to small, then retry by
    dropping the smallest chosen pizza. slices_list is assumed sorted
    ascending (indices are walked high-to-low) — confirm with callers.

    Args:
        max_slices: Maximum number of slices allowed
        no_of_types: Number of Pizza to be selected (unused here)
        slices_list: List of number of slices of each pizza
    Returns:
        total number of slices
        , the list of the types (indices) of pizza to order
    """
    global_slices_sum = 0
    global_slices_ordered = []
    # Check each pizza from the most slices to the least
    for pizza_idx in range(1, len(slices_list) + 1):
        slices_sum = 0
        slices_ordered = []
        # try sum as much as possible
        for slice_idx in range(len(slices_list) - pizza_idx, -1, -1):
            if slices_sum + slices_list[slice_idx] > max_slices:
                continue  # skip if over the max
            slices_sum += slices_list[slice_idx]
            slices_ordered.insert(0, slice_idx)
            if slices_sum == max_slices:
                break  # stop when max is reached
        if slices_sum > global_slices_sum:
            # best combination so far
            global_slices_sum = slices_sum
            global_slices_ordered = slices_ordered.copy()
        if global_slices_sum == max_slices:
            break  # stop when max is reached
        # Remove the last one to select another combination
        while len(slices_ordered) > 0 and global_slices_sum < max_slices:
            last_idx = slices_ordered[0]
            slices_sum -= slices_list[last_idx]
            slices_ordered = slices_ordered[1:]
            # refill with smaller pizzas than the one just removed
            for slice_idx in range(last_idx - 1, -1, -1):
                if slices_sum + slices_list[slice_idx] > max_slices:
                    continue  # skip if over the max
                slices_sum += slices_list[slice_idx]
                slices_ordered.insert(0, slice_idx)
                if slices_sum == max_slices:
                    break
            if slices_sum > global_slices_sum:
                global_slices_sum = slices_sum
                global_slices_ordered = slices_ordered.copy()
            if global_slices_sum == max_slices:
                break
    return global_slices_sum, global_slices_ordered
import socket
import ipaddress
def _verify_hostname(host):
"""Verify a hostname is resolvable."""
try:
resolved = socket.getaddrinfo(host, None)[0][4][0]
ip = ipaddress.ip_address(resolved)
return ip
except (socket.gaierror, ValueError):
return False | d0b74fdcf7fabec0083cd5de295199077ad6e676 | 45,749 |
def is_gcs_path(path):
    # type: (str) -> bool
    """Return True when *path* points at Google Cloud Storage (gs://)."""
    normalized = path.strip().lower()
    return normalized.startswith("gs://")
def read(file):
    """Return the raw bytes of *file*."""
    with open(file, 'rb') as handle:
        return handle.read()
def load_w2v_vocab(fname):
    """Load a gensim-generated vocabulary file as a dictionary.

    Note that this does not correspond to the gensim vocabulary object.

    Parameters
    ----------
    fname: string
        Filename where the w2v vocabulary is stored, one "word count"
        pair per line.

    Returns
    -------
    vocab: dict{str: int}
        Words mapped to their counts.
    """
    vocab = {}
    with open(fname, 'r') as f:
        for line in f:
            word, count = line.split()
            vocab[word] = int(count)
    return vocab
def gene_push_text(item, desc_len: int):
    """Build the push-notification text for a feed *item*.

    :param item: object with ``title``, ``link`` and ``summary`` attributes
    :param desc_len: number of summary characters to append; 0 disables it
    :return: formatted push text
    """
    parts = [f"标题:{item.title}\n链接:{item.link}"]
    if desc_len != 0:
        parts.append(f"\n描述:{item.summary[:desc_len]}")
    return "".join(parts)
def create_mw_bw_short_tab(res):
    """Create the short table for mw-bauwerk data.

    Adds per-hectare flow (QF), load (SPEZFRACHT) and volume (SPEZVOL)
    columns, selects the reporting columns and rounds to three decimals.
    """
    df = res["mischwasserbauwerke"]
    area = df["AUA128"]
    df["QF"] = df["QF24"] / area
    # specific load and specific volume per hectare
    df["SPEZFRACHT"] = df["SFUEIN128"] / area
    df["SPEZVOL"] = df["VOLUMEN"] / area
    columns = ["BEZEICHNUNG_1", "AUA128", "QF24", "QR", "NA198", "VOLUMEN",
               "VBECKENPROHEKTAR", "E0", "NUED", "TUE", "MMIN", "MVORH",
               "X", "QF", "TYPMISCHWASSERBAUWERKASSTRING", "SFUEIN128",
               "SPEZFRACHT", "SPEZVOL"]
    return df[columns].round(decimals=3)
def service(container, name=None):
    """Decorator registering the decorated service on *container*.

    For more information see :meth:`Container.add_service`.
    """
    def register(svc):
        container.add_service(svc, name)
        return svc
    return register
def aspectRatio(size, maxsize, height=False, width=False, assize=False):
    """Compute a scale factor (or scaled size) fitting *size* to *maxsize*.

    The reference dimension is the height when ``height`` is set, the width
    when ``width`` is set, the smaller side when both are set, and the
    larger side otherwise.  A maxsize of 0, or a reference already equal to
    maxsize, yields scale 1.0.

    :param size: (w, h) tuple
    :param assize: when True, return the scaled (w, h) instead of the scale
    """
    w, h = size
    if width and height:
        reference = min(w, h)
    elif width:
        reference = w
    elif height:
        reference = h
    else:
        reference = max(w, h)
    if maxsize == 0 or reference == maxsize:
        scale = 1.0
    else:
        scale = float(maxsize) / reference
    if assize:
        return (int(round(w * scale)), int(round(h * scale)))
    return scale
def nestedmaps():
    """Nested-dictionary fixture resembling a parsed JS AST.

    The original literal repeated the 'kind' and 'type' keys at the body
    level; since duplicates collapse to the last (identical) value, the
    dict built here is equal to the original's.
    """
    def declarator(name, init):
        return {'id': {'name': name, 'type': 'Identifier'},
                'init': init,
                'type': 'VariableDeclarator'}

    product = {'left': {'name': 'i', 'type': 'Identifier'},
               'operator': '*',
               'right': {'name': 'j', 'type': 'Identifier'},
               'type': 'BinaryExpression'}
    body = {'declare_i': declarator('i', {'type': 'Literal', 'value': 2}),
            'declare_j': declarator('j', {'type': 'Literal', 'value': 4}),
            'declare_answer': declarator('answer', product),
            'kind': 'var',
            'type': 'VariableDeclaration'}
    return {'body': body, 'type': 'Program'}
def find_easy(control, length):
    """Return the sorted, joined pattern of the unique entry in *control*
    whose length equals *length*.

    :raises ValueError: unless exactly one entry matches.
    """
    matches = [pattern for pattern in control if len(pattern) == length]
    if len(matches) != 1:
        raise ValueError('Something went wrong. Expected a single match.')
    return ''.join(sorted(matches[0]))
import sys
def _common_():
"""Stuff common to _demo_ and _tool()
"""
script = sys.argv[0]
return script | 6122f9c091e89bc4408904720c9e81196e73a808 | 45,762 |
import re
def slurp_word(s, idx):
"""Returns index boundaries of word adjacent to `idx` in `s`."""
alnum = r"[A-Za-z0-9_]"
start, end = idx, idx
while True:
if re.match(alnum, s[start - 1]):
start -= 1
else:
break
end = idx
while True:
if re.match(alnum, s[end]):
end += 1
else:
break
return start, end | 0b04de59cc1a848fac02bf58081ec990c8aa245b | 45,763 |
def get_user_rating_max(ratings, n=20):
    """Return the keys of users with at most *n* ratings.

    :param ratings: dict mapping user key -> collection of ratings
    :param n: maximum number of ratings (inclusive)
    """
    # bug fix: dict.iteritems() is Python 2 only; items() works on Python 3
    return [key for key, value in ratings.items() if len(value) <= n]
def hybrid_collision_handler(slug, node1, node2):
    """Resolve a routing collision, letting a static file shadow a dynamic
    resource.

    Example: ``/file.js`` will be preferred over ``/file.js.spt``.
    """
    first_is_dir = node1.type == 'directory'
    if first_is_dir and node2.type != 'directory':
        if not node1.children:
            # an empty directory loses outright
            return 'replace_first_node'
        if '' not in node1.children:
            # let `/bar.spt` act as the index of `/bar/`
            return 'set_second_node_as_index_of_first_node'
    elif node1.type == 'static' and node2.type == 'dynamic':
        # let `/foo.css` shadow `/foo.css.spt`
        return 'ignore_second_node'
    return 'raise'
def _calculate_rtb_ensemble_size(_CWPBN_, _Beams_,
IsE0000001, IsE0000002, IsE0000003,
IsE0000004, IsE0000005, IsE0000006,
IsE0000007, IsE0000008, IsE0000009,
IsE0000010, IsE0000011, IsE0000012,
IsE0000013, IsE0000014, IsE0000015):
"""
Calculate the number of bytes for the RTB ensemble based off the parameters.
Value given in bytes.
:param _CWPBN_: Number of bins.
:param _Beams_: Number of beams.
:param IsE0000001: Flag if IsE0000001 is enabled.
:param IsE0000002: Flag if IsE0000002 is enabled.
:param IsE0000003: Flag if IsE0000003 is enabled.
:param IsE0000004: Flag if IsE0000004 is enabled.
:param IsE0000005: Flag if IsE0000005 is enabled.
:param IsE0000006: Flag if IsE0000006 is enabled.
:param IsE0000007: Flag if IsE0000007 is enabled.
:param IsE0000008: Flag if IsE0000008 is enabled.
:param IsE0000009: Flag if IsE0000009 is enabled.
:param IsE0000010: Flag if IsE0000010 is enabled.
:param IsE0000011: Flag if IsE0000011 is enabled.
:param IsE0000012: Flag if IsE0000012 is enabled.
:param IsE0000013: Flag if IsE0000013 is enabled.
:param IsE0000014: Flag if IsE0000014 is enabled.
:param IsE0000015: Flag if IsE0000015 is enabled.
:return: Number of bytes for the ensemble.
"""
MATLAB_OVERHEAD = 7
# E0000001
E0000001 = 0
if IsE0000001:
E0000001 = 4 * (_CWPBN_ * _Beams_ + MATLAB_OVERHEAD)
# E0000002
E0000002 = 0
if IsE0000002:
E0000002 = 4 * (_CWPBN_ * _Beams_ + MATLAB_OVERHEAD)
# E0000003
E0000003 = 0
if IsE0000003:
E0000003 = 4 * (_CWPBN_ * _Beams_ + MATLAB_OVERHEAD)
# E0000004
E0000004 = 0
if IsE0000004:
E0000004 = 4 * (_CWPBN_ * _Beams_ + MATLAB_OVERHEAD)
# E0000005
E0000005 = 0
if IsE0000005:
E0000005 = 4 * (_CWPBN_ * _Beams_ + MATLAB_OVERHEAD)
# E0000006
E0000006 = 0
if IsE0000006:
E0000006 = 4 * (_CWPBN_ * _Beams_ + MATLAB_OVERHEAD)
# E0000007
E0000007 = 0
if IsE0000007:
E0000007 = 4 * (_CWPBN_ * _Beams_ + MATLAB_OVERHEAD)
#region E0000008
E0000008 = 0
if IsE0000008:
E0000008 = 4 * (23 + MATLAB_OVERHEAD)
# E0000009
E0000009 = 0
if IsE0000009:
E0000009 = 4 * (19 + MATLAB_OVERHEAD)
#E0000010
E0000010 = 0
if IsE0000010:
E0000010 = 4 * (14 + 15 * _Beams_ + MATLAB_OVERHEAD)
# E0000011
E0000011 = 0
if IsE0000011:
E0000011 = 0
# E0000012
E0000012 = 0
if IsE0000012:
E0000012 = 4 * (23 + MATLAB_OVERHEAD)
# E0000013
E0000013 = 0
if IsE0000013:
E0000013 = 4 * (30 + MATLAB_OVERHEAD)
# E0000014
E0000014 = 0
if IsE0000014:
E0000014 = 4 * (25 + MATLAB_OVERHEAD)
# E0000015
E0000015 = 0
if IsE0000015:
E0000015 = 4 * (8 * _Beams_ + 1 + MATLAB_OVERHEAD)
bytes_per_ensemble = E0000001 + E0000002 + E0000003 + E0000004 + E0000005 + E0000006 + E0000007 + E0000008 + E0000009 + E0000010 + E0000011 + E0000012 + E0000013 + E0000014 + E0000015
checksum = 4 # Checksum
wrapper = 32 # Header
return bytes_per_ensemble + checksum + wrapper | 7a49d0cd360db6d4e475a839003cd0bf1e89a44c | 45,770 |
def strxor(str1, str2):
    """XOR two strings character by character, truncated to the shorter one."""
    pairs = zip(str1, str2)  # zip already stops at the shorter string
    return "".join(chr(ord(a) ^ ord(b)) for a, b in pairs)
import os
def find_workflow(repo_directory):
    """Locate a workflow file inside *repo_directory*.

    Prefers ``cookiebakery.yaml`` over ``cookiecutter.json``.

    :param repo_directory: the candidate repository directory
    :return: path of the workflow file, or "" when none exists
    """
    if not os.path.isdir(repo_directory):
        return ""
    for candidate in ("cookiebakery.yaml", "cookiecutter.json"):
        path = os.path.join(repo_directory, candidate)
        if os.path.isfile(path):
            return path
    # bug fix: the original fell off the end and returned None here,
    # inconsistent with the "" returned for a missing directory
    return ""
def delFunc(change):
    """Return True for a diffable deletion: deleted and not a directory."""
    if not change.wasDeleted():
        return False
    return not change.isDirectory()
def string_from_source(source):
    """Extract the detector string from a Source repr.

    E.g. ``Source('DetInfo(CxiDs2.0:Cspad.0)')`` -> ``CxiDs2.0:Cspad.0``
    and ``Source('DsaCsPad')`` -> ``DsaCsPad``.
    """
    quoted = str(source).split('"')[1]
    pieces = quoted.split('(')
    if len(pieces) > 1:
        return pieces[1].rstrip(')')
    return quoted
def _linked_feature_label(linked_feature):
    """Generates the label on edges between components.

    The label is a graphviz HTML-like string built from the channel's
    name, feature count/dimension, FML, translator and source layer.

    Args:
        linked_feature: spec_pb2.LinkedFeatureChannel proto
    Returns:
        String label
    """
    # NOTE(review): the whitespace inside this literal is part of the
    # emitted label; leave it exactly as-is.
    return """<
    <B>{name}</B><BR />
    F={num_features} D={projected_dim}<BR />
    {fml}<BR />
    <U>{source_translator}</U><BR />
    <I>{source_layer}</I>
    >""".format(
        name=linked_feature.name,
        num_features=linked_feature.size,
        projected_dim=linked_feature.embedding_dim,
        fml=linked_feature.fml,
        source_translator=linked_feature.source_translator,
        source_layer=linked_feature.source_layer)
def make_values(repo_pkg, cur_ver, new_ver, branch, check_result):
    """Assemble the value dict consumed by push_create_pr_issue."""
    return {
        "repo_pkg": repo_pkg,
        "cur_version": cur_ver,
        "new_version": new_ver,
        "branch": branch,
        "check_result": check_result,
    }
def get_coordinates_from_token(token, mod):
    """Return the coordinate tuple at which *token* appears in *mod*.

    :param token: token to look up
    :param mod: dict mapping coordinate tuples -> tokens (tokens unique)
    :return: the coordinate key, or None when the token is absent
    """
    # tokens are unique, so the first hit is the only hit
    return next(
        (coords for coords, candidate in mod.items() if candidate == token),
        None,
    )
import json
def failed_validation(*messages, **kwargs):
    """Return a JSON validation blob shaped like the add-on validator's.

    Any messages already on ``kwargs['upload']`` are kept; each entry of
    *messages* is appended as a tier-1 error.
    """
    upload = kwargs.pop('upload', None)
    if upload is not None and upload.validation:
        msgs = json.loads(upload.validation)['messages']
    else:
        msgs = []
    msgs.extend({'type': 'error', 'message': m, 'tier': 1} for m in messages)
    error_count = sum(1 for m in msgs if m['type'] == 'error')
    return json.dumps({'errors': error_count,
                       'success': False,
                       'messages': msgs,
                       'prelim': True})
import time
def current_time_millis():
    """Return the current time as integer milliseconds (avoids roundoff)."""
    millis = time.time() * 1000
    return int(round(millis))
def make_unique(arr):
    """Return the distinct elements of *arr* as a list (order unspecified)."""
    return list(set(arr))
def total_occurences(s1, s2, ch):
    """(str, str, str) -> int

    Precondition: len(ch) == 1
    Return the total number of times ch appears in s1 and s2.

    >>> total_occurences('red', 'blue', 'u')
    1
    """
    # counting each string separately is safe because ch is one character
    return s1.count(ch) + s2.count(ch)
def ensure_list(obj, tuple2list=False):
    """Return a list whatever the input object is.

    None -> [], list -> itself, tuple -> list(obj) when tuple2list,
    anything else -> [obj].

    Examples
    --------
    >>> ensure_list("abc")
    ['abc']
    >>> ensure_list(tuple("abc"), tuple2list=True)
    ['a', 'b', 'c']
    >>> ensure_list(None)
    []
    >>> ensure_list(5.0)
    [5.0]
    """
    if obj is None:
        return []
    if isinstance(obj, list):
        return obj
    if tuple2list and isinstance(obj, tuple):
        return list(obj)
    return [obj]
import os
def make_directory(conf_data):
    """Create the result/model/log directories named in *conf_data*.

    A 'Seq' subdirectory is also created under the model path (the
    original checked it twice; the duplicate is removed).

    Parameters
    ----------
    conf_data: dict
        Must contain 'result_path', 'save_model_path', 'performance_log'.

    Returns
    -------
    conf_data: dict
        The same dictionary, unchanged.
    """
    seq_dir = conf_data['save_model_path'] + '/Seq'
    for path in (conf_data['result_path'], conf_data['save_model_path'],
                 conf_data['performance_log'], seq_dir):
        # exist_ok avoids the check-then-create race of the original
        os.makedirs(path, exist_ok=True)
    return conf_data
import re
def parse_chapter_name(name):
    """Parse a chapter heading like ``第X章 标题``.

    :param name: chapter-name string
    :return: (chapter number, chapter title), or (None, None) on no match
    """
    pattern = r'^(\u7b2c[\u4e00-\u9fa50-9]+\u7ae0)\s?([\u4e00-\u9fa5]+)\s?'
    matches = re.findall(pattern, name)
    if len(matches) == 1:
        chapter_num, chapter_title = matches[0]
        return chapter_num, chapter_title
    return None, None
def env_chk(val, fw_spec, strict=True, default=None):
    """Resolve per-worker values of the form ">>ENV_KEY<<".

    A string "val" of that shape is translated to
    ``fw_spec["_fw_env"][ENV_KEY]``; any other value passes through as-is.
    The FWorker sets fw_spec["_fw_env"], so one "val" can behave
    differently on different machines.  See
    https://materialsproject.github.io/fireworks/worker_tutorial.html

    Args:
        val: any value; ">>KEY<<" strings trigger the env lookup
        fw_spec: (dict) spec holding the _fw_env mapping
        strict (bool): if True, a missing env key raises; otherwise
            *default* is returned
        default: returned when val is None, or on a non-strict miss
    """
    if val is None:
        return default
    is_env_ref = (isinstance(val, str)
                  and val.startswith(">>") and val.endswith("<<"))
    if not is_env_ref:
        return val
    key = val[2:-2]
    if strict:
        return fw_spec["_fw_env"][key]
    return fw_spec.get("_fw_env", {}).get(key, default)
def _clean_subpath(subpath, delim="/"):
"""
Add / to the subpath if needed to avoid partial s-exon matching.
"""
first = subpath[0]
last = subpath[len(subpath)-1]
cleaned_subpath = ""
if first != "s": # start
cleaned_subpath += delim
cleaned_subpath += subpath
if last != "p": # stop
cleaned_subpath += delim
return cleaned_subpath | 2195bfea9f798a88e688de62eafa75e1519ab647 | 45,792 |
def preconvert_str(value, name, lower_limit, upper_limit):
    """Validate and coerce *value* to a plain ``str``.

    Parameters
    ----------
    value : `str`
        The string to convert (str subclasses are down-cast).
    name : `str`
        The name of the value (used in error messages).
    lower_limit : `int`
        The minimal length of the string.
    upper_limit : `int`
        The maximal length of the string.

    Returns
    -------
    value : `str`

    Raises
    ------
    TypeError
        If `value` was not passed as `str` instance.
    ValueError
        If the non-empty `value`'s length is outside
        [lower_limit, upper_limit].
    """
    if type(value) is not str:
        if not isinstance(value, str):
            raise TypeError(f'`{name}` can be `str` instance, got {value.__class__.__name__}.')
        # down-cast str subclasses to plain str
        value = str(value)
    length = len(value)
    if length and not (lower_limit <= length <= upper_limit):
        raise ValueError(f'`{name}` can be between length {lower_limit} and {upper_limit}, got {length!r}; {value!r}.')
    return value
import logging
import torch
def restore_checkpoint(model, optimizer, checkpoint_file, device):
    """
    Restores model and optimizer from a checkpoint file and returns
    checkpoint information.
    Has side effect of loading the state_dict for model and optimizer
    (i.e. modifies the instances).

    :param model: [class], torch model instance
    :param optimizer: [class], torch optimizer instance
    :param checkpoint_file: string, full file path
    :param device: [class], torch device instance (or device string)
    :return: (last_epoch, global_step) stored in the checkpoint
    """
    assert checkpoint_file
    logging.info("** ** * Restore from checkpoint: %s", checkpoint_file)
    checkpoint_state = torch.load(checkpoint_file, map_location=device)
    model.load_state_dict(checkpoint_state["model_state_dict"])
    optimizer.load_state_dict(checkpoint_state["optimizer_state_dict"])
    last_epoch = checkpoint_state["epoch"]
    global_step = checkpoint_state["global_step"]
    # bug fix: the original format string had three placeholders but only
    # two arguments, raising TypeError on every call
    logging.info("  RESTORED AT epoch:%d, global_step:%d", last_epoch, global_step)
    logging.info("** ** * Model restored! ** ** * ")
    # model.train() # Do this in calling code for now, maybe want model.eval() there instead
    return last_epoch, global_step
import re
import subprocess
def get_dpkg_package_version(package_name):
    """Get the version of an installed dpkg package.

    Args:
        package_name (str): The package name
    Returns:
        str: The version of the package
    """
    status = subprocess.check_output(["dpkg", "-s", package_name]).decode()
    return re.findall(r"(?<=Version: ).+", status)[0]
def reorder_elements(data_frame):
    """Reorder an elements DataFrame for readability.

    Args:
        data_frame: DataFrame expected to hold 'team' and 'web_name' columns.
    Returns:
        pd.DataFrame: columns sorted, with 'web_name' then 'team' first —
        or None (with a message) when the expected columns are missing.
    """
    try:
        cols = sorted(data_frame.columns.tolist())
        cols.insert(0, cols.pop(cols.index("team")))
        cols.insert(0, cols.pop(cols.index("web_name")))
        return data_frame[cols]
    except ValueError:
        # bug fix: narrowed from a bare except; list.index raises
        # ValueError when a column is absent, and unrelated errors should
        # no longer be swallowed
        print("Not able to shuffle elements table, check if data_frame holds elements")
def check_smtp(email):
    """Return the [host, port] of the SMTP provider matching *email*.

    Args:
        email (string): email address
    Raises:
        KeyError: in case the email's provider is not accounted for
    Returns:
        list: [smtp host, port] for the matched provider
    """
    smtp_dict = {"gmail": ['smtp.gmail.com', 587],
                 "yahoo": ['smtp.mail.yahoo.com', 465],
                 "hotmail": ['smtp.live.com', 587]}
    for key, server in smtp_dict.items():
        if key in email:
            return server
    # bug fix: the original raised inside the loop on the first
    # non-matching key, so only gmail addresses ever succeeded
    raise KeyError("Key Not Found")
def singleton_observable(observable):
    """Class decorator marking an observable as a *singleton observable*.

    In a specific interpretation only one observation of a singleton
    observable can be present.  A decorator is used rather than an
    inheritance-based mechanism because it is more flexible and cleaner.

    Usage
    -----
    Simply put the annotation @singletonobservable before the class
    declaration.
    """
    setattr(observable, "__singletonobservable__", True)
    return observable
import re
def get_testcase_summary(output):
    """! Searches for test case summary
    String to find:
    [1459246276.95][CONN][INF] found KV pair in stream: {{__testcase_summary;7;1}}, queued...
    @return Tuple of (passed, failed) or None if no summary found
    """
    pattern = re.compile(r"^\[(\d+\.\d+)\][^\{]+\{\{(__testcase_summary);(\d+);(\d+)\}\}")
    for line in output.splitlines():
        match = pattern.search(line)
        if match is None:
            continue
        # Groups 3 and 4 hold the pass / fail counts.
        return int(match.group(3)), int(match.group(4))
    return None
def import_tnmr_pars(path):
    """
    Import parameter fields of tnmr data

    Args:
        path (str) : Path to .tnt file

    Returns:
        params (dict) : dictionary of parameter fields and values
    """
    # The 8-byte file header encodes the format version string.
    with open(path, "rb") as handle:
        header = handle.read(8)
    return {"version": header.decode("utf-8")}
def download(remote, path, pkg):
    """Download *pkg* via *remote* into *path*.

    Args:
        remote: object exposing ``download_packages(pkg, path)`` which returns
            a dict with ``errors``, ``downloaded`` and ``skipped`` lists.
        path: destination directory.
        pkg: package descriptor; its ``package`` attribute must be truthy.

    Returns:
        The path of the downloaded (or already-present/skipped) package,
        or False on any failure.
    """
    if not pkg.package:
        return False
    res = remote.download_packages(pkg, path)
    if res['errors']:
        return False
    # Prefer the freshly downloaded file, fall back to a skipped one.
    # (Replaces the fragile "a and a[0] or b[0]" chain, which raised
    # IndexError when both lists were empty.)
    candidates = res['downloaded'] or res['skipped']
    if not candidates:
        return False
    pkg_path = candidates[0]
    if not pkg_path:
        return False
    return pkg_path
def bash_and_fish_comment(line: str) -> str:
    """Make a bash/fish comment.

    line: Text to comment. Must not contain newlines.
    """
    return "# " + line
from typing import Mapping
def recursive_round(ob, dp, apply_lists=False):
    """Round every float in a nested structure to *dp* decimal places.

    Mappings are always recursed into; lists/tuples only when *apply_lists*
    is set (and they come back as lists).  Anything else passes through.
    """
    if isinstance(ob, float):
        return round(ob, dp)
    if isinstance(ob, Mapping):
        return {key: recursive_round(value, dp, apply_lists)
                for key, value in ob.items()}
    if apply_lists and isinstance(ob, (list, tuple)):
        return [recursive_round(item, dp, apply_lists) for item in ob]
    return ob
import os
import shutil
def copy_dir(src, dest, enforce=True):
    """Copy the directory if necessary.

    An existing destination is replaced when *enforce* is true, otherwise
    it is left untouched.  Returns *dest* either way.
    """
    dest_exists = os.path.exists(dest)
    if not dest_exists:
        shutil.copytree(src, dest)
    elif enforce:
        shutil.rmtree(dest)
        shutil.copytree(src, dest)
    return dest
def format_time(seconds):
    """Simple text formatting of an elapsed time given in seconds.

    Returns "Ns" below one minute, "Nm SSs" below one hour and "Nh MMm"
    otherwise.  (The original used exclusive bounds on both sides and
    silently returned None at exactly 60 and 3600 seconds.)
    """
    if seconds < 60:
        return '%ds' % seconds
    if seconds < 3600:
        minutes, secs = divmod(seconds, 60)
        return '%dm %02ds' % (minutes, secs)
    hours, remainder = divmod(seconds, 3600)
    return '%dh %02dm' % (hours, remainder // 60)
def porcentual(valor, porcentaje, incremento=False, decremento=False):
    """ returns the porcuentual of a value

    With *incremento* the value is increased by the percentage, with
    *decremento* it is decreased; both at once is an error.
    """
    if incremento and decremento:
        raise ValueError("No se puede incrementar y decrementar a la vez.")
    factor = porcentaje / 100
    if incremento:
        factor = factor + 1
    elif decremento:
        factor = 1 - factor
    return round(float(valor) * float(factor), 2)
def bingpg2(bingpg_maker):
    """Return a freshly initialized bingpg instance, distinct from the first."""
    instance = bingpg_maker()
    return instance
import os
import shutil
def theme_project_tmpdir(tmpdir):
    """Copy themes-project to a temp dir, and copy demo-project content to it."""
    base = os.path.dirname(__file__)
    source_theme = os.path.join(base, "themes-project")
    source_content = os.path.join(base, "demo-project", "content")
    target = tmpdir.mkdir("temp").join("themes-project")
    # copytree creates the target itself, so it must not exist beforehand.
    shutil.copytree(source_theme, str(target))
    shutil.copytree(source_content, str(target.join("content")))
    return target
from typing import Optional
def _extract_sequence_identifier(description: str) -> Optional[str]:
"""Extracts sequence identifier from description. Returns None if no match."""
split_description = description.split()
if split_description:
return split_description[0].partition('/')[0]
else:
return None | b6f927daf3726a8a933eb47d066ed2aeee82fc70 | 45,812 |
import uuid
def parse_uid(message: dict, uid_columns: list = None):
    """Parse UUID fields of *message* in place.

    For each column in *uid_columns* whose value contains "uid", strips a
    leading "uid=" prefix and normalizes the remainder through uuid.UUID.

    Args:
        message: record to fix up (mutated and also returned).
        uid_columns: columns to inspect; defaults to
            ['user_id_got', 'user_id_set'].  (A None sentinel replaces the
            original mutable default argument.)
    """
    if uid_columns is None:
        uid_columns = ['user_id_got', 'user_id_set']
    for column in uid_columns:
        if 'uid' in message[column]:
            message[column] = str(uuid.UUID(message[column].replace('uid=', '')))
    return message
def render_fobi_forms_list(context, queryset, *args, **kwargs):
    """Render the list of fobi forms.

    :syntax:

        {% render_fobi_forms_list [queryset] [show_edit_link] \
                                  [show_delete_link] \
                                  [show_export_link] %}

    :example:

        {% render_fobi_forms_list queryset show_edit_link=True \
                                  show_delete_link=False \
                                  show_export_link=False %}
    """
    # Accept both the documented ``show_*`` keyword names and the short
    # names (``edit_link`` etc.) that the original implementation read --
    # the documented form previously had no effect at all.
    show_edit_link = kwargs.get('show_edit_link', kwargs.get('edit_link', False))
    show_delete_link = kwargs.get('show_delete_link', kwargs.get('delete_link', False))
    show_export_link = kwargs.get('show_export_link', kwargs.get('export_link', False))
    return {
        'show_custom_actions': (
            show_edit_link or show_delete_link or show_export_link
        ),
        'show_edit_link': show_edit_link,
        'show_delete_link': show_delete_link,
        'show_export_link': show_export_link,
    }
def cents_to_hz(F_cent, F_ref=55.0):
    """Converts frequency in cents to Hz

    Notebook: C8/C8S2_FundFreqTracking.ipynb

    Args:
        F_cent (float or np.ndarray): Frequency in cents
        F_ref (float): Reference frequency in Hz (Default value = 55.0)

    Returns:
        F (float or np.ndarray): Frequency in Hz
    """
    # 1200 cents = one octave = a factor of two in frequency.
    octaves = F_cent / 1200
    return F_ref * 2 ** octaves
def get_continuous_column_by_class(dataset):
    """Separates continuous column by binary class

    Takes the first continuous column of *dataset* and splits its values
    into [values-where-class-true, values-where-class-false].
    """
    column = dataset.columns_that_are('continuous')[0]
    true_values = [v for v, label in zip(column, dataset.cls) if label]
    false_values = [v for v, label in zip(column, dataset.cls) if not label]
    return [true_values, false_values]
def get_bibtex(citation):
    """Convert a citation string returned by the web page into a dict of bibtex fields."""
    # All fields default to empty strings so callers always get the full schema.
    paper_bibtex={
        'Author':'',
        'Title':'',
        'Journal':'',
        'Year':'',
        'Volume':'',
        'Issue':'',
        'Pages':'',
        'DOI':''
    }
    if len(citation)==0:# the page returned no citation information
        return paper_bibtex
    elif citation[:5]=="Error":# the page could not find the article ("Error..." response)
        return paper_bibtex
    else:
        #Michael, A. (2019). Orientation of Hydraulic Fracture Initiation from Perforated Horizontal Wellbores. SPE Annual Technical Conference and Exhibition. doi:10.2118/199766-stu
        paper_bibtex['Author']=citation[:citation.find('(')].strip()# authors are the characters left of the year's opening parenthesis
        paper_bibtex['Year']=citation[citation.find('(')+1:citation.find('(')+5]# the year is the 4 characters after '('; assumes 4-digit years
        sub_temp=citation[citation.find('(')+7:].strip()# substring starting two characters past the year's closing parenthesis
        paper_bibtex['Title']=sub_temp[:sub_temp.find(r'.')]# the title runs up to the first '.' of that substring
        # Journal/volume/issue parsing is tricky; four layouts occur:
        # 1. Journal name only, no issue/volume and no pages (usually a malformed response rather than a real citation).
        # 2. Journal with volume but no issue (a quirk of some journals): "Journal, Volume, pages1-pages2".
        # 3. Journal with volume and issue (the most common format): "Journal, Volume(Issue), pages1-pages2".
        # 4. Pages may be missing entirely.
        # All four cases must be handled.
        sub_temp1=sub_temp[sub_temp.find(r'.')+1:].strip()# substring from the '.' after the title to the end
        if sub_temp1.count(',')==2:# two commas -> journal, volume and issue information are all present
            paper_bibtex['Journal']=sub_temp1[:sub_temp1.find(',')].strip()
            sub_temp2=sub_temp1[sub_temp1.find(',')+1:]# substring after the first comma following the journal name (comma excluded)
            # NOTE(review): the slice bound below uses sub_temp1.find('.') although the string
            # being sliced is sub_temp2 -- confirm this offset is intended and not a typo.
            if(sub_temp2[:sub_temp1.find('.')].find('(') != -1):# a '(' before the DOI means case 3: both volume and issue are present
                paper_bibtex['Volume']=sub_temp2[:sub_temp2.find('(')].strip()# volume is the text between the first comma after the journal and the '('
                paper_bibtex['Issue']=sub_temp2[sub_temp2.find('(')+1:sub_temp2.find(',')-1].strip()# issue is the text from after '(' to just before the next comma
                paper_bibtex['Pages']=sub_temp2[sub_temp2.find(',')+1:sub_temp2.find('.')].strip()
            else:# no '(' found: case 2, volume only, no issue
                paper_bibtex['Volume']=sub_temp2[:sub_temp2.find(',')].strip()# volume is then the text from after the journal's first comma up to the next comma
                paper_bibtex['Pages']= sub_temp2[sub_temp2.find(',')+1:sub_temp2.find('.')].strip()
        elif sub_temp1.count(',')==1:# one comma: case 4, too ambiguous -- leave only author/title/DOI and refine later
            pass
        else:# no comma: case 1, only the journal name is present
            paper_bibtex['Journal']=sub_temp1[:sub_temp1.find('.')].strip()
            pass
        paper_bibtex['DOI']=citation[citation.rfind(':')+1:]# the DOI is everything after the last ':' in the citation
    return paper_bibtex
import torch
def _solarize(inp, thresholds=0.5):
"""
For each pixel in the image, select the pixel if the value is less than the threshold.
Otherwise, subtract 1.0 from the pixel.
"""
if not isinstance(inp, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(inp)}")
if not isinstance(thresholds, (float, torch.Tensor,)):
raise TypeError(f"The factor should be either a float or torch.Tensor. "
f"Got {type(thresholds)}")
if isinstance(thresholds, torch.Tensor) and len(thresholds.shape) != 0:
assert inp.size(0) == len(thresholds) and len(thresholds.shape) == 1, \
f"threshholds must be a 1-d vector of shape ({inp.size(0)},). Got {thresholds}"
thresholds = thresholds.to(inp.device).to(inp.dtype)
thresholds = torch.stack([x.expand(*inp.shape[1:]) for x in thresholds])
return torch.where(inp < thresholds, inp, 1.0 - inp) | 78d071f3c272f4af2f8c9d5cb8a697c38a186224 | 45,820 |
import os
def match_key_from_match_dir(match_dir):
    """Converts the match directory into a key for the match."""
    # Normalize first so a trailing separator does not yield an empty key.
    normalized = os.path.normpath(match_dir)
    return os.path.basename(normalized)
def get_mac_s(output: bytes) -> bytes:
    """Support function to get the 64-bit resynchronisation authentication code
    (MAC-S) from OUT1, the output of 3GPP f1* function.

    :param output: OUT1
    :returns: OUT1[64] .. OUT1[127]
    """
    # MAC-S occupies bits 64..127, i.e. everything past the first 8 bytes.
    return output[8:]
def custom_sort(dictionary, sort_top_labels, sort_bottom_labels):
    """ Given a dictionary in the form of
        {'<a_label>': {
                'label': '<a_label>'
                'value': '<a_value>'
            },
            ...
        }
        and two lists (for top and bottom)
        ['<a_label>', '<c_label>', '<b_label>', ...]
        return a list of the dictonaries values ordered
            <all top items found in dictionary, in the given order>
            <others>
            <all bottom items found in dictionary, in the given order>
    """
    pinned = sort_top_labels + sort_bottom_labels
    top = [dictionary[label] for label in sort_top_labels if label in dictionary]
    middle = [facet for label, facet in dictionary.items() if label not in pinned]
    bottom = [dictionary[label] for label in sort_bottom_labels if label in dictionary]
    return top + middle + bottom
def reconstruct_path(current, came_from):
    """
    Reconstruct path using last node and dictionary that maps each node on path
    to its predecessor.

    Args:
        current (int): Last node in discovered path
        came_from (dict): Dictionary mapping nodes on path to their predecessors

    Retuns:
        (tuple): Path in the form of a list and the same path encoded in an edge list
    """
    path = [current]
    node = current
    # Walk predecessors back to the start, prepending as we go.
    while node in came_from:
        node = came_from[node]
        path.insert(0, node)
    # Consecutive path nodes form the edge list.
    edgelist = list(zip(path, path[1:]))
    return path, edgelist
def gcd_steps(a, b):
    """ Return the number of steps needed to calculate GCD(a, b)."""
    # Euclid's algorithm: GCD(a, b) = GCD(b, a mod b); count the iterations.
    steps = 0
    while b:
        a, b = b, a % b
        steps += 1
    return steps
import math
def avp_from_temperature_min(temperature_min):
    """
    Estimate actual vapour pressure (*ea*) from minimum temperature.

    This method is to be used where humidity data are lacking or are of
    questionable quality. The method assumes that the dewpoint temperature
    is approximately equal to the minimum temperature (*temperature_min*), i.e. the
    air is saturated with water vapour at *temperature_min*.

    **Note**: This assumption may not hold in arid/semi-arid areas.
    In these areas it may be better to subtract 2 deg C from the
    minimum temperature (see Annex 6 in FAO paper).

    Based on equation 48 in Allen et al (1998).

    :param temperature_min: Daily minimum temperature [deg C]
    :return: Actual vapour pressure [kPa]
    :rtype: float
    """
    # Magnus-style saturation vapour pressure evaluated at T_min.
    exponent = (17.27 * temperature_min) / (temperature_min + 237.3)
    return 0.611 * math.exp(exponent)
def sort_items(sort=None):
    """
    Function to generate the sort query. Code is from https://stackoverflow.com
    questions/8109122/how-to-sort-mongodb-with-pymongo

    Falls back to the "featured" ordering when *sort* is None or not a
    recognised key (the original raised KeyError on unknown keys).
    """
    sort_file = {
        'featured': [('_id', 1)],
        'date-added': [('date_added', -1), ('name', 1)],
        'price-asc': [('price', 1), ('name', 1)],
        'price-desc': [('price', -1), ('name', 1)],
        'rating': [('overall_rating', -1), ('name', 1)],
        'cat_asc': [('category', 1)],
        'cat_desc': [('category', -1)],
        'a-to-z': [('name', 1)],
        'z-to-a': [('name', -1)],
    }
    # .get returns None for unknown/None keys; every real entry is truthy.
    return sort_file.get(sort) or sort_file['featured']
def divideGT(GT, X, Y):
    """
    Split the ground-truth map GT into four regions at column X / row Y:
    LT - left top;
    RT - right top;
    LB - left bottom;
    RB - right bottom;
    and return each region together with its share of the total area.
    """
    rows, cols = GT.shape
    total = float(cols * rows)
    # Slice out the four quadrants.
    LT = GT[:Y, :X]
    RT = GT[:Y, X:cols]
    LB = GT[Y:rows, :X]
    RB = GT[Y:rows, X:cols]
    # Each block is weighted proportionally to its area within GT.
    w1 = (X * Y) / total
    w2 = ((cols - X) * Y) / total
    w3 = (X * (rows - Y)) / total
    w4 = 1.0 - w1 - w2 - w3
    return LT, RT, LB, RB, w1, w2, w3, w4
def make_mock_video_ids(num_videos):
    """Makes a list of video ids used for unit tests.

    num_videos: an integer; the number of mock video ids to make.

    Returns: a list of strings of that can be used as video ids for unit tests.
    """
    return [f"video{index}" for index in range(num_videos)]
def int_to_bytes(i: int):
    """
    Convert a non-negative integer to its minimal big-endian byte string.

    Args:
        i: Integer to convert

    Returns:
        bytes
    """
    # int.to_bytes replaces the original manual hex-format + unhexlify
    # round-trip; "or 1" keeps the single zero byte for i == 0.
    return i.to_bytes((i.bit_length() + 7) // 8 or 1, 'big')
import logging
import sys
import os
def check_project_config(config):
    """
    Validation checks on the project config. At least one project must exist
    (otherwise exit) and the paths for each project should exist, otherwise the
    project entry is removed.

    Args:
        config (ConfigObj):
            The ConfigObj instance.

    Returns:
        ConfigObj: The (possibly modified) config instance.
    """
    # Check that at least one project exists
    if "projects" not in config:
        logging.error(
            'There are currently no projects. Use "gmrecords '
            'projects -c <project>" to create one.'
        )
        sys.exit(1)
    # All project paths are resolved relative to the config file's directory.
    config_dir = os.path.abspath(os.path.join(config.filename, os.pardir))
    # Iterate over a snapshot of the keys so entries can be deleted safely
    # (the original mutated the dict while iterating its keys view).
    for project in list(config["projects"].keys()):
        delete_project = False
        data_exists = os.path.isdir(
            os.path.join(config_dir, config["projects"][project]["data_path"])
        )
        if not data_exists:
            # logging.warn is a deprecated alias of logging.warning.
            logging.warning(f"Data path for project {project} does not exist.")
            delete_project = True
        conf_exists = os.path.isdir(
            os.path.join(config_dir, config["projects"][project]["conf_path"])
        )
        if not conf_exists:
            logging.warning(f"Install path for project {project} does not exist.")
            delete_project = True
        if delete_project:
            logging.warning(f"Deleting project {project}.")
            del config["projects"][project]
            # dict.keys() is a non-subscriptable view in Python 3; index a
            # list instead (the original keys()[0] raised TypeError here).
            remaining = list(config["projects"].keys())
            if remaining:
                config["project"] = remaining[0]
            config.write()
    return config
def path_decoder(url):
    """Grab the last component of a url as the path."""
    components = url.split('/')
    last = components[-1]
    # A trailing slash leaves an empty final component; use the one before it.
    return last if last else components[-2]
from typing import Tuple
import os
def _get_credentials_as_smart_contract() -> Tuple[str, str]:
"""Attempt to get credentials for a dragonchain from the standard location for a smart contract
Returns:
Tuple of auth_key_id/auth_key of credentials from environment. Empty strings in tuple if not found
"""
try:
base_path = os.path.join(os.path.abspath(os.sep), "var", "openfaas", "secrets")
auth_key = open(os.path.join(base_path, "sc-{}-secret-key".format(os.environ.get("SMART_CONTRACT_ID"))), "r").read()
auth_key_id = open(os.path.join(base_path, "sc-{}-auth-key-id".format(os.environ.get("SMART_CONTRACT_ID"))), "r").read()
return auth_key_id, auth_key
except OSError:
return "", "" | 7e06340446da282999c116aeee4fac1ebda931c1 | 45,836 |
def get_dir(path):
    """
    Returns the directory of a file, or simply the original path if the path is a directory (has no extension)

    :param Path path:
    :return: Path
    """
    # No suffix -> already a directory-like path.
    return path if path.suffix == '' else path.parent
def insert_output_start_stop_indicators(src):
    """
    Insert identifier strings so that output can be segregated from input.

    After each call that is known to produce console output, a marker line
    ``print(">>>>>N")`` is appended (N is a running counter), except inside
    ``try:`` blocks and for ``set_solver_print(`` calls.

    Parameters
    ----------
    src : str
        String containing input and output lines.

    Returns
    -------
    str
        String with output demarked.
    """
    lines = src.split('\n')
    # Call patterns that are known to produce console output.
    print_producing = [
        'print(',
        '.setup(',
        '.run_model(',
        '.run_driver(',
        '.check_partials(',
        '.check_totals(',
        '.list_inputs(',
        '.list_outputs(',
        '.list_problem_vars(',
    ]
    newlines = []
    input_block_number = 0   # counter embedded in each ">>>>>N" marker
    in_try = False           # True while inside a try: suite (markers suppressed)
    in_continuation = False  # True while a matched call still spans lines
    head_indent = ''         # indent of the line that started the continuation
    for line in lines:
        newlines.append(line)
        # Check if we are concluding a continuation line.
        if in_continuation:
            line = line.rstrip()
            # A line not ending in ',', '\' or '(' terminates the call.
            if not (line.endswith(',') or line.endswith('\\') or line.endswith('(')):
                newlines.append('%sprint(">>>>>%d")' % (head_indent, input_block_number))
                input_block_number += 1
                in_continuation = False
        # Don't print if we are in a try block.
        if in_try:
            if 'except' in line:
                in_try = False
            continue
        if 'try:' in line:
            in_try = True
            continue
        # Searching for 'print(' is a little ambiguous.
        if 'set_solver_print(' in line:
            continue
        for item in print_producing:
            if item in line:
                # Mirror the matched line's indentation in the marker.
                indent = ' ' * (len(line) - len(line.lstrip()))
                # Line continuations are a litle tricky.
                line = line.rstrip()
                if line.endswith(',') or line.endswith('\\') or line.endswith('('):
                    in_continuation = True
                    head_indent = indent
                    break
                newlines.append('%sprint(">>>>>%d")' % (indent, input_block_number))
                input_block_number += 1
                break
    return '\n'.join(newlines)
from typing import Sequence
from typing import AnyStr
from typing import cast
import subprocess
def invoke_formatter(formatter_args: Sequence[str], code: AnyStr) -> AnyStr:
    """
    Run the external formatter described by *formatter_args* over *code*
    (fed via stdin) and return the formatted result read from stdout.
    The result type (str/bytes) matches the input type.
    """
    if not formatter_args:
        raise Exception("No formatter configured but code formatting requested.")
    as_bytes = isinstance(code, bytes)
    # Text mode (universal_newlines/encoding) only when given a str.
    formatted = subprocess.check_output(
        formatter_args,
        env={},
        input=code,
        universal_newlines=not as_bytes,
        encoding=None if as_bytes else "utf-8",
    )
    return cast(AnyStr, formatted)
def get_images(directory):
    """
    Gets all PNG, JPG, GIF, and BMP format files at path

    :param Path directory: location to search for image files
    """
    # One glob per extension, concatenated in this fixed order.
    return [match
            for pattern in ('*.png', '*.jpg', '*.gif', '*.bmp')
            for match in directory.glob(pattern)]
def prepare_table_rows(rows):
    """Given a list of lists, make sure they are prepared to be formatted into a table
    by making sure each row has the same number of columns and stringifying all values."""
    stringified = [[str(cell) for cell in row] for row in rows]
    width = max(len(row) for row in stringified)
    # Right-pad every short row with empty strings up to the widest row.
    for row in stringified:
        row.extend([''] * (width - len(row)))
    return stringified
import re
def replace_all(src_string, regex, replacement):
    """
    Replace each occurrence of regular expression match in
    the first string with the replacement string and return the
    replaced string. A 'regex' string with null value is considered as
    no change. A 'replacement' string with null value is consider as an empty string.
    """
    if not regex:
        return src_string
    return re.sub(regex, replacement or "", src_string)
def parse_kid(line, kid):
    """ parses a "@ ale01.cha 1;4.28" line into name/month ("ale", 16) """
    fields = line.split('\t')
    # Need at least three tab-separated fields with a .cha/.txt filename,
    # otherwise keep the previously known kid unchanged.
    if len(fields) < 3 or not (".cha" in fields[1] or ".txt" in fields[1]):
        return kid
    date = fields[2].split(';')
    months = int(date[0]) * 12 + int(date[1].split('.')[0])
    return (fields[1][:3], months)
import math
def compare_lines(l1, l2, mode='p'):
    """
    Compare 2 lines.
    Returns true if they are similar, false otherwise

    mode 'p' compares endpoint positions (in either pairing order);
    mode 'm' compares slopes (l[4]); any other mode returns None.
    """
    if mode == 'p':
        max_dist = 50  # pixels, max distance between matching endpoints

        def endpoints_close(ax, ay, bx, by):
            return math.sqrt((ax - bx) ** 2 + (ay - by) ** 2) < max_dist

        direct = (endpoints_close(l1[0], l1[1], l2[0], l2[1]) and
                  endpoints_close(l1[2], l1[3], l2[2], l2[3]))
        swapped = (endpoints_close(l1[0], l1[1], l2[2], l2[3]) and
                   endpoints_close(l1[2], l1[3], l2[0], l2[1]))
        return direct or swapped
    elif mode == 'm':
        max_slope_diff = 5 * math.pi / 180  # max difference between slopes
        if abs(l1[4] - l2[4]) < max_slope_diff:
            return True
        return False
import re
def make_filename(url):
    """
    Extracts domain from a url and append '.html'
    :param url: string
    return <domain>.html string
    """
    match = re.search(r'^https?:\/\/(?:www.)?([^\/]+)\/?', url)
    if not match:
        # NOTE: preserved behavior -- prints and exits the process with
        # status 0 when the url does not look like http(s).
        print(f'Can not get domain from {url}')
        exit(0)
    return match[1] + '.html'
import ast
def _log_function(item):
"""
Handler function for log message types.
:param item: ast object being inspected for certain properties
:type item: :class:`~ast.AST`
:return: Returns the descriptor and arg offset of the item.
:rtype: tuple (str, int)
"""
level_arg = item.args[0]
if isinstance(level_arg, ast.Str):
level = level_arg.s.lower()
else:
any_call_or_name_elements = any(
isinstance(element, (ast.Call, ast.Name))
for element in ast.walk(level_arg)
)
if any_call_or_name_elements:
level = "(dynamic)"
else:
level = ', '.join(
element.s.lower()
for element in ast.walk(level_arg)
if isinstance(element, ast.Str)
)
integer_arg_offset = 1
return level, integer_arg_offset | cd7f798b5e28d1835d270e8d2b64b73faef4f853 | 45,850 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.