content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import torch
def generate_response(tokenizer, model, chat_round, chat_history_ids, question):
    """Generate a chatbot response to a user question.

    :param tokenizer: tokenizer exposing ``encode``/``decode`` and ``eos_token``.
    :param model: causal LM (e.g. DialoGPT) exposing ``generate``.
    :param chat_round: index of the current round; 0 means no history yet.
    :param chat_history_ids: token ids of the conversation so far
        (ignored when ``chat_round`` is 0).
    :param question: the user's input text.
    :return: the decoded response string.
    """
    # Encode user input followed by the End-of-String (EOS) token.
    new_input_ids = tokenizer.encode(">> You: " + question + tokenizer.eos_token, return_tensors='pt')
    # Append tokens to chat history (first round has no history).
    bot_input_ids = torch.cat([chat_history_ids, new_input_ids], dim=-1) if chat_round > 0 else new_input_ids
    # Generate response given maximum chat length history of 1250 tokens.
    chat_history_ids = model.generate(bot_input_ids, max_length=1250, pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens (everything past the prompt)
    # once, instead of decoding the same span twice as before.
    response = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
    print("DialoGPT: {}".format(response))
    return response
|
138299247b6b5b46b8999489e44676a6819e3e10
| 700,726
|
import math
def str_to_feet(value="0'-0"):
    """Return the given string converted into decimal feet.

    Acceptable formats include:
        1. 5'-7"
           5'-7 1/2"
           5'-7 1/2''
           5'-7
           5'-7 1/2
        2. 7 3/4
           -8
    The trailing quotation mark can be omitted.

    :param value: feet/inches string as described above.
    :return: float value in decimal feet.
    :raises ValueError: if the numeric parts cannot be parsed.
    """
    # Remove optional inches marks (a double quote or two single quotes).
    value = value.replace('"', '')
    value = value.replace("''", "")
    if value.find("'") != -1:
        split_str = value.split("'")
        whole_feet = float(split_str[0])
        in_str = split_str[1]
        # Drop the conventional dash between feet and inches (5'-7).
        if in_str.startswith('-'):
            in_str = in_str[1:]
    else:
        whole_feet = 0.0
        in_str = value
    split_in_str = in_str.split(" ")
    whole_inches = float(split_in_str[0])
    if len(split_in_str) > 1:
        frac_split = split_in_str[1].split("/")
        numer = float(frac_split[0])
        denom = float(frac_split[1])
        # Carry the sign of the whole-inch part onto the fraction.
        # (Explicit comparison instead of dividing by fabs(whole_inches),
        # which raised ZeroDivisionError for inputs like "0 1/2".)
        sign = -1.0 if whole_inches < 0 else 1.0
        inches = sign * (math.fabs(whole_inches) + numer / denom)
    else:
        inches = whole_inches
    sign = -1 if whole_feet < 0 else 1
    return sign * (math.fabs(whole_feet) + inches / 12.0)
|
3d9e17f3a900d4962ab65e5fdac3dcd44fa49399
| 700,727
|
def zpadlist(values: list, inputtype: str, minval: int, maxval: int) -> list:
    """Return a list of zero-padded strings after range-checking each value.

    Each input integer is checked against ``minval <= value <= maxval`` and
    then rendered as a two-digit, zero-padded string (e.g. ``['01', '31']``).

    Parameters
    ----------
    values: list(int)
        List of integers that will be zero-padded.
    inputtype: str
        String identifying the input data, used in error messages.
    minval: int
        Minimum allowed value for every element of ``values``.
    maxval: int
        Maximum allowed value for every element of ``values``.

    Returns
    -------
    list(str)
        List of zero-padded strings.

    Raises
    ------
    AssertionError
        If any value is not within ``minval <= value <= maxval``.
    """
    padded = []
    for value in values:
        number = int(value)
        assert (number >= minval), (
            'invalid value specified for {}: {}'.format(inputtype, value))
        assert (number <= maxval), (
            'invalid value specified for {}: {}'.format(inputtype, value))
        padded.append(str(number).zfill(2))
    return padded
|
bcc06dfb36b93af69d031b44f64dfd3ee7d082c3
| 700,728
|
def TASK_JUMP_FWD(step=1):
    """Jumps to the next task - eng.jumpCallForward()
    example: A, B, TASK_JUMP_FWD(2), C, D, ...
    will produce: A, B, D
    @var step: int
    """
    def _jump(obj, eng):
        # Delegate the actual jump to the workflow engine.
        eng.jumpCallForward(step)
    _jump.__name__ = 'TASK_JUMP_FWD'
    return _jump
|
de2bb5e71d54ea8cfcef61fa3ba687695b00315c
| 700,729
|
def fib_iterative(n):
    """Calculate the n-th element of the Fibonacci sequence. Assumes n >= 2.

    Returns: the Fibonacci sequence up to element n, and the n-th element.
    """
    seq = [0, 1]  # base case
    # Grow the list until it holds n elements (no-op when n <= 2).
    while len(seq) < n:
        seq.append(seq[-1] + seq[-2])
    return seq, seq[-1]
|
f28c44bc277c8f5e97e507461f3321bc0aa0510e
| 700,730
|
def bytescale(im):
    """
    Scale an array of values in [0, 1] to [0, 255] as unsigned bytes.
    """
    # 'u1' is uint8; the cast truncates after scaling to the 8-bit range.
    return (im * 255).astype('u1')
|
d3de7ebeb7601235c91d2ea345925ae0e11351ad
| 700,732
|
import hashlib
def gen_md5(src_byte):
    """Return the hex MD5 digest of a string.

    :param src_byte: string to hash; it is UTF-8 encoded before hashing.
    :return: 32-character hexadecimal digest string.
    """
    digest = hashlib.md5()
    digest.update(src_byte.encode("utf-8"))
    return digest.hexdigest()
|
3243606076735065c87c28eed473d47c6166b0b0
| 700,733
|
def _mean_of_cycle(data, setup_time=30, data_dict={}):
    """
    Calculates mean of raw and reference signal during a zero cycle.

    Parameters
    ----------
    data : pandas-like frame
        Must provide ``zerocycle_runtime``, ``State_Zero``, ``timestamp``,
        ``Signal_Raw``, ``Signal_Ref``, ``Longitude`` and ``Latitude``
        columns (inferred from the aggregations below).
    setup_time : int
        Seconds to skip at the start of the zero cycle before averaging.
    data_dict : dict
        Optional cache of zero-cycle signals.
        NOTE(review): a mutable default argument is shared across calls,
        so all callers relying on the default share one dict — confirm
        whether that is intended.
        NOTE(review): the membership test below uses the ``timestamp``
        *value*, but the writes use the literal string "timestamp" as key;
        this looks like a bug (``data_dict[timestamp]`` was probably
        intended) — verify against callers before changing.

    Returns
    -------
    Single-row frame holding the max timestamp and the mean of the signal
    and position columns over the selected rows.
    """
    # Rows at least setup_time seconds into the cycle and flagged as zeroing.
    row_selection = (data.zerocycle_runtime >= setup_time) & (data.State_Zero == 1)
    cycle = data.loc[row_selection].agg(
        {
            "timestamp": "max",
            "Signal_Raw": "mean",
            "Signal_Ref": "mean",
            "Longitude": "mean",
            "Latitude": "mean",
        }
    )
    timestamp = cycle.timestamp
    Signal_Raw = cycle.Signal_Raw
    Signal_Ref = cycle.Signal_Ref
    if data_dict:
        if timestamp not in data_dict.keys():
            data_dict["timestamp"] = {}
        if "Signal_Raw_Z" not in data_dict["timestamp"].keys():
            data_dict["timestamp"]["Signal_Raw_Z"] = Signal_Raw
        if "Signal_Ref_Z" not in data_dict["timestamp"].keys():
            data_dict["timestamp"]["Signal_Ref_Z"] = Signal_Ref
    # NOTE(review): the aggregation is recomputed here instead of reusing
    # ``cycle`` above — redundant work, kept as-is.
    return (
        data.loc[row_selection]
        .agg(
            {
                "timestamp": "max",
                "Signal_Raw": "mean",
                "Signal_Ref": "mean",
                "Longitude": "mean",
                "Latitude": "mean",
            }
        )
        .to_frame()
        .transpose()
    )
|
789cce42efebfa4b46b19e7be991cb1fa433ddae
| 700,734
|
def replace_string_newline(str_start: str, str_end: str, text: str) -> str:
    """
    re.sub function stops at newline characters, but this function moves past these.

    params: str_start, the start character of the string to be deleted
            str_end, the end character of the string to be deleted
            text, the string to be edited
    return: text, a string without the string between str_start and str_end
    """
    # Repeat until no occurrence of the start marker remains.
    while text.find(str_start) != -1:
        begin = text.find(str_start)  # also just starts w/o http, has (/
        # Position of the end marker at or after the start marker.
        nextp = text.find(str_end, begin, len(text))
        # NOTE(review): ``begin - 1`` also removes the character *before* the
        # start marker, and wraps to index -1 when begin == 0 — confirm this
        # is intended.
        # NOTE(review): if str_end is absent, nextp == -1 and this replaces
        # an empty/odd slice; depending on input the loop may not terminate —
        # verify callers always pass text containing str_end.
        text = text.replace(text[begin-1:nextp+1], ' ')
    return text
|
c2f4888ed285dc6a48b6a839ff62dbe50865472c
| 700,735
|
from datetime import datetime
def get_visit_date(fecha_inicio):
    """Convert a visit start string into ``YYYY-mm-dd``.

    :param fecha_inicio: date string in one of several known formats.
    :return: date string YYYY-mm-dd, or "" for blank input.
    :raises ValueError: when no known format matches.
    """
    if not fecha_inicio.strip():
        return ""
    # Formats are tried in the same order as the original nested chain.
    known_formats = (
        "%m/%d/%y %H:%M %p",
        "%m/%d/%y %H:%M",
        "%m/%d/%Y %H:%M",
        "%m/%d/%Y",
    )
    for fmt in known_formats:
        try:
            return datetime.strptime(fecha_inicio, fmt).strftime("%Y-%m-%d")
        except ValueError:
            continue
    # Last-resort format; a ValueError propagates just like the original.
    return datetime.strptime(fecha_inicio, "%d/%m/%Y %H:%M").strftime("%Y-%m-%d")
|
a64f739b471e5c7d4f9bd20812e7670c6b679845
| 700,736
|
def _ExtStorageEnvironment(unique_id, ext_params,
size=None, grow=None, metadata=None,
name=None, uuid=None,
snap_name=None, snap_size=None,
exclusive=None):
"""Calculate the environment for an External Storage script.
@type unique_id: tuple (driver, vol_name)
@param unique_id: ExtStorage pool and name of the Volume
@type ext_params: dict
@param ext_params: the EXT parameters
@type size: integer
@param size: size of the Volume (in mebibytes)
@type grow: integer
@param grow: new size of Volume after grow (in mebibytes)
@type metadata: string
@param metadata: metadata info of the Volume
@type name: string
@param name: name of the Volume (objects.Disk.name)
@type uuid: string
@param uuid: uuid of the Volume (objects.Disk.uuid)
@type snap_size: integer
@param snap_size: the size of the snapshot
@type snap_name: string
@param snap_name: the name of the snapshot
@type exclusive: boolean
@param exclusive: Whether the Volume will be opened exclusively or not
@rtype: dict
@return: dict of environment variables
"""
vol_name = unique_id[1]
result = {}
result["VOL_NAME"] = vol_name
# EXT params
for pname, pvalue in ext_params.items():
result["EXTP_%s" % pname.upper()] = str(pvalue)
if size is not None:
result["VOL_SIZE"] = str(size)
if grow is not None:
result["VOL_NEW_SIZE"] = str(grow)
if metadata is not None:
result["VOL_METADATA"] = metadata
if name is not None:
result["VOL_CNAME"] = name
if uuid is not None:
result["VOL_UUID"] = uuid
if snap_name is not None:
result["VOL_SNAPSHOT_NAME"] = snap_name
if snap_size is not None:
result["VOL_SNAPSHOT_SIZE"] = str(snap_size)
if exclusive is not None:
result["VOL_OPEN_EXCLUSIVE"] = str(exclusive)
return result
|
323c9fae9e6cbc1c1a107dd018bbe5f95520a8fe
| 700,737
|
import time
def is_expired(epoch_time):
    """Return True when the current time has passed ``epoch_time``."""
    now = time.time()
    return now > epoch_time
|
b264fd1d73fe7f9c97592e6bffc27c81574d6bde
| 700,738
|
def stations_by_river(stations, river):
    """Return the alphabetically sorted names of all stations on ``river``.

    :param stations: iterable of objects with ``river`` and ``name`` attributes.
    :param river: river name to match exactly.
    """
    return sorted(station.name for station in stations if station.river == river)
|
078e83affc54b90f2a58ad46cefdd895d9f8c1e6
| 700,740
|
import configparser
def get_cluster_details():
    """Read AWS credentials and DWH cluster settings from ``dwh.cfg``.

    :return: tuple (KEY, SECRET, DWH_CLUSTER_TYPE, DWH_NUM_NODES,
        DWH_NODE_TYPE, DWH_CLUSTER_IDENTIFIER, DWH_DB, DWH_DB_USER,
        DWH_DB_PASSWORD, DWH_PORT, DWH_IAM_ROLE_NAME)
    """
    config = configparser.ConfigParser()
    with open('dwh.cfg') as cfg_file:
        config.read_file(cfg_file)
    aws_options = ('KEY', 'SECRET')
    dwh_options = ('DWH_CLUSTER_TYPE', 'DWH_NUM_NODES', 'DWH_NODE_TYPE',
                   'DWH_CLUSTER_IDENTIFIER', 'DWH_DB', 'DWH_DB_USER',
                   'DWH_DB_PASSWORD', 'DWH_PORT', 'DWH_IAM_ROLE_NAME')
    values = [config.get('AWS', opt) for opt in aws_options]
    values += [config.get('DWH', opt) for opt in dwh_options]
    return tuple(values)
|
a7d19c76e134b74d18a37289c16221064664c036
| 700,741
|
import functools
import inspect
import six
def map_arg(**maps):
    """
    Apply a mapping on certain arguments before calling the original function.

    Args:
        maps (dict): {argument_name: map_func}; each named argument present
            in the call is replaced by ``map_func(value)``.

    Returns:
        A decorator applying the mappings.
    """
    def deco(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Resolve positional/keyword arguments to one name->value map.
            argmap = inspect.getcallargs(func, *args, **kwargs)
            # Plain dict iteration: the six.iteritems indirection is
            # unnecessary on Python 3.
            for arg_name, map_func in maps.items():
                if arg_name in argmap:
                    argmap[arg_name] = map_func(argmap[arg_name])
            return func(**argmap)
        return wrapper
    return deco
|
4b327d3167a6c9bd4da84671a661767db04bcb6b
| 700,742
|
def clean_catalog(ukidss_catalog, clean_band='K_1', badclass=-9999,
                  maxerrbits=41, minerrbits=0, maxpperrbits=60):
    """
    Attempt to remove 'bad' entries in a catalog.

    Parameters
    ----------
    ukidss_catalog : `~astropy.io.fits.BinTableHDU`
        A FITS binary table instance from the UKIDSS survey.
        NOTE(review): the code also reads ``.colnames`` and ``.data``;
        ``colnames`` suggests an astropy Table rather than a plain HDU —
        confirm the actual input type.
    clean_band : ``'K_1'``, ``'K_2'``, ``'J'``, ``'H'``
        The band to use for bad photometry flagging.
    badclass : int
        Class to exclude.
    minerrbits : int
    maxerrbits : int
        Inside this range is the accepted number of error bits.
    maxpperrbits : int
        Exclude this type of error bit.

    Examples
    --------
    """
    band = clean_band
    # Boolean mask built with elementwise arithmetic on column comparisons:
    # '*' acts as AND and '+' as OR here.
    # Keeps rows whose ERRBITS are within [minerrbits, maxerrbits], that are
    # either primary detections (PRIORSEC equals FRAMESETID) or unflagged
    # (PRIORSEC == 0), and whose PPERRBITS are below the threshold.
    mask = ((ukidss_catalog[band + 'ERRBITS'] <= maxerrbits)
            * (ukidss_catalog[band + 'ERRBITS'] >= minerrbits)
            * ((ukidss_catalog['PRIORSEC'] == ukidss_catalog['FRAMESETID'])
               + (ukidss_catalog['PRIORSEC'] == 0))
            * (ukidss_catalog[band + 'PPERRBITS'] < maxpperrbits)
            )
    # Additionally reject the excluded object class, using the per-band
    # CLASS column when available, else the merged classification.
    if band+'CLASS' in ukidss_catalog.colnames:
        mask *= (ukidss_catalog[band + 'CLASS'] != badclass)
    elif 'mergedClass' in ukidss_catalog.colnames:
        mask *= (ukidss_catalog['mergedClass'] != badclass)
    return ukidss_catalog.data[mask]
|
4fa593f758a645bf49fe7479a0876e6a1fc6068b
| 700,743
|
def setmask(arr, x1=None, x2=None):
    """setmask(arr, x1, x2)

    Build a boolean mask selecting ``x1 <= arr <= x2``.

    arr = 1D array (its first/last elements are used as the boundaries)
    x1 = lower value
    x2 = upper value

    By default the mask covers the full range of ``arr``.

    returns
    =======
    mask, x1, x2
        If the input x1 and x2 are out of bounds (or omitted), x1 and x2
        are set to the boundary values of arr.
    """
    # Use short-circuiting ``or`` so None is never compared with a number.
    # (The original ``(x1 == None) | (x1 <= arr[0])`` evaluated both
    # operands and raised TypeError whenever x1/x2 were left as None.)
    if x1 is None or x1 <= arr[0]:
        # no starting value given, or starting value too low: default to
        # the initial value in the data
        x1 = arr[0]
    if x2 is None or x2 >= arr[-1]:
        # no ending value given, or ending value too high: default to
        # the ending value in the data
        x2 = arr[-1]
    # Data will only be fit over the masked (in-range) values.
    mask = (arr >= x1) & (arr <= x2)
    return mask, x1, x2
|
a2e97675045dfb67ba23838493ce19020453c4f9
| 700,744
|
import hashlib
def create_hash(secret: str, url: str) -> str:
    """Return the first 8 hex characters of MD5(secret + url)."""
    combined = secret + url
    digest = hashlib.md5(combined.encode()).hexdigest()
    return digest[:8]
|
891fcc45fe7706a984fb9282ab17887710e6da0a
| 700,745
|
def final_run_could_be_extended(values, end_idx):
    """Heuristically check whether the value run at end_idx could be extended.

    True when there is a run ending at ``end_idx`` (previous entry is True),
    ``end_idx`` is neither 0 nor the end of ``values``, and the entry at
    ``end_idx`` could itself join a run (is not False).
    Kept constant-time: does not verify the run can reach any given length.
    """
    if end_idx in (0, len(values)):
        return False
    ends_with_run = values[end_idx - 1] is True
    next_can_join = values[end_idx] is not False
    return ends_with_run and next_can_join
|
99244baa3379f261d9cc2e5475b33c824dff6781
| 700,746
|
def get_nth_digit(N, n):
    """
    Return the nth digit (0-indexed) of the number N.
    >>> get_nth_digit(12345, 3)
    4
    >>> get_nth_digit(12345, 7)
    Traceback (most recent call last):
    ...
    IndexError: string index out of range
    """
    digits = str(N)
    return int(digits[n])
|
25c01c14589fb091154e8509a84f98811946938f
| 700,747
|
import os
def split_path(filepath):
    """Return (root, extension) of ``filepath`` via os.path.splitext."""
    root_and_ext = os.path.splitext(filepath)
    return root_and_ext
|
a949afd956f5c41e5a3f137383ae61da88b330b0
| 700,748
|
from typing import Optional
from pathlib import Path
import os
def generate_tmp_file_path(
    tmpdir_factory, file_name_with_extension: str, tmp_dir_path: Optional[Path] = None
) -> Path:
    """Generate file path relative to a temporary directory.

    :param tmpdir_factory: py.test's `tmpdir_factory` fixture.
    :param file_name_with_extension: file name with extension e.g. `file_name.ext`.
    :param tmp_dir_path: path to directory (relative to the temporary one created by `tmpdir_factory`) where the generated file path should reside. # noqa
    :return: file path.
    :raises ValueError: if ``tmp_dir_path`` is absolute.
    """
    basetemp = tmpdir_factory.getbasetemp()
    if tmp_dir_path is not None:
        if os.path.isabs(tmp_dir_path):
            raise ValueError("tmp_dir_path is not a relative path!")
        # Create each path component one at a time.
        # http://stackoverflow.com/a/16595356/1557013
        # NOTE(review): tmpdir_factory.mktemp normally appends a numeric
        # suffix (e.g. "sub0") unless numbered=False, so the directories
        # created here may not match the path returned below — confirm.
        for tmp_file_dir_path_part in os.path.normpath(tmp_dir_path).split(os.sep):
            # Accounting for possible path separator at the end.
            if tmp_file_dir_path_part:
                tmpdir_factory.mktemp(tmp_file_dir_path_part)
        return Path(basetemp) / tmp_dir_path / file_name_with_extension
    return Path(basetemp.join(file_name_with_extension))
|
e922aa51f97ec8db4fa3181f4f8193038fc2f7ea
| 700,749
|
def _strip_version(version):
"""Strip trailing characters that aren't digits or '.' from version names.
Some OS versions look like "9.0gm", which is not useful for select()
statements. Thus, we strip the trailing "gm" part.
Args:
version: the version string
Returns:
The version with trailing letters stripped.
"""
result = ""
for ch in str(version):
if not ch.isdigit() and ch != ".":
break
result += ch
return result
|
483851b67347c2e23d1c625fc7bb925664f8e1e1
| 700,750
|
import os
def list_from_env(key, default=""):
    """
    Split an environment variable of the form "a,b,c,d,e,f" into
    ['a', 'b', 'c', 'd', 'e', 'f'].
    """
    try:
        return os.environ.get(key, default).split(',')
    except (KeyError, ValueError):
        return []
|
74bf225d69990f7c649f446293722955d75a4490
| 700,751
|
from unittest.mock import patch
import builtins
def patch_input(**kwargs):
    """Return a mock.patch context manager for the built-in ``input``
    function (which was renamed between PY2 and PY3)."""
    patcher = patch.object(builtins, 'input', **kwargs)
    return patcher
|
1f70b4b3507f914c5546fa823d6de084b3be8870
| 700,752
|
def hash_1():
    """Test hash 1: a fixed 64-character hex digest used as a fixture."""
    expected = "73bef2ac39be261ae9a06076302c1d0af982e0560e88ac168980fab6ea5dd9c4"
    return expected
|
e6aef2a19657e727778ac117ec75d75cf5c4944a
| 700,753
|
def _RGB2sRGB(RGB):
"""
Convert the 24-bits Adobe RGB color to the standard RGB color defined
in Web Content Accessibility Guidelines (WCAG) 2.0
see https://www.w3.org/TR/2008/REC-WCAG20-20081211/#relativeluminancedef for more references
:param RGB: The input RGB color or colors shape== ... x 3 (R, G, and B channels)
:type RGB: numpy.ndarray
:return: converted sRGB colors
:rtype: numpy.ndarray
"""
sRGB = RGB / 255
return sRGB
|
c2c5c25d64cc7b0cb9b004846b72aecada7c5d85
| 700,755
|
def find_meeting_point(river_1, river_2):
    """
    Find the first meeting point of two rivers.

    :param river_1: iterator yielding points of the first river
    :param river_2: iterator yielding points of the second river
    :return: the first point (in river_1 order) shared by both rivers

    Points are consumed in batches of 10000 from each iterator until at
    least one common point is found.
    """
    meeting_points = []
    while not meeting_points:
        points_1 = []
        points_2 = []
        for _ in range(10000):
            points_1.append(next(river_1))
            points_2.append(next(river_2))
        # Set membership makes the intersection O(n) instead of the
        # original O(n^2) list scan, while preserving river_1 ordering.
        seen_2 = set(points_2)
        meeting_points = [p for p in points_1 if p in seen_2]
    return meeting_points[0]
|
22763dc4d88e7261eb3f67f910379a0791bf9fc4
| 700,756
|
import os
def get_dca_output_file_path(output_dir, msa_file_name, prefix='', postfix=''):
    """Locate the file path to which DCA output can be written.

    Parameters
    -----------
    output_dir : str
        DCA computation related output directory.
    msa_file_name : str
        Name of the alignment file; its base name (without extension) is
        combined with the prefix/postfix to form the output file name.
    prefix : str
        String leading the msa base name in the output file name.
    postfix : str
        String trailing the msa base name in the output file name.

    Returns
    -------
    str
        Relative path to the output file.
    """
    base_name = os.path.basename(msa_file_name)
    root, _ext = os.path.splitext(base_name)
    output_name = '{}{}{}'.format(prefix.strip(), root.strip(), postfix.strip())
    return os.path.join(output_dir, output_name)
|
0c2fce86749cc401c2bcf5673ff9a168e673579f
| 700,757
|
def method_authorizations(*scopes: str) -> dict:
    """
    Return method security containing a single OAuth2 requirement.
    :param scopes: All scope names that should be available (as string).
    """
    oauth2_requirement = {"oauth2": scopes}
    return {"security": [oauth2_requirement]}
|
4bf2f93715b9798ef20119288178d69b3907e85e
| 700,758
|
def file_to_save(_x):
    """Argument type for the learner save-data file: a pathname string,
    returned unchanged."""
    return _x
|
e67821fa7a273d41f5a69c0577b3e9ace0955e20
| 700,759
|
import requests
import json
def list_tags(image, cli=False):
    """
    Return a list of tags of a given Docker Hub image.
    Example:
        In : list_tags('google/debian')
        Out: ['jessie', 'wheezy']
        In : list_tags('python')
        Out: ['31', 'rawhide', '30', '29', 'latest' ...]
    When ``cli`` is true, tag names are printed instead of collected.
    """
    if cli:
        print("The image '{}' on Docker Hub got following tag(s):".format(image))
    # Official images live under the implicit 'library' namespace.
    if '/' not in image:
        image = 'library/'+image
    tags = []
    page = 1
    while True:
        url = "https://registry.hub.docker.com/v2/repositories/{}/tags/?page={}".format(
            image, page)
        response = requests.get(url)
        # Stop paging on the first non-OK response (e.g. past the last page).
        if response.status_code != 200:
            break
        for entry in json.loads(response.text)["results"]:
            if cli:
                print(entry["name"])
            else:
                tags.append(entry["name"])
        page += 1
    if cli == False:
        return tags
|
aefa058e32b72911a0b97619edeee119914440b7
| 700,760
|
def get_slurm_script_gpu(output_dir, command):
    """Return the contents of a SLURM batch script for a GPU job.

    :param output_dir: directory receiving the slurm_%j.out log file.
    :param command: shell command executed inside the job.
    """
    return f"""#!/bin/bash
#SBATCH -N 1
#SBATCH --ntasks-per-node=1
#SBATCH --ntasks-per-socket=1
#SBATCH --gres=gpu:tesla_p100:1
#SBATCH --cpus-per-task=4
#SBATCH --mem=64000
#SBATCH --output={output_dir}/slurm_%j.out
#SBATCH -t 05:59:00
#module load anaconda3 cudatoolkit/10.0 cudnn/cuda-10.0/7.3.1
#source activate yumi
{command}
"""
|
3f9d587c2943cd821d000fab419d3591440c4d3d
| 700,761
|
import os
def github_token():
    """Return the Github token (env var GH_TOKEN) to use for real tests.

    :raises RuntimeError: when GH_TOKEN is not defined.
    """
    try:
        return os.environ['GH_TOKEN']
    except KeyError:
        raise RuntimeError('GH_TOKEN must be defined for this test')
|
55ee21f019601e0bb9ac7a890cfcf37fdc932ffd
| 700,762
|
import uuid
import six
def create_uuid3(namespace, name):
    """
    Return new UUID based on a hash of a UUID namespace and a string.

    :param namespace: The namespace
    :param name: The string (str, or UTF-8 encoded bytes)
    :type namespace: uuid.UUID
    :type name: str | bytes
    :return: the derived UUID
    :rtype: uuid.UUID
    """
    # Inline replacement for six.ensure_str: accept bytes for backward
    # compatibility, pass str through unchanged. Removes the third-party
    # six dependency, which is unnecessary on Python 3.
    if isinstance(name, bytes):
        name = name.decode('utf-8')
    return uuid.uuid3(namespace, name)
|
1a6898f80849a11f643a58798adc8a165e3b0e8d
| 700,763
|
def buildTypeTree(cls: type) -> dict:
    """
    Return a tree of subclasses of a class.

    Arguments:
        cls (type): Class from which to return descendants
    Returns:
        dict: Dict of all subclasses
    Example:
        buildTypeTree(MainClass) returns:
        {
            MainClass.SubClass1: {
                MainClass.SubClass1.SubClass11: {},
                MainClass.SubClass1.SubClass12: {}
            },
            MainClass.SubClass2: {}
        }
    """
    # Recurse into each direct subclass; leaves map to empty dicts.
    return {subclass: buildTypeTree(subclass) for subclass in cls.__subclasses__()}
|
7937df4d9643f20c3e5379f84ef36a28226707ba
| 700,764
|
import click
def _cb_key_val(ctx, param, value):
"""
Click callback to validate and convert `--opt key=val --opt key2=val2` to
`{'key': 'val', 'key2': 'val2'}`.
Returns
-------
dict
"""
output = {}
for pair in value:
if '=' not in pair:
raise click.BadParameter("incorrect syntax for KEY=VAL argument: `%s'" % pair)
else:
key, val = pair.split('=')
output[key] = val
return output
|
b74dea54c8e57e8c6ebbd743401f45ba57bff03b
| 700,765
|
import argparse
def percentage_float(x):
    """
    Check whether the float is a percentage.
    :param x: The value to check
    :return: The float representation of the argument.
    :raise: argparse.ArgumentTypeError if the argument is not in [0, 100].
    """
    value = float(x)
    if value < 0 or value > 100:
        raise argparse.ArgumentTypeError('{} not in range [0, 100]'.format(value))
    return value
|
6a15c73d9469c066d94e22bf7af119ba68a20156
| 700,768
|
def isAVersionableResource(obj):
    """True if an object is versionable.

    To qualify, the object must be persistent (have its own db record,
    i.e. a ``_p_oid`` attribute), and must not have a true attribute named
    '__non_versionable__'."""
    non_versionable = getattr(obj, '__non_versionable__', 0)
    if non_versionable:
        return 0
    return hasattr(obj, '_p_oid')
|
42dcd02b1f4e1c9f9ff555ec597d8011cfe64893
| 700,770
|
def fix_variable(problem, pivot, value):
    """
    Return a copy of ``problem`` with the pivot variable set to ``value``.

    Used for branching; prints the selection made. The original problem
    (and its 'variables' dict) is left untouched.
    """
    branched = problem.copy()
    branched['variables'] = dict(problem['variables'])
    branched['variables'][pivot] = value
    print(f'choosing: {pivot} {value}')
    return branched
|
40e7d358eff405c481aedd9d2b1505664fcd4d6e
| 700,771
|
def test_progress(arg1, arg2, kwd1, kwd2, progress):
    """Simple test target for submit_progress: echoes its first four
    arguments back as a tuple (the ``progress`` argument is ignored)."""
    return arg1, arg2, kwd1, kwd2
|
1d761c572b15c41e1a04aafbb53ca825792df8fe
| 700,772
|
def transform_dict_to_kv_list(options):
    """
    {"key": None, "key2": None} becomes 'key, key2'
    {"key": "\"\"", "key2": "3.5in", "tocbibind": None} becomes 'key="", key2=3.5in, tocbibind'
    """
    assert isinstance(options, dict)
    parts = []
    for key, val in options.items():
        # A None value means a bare flag with no '=value' part.
        parts.append(key if val is None else "{}={}".format(key, val))
    return ", ".join(parts)
|
571b3d5724b7aa0ff53698349a3255b14517bd78
| 700,773
|
import numpy
def Mat(m, n):
    """
    Build an m x n matrix of zeros (using numpy).
    For example:
    >>> Mat(2,3)
    array([[ 0., 0., 0.],
           [ 0., 0., 0.]])
    """
    shape = (m, n)
    # 'd' is the double-precision float typecode.
    return numpy.zeros(shape, 'd')
|
e36164cd29ed0070a01b87ca32e6f130271dbb95
| 700,774
|
def _get_shape_name(array_name, shape_name = None):
"""Either get shape name or create from array_name."""
return shape_name if shape_name else f'{array_name}/shape'
|
fe0065faa3e917bb6faef5189ec7ea85ed152c99
| 700,775
|
def getMinUnvisited(unvisited, dist):
    """
    Return the minimum-distance vertex among those not yet processed.

    Parameters:
        unvisited (set): the set containing all the vertices not yet processed
        dist (dict): vertex -> total distance from the source
    """
    # min() keeps the first vertex (in iteration order) achieving the
    # smallest distance, matching the original scan.
    return min(unvisited, key=lambda vertex: dist[vertex])
|
5ccd7ab9e7e7b70c9aedecb56049332ae1f7b530
| 700,776
|
def _alloc_key(name):
"""Constructs allocation key based on app name/pattern."""
if '@' in name:
key = name[name.find('@') + 1:name.find('.')]
else:
key = name[0:name.find('.')]
return key
|
ca3182f52d780f94a6a18c51ad0b7d841ead20d1
| 700,778
|
def _derivative(f, a, method='central', h=0.01):
"""
Compute the difference formula for f'(a) with step size h.
copied from:
https://personal.math.ubc.ca/~pwalls/math-python/differentiation/differentiation/
Parameters
----------
f : function
Vectorized function of one variable
a : number
Compute derivative at x = a
method : string
Difference formula: 'forward', 'backward' or 'central'
h : number
Step size in difference formula
Returns
-------
float
Difference formula:
central: f(a+h) - f(a-h))/2h
forward: f(a+h) - f(a))/h
backward: f(a) - f(a-h))/h
"""
if method == 'central':
return (f(a + h) - f(a - h)) / (2 * h)
elif method == 'forward':
return (f(a + h) - f(a)) / h
elif method == 'backward':
return (f(a) - f(a - h)) / h
else:
raise ValueError("Method must be 'central', 'forward' or 'backward'.")
|
de02aaf132922c0be8aeb84bea5af5d09e850b9d
| 700,780
|
import argparse
def parseCmd():
    """Parse command line arguments.

    Returns:
        argparse.Namespace: object whose attributes hold the arguments.
    """
    parser = argparse.ArgumentParser(description='Renames the transcripts and genes of a GTF file.')
    # (flag, options) specs, added in the original order.
    specs = [
        ('--gtf', dict(type=str, required=True,
                       help='Path to a gene prediciton file in GTF format, for example the output of TSEBRA.')),
        ('--prefix', dict(type=str,
                          help='The string is added as a prefix to all transcript and gene IDs.')),
        ('--translation_tab', dict(type=str,
                                   help='Writes the translation table for old transcript IDs to new transcript IDs to the given file path.')),
        ('--out', dict(type=str, required=True,
                       help='Path to the output file.')),
    ]
    for flag, options in specs:
        parser.add_argument(flag, **options)
    return parser.parse_args()
|
b9bb712686108db5051c0ac46ba94646a296738d
| 700,781
|
def insert_in_bst(root, node):
    """
    Insert ``node`` into the binary search tree rooted at ``root``.

    Values greater than the current node's value go right; equal or
    smaller values go left (same tie-breaking as the recursive original).

    :param root: root node of the binary search tree (may be None)
    :type root: TreeNode
    :param node: node to insert
    :type node: TreeNode
    :return: root node
    :rtype: TreeNode
    """
    if root is None:
        return node
    current = root
    while True:
        if current.val < node.val:
            if current.right is None:
                current.right = node
                break
            current = current.right
        else:
            if current.left is None:
                current.left = node
                break
            current = current.left
    return root
|
3c737d71c5793e7baa51d1ad75b6cc056abbda82
| 700,782
|
def format_bytes(b):
    """Format a byte count as human-readable text (b, kb, mb or gb)."""
    kb = 1024
    mb = kb * 1024
    gb = mb * 1024
    # Guard-clause chain from smallest to largest unit.
    if b < kb:
        return '%s b' % b
    if b < mb:
        return '{0:.2f} kb'.format(float(b) / kb)
    if b < gb:
        return '{0:.2f} mb'.format(float(b) / mb)
    return '{0:.2f} gb'.format(float(b) / gb)
|
4c41105449a8a07e3aca932d9ab3326176f6f1f6
| 700,783
|
def freeze_layer(layer):
    """
    Freeze a layer so its weights won't be updated during training:
    removes the 'trainable' tag from every parameter's tag set.
    :param layer: layer whose ``params`` maps parameters to tag sets
    :return: the same layer
    """
    for tag_set in layer.params.values():
        # discard() is a no-op when the tag is absent.
        tag_set.discard('trainable')
    return layer
|
2edcd24743fc62bf5e8a7cef9ce871e691d38d85
| 700,784
|
def parse_cached(cached_credentials):
    """Parse an existing credentials csv file.

    Requires first 4 columns to be TeamNum, Password, Code, Color [in hex];
    the first line is a header and is skipped.

    :param cached_credentials: path to the csv file
    :return: tuple (teamnums, passwords, codes) of parallel lists
    """
    teamnums = []
    passwords = []
    codes = []
    with open(cached_credentials) as f:
        f.readline()  # skip header row
        for line in f:
            # rstrip('\n') instead of line[:-1]: the old slicing chopped
            # the last character of the final row whenever the file lacked
            # a trailing newline.
            tokens = line.rstrip('\n').split(',')
            teamnums.append(int(tokens[0]))
            passwords.append(tokens[1])
            codes.append(tokens[2])
    return (teamnums, passwords, codes)
|
95b9802011f22a66aa688947f059b29592de7487
| 700,785
|
def include_original(dec):
    """Meta decorator which keeps the original function callable
    (via ``f._original()``).
    """
    def meta_decorator(func):
        wrapped = dec(func)
        # Stash the undecorated callable on the decorated one.
        wrapped._original = func
        return wrapped
    return meta_decorator
|
ba1bd643c192c1fc11df668ee4744202e1e3fd3a
| 700,786
|
def splice_before(base, search, splice, post_splice="_"):
    """Splice in a string before a given substring.

    Args:
        base: String in which to splice.
        search: Splice before this substring.
        splice: Splice in this string; falls back to a "." if not found.
        post_splice: String to add after the spliced string if found. If
            only a "." is found, ``post_splice`` will be added before
            ``splice`` instead. Defaults to "_".

    Returns:
        ``base`` with ``splice`` spliced in before ``search`` if found,
        separated by ``post_splice``, falling back to splicing before
        the first "." with ``post_splice`` placed in front of ``splice``
        instead. If neither ``search`` nor ``.`` are found, simply
        returns ``base``.
    """
    idx = base.rfind(search)
    if idx == -1:
        # Fall back to splicing before the extension dot.
        idx = base.rfind(".")
        if idx == -1:
            # Neither the search string nor a dot: nothing to do.
            return base
        # Turn the post-splice into a pre-splice delimiter, assuming the
        # absence of the search string means the delimiter is not already
        # before the extension.
        splice = post_splice + splice
        post_splice = ""
    return base[:idx] + splice + post_splice + base[idx:]
|
f8f5bf3c2355c38d16157836863e501cbc846d40
| 700,787
|
import csv
def average_trip_length_with_protion_of_trips_longer_than_30_min(filename):
    """
    Read a file with trip data and return a tuple of
    (mean trip duration, fraction of trips longer than 30 minutes).
    """
    n_trips = 0
    total_duration = 0.0
    n_long_trips = 0
    with open(filename, 'r') as f_in:
        for row in csv.DictReader(f_in):
            duration = float(row['duration'])
            n_trips += 1
            total_duration += duration
            if duration > 30:
                n_long_trips += 1
    return (total_duration / n_trips, n_long_trips / n_trips)
|
eda083cde82da0f8db8f2a4716674496e81a2c74
| 700,788
|
def text_from_doc_list(doc_list):
    """
    Extract the ``.text`` of each html element in ``doc_list``.
    """
    texts = []
    for doc in doc_list:
        texts.append(doc.text)
    return texts
|
09370e07fc34c481091a5d34683e3668da12f5a4
| 700,789
|
def adder():
    """Adds all the numbers the user gives until "done" is typed."""
    print("Type a number or 'done' when finished")
    total = 0
    while True:
        try:
            entry = input("Please give input: ")
            if entry == "done":
                break
            total += float(entry)
        except ValueError:
            # Non-numeric entry (other than "done"): report and continue.
            print("Wrong input type")
    return total
|
ce68159b6f071b47b9876ce3a6a7ec18ddd0d5fb
| 700,790
|
def sort_chans(chans):
    """
    A utility function to sort channel codes into Z, N, E or Z, 1, 2
    order.
    """
    # One pass per priority group, preserving input order within a group.
    priority_groups = (("Z",), ("N", "1"), ("E", "2"))
    ordered = []
    for group in priority_groups:
        ordered += [chan for chan in chans if chan[2] in group]
    return ordered
|
22c37441698b7dd89daf508b055922d9781003f5
| 700,791
|
def normalize(df_origin):
    """Fill missing values, drop unneeded columns and convert columns to
    appropriate dtypes: known boolean-ish flag columns become 0/1, other
    object columns get missing values filled with '', and numeric columns
    are filled with 0 and cast to int."""
    df = df_origin.copy()
    bool_like = ['isOwnerHomepage', 'hasHomepage', 'hasLicense',
                 'hasTravisConfig', 'hasCircleConfig', 'hasCiConfig']
    for column in ("name", "owner", "repository"):
        if column in df.columns:
            df.drop(column, axis=1, inplace=True)
    for column in df.columns:
        if df[column].dtype == 'O':
            if column in bool_like:
                df[column] = (df[column] == 'True').astype(int)
            else:
                df[column] = df[column].fillna('')
        else:
            df[column] = df[column].fillna(0).astype(int)
    return df
|
22af67e135d714297ffc2ea5c1fab8616be2feb3
| 700,792
|
def check_file_isvid(filename):
    """
    Check whether a file has a video extension; accepted files are:
    '.mp4', '.mpg', '.avi' (upper- or lower-case).
    :param filename: (str) name of the file
    :return: (bool)
    """
    video_extensions = ('.mpg', '.MPG', '.mp4', '.MP4', '.AVI', '.avi')
    # Compare the last four characters against the accepted extensions.
    return filename[-4:] in video_extensions
|
5762f9020bce682b7eda948a92a41e85dedfe5c2
| 700,793
|
import sys
import io
def open(filename, mode="r"):
    """
    Open a file for CSV use in a Python 2 and 3 compatible way.
    mode must be one of "r" for reading or "w" for writing.
    """
    if sys.version_info[0] >= 3:
        return io.open(filename, mode, encoding="utf-8", newline="")
    # Python 2: the csv module wants binary-mode files.
    return io.open(filename, mode + "b")
|
4f32da4da7d0645e861c26d7ab9f2a87e2778f87
| 700,794
|
import os.path
def _extract_filename(upload_filename):
"""
Extract filename from fully qualified path to use if no filename provided
"""
return os.path.basename(upload_filename)
|
85f2b1895a246fbf1bdd3c10de56b6c479b88330
| 700,795
|
def get_attr_lookup(lines, attr_name):
    """Group *lines* by the value of one of their attributes.

    :arg lines: a list of :class:`TextLine` instances
    :arg attr_name: attribute name, e.g. ``"y0"``
    :returns: dict mapping each distinct attribute value to the list
        of lines sharing it, in input order.

    Useful for identifying rows of text in a table; note it relies on
    *exactly* matching coordinates.
    """
    lookup = {}
    for line in lines:
        key = getattr(line, attr_name)
        lookup.setdefault(key, []).append(line)
    return lookup
|
3f87431edeb11e9edfe824bf58aeda93ad82d8ae
| 700,797
|
def encode_function_data(initializer=None, *args):
    """Encode the initializer call for use with a proxy.

    Args:
        initializer ([brownie.network.contract.ContractTx], optional):
            The initializer function we want to call.
        args (Any, optional):
            The arguments to pass to the initializer function.
    Returns:
        [bytes]: The encoded call, or b"" when no initializer is given.
    """
    # Unpacking an empty *args is already a zero-argument call, so no
    # placeholder value is needed when args is empty.
    if initializer:
        return initializer.encode_input(*args)
    return b""
|
303c297d8ea2b62d3ecb6ccc1e208fc54dd84e49
| 700,798
|
def repeat_img_per_cap(imgsfeats, imgsfc7, ncap_per_img):
    """Repeat image features once per caption.

    imgsfeats is a (B, C, H, W) conv-feature tensor and imgsfc7 a
    (B, D) fc7 tensor.  Returns tensors of shape (B*ncap_per_img, C,
    H, W) and (B*ncap_per_img, D) in which each image's features
    appear ncap_per_img consecutive times.
    """
    bsz, channels, feat_h, feat_w = imgsfeats.size()
    imgsfeats = (imgsfeats.unsqueeze(1)
                 .expand(bsz, ncap_per_img, channels, feat_h, feat_w)
                 .contiguous()
                 .view(bsz * ncap_per_img, channels, feat_h, feat_w))
    bsz, featdim = imgsfc7.size()
    imgsfc7 = (imgsfc7.unsqueeze(1)
               .expand(bsz, ncap_per_img, featdim)
               .contiguous()
               .view(bsz * ncap_per_img, featdim))
    return imgsfeats, imgsfc7
|
da3b0d51fe8a8511ecbafed865ab571ac6d267a3
| 700,799
|
def subtract(x, y):
    """Return the difference of two numbers.

    :param x: minuend
    :param y: subtrahend
    :return: ``x - y``
    """
    difference = x - y
    return difference
|
cb9165d72a3aa0ec7b82f0ad89e53bcf8beffa3a
| 700,800
|
from typing import Dict
def file_name_convention() -> Dict:
    """
    Return the modality -> file-stem naming map used by
    ImageAutoOutput and the Dataset class.
    """
    return {
        "CT": "image",
        "MR": "image",
        "RTDOSE_CT": "dose",
        "RTSTRUCT_CT": "mask_ct.seg",
        "RTSTRUCT_MR": "mask_mr.seg",
        "RTSTRUCT_PT": "mask_pt.seg",
        "PT_CT": "pet",
        "PT": "pet",
        "RTDOSE": "dose",
        "RTSTRUCT": "mask.seg",
    }
|
f3c56306c8dd0f3c228064e8a72bef51b70a4d93
| 700,801
|
import os
def ensure_dir(directory):
    """Create *directory* (and any parents) if needed and return it.

    Uses ``exist_ok=True`` instead of a check-then-create pair, so a
    concurrent creation between the check and ``makedirs`` can no
    longer raise ``FileExistsError``.
    """
    os.makedirs(directory, exist_ok=True)
    return directory
|
b02c55d000eb024dbe8e54a2648bf3e8200c136f
| 700,803
|
def mcb(l, bit, mlb, tiebreaker="1"):
    """Return the most/least common bit at one position.

    l = list of bit strings, e.g. ["00100", "11110", "10110"]
    bit = index of the bit to consider
    mlb = "1" to report the most common bit, "0" for the least common
    tiebreaker = value returned on an exact 50/50 split
    Returns "0" or "1" as a string.
    """
    ones = sum(1 for word in l if word[bit] == "1")
    half = len(l) / 2
    if ones == half:
        return tiebreaker
    if ones > half:
        return mlb
    return str(1 - int(mlb))
|
9256db43f5564ac62f8f83a27332a408a496fc3e
| 700,804
|
import re
def convert_dict(txtfile):
    """Read a two-column text file and return a key -> value dict.

    Each line is "<key> <val>"; any non-digit characters in the key
    (e.g. a 'CISC' prefix) are stripped, so the key maps CISC -> ADIE.
    """
    mapping = {}
    with open(txtfile) as handle:
        for line in handle:
            (key, val) = line.split()
            mapping[re.sub("[^0-9]", "", key)] = str(val)
    return mapping
|
29f61550fe59f1075e1a822916aae7b54c06c18a
| 700,805
|
import logging
def get_query_for_oracle_load_full(table_name, columns, owner):
    """
    Build the JDBC SELECT statement for a full ingestion of one table.
    """
    logging.info(f"BUILDING FULL QUERY for {table_name}")
    column_list = ",".join(str(column) for column in columns)
    return f"select {column_list} from {owner}.{table_name}"
|
e91497cae2cf5804c89b063e77943694397a2d62
| 700,806
|
def balance(text, bal=0):
    """Return the parenthesis balance of *text* starting from *bal*.

    - zero: balanced
    - negative: too many right parens
    - positive: too many left parens

    Once the balance goes negative, further '(' are ignored, so a
    negative result is "sticky" (same rule as the original recursion,
    written iteratively).
    """
    for ch in text:
        if ch == '(' and bal >= 0:
            bal += 1
        elif ch == ')':
            bal -= 1
    return bal
|
e3e523d2c0bab114c3ac243b9fabc934ce92a694
| 700,807
|
def _inches_to_meters(length):
"""Convert length from inches to meters"""
return length * 2.54 / 100.0
|
fccd2937b87c7b1c7eba793b66b4b8573de1e472
| 700,808
|
def area(box):
    """Return the area of a bounding box given as ((x0, y0), (x1, y1))."""
    (x0, y0), (x1, y1) = box
    return float((x1 - x0) * (y1 - y0))
|
3c2fac0d92c8b9cc05dff3cba59d2a83670392e0
| 700,809
|
import os
import platform
def validate_can_run():
    """
    Returns False if CI_PROJECT_DIR exists and platform is Windows, and
    True otherwise.

    The original returned True on both branches, which made the
    CI/Windows check dead code and contradicted this docstring; the
    branch now returns False as documented.
    """
    if 'CI_PROJECT_DIR' in os.environ and platform.system() == 'Windows':
        return False
    return True
|
e7eb6f7aef4538291c7031d8863a71b51538cbe0
| 700,810
|
def sentence_to_token_ids(sentence, word2id):
    """Map each word of *sentence* to its id, returned as strings.

    Called by data_to_token_ids and the Lexer.

    Args:
        sentence: A list of word tokens.
        word2id: A dictionary that maps words to their ids; may be the
            input or target vocabulary.
    """
    return [str(word2id[word]) for word in sentence]
|
e5ce8510574a40ac761beae2596d7e04873b9b1e
| 700,811
|
import os
def Generate_Raven_Timeseries_rvt_String(
    outFolderraven, outObsfileFolder, obsnm, Model_Name
):
    """Generate a string in Raven time series rvt input file format.

    Builds the ':RedirectToFile ./obs/gaugename_subbasinid.rvt' entry
    that is appended for each gauge to the end of the model rvt file
    (Model_Name.rvt).

    Parameters
    ----------
    outFolderraven : string
        Path and name of the output folder of Raven input files.
        Kept for interface compatibility; no longer used here since the
        unused file-path local it fed was removed.
    outObsfileFolder : string
        Folder holding the observation rvt file of each gauge; only its
        final path component is used in the generated line.
    obsnm : data-type
        Mapping/row with at least:
        'Obs_NM': name of the stream flow observation gauge
        'SubId' : subbasin id the gauge is located at
    Model_Name : string
        The Raven model base name. Kept for interface compatibility.

    Returns
    -------
    output_string : string
        Content used to modify the raven time series rvt input file of
        this gauge.

    See Also
    --------
    DownloadStreamflowdata_US, DownloadStreamflowdata_CA
    """
    # Relative folder name of the observation files, e.g. "./obs/".
    obsflodername = "./" + os.path.split(outObsfileFolder)[1] + "/"
    output_string = (
        " \n"
        + ":RedirectToFile "
        + obsflodername
        + obsnm["Obs_NM"]
        + "_"
        + str(obsnm["SubId"])
        + ".rvt"
        + " \n"
    )
    return output_string
|
a200bc7e350278b78018cf014ea948b98d56b0f5
| 700,812
|
def insert_clause(table_name, keys):
    """Build a parameterised SQL INSERT statement.

    Args:
        table_name: The table where the insertion will happen.
        keys: An iterable of strings naming the fields to set.
    Returns:
        The query string, with one '?' placeholder per field.
    """
    fields = list(keys)
    placeholders = ', '.join('?' for _ in fields)
    return f"INSERT INTO {table_name} ({', '.join(fields)}) VALUES ({placeholders})"
|
7c57bff8dec2242ed2ba1c7efa7543296ce1242f
| 700,813
|
def strip_non_alpa(text):
    """
    Return the alphabetic characters of *text* as a list, dropping
    everything else (digits, punctuation, whitespace).
    """
    return [char for char in text if char.isalpha()]
|
131e48d7782a5949855c0391669da4413c293801
| 700,814
|
def implicit_euler(xs, h, y0, f, **derivatives):
    """Step the ODE y' = f(x, y) across the grid *xs* with step *h*.

    NOTE(review): despite the name, this performs an explicit
    predictor-corrector step (Euler predictor, slope evaluated at
    x[k+1]), not a true implicit Euler solve; behavior kept as-is.
    The **derivatives kwargs are accepted but unused here.
    """
    ys = [y0]
    for k, x in enumerate(xs[:-1]):
        predictor = ys[k] + f(x, ys[k]) * h
        ys.append(ys[k] + f(xs[k + 1], predictor) * h)
    return ys
|
3700ceef618dfb8485486f2b9c5695af1483ffd2
| 700,815
|
def choose_pivot_first(_: list[int], left: int, __: int) -> int:
    """Choose first element as pivot"""
    # Pivot-selection strategy for quicksort: the array and the right
    # bound are ignored; the left index is always returned.
    return left
|
931c7a182feda076213ec85f2d7909e7ff2e87cb
| 700,816
|
def retag_from_strings(string_tag):
    """
    Return only the final node tag.

    For a composite tag like "A+B+C" this is "C"; a tag with no '+'
    is returned unchanged.
    """
    # rsplit with maxsplit=1 returns [tag] when '+' is absent, so the
    # [-1] element covers both cases.
    return string_tag.rsplit('+', 1)[-1]
|
5bef884498efb19eb354bb6119450c9c25a19e1c
| 700,817
|
import subprocess
def issue_shell_command(cmd: str, my_env=None):
    """
    Issues a command in a shell and returns "<stdout>\\n<stderr>" as str.

    Parameters:
        cmd - command to be issued (str)
        my_env - optional environment mapping passed to the subprocess

    stdout/stderr are bytes objects and are decoded to UTF-8 before
    concatenation.

    NOTE: shell=True hands *cmd* to the system shell; never pass
    untrusted input here.

    Example usage (simple):
        >> issue_shell_command(cmd="ls")
    Example usage (more involved):
        >> s3dir = "s3://..."; issue_shell_command("aws s3 ls --recursive {}".format(s3dir))
    """
    completed = subprocess.run(
        cmd,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=my_env,
    )
    out = completed.stdout.strip().decode('UTF-8')
    err = completed.stderr.strip().decode('UTF-8')
    return out + '\n' + err
|
85ba6e15da3abd17b7b54a0920c373de6d7938e6
| 700,818
|
import pathlib
def get_summit_config_path(config_dir_name=".summit"):
    """Return the path of the summit config directory under $HOME."""
    home_dir = pathlib.Path.home()
    return home_dir.joinpath(config_dir_name)
|
af89240c29b440d52e41676c07cd97fa642d288d
| 700,819
|
def merge_coco_results(existing_coco_results, new_coco_results, image_id_offset):
    """Merge the two given coco result lists into one.

    Each new result's 'image_id' is shifted by *image_id_offset* (the
    result dicts are modified in place), then the new results are
    appended to *existing_coco_results*, which is mutated and returned.

    :param existing_coco_results: list of dicts for the first results.
    :param new_coco_results: list of dicts for the second results.
    :return: The combined coco results (same object as the first arg).
    """
    for result in new_coco_results:
        result['image_id'] = result['image_id'] + image_id_offset
    existing_coco_results.extend(new_coco_results)
    return existing_coco_results
|
78b8efe19b3f540b6b0943cacc7207a746232faf
| 700,820
|
def process_regex(regex):
    """Parse a multi-line "<field> -> <regex>" spec into a dict.

    Format: <field1> -> <regex1>
            <field2> -> <regex2> etc.
    Lines that do not split into exactly two parts around '->' are
    skipped; both sides are stripped of surrounding whitespace.
    """
    parsed = {}
    for line in regex.split("\n"):
        parts = line.split("->")
        if len(parts) == 2:
            parsed[parts[0].strip(" \t\n\r")] = parts[1].strip(" \t\n\r")
    return parsed
|
ad8fb6cc1d2713de53442ce9c9defbe2a45da0a5
| 700,821
|
def get_price_including_tax(soup):
    """ Analyze "soup" to extract the price with tax.

    Args:
        soup -- bs4.BeautifulSoup from the http request of a book url.
    Return:
        Text of the fourth <td> cell of the first table, which holds
        the price including tax on the book's product page.
    """
    cells = soup.table.find_all("td")
    return cells[3].text
|
e59672a873377ee42348573a8ea54409771a844d
| 700,822
|
import os
def is_dev_environment() -> bool:
    """Return True if the project source tree is found in the cwd."""
    required_dirs = ("caos", "caos/_cli_commands", "docs", "tests")
    required_files = ("caos/_cli.py", "LICENSE", "caos.py")
    return (all(os.path.isdir(d) for d in required_dirs)
            and all(os.path.isfile(f) for f in required_files))
|
65d2cf1ef70215fd268ab9fa9aec2cc15f141d51
| 700,823
|
import logging
def create_table(dynamodb, table_name, partition_key, sort_key=None, rcu=15, wcu=5):
    """
    Purpose:
        Create a DynamoDB Table by name
    Args:
        dynamodb (DynamoDB Resource Object): DynamoDB Object owning the Table
        table_name (String): Name of table to create
        partition_key (Dict): Dict with name and type of the partition key
            e.g. {"name": "name_of_partition_key", "type": "S"}
        sort_key (Dict, optional): Dict with name and type of the sort key
            e.g. {"name": "name_of_sort_key", "type": "S"}.
            Default changed from a mutable {} literal to None (shared
            mutable defaults are a classic Python pitfall); the falsy
            check below treats None and {} identically, so callers are
            unaffected.
        rcu (Int): Read Capacity Units for the table. Defaults to 15
        wcu (Int): Write Capacity Units for the table. Defaults to 5
    Return:
        table (DynamoDB Table Object): Created Table Object
    """
    logging.info(f"Creating Table {table_name} with RCU={rcu} and WCU={wcu}")
    key_schema = [{"AttributeName": partition_key["name"], "KeyType": "HASH"}]
    attribute_definitions = [
        {"AttributeName": partition_key["name"], "AttributeType": partition_key["type"]}
    ]
    if sort_key:
        key_schema.append({"AttributeName": sort_key["name"], "KeyType": "RANGE"})
        attribute_definitions.append(
            {"AttributeName": sort_key["name"], "AttributeType": sort_key["type"]}
        )
    logging.info(f"Key Schema: {key_schema}")
    logging.info(f"Attribute Definitions: {attribute_definitions}")
    try:
        table = dynamodb.create_table(
            TableName=table_name,
            KeySchema=key_schema,
            AttributeDefinitions=attribute_definitions,
            ProvisionedThroughput={"ReadCapacityUnits": rcu, "WriteCapacityUnits": wcu},
        )
    except Exception as err:
        logging.exception(f"Exception Creating Table: {err}")
        raise
    return table
|
192a6a0d643d6bf6604d91517bc37a76cf61a9bd
| 700,824
|
def is_rm_textfile(filename):
    """Returns True if the given filename is a known remarkable-specific textfile."""
    # endswith accepts a tuple of suffixes: one call replaces the four
    # chained if-statements with identical behavior.
    return filename.endswith(('.json', '.content', '.pagedata', '.bookm'))
|
fd2d05fb1900d432c63d9b2bad0b802e5e00c601
| 700,825
|
import os
def delete_files(root, files, origin, target):
    """
    Delete files under *root* (inside the *target* tree) that no longer
    exist at the mirrored location under *origin*.

    Origin and target are inverted relative to the forward sync so the
    reverse pass can remove extra files from the target tree.

    Returns [files_scanned, files_unchanged, files_updated,
    files_deleted, files_skipped, files_not_found].  The unchanged and
    updated counters are always 0 here and exist only to keep the
    result shape consistent with the sync routine.
    """
    files_scanned = 0
    files_unchanged = 0
    files_updated = 0
    files_deleted = 0
    files_skipped = []
    files_not_found = []
    # Path of *root* relative to the target tree, mirrored under origin.
    current_path = root.replace(target, '').lstrip(os.sep)
    # Loop-invariant: the mirrored folder is the same for every file.
    origin_folder = os.path.join(origin, current_path)
    for file in files:
        target_path = os.path.join(root, file)
        origin_path = os.path.join(origin_folder, file)
        files_scanned += 1
        # Keep the file if it still exists on the origin side.
        if os.path.exists(origin_path):
            continue
        try:
            os.remove(target_path)
            files_deleted += 1
        except PermissionError:
            files_skipped.append(target_path)
        except FileNotFoundError:
            files_not_found.append(target_path)
        except OSError:
            files_skipped.append(target_path)
    return [files_scanned, files_unchanged, files_updated, files_deleted,
            files_skipped, files_not_found]
|
f30ff5496f8e7833e1e2a3bea3753ed8a2533ae1
| 700,826
|
def calc_grad_norms(model):
    """Collect the gradient norm of each parameter of *model*.

    Parameters with no gradient are skipped.  Used as input for a
    gradient-clipping coefficient.
    """
    return [p.grad.data.norm() for p in model.parameters() if p.grad is not None]
|
6a35dafa04182716c0de1ffd5edc969118c8f078
| 700,827
|
from typing import List
from typing import Any
from typing import Dict
def create_filter_dict(filter_type: str, filter_by: str, filter_value: List[Any], operator: str) -> Dict[str, Any]:
    """Create the filter dictionary for a list-incidents request.

    :param filter_type: The filter type.
    :param filter_by: The field name to filter by.
    :param filter_value: The filter value.
    :param operator: The operator to use for the filter.
    """
    return {
        "filterType": filter_type,
        "operandOne": {"name": filter_by},
        "operandTwoValues": filter_value,
        "operator": operator,
    }
|
31c1a86c9fdfb193669e99cb1425bea9c89bf367
| 700,828
|
def get_resource_name(prefix, project_name):
    """Build a name usable for GCE resources.

    Lowercases the project name, swaps underscores for hyphens, joins
    it to *prefix* with a hyphen, and truncates to the 58-character
    limit from the instanceGroupManagers API.
    https://cloud.google.com/compute/docs/reference/latest/instanceGroupManagers
    """
    max_name_length = 58
    sanitized = project_name.lower().replace('_', '-')
    return (prefix + '-' + sanitized)[:max_name_length]
|
7779f71e00063b32566f05d4cb0d8daef81043c0
| 700,829
|
def find_beat(v):
    """
    Find the beat of a vector-format midi via the autocorrelation
    function.  Each entry of *v* is a tuple whose first element is -1
    when no note is present at that step.
    """
    # Binary onset vector: 1 where a note is present.
    onsets = [0 if step[0] == -1 else 1 for step in v]
    n = len(onsets)
    scores = []
    # No need to check beyond 24*4 = 96 ticks,
    # i.e. 4 quarter notes of standard midi.
    for lag in range(96):
        hits = sum(1 for i in range(n)
                   if onsets[i] > 0 and onsets[(i + lag) % n] > 0)
        scores.append((lag, hits))
    ranked = sorted(scores, key=lambda pair: pair[1])
    # Walk back past lag 0, which trivially matches itself best.
    k = 1
    while ranked[-k][0] == 0:
        k += 1
    return ranked[-k][0]
|
b408afced09779eb69b40ae54d1fd2c2cfcf1906
| 700,830
|
def read_line_from_file(file):
    """Return the first line of *file* (trailing newline included)."""
    with open(file, "r") as handle:
        return handle.readline()
|
3f3da3642e7931469e853c977aef67cd1024bbe5
| 700,831
|
def round_float(value, precision=1):
    """
    Return *value* rounded to *precision* decimals as a string, with
    trailing zeroes (and the '.' if no decimals remain) removed.

    Stripping only happens when the rounded representation contains a
    decimal point: the original stripped unconditionally, turning an
    integer input such as 100 into "1".
    """
    text = str(round(value, precision))
    if "." in text:
        text = text.rstrip("0").rstrip(".")
    return text
|
afa167709c73b2c536a795c0e38975e212311210
| 700,832
|
def test_game(game,team):
    """Broken out so we can test for all kinds of variations once we build the variation list."""
    # NOTE(review): *game* appears to be an ESPN-style scoreboard event
    # dict -- confirm against the caller.  Matches *team*
    # case-insensitively against both competitors' location,
    # displayName and abbreviation.
    return (team.lower() in [game["competitions"][0]["competitors"][0]["team"]["location"].lower(),
                            game["competitions"][0]["competitors"][1]["team"]["location"].lower(),
                            game["competitions"][0]["competitors"][0]["team"]["displayName"].lower(),
                            game["competitions"][0]["competitors"][1]["team"]["displayName"].lower(),
                            game["competitions"][0]["competitors"][0]["team"]["abbreviation"].lower(),
                            game["competitions"][0]["competitors"][1]["team"]["abbreviation"].lower()])
|
7e35d434415ba5cb94fd02f119bd59f5f98898fb
| 700,834
|
def appium_bytes(value, encoding):
    """
    Return a bytes-like object. Has _appium_ prefix to avoid overriding
    the built-in ``bytes``.

    :param value: A value to convert
    :type value: string
    :param encoding: The encoding to convert to
    :type encoding: string
    :return: A bytes-like object
    :rtype: string
    """
    try:
        # Python 3: a str needs an explicit encoding.
        return bytes(value, encoding)
    except TypeError:
        # Already bytes (or a Python 2 str): return unchanged.
        return value
|
b7acc045584ffad834bb24f30fe930736f55d699
| 700,835
|
from typing import List
import csv
def read_sts_inputs(path: str) -> List[str]:
    """Read input texts from a tsv file formatted like the official STS
    benchmark.

    Columns 5 and 6 of each row hold the two sentences; rows that are
    too short are reported and skipped.
    """
    inputs: List[str] = []
    with open(path, 'r', encoding='utf8') as handle:
        for row in csv.reader(handle, delimiter='\t', quoting=csv.QUOTE_NONE):
            try:
                # Index both columns first so a short row adds nothing.
                inputs.extend((row[5], row[6]))
            except IndexError:
                print(f"Cannot parse line {row}")
    print(f"Done loading {len(inputs)} inputs from file '{path}'")
    return inputs
|
6af4e934d648b550298e71584eaf47e4267316ac
| 700,836
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.