content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def parse_threshold_list(parselist, current):
    """Parse where the value lands on the required threshold.

    :param parselist: ascending sequence of threshold values
    :param current: value to classify
    :return: the first threshold >= ``current``, or 100 when none matches
        (including for an empty list)
    """
    # iterate values directly instead of indexing by range(len(...))
    for threshold in parselist:
        if current <= threshold:
            return threshold
    return 100
import subprocess
def pdflatex(latex, directory, name) -> str:
    """
    This function generates a .pdf file at the target location. It uses pdflatex to compile the given latex code.
    :param latex: A single latex formular string without $$ or an equation environment.
    :param directory: The directory path in which the .pdf file should be saved in.
    :param name: An unique identifier for the generated file.
    :return: Returns the full path of the generated .pdf file.
    """
    file = directory + '/' + name + '.tex'
    with open(file, 'w') as f:
        f.write(latex)
    # The original passed both '-interaction=batchmode' and
    # '-interaction=nonstopmode'; the later flag wins, so only it is kept.
    cmd = ['pdflatex',
           '-interaction=nonstopmode',
           file]
    # NOTE(review): despite the comment, a failing compile is silently
    # ignored -- consider check=True if errors really are critical.
    subprocess.run(cmd, cwd=directory, stdout=subprocess.DEVNULL)  # errors are critical
    return file[:-3] + 'pdf'
import torch
def probs_as_images(probs, W=64):
    """
    Render probability vectors as fake RGB images.

    Inputs:
        probs: (..., D) float tensor with values in [0, 1]; D must be a
            multiple of W
        W    : image width (default 64)
    Outputs:
        vid  : (..., C, H, W) uint8 tensor, C = 3 and H = D // W; the map
            is replicated across the channel dimension
    """
    orig_shape = list(probs.shape)
    D = orig_shape[-1]
    # validate before deriving H so a bad width fails with a clear message
    assert D % W == 0, "last dimension must be divisible by W"
    H = D // W
    probs = (probs * 255).to(torch.uint8)
    C = 3
    # expand the single channel to C channels, then restore leading dims
    probs = probs.view(-1, 1, H, W).expand(-1, C, -1, -1).contiguous()
    probs = probs.view(*orig_shape[:-1], C, H, W)
    return probs
def model_name(model):
    """Return the model's dotted name: '<app_label>.<object_name>'."""
    meta = model._meta
    return '{0}.{1}'.format(meta.app_label, meta.object_name)
def parent_pkg(s):
    """Get the parent package
    >>> parent_pkg('a.b')
    'a'
    >>> parent_pkg('a.b.c.d')
    'a.b.c'
    >>> parent_pkg('a')
    ''
    :param s:
    :return:
    """
    # everything before the last dot; '' when there is no dot
    head, _, _ = s.rpartition('.')
    return head
import importlib
def _load_driver(backend, **kargs):
    """Load the correct backend driver for data persistent.

    :param backend: backend name; e.g. 'file' resolves class 'FileBackend'
    :param kargs: keyword arguments forwarded to the driver constructor
    :return: an instantiated driver object
    """
    # NOTE(review): importlib.import_module only uses the package argument
    # for RELATIVE names; 'backend' is absolute, so __package__ is ignored
    # here. A relative name like '.backend' may have been intended -- confirm.
    bk_module = importlib.import_module('backend', __package__)
    driver_cls = getattr(bk_module, str.capitalize(backend) + 'Backend')
    return driver_cls(**kargs)
import sqlite3
def get_schema(db):
    """
    Get database's schema, which is a dict with table name as key
    and list of column names as value
    :param db: database path
    :return: schema dict (table names and column names lower-cased)
    """
    schema = {}
    conn = sqlite3.connect(db)
    try:
        cursor = conn.cursor()
        # fetch table names
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = [str(table[0].lower()) for table in cursor.fetchall()]
        # fetch table info
        for table in tables:
            cursor.execute("PRAGMA table_info({})".format(table))
            schema[table] = [str(col[1].lower()) for col in cursor.fetchall()]
    finally:
        # the original leaked the connection; always close it
        conn.close()
    return schema
def _adjust_ix(i, n):
"""Internal helper function"""
if i >= n:
return i+1
else:
return i | 6ba67fc2bd65156395feffe5fa2b6e881a621efd | 43,279 |
def D4(d2, d3):
    """Factor Variables D4."""
    ratio = d3 / d2
    return 1 + 3 * ratio
def get_labels_for_ids(labels, ids, ids_are_one_indexed=False):
    """Get the human-readable labels for given ids.
    Args:
        labels: dict, string-ID to label mapping from ImageNet.
        ids: list of ints, IDs to return labels for.
        ids_are_one_indexed: whether to increment passed IDs by 1 to account for
            the background category. See ArgParser `--ids_are_one_indexed`
            for details.
    Returns:
        list of category labels
    """
    offset = int(ids_are_one_indexed)
    return [labels[str(identifier + offset)] for identifier in ids]
def exec_kwargs(kwargs: dict) -> dict:
    """Calls any value of kwargs that represents a callable and updates
    value to the callable's return value (in place; the same dict is
    returned)."""
    for key in kwargs:
        value = kwargs[key]
        if callable(value):
            kwargs[key] = value()
    return kwargs
def boolstr(value, true="true", false="false"):
    """
    Convert a boolean value into a string. This function is
    intended to be used from within file templates to provide
    an easy way to take boolean values stored in Pillars or
    Grains, and write them out in the apprpriate syntax for
    a particular file template.
    :param value: The boolean value to be converted
    :param true: The value to return if ``value`` is ``True``
    :param false: The value to return if ``value`` is ``False``
    In this example, a pillar named ``smtp:encrypted`` stores a boolean
    value, but the template that uses that value needs ``yes`` or ``no``
    to be written, based on the boolean value.
    *Note: this is written on two lines for clarity. The same result
    could be achieved in one line.*
    .. code-block:: jinja
        {% set encrypted = salt[pillar.get]('smtp:encrypted', false) %}
        use_tls: {{ salt['slsutil.boolstr'](encrypted, 'yes', 'no') }}
    Result (assuming the value is ``True``):
    .. code-block:: none
        use_tls: yes
    """
    return true if value else false
def _resolve_frameworks(lookup_dict, data):
"""
Resolves any framework related includes
"""
if "frameworks" in lookup_dict:
# cool, we got some frameworks in our lookup section
# add them to the main data
fw = lookup_dict["frameworks"]
if "frameworks" not in data:
data["frameworks"] = {}
if data["frameworks"] is None:
data["frameworks"] = {}
data["frameworks"].update(fw)
return data | 89a745fc19b8ddc32717f5a47e1b5d472bc1d36b | 43,286 |
import os
def get_json_files(dir, relative=False):
    """Gets all .json files from given directory.

    :param dir: directory to scan (non-recursive)
    :param relative: if True return bare file names instead of joined paths
    :return: list of files' paths (or names), in os.listdir order
    """
    # comprehension replaces the append loop; 'entry' avoids shadowing 'file'
    return [
        entry if relative else os.path.join(dir, entry)
        for entry in os.listdir(dir)
        if entry.endswith(".json")
    ]
def formate_dataset(dframe):
    """
    Format the dataset: append the title to the text column and drop the
    columns that are no longer needed.
    :param dframe: dataframe to format (modified in place)
    :return: the formatted dataframe
    """
    dframe['text'] = dframe['text'] + " " + dframe['title']
    for column in ('title', 'subject', 'date'):
        del dframe[column]
    return dframe
def match_gender(text):
    """
    Classify a free-text gender description.
    :param text: string
    :return: 'female', 'male', or '' when neither marker is found
    """
    lowered = text.lower()
    # check female markers first: 'female' contains 'male' as a substring
    for label, markers in (('female', ('female', '女')),
                           ('male', ('male', '男'))):
        if any(marker in lowered for marker in markers):
            return label
    return ''
def div(a, b):
    """Divide a by b.

    :param a: dividend
    :param b: divisor
    :return: the true-division quotient ``a / b``
    :raises ZeroDivisionError: if ``b`` is zero
    """
    return a / b
def ssl_check():
    """
    Slack will periodically send GET requests to check that the SSL cert
    validates ok. We should just respond with 200 OK as soon as possible.
    https://api.slack.com/slash-commands#ssl

    :return: a short plain-text body; NOTE(review): presumably registered
        as a web-framework route that wraps the string in a 200 response.
    """
    return "All ok, mm'kay."
import re
def path_verify(path):
    """Verify path: strip every character that is not in [_A-Za-z0-9/.]."""
    disallowed = r"[^_A-Za-z0-9\/.]"
    return re.sub(disallowed, "", path)
def sum_posts(kinesis_actors):
    """Sum all posts across an array of KinesisPosters.

    :param kinesis_actors: iterable of objects exposing ``total_records``
    :return: total record count across all actors (0 for an empty iterable)
    """
    # generator expression + sum() replaces the manual accumulator loop
    return sum(actor.total_records for actor in kinesis_actors)
import torch
def collate_fn(data):
    """Construct a batch by padding each sequence to the size of the longest.

    Args:
        data (list[tuple]): one tuple of aligned 1-D tensors per sample;
            assumes all tensors within a tuple share the same length
            (lengths are read from the FIRST tensor of each tuple only).
    Returns:
        list: one padded int64 tensor per tuple position, followed by a
        bool mask where True marks padding positions.
    """
    # Construct a bacth by padding the sequence to the size of the longest
    # per-sample lengths, taken from the first tensor of each sample
    size = [len(_x) for _x in list(zip(*data))[0]]
    # one zero-filled (batch, max_len) buffer per tuple position
    pad_data = [torch.zeros(len(size), max(size)) for _ in zip(*data)]
    # mask starts as all-ones (padding); real positions are zeroed below
    pad_mask = torch.ones(len(size), max(size))
    for i, _data in enumerate(data):
        end = size[i]
        pad_mask[i, :end] = 0
        for j, d in enumerate(_data):
            pad_data[j][i, :end] = d
    return [d.type(torch.int64)
            for d in pad_data] + [pad_mask.type(torch.bool)]
    # not yet supported by yapf and black formatter (allowed in Python 3.8)
    # return *[d.type(torch.int64) for d in pad_data],
    # pad_mask.type(torch.bool)
from typing import Dict
import os
def get_ave_score(input_file: str) -> Dict:
    """Compute the average click/rating score per item.

    The first line is treated as a header and skipped; every other line
    must have at least 4 comma-separated fields: user_id, item_id,
    rating, ...

    Args:
        input_file: path of the ratings file.
    Return:
        dict mapping item_id to its average rating rounded to 3 decimals;
        empty dict when the file does not exist.
    """
    if not os.path.exists(input_file):
        return {}
    record_dict = {}
    # 'with' guarantees the handle is closed even if parsing raises
    with open(input_file) as fp:
        for line_num, line in enumerate(fp):
            if line_num == 0:
                continue  # skip header row
            item = line.strip().split(',')
            if len(item) < 4:
                continue
            item_id, rating = item[1], float(item[2])
            if item_id not in record_dict:
                record_dict[item_id] = [0, 0]
            record_dict[item_id][0] += 1
            record_dict[item_id][1] += rating
    return {
        item_id: round(total / count, 3)
        for item_id, (count, total) in record_dict.items()
    }
def SmoothBrokenPowerLaw(x, norm=1., gamma_low=1., gamma_high=1., break_freq=1.):
    """
    Smooth broken power law function,
    implemented using astropy.modeling.models custom model
    Parameters
    ----------
    x: numpy.ndarray
        non-zero frequencies
    norm: float
        normalization frequency
    gamma_low: float
        power law index for f --> zero
    gamma_high: float
        power law index for f --> infinity
    break_freq: float
        break frequency
    Returns
    -------
    model: astropy.modeling.Model
        generalized smooth broken power law psd model
    """
    # Dividing by (1 + (x/bf)^2)^(-(gl-gh)/2) equals multiplying by
    # (1 + (x/bf)^2)^((gl-gh)/2), so the slope bends from -gamma_low
    # (x << break_freq) to -gamma_high (x >> break_freq).
    return norm * x**(-gamma_low) / (1. + (x / break_freq)**2)**(-(gamma_low - gamma_high) / 2)
def format_exts(exts: list):
    """ Returns a formatted, comma-separated list of dotted extentions. """
    dotted = ["." + ext for ext in exts]
    return ", ".join(dotted)
import os
import configparser
def load_saved_config(filepath):
    """Load a saved config.ConfigParser object at 'filepath/config.ini'.
    Args:
        filepath (str): directory containing the saved 'config.ini'
    Returns:
        parsed config.ConfigParser object at 'filepath/config.ini'.
    """
    parser = configparser.ConfigParser()
    parser.read(os.path.join(filepath, 'config.ini'))
    return parser
import configparser
def configurations(path):
    """Parses and reads the configuration file named found at path. Returns
    a configparser Object."""
    parser = configparser.ConfigParser()
    parser.read(path)
    return parser
def normalize_proposals(proposals):
    """Performs a number of normalizations in name proposals.

    :param proposals: list of proposal strings
    :return: stripped, filtered, order-preserving de-duplicated list
    """
    # Remove trailing and leading whitespaces
    proposals = [p.strip() for p in proposals]
    # Discard proposals that start with a single character not in a recognized list
    VALID_STARTERS = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'v', '©', '®', '™'}
    proposals = [p for p in proposals if len(p.split(" ")[0]) > 1 or p.split(" ")[0] in VALID_STARTERS]
    # Remove duplicates, preserving order -- a seen-set makes this O(n)
    # instead of the original O(n^2) slice-count scan
    seen = set()
    deduped = []
    for p in proposals:
        if p not in seen:
            seen.add(p)
            deduped.append(p)
    return deduped
def adjust(val, length=6):
    """
    Left align the stringified value, padded to *length* characters,
    for printing purposes.
    """
    text = str(val)
    return text.ljust(length)
def initSensitiveWordMap(sensitiveWordSet):
    """Initialise the sensitive-word library as a DFA trie.

    :param sensitiveWordSet: iterable of (category, word) pairs
    :return: nested dict trie; each node maps a character to a child node,
        "isEnd" is "1"/"0" marking whether a word ends there, and word-end
        nodes additionally carry the word's "category"
    """
    sensitiveWordMap = dict()
    for category, key in sensitiveWordSet:
        nowMap = sensitiveWordMap
        # enumerate the characters directly instead of indexing by range()
        for i, keyChar in enumerate(key):
            wordMap = nowMap.get(keyChar)
            if wordMap is not None:
                # char already present: descend into the existing node
                nowMap = wordMap
            else:
                # create a new node; not a word end yet
                newWordMap = dict()
                newWordMap["isEnd"] = "0"
                nowMap[keyChar] = newWordMap
                nowMap = newWordMap
            if i == len(key) - 1:
                # last char of the word: mark it and record the category
                nowMap["isEnd"] = "1"
                nowMap["category"] = category
    return sensitiveWordMap
def toLower(str):
    """ This method just turns the first char of a word to lowercase. """
    if not str:
        return ''
    return str[0].lower() + str[1:]
import json
import click
def set_preferences(micc_file):
    """Set the preferences in *micc_file*.
    (This function requires user interaction!)

    Each entry of the json file is a dict of click.prompt keyword
    arguments; the user's answer replaces the entry's 'default'.

    :param Path micc_file: path to a json file.
    :return: the updated preferences dict (also written back to micc_file).
    """
    with micc_file.open() as f:
        preferences = json.load(f)
    ty = None
    for parameter,description in preferences.items():
        # entries whose default is a template placeholder ('{{ ... }}') are skipped
        if not description['default'].startswith('{{ '):
            if 'type' in description:
                ty = description['type']
                # SECURITY NOTE(review): eval of a string read from the file;
                # acceptable for trusted config, dangerous on untrusted input.
                description['type'] = eval(ty)
            answer = click.prompt(**description)
            if ty:
                # set the string back
                # NOTE(review): ty is never reset between iterations, so a
                # parameter WITHOUT 'type' inherits the previous one's type
                # string here -- confirm this is intended.
                description['type'] = ty
            preferences[parameter]['default'] = answer
    with micc_file.open(mode='w') as f:
        json.dump(preferences, f, indent=2)
    return preferences
import string
import random
def gen_id(length):
    """ Create a well-usable random id of *length* alphanumeric chars. """
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
def decode(encoded_text):
    """Decodes given encoded text.
    Parameters
    ----------
    encoded_text : str
        Text to decode (payload follows the first space).
    Returns
    -------
    str
        Decoded string.
    """
    # everything after the first space is the payload
    _, separator, payload = encoded_text.partition(' ')
    if not separator:
        raise RuntimeError("Text doesn't appear to be encoded or there is an encryption key mismatch.")
    return payload
def prepare_url(valip, valch, valc, valii):
    """
    Prepare the URL for the dali_devices get_device endpoint.
    """
    # base: controller IP + action + channel
    parts = ["http://", valip,
             "/api/v100/dali_devices.ssi?action=get_device",
             "&ch=", valch]
    # selector: lamp index, group index, or whole channel
    if valc == 1:
        parts += ["&di=", valii]   # Lamp index
    elif valc == 2:
        parts += ["&gi=", valii]   # Group index
    else:
        parts.append("&gi=-1")     # Channel
    return "".join(parts)
import re
def distanceEvaluate(sExpectedFile, sOutputFile):
"""
Evaluate if the output is the same as expected input for the vertices operation.
"""
lExpMatches = []
lActMatches = []
sDelimiter = " "
with open(sExpectedFile, "r") as fExpected:
for sLine in fExpected:
# space delimiter
sLine1 = sLine.strip()
lFields = re.split("[\t ]*[,|\|]?[\t ]*", sLine1)
try:
if len(lFields) == 3:
lExpMatches.append(int(lFields[2]))
except ValueError:
# distance isn't an integer
return False
with open(sOutputFile, "r") as fOut:
for sLine in fOut:
# space delimiter
sLine1 = sLine.strip()
# if line is empty, we continue (this also takes care of extra newline at end of file)
if len(sLine1) == 0:
continue
# should be space-delimited, but in case submissions use other delimiters
lFields = re.split("[\t ]*[,|\|]?[\t ]*", sLine1)
try:
if len(lFields) == 3:
lActMatches.append(int(lFields[2]))
except ValueError:
# distance isn't an integer
return False
if len(lExpMatches) != len(lActMatches):
return False
for i in range(len(lExpMatches)):
if lExpMatches[i] != lActMatches[i]:
return False
# passed
return True | e0c555213b4a94526bfe2c23fc878c1f6a2199a8 | 43,316 |
def read_input(ifn):
    """
    Read an Abaqus INP file, read its sections.
    Return the section headings and the lines.

    :param ifn: path of the .inp file
    :return: tuple (headings, lines) where headings is a list of
        (name, start, end) triples; [start, end) indexes into lines and
        delimits the data belonging to that section
    """
    with open(ifn) as inf:
        lines = [ln.strip() for ln in inf.readlines()]
    # Remove comments
    lines = [ln for ln in lines if not ln.startswith("**")]
    # Find section headers
    headings = [(ln[1:], n) for n, ln in enumerate(lines) if ln.startswith("*")]
    # Filter the headings so that every heading has a start-of-data and
    # end-of-data index.
    headings.append(("end", -1))  # sentinel so the last section gets an end index
    ln = [h[1] for h in headings]
    # pair each heading's own line index (+1 = first data line) with the
    # NEXT heading's line index (= one past the last data line)
    headings = [
        (name, start + 1, end) for (name, start), end in zip(headings[:-1], ln[1:])
    ]
    return headings, lines
def IGESTreeWalk(object, level = 0, levelname = ""):
    """Walk IGES Class Tree

    Recursively collect dotted attribute paths of all leaf attributes,
    descending into attributes whose type name contains 'IGES' and
    skipping leaf names that contain an underscore.

    :param object: root object whose __dict__ is traversed
    :param level: recursion depth (informational only)
    :param levelname: dotted prefix accumulated so far
    :return: list of dotted attribute-path strings
    """
    branch = list()
    for item in object.__dict__: # Get keys for this level
        if "IGES" in str(type(object.__dict__[item])): # There is another level down
            branch.extend(IGESTreeWalk(object.__dict__[item], level + 1, "".join((levelname, item, "."))))
        elif "_" not in item:
            branch.append("".join((levelname, item)))
    return branch
import random
import string
def random_password():
    """
    Returns a 12 characters randomic string.
    """
    letters = string.ascii_letters
    return ''.join(random.choice(letters) for _ in range(12))
import logging
def get_logger(entity):
    """
    Retrieves loggers from the enties fully scoped name.
    get_logger(Replay) -> sc2reader.replay.Replay
    get_logger(get_logger) -> sc2reader.utils.get_logger
    :param entity: The entity for which we want a logger.
    """
    try:
        qualified_name = entity.__module__ + '.' + entity.__name__
    except AttributeError:
        raise TypeError("Cannot retrieve logger for {0}.".format(entity))
    return logging.getLogger(qualified_name)
def convert_to_float(number_string):
    """Convert comma-delimited real numberts in string format to a float
    >>> convert_to_float("-79,1")
    -79.1
    """
    normalized = number_string.replace(',', '.')
    return float(normalized)
def _darknet_required_attr(attr, key):
"""Check the attribute exists and return if exists, if not return error."""
assert isinstance(attr, dict)
if key not in attr:
raise AttributeError("Required attribute {} not found.".format(key))
return attr[key] | 938555c55069fafc28a1d43a06b1d316abaea87f | 43,326 |
import yaml
def create_config(base_file, exp_file=None):
    """ Combines yaml files into single configuration dict.

    :param base_file: path to the base YAML config
    :param exp_file: optional experiment YAML whose keys override the base
    :return: merged dict annotated with 'exp_config_path' and
        'base_config_path'
    """
    # context managers close the handles (the original leaked both)
    with open(base_file, "r") as fh:
        p = yaml.load(fh, Loader=yaml.FullLoader)
    if exp_file is not None:
        with open(exp_file, "r") as fh:
            exp_config = yaml.load(fh, Loader=yaml.FullLoader)
        p = {**p, **exp_config}
    p['exp_config_path'] = exp_file
    p['base_config_path'] = base_file
    return p
def memusage(pid = None):
    """Get the memory usage.

    Reads /proc/<pid>/statm, so this only works on Linux.

    @param pid: Process to analyze (None for current process)
    @return: tuple (vmsize, vmrss, vmshared, text, lib, data) in KiB,
        assuming 4 KiB pages (see note below).
    """
    if pid is None:
        pfn = "/proc/self/statm"
    else:
        pfn = "/proc/%d/statm" % pid
    # NOTE(review): the file handle is never closed explicitly; relies on
    # CPython refcounting -- a with-statement would be safer.
    line = open(pfn).readline()
    # Assume page size is 4k (true for i386). This can be adjusted by reading
    # resource.getpagesize()
    arr = [ 4 * int(x) for x in line.split()[:6] ]
    vmsize, vmrss, vmshared, text, lib, data = arr
    # The RHS in the following description is the fields in /proc/self/status
    # text is VmExe
    # data is VmData + VmStk
    return vmsize, vmrss, vmshared, text, lib, data
def TruncateText(text_string, str_length=18):
    """ Truncate the text string after a certain number of characters.

    Strings longer than *str_length* are cut to their first
    ``str_length - 1`` characters followed by '...'; shorter (or
    equal-length) strings are returned unchanged.
    """
    # direct slicing replaces the original char-by-char list build
    if len(text_string) > str_length:
        return '{}...'.format(text_string[:str_length - 1])
    return text_string
def grouper(x):
    """
    Given a line number in a fastq file, return a tuple
    mapping each line to a read number (modulo 4) and
    line number
    """
    index, line_number = x
    return int(line_number / 4), index
import importlib
def deserialize_class(serilalized_cls):
    """ Deserialize Python class from a 'module:ClassName' string. """
    parts = serilalized_cls.split(':')
    module_name, cls_name = parts
    loaded_module = importlib.import_module(module_name)
    return getattr(loaded_module, cls_name)
import os
def get_url_map():
    """
    Loads custom/pypi/map.txt and builds a dict where map[package_name] = url.

    Lines must look like 'package: url'.
    :return: dict, urls
    """
    url_map = {}  # renamed from 'map' to avoid shadowing the builtin
    path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),  # current working dir ../
        "custom",  # ../custom/
        "pypi",  # ../custom/pypi/
        "map.txt"  # ../custom/pypi/map.txt
    )
    with open(path) as f:
        # iterate the handle lazily instead of materialising readlines()
        for line in f:
            package, url = line.strip().split(": ")
            url_map[package] = url
    return url_map
def times2(num):
    """ This function returns twice the value of the entered number """
    doubled = num * 2
    return doubled
def qubit_constraint(ind1, ind2, instructions):
    """
    Determine if two instructions have overlap in the used qubits.

    :param ind1: index of the first instruction
    :param ind2: index of the second instruction
    :param instructions: sequence of objects exposing a ``used_qubits`` set
    :return: True when the instructions share no qubits (compatible),
        False when their qubit sets overlap
    """
    # isdisjoint answers the question directly without building the
    # intersection set that `&` would allocate
    return instructions[ind1].used_qubits.isdisjoint(
        instructions[ind2].used_qubits)
from typing import IO
def load_highlighting_info_cmd(file: IO[str]):
    """Load highlighting info for agda file.

    :param file: agda source file handle
    :return: ('load_highlighting_info', {'file': file}) command tuple
    """
    return ('load_highlighting_info', {'file': file})
def data_split(data, frac_validate=.2, frac_test=.2):
    """
    Split data into train, validation, train2 and test set.
    Args:
        data (DataFrame or Series): data of interest, assumed ordered
        frac_validate (float): a fraction of data for validation set (default = 0.2)
        frac_test (float): a fraction of data for test set (default = 0.2)
    Returns:
        a tuple of 4 dataframes: train, validate, train2, test
        (train2 spans train + validate).
    """
    n_size = data.shape[0]
    n_test = int(n_size*(frac_test))
    n_validate = int(n_size*(frac_validate))
    n_train = n_size - n_test - n_validate
    # NOTE(review): dropna() is applied ONLY to the train slice, not to the
    # other splits -- confirm that asymmetry is intended.
    train = data.iloc[:n_train].dropna()
    # NOTE(review): when n_test computes to 0, iloc[-0:] selects the WHOLE
    # frame (not an empty one), so test would equal the full dataset -- verify.
    validate = data.iloc[n_train:-n_test]
    train2 = data.iloc[:-n_test]
    test = data.iloc[-n_test:]
    return train, validate, train2, test
from datetime import datetime
def timeType(base = None):
    """Return the market-session phase for a given time.

    :param base: reference datetime; defaults to ``datetime.today()``
    :type base: datetime
    :return: "BEFORE" (before the 08:50 open), "SHOWTIME" (during the
        08:50-15:00 weekday session), "AFTER" (after the close) or
        "NONE" (weekend)
    ::
        timeType()
        timeType(datetime.today())
    """
    today = base if base else datetime.today()
    # session boundaries on the same calendar day as `today`
    mainStart = today.replace(hour=8, minute=50, second=0, microsecond=0)
    mainEnd = today.replace(hour=15, minute=0, second=0, microsecond=0)
    if today.weekday() < 5:  # Monday-Friday
        if today >= mainStart and today <= mainEnd:
            return "SHOWTIME"
        else:
            if today < mainStart:
                return "BEFORE"
            elif today > mainEnd:
                return "AFTER"
    else:
        return "NONE"
def table_1_a(data):
    """
    Build Table 1a from region data.

    :param data: regiondata frame from "importing_regiondata()"; must carry
        the percentile and moisture/urban columns referenced below
    :return: filtered, sorted dataframe restricted to the listed variables,
        with the moisture-growth column renamed for display
    """
    # keep regions above the 6th percentile on both moisture and urban-fraction change
    df = data.query("abspctileADsm0_2moistu > 6 & abspctileADurbfrac > 6")
    var_list = ["ADsm0_2moistu", "mean_moistu1950_69", "ADurbfrac", "firsturbfrac", "lndiscst",
                "areasqkm", "extent_agE", "extent_agH", "D_moist_GT1"]
    df = df[var_list].sort_values(by = var_list)
    df = df.rename(columns={"ADsm0_2moistu" : "Annualized moisture growth"})
    return df
def config_format_frame(config, fnum):
    """Try to use string formatting to instert frame number into strings.

    Values that don't support %-formatting with fnum are kept unchanged.
    """
    formatted = {}
    for key, (kind, value) in config.items():
        try:
            formatted[key] = [kind, value % fnum]
        except TypeError:
            # value is not %-formattable: keep it as-is
            formatted[key] = [kind, value]
    return formatted
def latticeWrapIdx(index, lattice_shape):
    """
    Returns periodic lattice index for a given iterable index.

    :param index: List of cells
    :param lattice_shape: Oxygen distribution in lattice array shape
    :return: tuple of wrapped indexes, or the input unchanged when it is
        a scalar, has the wrong length, or contains slices
    """
    if not hasattr(index, '__iter__'): return index  # handle integer slices
    if len(index) != len(lattice_shape): return index  # must reference a scalar
    # isinstance replaces the non-idiomatic `type(i) == slice` check
    if any(isinstance(i, slice) for i in index): return index  # slices not supported
    # periodic indexing of scalars; the original trailing ValueError was
    # unreachable because this branch always applies here
    return tuple(((i % s + s) % s for i, s in zip(index, lattice_shape)))
def range_union(ranges):
    """
    Returns total size of ranges, expect range as (chr, left, right)
    >>> ranges = [("1", 30, 45), ("1", 40, 50), ("1", 10, 50)]
    >>> range_union(ranges)
    41
    >>> ranges = [("1", 30, 45), ("2", 40, 50)]
    >>> range_union(ranges)
    27
    >>> ranges = [("1", 30, 45), ("1", 45, 50)]
    >>> range_union(ranges)
    21
    >>> range_union([])
    0
    """
    if not ranges:
        return 0
    ranges.sort()
    total_len = 0
    # current merged interval, seeded with the left-most range
    chrom, left, right = ranges[0]
    for next_chrom, next_left, next_right in ranges:
        if next_left > right or next_chrom != chrom:
            # gap or new chromosome: close the merged interval, open a new one
            total_len += right - left + 1
            chrom, left, right = next_chrom, next_left, next_right
        else:
            # overlapping/adjacent range: extend the merged interval
            right = max(next_right, right)
    # flush the last open interval
    total_len += right - left + 1
    return total_len
import base64
def readDataOrPath(dataStr):
    """
    Reads in either base64 data or a path.

    Strings prefixed with 'base64:' are decoded inline (path is None);
    anything else is treated as a file path and read as bytes.
    """
    if not dataStr.startswith('base64:'):
        with open(dataStr, 'rb') as stream:
            return dataStr, stream.read()
    return None, base64.b64decode(dataStr[len('base64:'):])
def truncate_chars(val: str, num: int, end: str = "...") -> str:
    """Truncates a string if it is longer than the specified number of characters.
    Truncated strings will end with `end`, an ellipsis by default; the
    result is then exactly `num` characters long."""
    val_length = len(val)
    end_length = len(end)
    # <= (not <): a string of exactly num chars needs no truncation --
    # the original re-built it as val[:num-3] + '...' for no reason
    if val_length <= num:
        return val
    return f"{val[:num-end_length]}{end}"
def put_item(status, created_at):
    """input post upload status to DynamoDB

    Build the item dict for a DynamoDB put.

    :param status: upload-status object exposing uri, name, filename,
        user and status attributes
    :param created_at: creation timestamp, used as the sort key
    :return: dict ready to pass to a DynamoDB put_item call
    """
    # Why is id the uri?..  (translated from the original Japanese note;
    # intent unclear -- kept as-is)
    return dict(
        id=status.uri,
        PK=status.uri,
        SK=created_at,
        indexKey="Status",
        name=status.name,
        filename=status.filename,
        createdAt=created_at,
        createdUser=status.user,
        status=status.status,
    )
import os
def _libs():
"""
Make a dict containing the name and path of each of the libs.
:return dict: name of the lib as key, path of the lib as value
"""
exclude = ('__init__.py', '__init__.pyc', '__pycache__')
lib_dir = os.path.relpath(os.path.dirname(__file__))
# Filter out self
libs = filter(lambda p: p not in exclude, os.listdir(lib_dir))
return dict((lib, os.path.join(lib_dir, lib)) for lib in libs) | 576091f31cfc7d908c645563d5dcc03a2da49ae5 | 43,348 |
def full_to_one(full):
    """
    Convert full amino acid name to one-letter amino acid code.

    :param full: case-insensitive full name, e.g. 'Glycine' or 'stop'
    :return: one-letter code, e.g. 'G' or '*'
    :raises AssertionError: for names not in the table
    """
    AA = {
        "phenylalanine": "F",
        "leucine": "L",
        "serine": "S",
        "tyrosine": "Y",
        "stop": "*",
        "cysteine": "C",
        "tryptophan": "W",
        "proline": "P",
        "histidine": "H",
        "glutamine": "Q",
        "arginine": "R",
        "isoleucine": "I",
        "methionine": "M",
        "threonine": "T",
        "asparagine": "N",
        "lysine": "K",
        "valine": "V",
        "alanine": "A",
        "aspartic acid": "D",
        "glutamic acid": "E",
        "glycine": "G",
        "unnatural aa": "U",
    }
    # validate against the table itself instead of a hand-maintained
    # duplicate name list, so the two can never drift apart
    key = full.lower()
    assert key in AA, "Error, %s is not a valid amino acid" % full
    return AA[key]
def print_failure(filename, failure, colored=False, verbose=False):
    """
    Pretty prints a failure
    :param filename: str, path to file tested.
        NOTE(review): currently unused -- the message hardcodes
        '(unknown)' where the filename would go; confirm intent.
    :param failure: Failure, object exposing role, line, element, message,
        location and test attributes
    :param colored: bool, prints with ansi colors according to failure severity
    :param verbose: bool, append the failure's location and test details
    :return: the formatted message string (actual printing is commented out)
    """
    def color_string(message, severity):
        # ANSI SGR color codes keyed by severity; unknown severities get white
        RED = '31m'
        YELLOW = '33m'
        BLUE = '34m'
        WHITE = '37m'
        RESET = '0m'
        color_map = {
            'ERROR': RED,
            'WARNING': YELLOW,
            'INFO': BLUE,
        }
        return f'\033[{color_map.get(severity, WHITE)}{message}\033[{RESET}'
    message = f'[{failure.role}] (unknown):{failure.line}: {failure.element}: {failure.message}'
    if verbose:
        message += f'\n  location: {failure.location}\n  test: {failure.test}'
    if colored:
        message = color_string(message, failure.role)
    # print(message)
    return message
import yaml
def load_yaml(filename):
    """Load a yaml file.
    Args:
        filename (str): Filename.
    Returns:
        dict: Dictionary.
    Raises:
        FileNotFoundError: when the file does not exist.
        IOError: for any other read/parse failure.
    """
    try:
        with open(filename, "r") as f:
            config = yaml.load(f, yaml.SafeLoader)
            return config
    except FileNotFoundError:  # for file not found
        raise
    except Exception as e:  # for other exceptions
        # chain the original error so the root cause stays in the traceback
        raise IOError("load {0} error!".format(filename)) from e
def login():
    """ forma de realizar login, sem expor os dados de autentificação

    :return: list of lines (newlines preserved) read from _vault/auth.txt
    """
    # context manager guarantees the handle is closed even on error;
    # list(f) yields exactly the lines the original append loop collected
    with open('_vault/auth.txt', 'r') as f:
        return list(f)
import yaml
def get_projects(fpath):
    """Return projects dictionary.

    :param fpath: path to a YAML file describing the projects
    :return: the parsed YAML content
    """
    with open(fpath) as fh:
        projects = yaml.load(fh, Loader=yaml.FullLoader)
    return projects
import re
import random
def get_work_off_range(off_time, special = 0):
    """
    Generate the clock-out punch time range (translated from Chinese).

    :param off_time: scheduled off-work time "H:M"; either the ASCII ':'
        or the full-width '：' separator is accepted
    :param special: when 1, punch 0-1 minutes after off_time; otherwise
        1-4 minutes after
    :return: a 20-minute window string "H:M-H:M" starting shortly after
        off_time (minutes are not zero-padded)
    """
    # split on ASCII or full-width colon
    times = list(map(int, re.split('[::]', off_time)))
    after = random.randint(1, 4)
    if special == 1:
        after = random.randint(0, 1)
    # start_minutes = (times[1]+10)%60
    # start_hour = times[0] + int((times[1]+10)/60)
    # carry minute overflow into the hour for both window endpoints
    return '%d:%d-%d:%d' % (
        times[0] + int((times[1] + after) / 60),
        (times[1] + after) % 60,
        times[0] + int((times[1] + after + 20) / 60),
        (times[1] + after + 20) % 60
    )
def make_args(pcls):
    """
    Returns an args tuple for the parameter class pcls: the integers
    1..N where N = len(pcls.__rndargs__).
    """
    count = len(pcls.__rndargs__)
    return tuple(range(1, count + 1))
import logging
import calendar
def fjd(datetime_inst):
    """ convert datetime instance to FJD

    Computes seconds since the Unix epoch (via utctimetuple) divided by
    86400, i.e. fractional days since 1970-01-01T00:00:00Z. Naive
    datetimes are accepted but a warning is logged because the result
    may be wrong for them.

    :param datetime_inst: datetime, ideally timezone-aware
    :return: float day count
    """
    if datetime_inst.tzinfo is None:
        logging.warning("datetime instance is not aware of its timezone."
                        " Result possibly wrong!")
    return calendar.timegm(datetime_inst.utctimetuple()) / (24.*3600.)
import random
def random_dictionary_input(input_variables):
    """
    Draw a random state for every input variable.

    @param input_variables: iterable of input node names (the original
        docstring mislabeled this as 'self')
    @return: dictionary mapping each node to a random state drawn from
        {'low', 'medium', 'high'}
    """
    list_of_states = ['low', 'medium', 'high']
    # dict comprehension replaces the manual build loop
    return {node: random.choice(list_of_states) for node in input_variables}
def max_col_width_dict(dataframe, output_dict=None):
    """
    Get column widths with padding.

    Each column's width is the larger of its name length and 1.5x the
    longest cell's string representation (rounded).

    :param dataframe: source pandas DataFrame
    :param output_dict: optional dict updated in place (a new one is
        created when omitted)
    :return: dict mapping column name to display width
    """
    if output_dict is None:
        output_dict = dict()
    # single pass per column replaces the original O(n^2) double loop that
    # only ever matched when the two key sets coincided (i == j)
    for col in dataframe:
        cell_width = int(round(dataframe[col].map(lambda x: len(str(x))).max() * 1.5, 0))
        output_dict[col] = max(cell_width, len(col))
    return output_dict
import os
def file_gen_new(fname,fextend='txt',foriginal=True,bool_dot=True):
    """Generate new file name without overwritings
    Args:
        fname (str) : input file fname
        fextend (str) : file extension; only used when fname has no dot
            or bool_dot is False
        foriginal (bool): when True, return fname itself if no file with
            that name exists yet
        bool_dot (bool) : force check dot convention or not
    Returns:
        str : new file name, suffixed with '-1', '-2', ... until unused
    """
    filename = fname
    pos = filename.rfind('.')
    if bool_dot and pos != -1:
        # split at the last dot: the tail (including the dot) is the extension
        fname = filename[:pos]
        fextend = filename[pos:]
    else:
        fextend = '.' + fextend
    if foriginal is True:
        # probe whether the unsuffixed name is free by trying to open it.
        # NOTE(review): the bare except also swallows errors other than
        # "file missing" (e.g. permission errors) -- confirm acceptable.
        try:
            f = open(fname + fextend)
            f.close()
        except:
            return fname + fextend
    i = 1
    filename = fname
    while True:
        # append an increasing numeric suffix until the name is unused
        fname = filename + '-' + str(i) + fextend
        if not os.path.isfile(fname): break
        i += 1
    return fname
def get_next_end_cycle_time(last_poll_time):
    """
    This returns the next cycle time to poll to.
    This would be set as the end time.

    :param last_poll_time: previous poll time (numeric, in seconds)
    :return: last_poll_time advanced by one 60-second cycle
    """
    return last_poll_time + 60
def rds_product_engine_match(product, engine):
    """ Check whether an RDS reservation 'product' matches a running instance 'engine' """
    known_pairs = {
        ('postgresql', 'postgres'),
        ('mysql', 'mysql'),  # note: not sure if this is correct
    }
    return (product, engine) in known_pairs
import yaml
def load_config(config_path):
    """Load configuration file in YAML format.
    Args :
        config_path (string) : path to configuration file
    Returns :
        config (dict) : configuration in dictionary format
    """
    with open(config_path) as file:
        config = yaml.safe_load(file)
    return config
import os
def _check_uncompressed_kaggle_display_advertising_files(dataset_path):
    """Verify the uncompressed Kaggle display-advertising files are present
    and have the expected byte sizes."""
    # Expected file names paired with their exact uncompressed sizes in bytes.
    expected_files = (
        ("train.txt", 11147184845),
        ("test.txt", 1460246311),
        ("readme.txt", 1927),
    )
    for file_name, file_size in expected_files:
        file_path = os.path.join(dataset_path, file_name)
        if not os.path.exists(file_path):
            return False
        if os.path.getsize(file_path) != file_size:
            # A size mismatch usually means a truncated/corrupted download.
            print("************** {} may be error, need to download again **************".
                  format(file_path), flush=True)
            return False
    return True
import collections
def createMetisGraphFile(jsonSendgraph):
    """This method creates the contents that a graph input file needs to have in order to be processable by METIS
    (http://glaros.dtc.umn.edu/gkhome/projects/gp/products?q=views/metis). A METIS input file consists of one header
    line and of n lines, where n is the number of vertices in the graph. For our purposes the header line looks as
    follows:
    n m 011 1
    n and m are the number of vertices and edges in the graph respectively while the latter two elements tell METIS
    that we will have one weight on each vertex and that the edges are also weighted.
    All remaining lines are of the form
    vw v1 w1 ... w1 wN
    where the ith line contains information about the ith vertex. On each line vw is the weight of the vertex,
    v1..vN are the vertices adjacent to the ith vertex and w1..wN are the weights for these vertices.
    more information about this format can be found here: http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis/manual.pdf
    (page 10 at the very top).
    """
    graph_file = "tmp.graph"  # NOTE(review): relative path -- written into the current working directory
    # default is a dictionary of a default dictionary with a default weight of 0
    edge_weights = collections.defaultdict(lambda: collections.defaultdict(lambda: 0)) # default value is 0
    vertex_weights = collections.defaultdict(lambda: 1) # default weight of 1 - zero is not supported by metis
    max_node_id = 0
    # build the connection matrix
    edge_count = 0
    for link in jsonSendgraph['links']:
        # Each link carries integer vertex ids and an integer weight.
        sourceId = int(link['source'])
        targetId = int(link['target'])
        value = int(link['value'])
        # Track the highest id seen; the vertex count is derived from it below,
        # so ids are assumed to be numbered 1..max without gaps being a problem.
        if sourceId > max_node_id:
            max_node_id = sourceId
        if targetId > max_node_id:
            max_node_id = targetId
        if sourceId != targetId:  # self-loops are skipped entirely
            if edge_weights[sourceId][targetId] == 0: # only count the edge if it does not yet exist
                edge_count += 1
            edge_weights[sourceId][targetId] += value
            # metis requires a reverse link, so we put a value of 1 if it does not yet exist
            if edge_weights[targetId][sourceId] == 0:
                edge_weights[targetId][sourceId] = 1
            vertex_weights[targetId] += value
            vertex_weights[sourceId] += value # count outgoing messages as well to vertex weight
    # count nodes and links
    vertex_count = max_node_id
    # Header line: "<n vertices> <m edges> 011 1" (weighted vertices + edges).
    graph_file_content = "{n} {m} 011 1".format(n=vertex_count, m=edge_count)
    for from_vertex in range(1, vertex_count + 1): # indexed from 1..num_Vertex
        vertex_line = ""
        # Emit "<neighbor> <edge weight>" pairs for this vertex's adjacency.
        for to_vertex in edge_weights[from_vertex].keys():
            if len(vertex_line) > 0:
                vertex_line += " "
            vertex_line += "{v} {w}".format(v=to_vertex, w=edge_weights[from_vertex][to_vertex])
        # prepend vertex weight
        vertex_line = "{vw} ".format(vw=vertex_weights[from_vertex]) + vertex_line
        graph_file_content = graph_file_content + "\n" + vertex_line
    with open(graph_file, 'w') as f:
        f.write(graph_file_content)
    # print("Metis file content:\n{0}".format(graph_file_content))
    return graph_file
import re
def to_np(url):
    """Return a comment url converted to the np subdomain.

    Any ``http(s)://...reddit.com`` prefix is rewritten to
    ``https://np.reddit.com``; the path/query are left untouched.
    """
    # Fix: use a raw string -- "\." in a plain literal is an invalid escape
    # sequence (SyntaxWarning on modern Python). Pattern is otherwise unchanged.
    return re.sub(r"https?://.*reddit\.com", "https://np.reddit.com", url)
def remove_duplicates(full_df, agg_numeric_by='mean', agg_object_by='first',
                      **kwargs):
    """Locate and remove duplicate timestamp entries if present in passed
    dataframe.
    Note that this module does not remove duplicate concentration values, only
    searches for duplicated index (assume time-like) values.
    Args:
        full_df (pandas dataframe):
            Sensor dataframe at recorded sampling frequency.
        agg_numeric_by (str):
            Aggregation applied to numeric columns of duplicated timestamps.
        agg_object_by (str):
            Aggregation applied to object/datetime columns of duplicated
            timestamps.
    Returns:
        full_df (pandas dataframe):
            Modified sensor dataframe at recorded sampling frequency with
            duplicated timestamps removed.
    """
    indent = kwargs.get('print_indent', 0)
    # Idiom fix: test the boolean mask directly instead of materializing a
    # sub-frame via `index.duplicated() == True` just to check emptiness.
    if not full_df.index.duplicated().any():
        print(f'{indent*"."}no duplicate timestamps found')
        return full_df
    col_order = list(full_df.columns)
    original_df_len = full_df.shape[0]
    # Aggregate numeric and object-like columns separately, then rejoin.
    obj_df = full_df.select_dtypes(include=['object', 'datetime'])
    num_df = full_df.select_dtypes(exclude=['object', 'datetime'])
    num_df = num_df.groupby(num_df.index).agg(agg_numeric_by)
    obj_df = obj_df.groupby(obj_df.index).agg(agg_object_by)
    full_df = num_df.join(obj_df)
    full_df = full_df[col_order]  # restore the original column order
    modified_df_len = full_df.shape[0]
    n_duplicates = original_df_len - modified_df_len
    print(f'{indent*"."}{str(n_duplicates)} duplicate timestamps found')
    print(f'{(indent+2)*"."}removing duplicate entries')
    return full_df
import sys
def string_type():
    """Return the byte-string class for the running interpreter.

    Python 2 keeps ASCII text in str() and Unicode in unicode(), while
    Python 3 keeps Unicode in str() and raw bytes in bytes(). This helper
    yields the "string of bytes" type appropriate to each.
    """
    if sys.version_info.major < 3:
        # Python 2 or lower
        return str
    return bytes  # flake8: noqa
def hex_to_rgb(hex_val):
    """Convert a 6-digit hex color string to an ``(r, g, b)`` tuple.

    Generalized to also accept an optional leading ``#`` (e.g. both
    ``'ff00ff'`` and ``'#ff00ff'`` work); existing callers are unaffected.

    https://stackoverflow.com/questions/29643352/converting-hex-to-rgb-value-in-python
    """
    hex_val = hex_val.lstrip('#')  # tolerate the common '#rrggbb' form
    # Parse the three 2-digit channel fields at offsets 0, 2, 4.
    return tuple(int(hex_val[i : i + 2], 16) for i in (0, 2, 4))
def find_total_denials(overall_paths):
    """Return the total number of denied (falsy/empty) paths."""
    # An empty path represents a denial; count the falsy entries.
    return sum(1 for path in overall_paths if not path)
import base64
def download_link(object_to_download, filename, download_link_text):
    """
    Generates a link to download the given object_to_download.
    Args:
        object_to_download: str.
        filename: str. filename and extension of file.
        download_link_text: str. Text to display for download link.
    Examples:
        download_link(our_string, 'my_file.txt', 'Click here to download your text!')
    """
    # some strings <-> bytes conversions necessary here
    b64 = base64.b64encode(object_to_download.encode()).decode()
    # Bug fix: the download attribute previously hard-coded "(unknown)" and
    # silently ignored the `filename` argument.
    return f'<a href="data:file/txt;base64,{b64}" download="{filename}">{download_link_text}</a>'
def MyGrep(hash_list, index_name, iname):
    """Filter dicts by index name.

    hash_list: (list<subdict>)
        subdict: (dict) header -> value
    index_name: (str) key in subdict that maps to index names
    iname: (str) the index name we want.
    Returns:
        list<dict> containing only the dicts whose index matches iname.
    """
    return [entry for entry in hash_list if entry[index_name] == iname]
def intersection(list1, list2):
    """
    Compute and return the elements common to list1 and list2.
    `list1` and `list2` can be sets, lists, or pandas.Series.
    """
    common = set(list1) & set(list2)
    return list(common)
def heaviside(x):
    r"""
    Heaviside step function: x -> x'\in{0,1}
    Parameters
    ----------
    x : float
        Argument, to be corrected using the step function.
    Returns
    -------
    integer
        Binary step output: 0 for x < 0, 1 for x >= 0.
    """
    # Bug fix: the original fell through and returned None for x == 0,
    # violating the documented binary {0,1} output. The convention
    # H(0) = 1 is adopted so the result is always 0 or 1.
    if x < 0:
        return 0
    return 1
def timeseries_wrapper(timeseries, starttimeind=0):
    """Decorator to convert a list or numpy array into a function which accepts a timeind."""
    def lookup(timeind):
        # Shift by the starting index so timeind is an absolute time index.
        return timeseries[timeind - starttimeind]
    return lookup
import re
def camel_to_snake_case(name):
    """Converts camelCase to snake_case.

    Args:
        name (str): Camel case name

    Returns:
        str: Name in snake case
    """
    # First pass splits an uppercase letter followed by a lowercase word,
    # second pass splits lowercase/digit-to-uppercase transitions.
    partial = re.sub(r"(.)([A-Z][a-z]+)", r"\1_\2", name)
    snake = re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", partial)
    return snake.lower()
def laplace(f, g_inv, g_det, X):
    """
    Calculates Laplace(f), using the inverse metric g_inv, the determinant of
    the metric g_det, all in variables X.
    """
    n = len(X)
    result = 0
    # Second-derivative term: g^{ij} * d2f/(dx_i dx_j)
    for i in range(n):
        for j in range(n):
            result += g_inv[i, j]*f.diff(X[i]).diff(X[j])
    # First-derivative term from the varying determinant:
    # (dg/dx_sigma) * g^{sigma alpha} * (df/dx_alpha) / (2 g)
    for sigma in range(n):
        for alpha in range(n):
            result += g_det.diff(X[sigma]) * g_inv[sigma, alpha] * \
                f.diff(X[alpha]) / (2*g_det)
    return result
def hashWord(word):
    """Hashes a word into its similarity equivalent.

    MXM becomes 010, ASDF becomes 0123, AFAFA becomes 01010, etc.
    """
    first_index = {}
    digits = []
    for ch in word:
        # Assign each distinct character the next index on first sight.
        if ch not in first_index:
            first_index[ch] = str(len(first_index))
        digits.append(first_index[ch])
    return ''.join(digits)
import os
import subprocess
def align_sequences_to_cm(cmfile, fasta_file, dest_dir=None):
    """
    Aligns a fasta to a covariance model using cmalign
    cmfile: A valid covariance model
    fasta_file: A valid nucleotide fasta file
    dest_dir: Destination directory where to generate any output
    return: Returns path to the aligned sequences, otherwise
    returns None if file does not exist
    """
    # Default the output location to the directory containing the fasta file.
    if dest_dir is None:
        dest_dir = os.path.split(fasta_file)[0]
    # Output name: "<fasta basename before the first dot>_aln.stk".
    out_filename = os.path.basename(fasta_file).partition('.')[0]
    out_filename += "_aln.stk"
    new_seed = os.path.join(dest_dir, out_filename)
    # The grep strips per-residue annotation (#=GR) lines from the Stockholm
    # output. NOTE(review): shell=True with interpolated, unquoted paths is
    # vulnerable to shell injection if the paths are not trusted.
    cmd = "cmalign %s %s | grep -Ev '^(#=GR)' > %s" % (cmfile, fasta_file, new_seed)
    subprocess.call(cmd, shell=True)
    # The return code is ignored; existence of the output file is the only
    # success signal for the pipeline above.
    if os.path.exists(new_seed):
        return new_seed
    return None
import argparse
def parse_args(args=None):
    """Argument parser.

    Builds the ``crc`` command-line interface and parses ``args``
    (defaults to ``sys.argv[1:]`` when None), returning an
    ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser(
        usage=(
            "crc [options]"
            " -e [ENHANCER_FILE]"
            " -c [CHROMOSOMES_FOLDER_PATH]"
            " -g [GENOME]"
            " -o [OUTPUTFOLDER]"
            " -n [NAME]"
        )
    )
    # Required flags
    parser.add_argument("-e", "--enhancer_file", dest="enhancers", default=None, type=str,
                        help=(
                            "Provide a ROSE generated enhancer table "
                            "(_AllEnhancers_ENHANCER_TO_GENE.txt)"
                        ), required=True)
    parser.add_argument("-g", "--genome", dest="genome", default=None, type=str,
                        help=(
                            "Provide the build of the genome to be used for the analysis. "
                            "Currently supports HG19, MM10, and RN6"
                        ), required=True)
    parser.add_argument("-c", "--chrom-path", dest="chrom_path", type=str,
                        help=(
                            "Provide a path to a folder with a seperate fasta file for each "
                            "chromosome"
                        ), required=True)
    parser.add_argument("-o", "--output", dest="output", default=None, type=str,
                        help="Enter an output folder", required=True)
    parser.add_argument("-n", "--name", dest="name", default=None, type=str,
                        help="Provide a name for the job", required=True)
    # Either bams for valleys or subpeaks are needed
    parser.add_argument("-b", "--bam", dest="bam", default=None, type=str,
                        help="Enter a comma separated list of bams of valley finding",
                        required=False)
    parser.add_argument("-s", "--subpeaks", dest="subpeaks", default=None, type=str,
                        help="Enter a BED file of regions to search for motifs", required=False)
    # Additional options
    parser.add_argument("-m", "--mask", dest="mask_file", type=str,
                        help="Masking file in BED format", default=None, required=False)
    parser.add_argument("-a", "--activity", dest="activity", default=None, type=str,
                        help="A table with active gene names in the first column", required=False)
    parser.add_argument("-l", "--extension-length", dest="extension", default=100, type=int,
                        help=(
                            "Enter the length to extend subpeak regions for motif finding. "
                            "default is 100"
                        ), required=False)
    parser.add_argument("-N", "--number", dest="number", default=1, type=int,
                        help=(
                            "Enter the number of non overlapping motifs in a region required "
                            "to assign a binding event. Default=1"
                        ), required=False)
    # NOTE(review): default=False with type=str is inconsistent -- the unset
    # value is a bool while a provided value is a string; confirm callers
    # only truth-test this option.
    parser.add_argument("--motifs", dest="motifs", default=False, type=str,
                        help="Enter additional PWM file for the analysis", required=False)
    parser.add_argument("-t", "--tfs", dest="tfs", default=None, type=str,
                        help=(
                            "Enter additional TFs (comma separated) to be used in the bindinf "
                            "analysis"
                        ), required=False)
    parser.add_argument("--config", dest="config", default='', type=str,
                        help="Enter genome configuration file to overwrite default paths",
                        required=False)
    return parser.parse_args(args)
import re
def _check_if_item_allowed(item_name, allow_patterns, disallow_patterns):
    """
    Decide whether an item named ``item_name`` is permitted.

    Parameters
    ----------
    item_name: str
        Name of the item.
    allow_patterns: list(str)
        The item must match at least one of the re patterns. ``[None]``
        selects all items; ``[]`` selects none.
    disallow_patterns: list(str)
        An item matching at least one pattern is rejected. ``[None]`` or
        ``[]`` rejects nothing.

    Returns
    -------
    boolean
        True if the item passes both the allow and disallow filters.
    """
    if not allow_patterns:
        return False
    # [None] acts as a wildcard; otherwise at least one pattern must match.
    if allow_patterns[0] is None:
        allowed = True
    else:
        allowed = any(re.search(pattern, item_name) for pattern in allow_patterns)
    if allowed and disallow_patterns and (disallow_patterns[0] is not None):
        # Any matching disallow pattern vetoes the item.
        if any(re.search(pattern, item_name) for pattern in disallow_patterns):
            allowed = False
    return allowed
import re
def unificate_name(name):
    """Collapse runs of underscores/spaces into single spaces, trim the ends,
    and capitalize the first letter."""
    cleaned = re.sub(r"[_ ]+", " ", name).strip()
    if len(cleaned) < 2:
        # Zero- or one-character names are simply upper-cased.
        return cleaned.upper()
    return cleaned[0].upper() + cleaned[1:]
def py_opp(x):
    """
    Python's unary ``-`` operator exposed as a plain function.
    @param x floats
    @return `-x`
    """
    negated = -x
    return negated
import logging
def query_stream_log_handler(logger):
    """Return the logger's first handler, attaching a new StreamHandler when
    the logger has none."""
    if logger.handlers:
        handler = logger.handlers[0]
    else:
        handler = logging.StreamHandler()
        logger.addHandler(handler)
    return handler
import re
def validate_cron_string(cron_string, error_on_invalid=False):
    """
    Validate that a string is a Unix cron string.
    """
    # Note: This is also a Temporal function, but I'm trying to avoid making Temporal a dependency of BTU.
    field_patterns = (
        r"(?P<minute>\*(\/[0-5]?\d)?|[0-5]?\d)",
        r"(?P<hour>\*|[01]?\d|2[0-3])",
        r"(?P<day>\*|0?[1-9]|[12]\d|3[01])",
        r"(?P<month>\*|0?[1-9]|1[012])",
        r"(?P<day_of_week>\*|[0-6](\-[0-6])?)",
    )
    # Joining the five field patterns with \s+ reproduces the original
    # "{0}\s+{1}\s+{2}\s+{3}\s+{4}" format template exactly.
    crontab_time_format_regex = re.compile(r"\s+".join(field_patterns))
    if crontab_time_format_regex.match(cron_string) is not None:
        return True
    if error_on_invalid:
        raise Exception(f"String '{cron_string}' is not a valid Unix cron string.")
    return False
def aws_region_name(request):
    """Read the AWS region from the ``--aws-region`` command-line option."""
    region = request.config.getoption("--aws-region")
    return region
def indentation_error():
    """Bad indentation."""
    # The exec'd snippet indents its second statement, which always raises
    # IndentationError before anything executes.
    source = "None\n None"
    try:
        exec(source)
    except IndentationError:
        return "extra indentation to remove"
import os
def process_args(args):
    """Validate options returned by get_args(); the sre18_dev_dir option
    must point at an existing path."""
    dev_dir = args.sre18_dev_dir
    if dev_dir == '' or not os.path.exists(dev_dir):
        raise Exception("The specified sre18_dev_dir '{0}' not exist.".format(dev_dir))
    return args
def flatten(data):
    """Flatten out all list items in data.

    Recursively expands nested lists into one flat list; any non-list value
    is wrapped in a single-element list.
    """
    # Idiom fix: isinstance (rather than `type(data) == list`) also handles
    # list subclasses; non-list inputs keep the original wrap-in-list behavior.
    if isinstance(data, list):
        flatten_list = []
        for item in data:
            flatten_list.extend(flatten(item))
        return flatten_list
    return [data]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.