content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import math
def calc_angle(x: float, y: float) -> float:
    """Return the polar angle of the vector (x, y).

    :param x: x axis value in [-1, 1]
    :param y: y axis value in [-1, 1]
    :return: angle in [0, 2*pi), or -1 as a sentinel when x == y == 0
    """
    if x == 0:
        if y == 0:
            return -1  # undefined direction: sentinel value
        return math.pi / 2 if y > 0 else 3 * math.pi / 2
    theta = math.atan(y / x)
    if x < 0:
        # atan only covers quadrants I/IV; shift for negative x
        theta = theta + math.pi
    return theta % (2 * math.pi)
def calc_rolling_agg(df, target_var, rolling_window,
                     hierarchy, agg_func="mean", min_periods=1):
    """
    Calculate a rolling aggregation of `target_var` within each `hierarchy`
    group of `df`.

    :param df: pandas DataFrame; assumed already ordered by time within groups
    :param target_var: column name to aggregate
    :param rolling_window: window size passed to Series.rolling
    :param hierarchy: column (or list of columns) to group by
    :param agg_func: aggregation name understood by .agg (default "mean")
    :param min_periods: minimum observations required per window
    :return: result of groupby().apply() with an added
        '<agg_func>_<rolling_window>_<target_var>' column

    NOTE(review): assumes there aren't any time gaps within the hierarchy.
    """
    def rolling_calc(df, calc_col, rolling_window, agg_func):
        # closes over target_var and min_periods from the enclosing scope
        column_name = f'{agg_func}_{rolling_window}_{target_var}'
        df[column_name] = df[calc_col].rolling(window=rolling_window,
                                               min_periods=min_periods)\
                                      .agg(agg_func)
        return df
    rolling_df = df.groupby(hierarchy).apply(rolling_calc, target_var,
                                             rolling_window, agg_func)
    return rolling_df | 66614368ffb1dc7b53c4fa1174b2a2cb1cab3bbd | 40,146 |
def make_date(v):
    """Convert a DD.MM.YYYY date string into ISO YYYY-MM-DD order.

    >>> make_date("01.02.2003")
    '2003-02-01'
    """
    parts = v.split(".")
    return "-".join(parts[::-1])
def igLatinPay(words: str) -> str:
    """Translate a whitespace-separated string into Pig Latin.

    Each word becomes rest-of-word + first letter + "ay"; every word
    (including the last) is followed by a single space.
    """
    pieces = []
    for token in words.split():
        pieces.append(token[1:] + token[0] + "ay ")
    return "".join(pieces)
import torch
def combine_variance(avg_a, count_a, var_a, avg_b, count_b, var_b):
    """
    Compute variance of X given means and variances of A and B, where X = A union B.
    Reference: https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#cite_note-:0-10

    Fix: the degenerate branch used a hard-coded `.cuda()`, which crashed on
    CPU-only machines and ignored var_a's dtype; `torch.zeros_like` keeps
    var_a's shape, dtype and device (CPU or CUDA alike).

    :param avg_a, avg_b: means of the two samples (tensors)
    :param count_a, count_b: sample sizes
    :param var_a, var_b: sample (Bessel-corrected) variances
    :return: pooled sample variance tensor of X
    """
    if count_a + count_b <= 1:
        # fewer than two observations: variance is undefined, return zeros
        return torch.zeros_like(var_a)
    delta = avg_b - avg_a
    M2_a = var_a * (count_a - 1)
    M2_b = var_b * (count_b - 1)
    M2 = M2_a + M2_b + delta ** 2 * count_a * count_b / (count_a + count_b)
    return M2 / (count_a + count_b - 1)
def limit(value, max_value):
    """Clamp value to the symmetric range [-max_value, max_value].

    :param value: number to clamp
    :param max_value: magnitude bound (assumed non-negative)
    :return: the clamped value
    """
    lower = -max_value
    upper = max_value
    return max(lower, min(value, upper))
def get_abs_axis_pos(rel_pos, ax):
    """Convert relative axis coordinates (0..1) to absolute data coordinates,
    so that (1, 1) maps to (xmax, ymax).

    :param rel_pos: (x, y) pair of relative coordinates
    :param ax: axis object exposing get_xlim()/get_ylim()
    :return: (x, y) tuple in data coordinates
    """
    x_min, x_max = ax.get_xlim()
    y_min, y_max = ax.get_ylim()
    abs_x = x_min + rel_pos[0] * (x_max - x_min)
    abs_y = y_min + rel_pos[1] * (y_max - y_min)
    return (abs_x, abs_y)
def hex2color(hexnum):
    """Split a 24-bit RGB integer into an (r, g, b) tuple.

    hexnum: integer such as 0xFF8800.
    """
    red = int((hexnum >> 16) & 0xFF)
    green = int((hexnum >> 8) & 0xFF)
    blue = int(hexnum & 0xFF)
    return red, green, blue
def not_found(e):
    """404 handler: return the not-found message body.

    :param e: error object supplied by the web framework (unused here)
    """
    return "page not founderino" | 08da45a9a17bd51a3c3531210e5cef5b8bfbd79d | 40,154 |
def verifyConfigFile(config_file):
    """Scan the config text for non-ASCII (8-bit) characters, since 8-bit
    chars won't allow a proper boot process.

    :param config_file: full text of the config file
    :return: (line_number, line, character) for the first offending character
        (line numbers are 1-based), or (-1, -1) when the text is clean.
        NOTE(review): the success return has 2 elements vs 3 on failure;
        kept as-is for backward compatibility with existing callers.

    Fix: replaced the O(128) per-character list membership test with a
    constant-time ord() check (same semantics: code points above 127 fail).
    """
    for i, line in enumerate(config_file.split('\n')):
        for character in line:
            if ord(character) > 127:
                return i + 1, line, character
    return -1, -1
def get_fren_word(category_data):
    """
    Collect the "french_word" value from every entry of category_data.

    Params:
        1) category_data - list of dicts for the user's selected level/category
    Examples:
        fren_words = get_fren_word(category_data)
        > fren_words = ['chien', 'chat', ...]
    """
    return [entry["french_word"] for entry in category_data]
def remove_pos(tt):
    """Given a POS-tagged token (<token>, <pos>), return just the token."""
    token = tt[0]
    return token
import re
def get_machine_code(instrument_model):
    """Extract the machine code from an instrument model string.

    Parameters
    ----------
    instrument_model: str
        An instrument's model of the form A999999 or AA999999.

    Returns
    -------
    str
        The one- or two-letter prefix of the model.

    Raises
    ------
    ValueError
        When the model does not start with one or two letters.
    """
    prefix_pattern = re.compile(r'^([a-zA-Z]{1,2})')
    match = prefix_pattern.search(instrument_model)
    if match is None:
        raise ValueError('Cannot find a machine code. This instrument '
                         'model is malformed %s. The machine code is a '
                         'one or two character prefix.' % instrument_model)
    return match[0]
def write_line(filename, line=""):
    """Open a file and write a single line (plus newline) to it.

    Args:
        filename (str): Path to the file.
        line (str, optional): Line to write. Defaults to "".

    Returns:
        bool: True if successfully written else False.
    """
    try:
        with open(filename, "w") as handle:
            handle.write(f"{line}\n")
        return True
    except Exception as err:
        print(f"Error while writing: {err}")
        return False
def add(x, y):
    """Return the sum of x and y.

    >>> add(1, 5)
    6
    >>> add(-8, 2.7)
    -5.3
    """
    total = x + y
    return total
def is_stored(self):
    """
    Check whether this model lives in a secondary storage system.

    Delegates to an `is_persisted()` method when the object defines one;
    otherwise the model is considered transient (the base-model default,
    since no persistence layer is present).

    :rtype: bool
    :return: whether the model is persisted; False by default
    """
    if hasattr(self, "is_persisted"):
        # a persistence layer is present: let it decide
        return self.is_persisted()
    # default case: transient model
    return False
def CRC(msg):
    """Compute a 16-bit ones'-complement checksum over big-endian
    character pairs of msg (msg length assumed even).
    """
    acc = 0
    for idx in range(0, len(msg), 2):
        word = (ord(msg[idx]) << 8) + ord(msg[idx + 1])
        total = acc + word
        # fold the carry back into the low 16 bits
        acc = (total & 0xffff) + (total >> 16)
    return ~acc & 0xffff
import numpy
def _compute_spatial_statistic(gridded_data, log10_probability_map):
    """
    Aggregate the log10 probability over cells where events occurred.

    Args:
        gridded_data: per-cell event counts
        log10_probability_map: per-cell log10 probabilities (same gridding)

    Returns:
        Sum of log10 probabilities over occupied cells, or numpy.nan when
        there are no events (the statistic is undefined without target eqs).
    """
    # This should implement similar logic to the spatial tests wrt undersampling;
    # technically, if there are no target eqs you can't compute this statistic.
    if numpy.sum(gridded_data) == 0:
        return numpy.nan
    # NOTE(review): numpy.unique(numpy.argwhere(...)) flattens row/column
    # indices together for >1-D input — presumably gridded_data is 1-D here;
    # confirm with callers before relying on this for 2-D grids.
    idx = numpy.unique(numpy.argwhere(gridded_data))
    return numpy.sum(log10_probability_map[idx]) | 6ee4fb083540a0690ad830c6980bf87b152091c5 | 40,165 |
import math
def ceil_pow10(value):
    """Round value up to the next power of 10 (like math.ceil, base 10).

    Requires value > 0 (math.log10 raises a domain error otherwise).

    >>> ceil_pow10(42)
    100
    """
    return 10 ** math.ceil(math.log10(value)) | 2fa2cf60645cdf2858e4182976a486cedbe54cad | 40,166 |
def is_owner_or_server_owner_check(ctx):
    """Return True when the message author is the bot owner or the server owner."""
    author_id = ctx.message.author.id
    if ctx.bot.owner_id == author_id:
        return True
    return author_id == ctx.message.server.owner.id
def _adjust_bounds(model, rxn, bounds):
    """
    Apply new bounds to the reaction `rxn` in a cobra model.

    :param model: cobra model holding the reactions
    :param rxn: reaction id to look up
    :param bounds: (a, b) pair; assigned as (lower, upper) when a < b,
        otherwise the pair is applied swapped (handles descending/equal input)
    :return: (model, skip) — skip is True when `rxn` was not found
    """
    skip = False
    if bounds[0] < bounds[1]: # to fix the issue with negaive values above
        try:
            model.reactions.get_by_id(rxn).lower_bound = round(bounds[0], 1)
            model.reactions.get_by_id(rxn).upper_bound = round(bounds[1], 1)
        except KeyError:
            print(f'Did not work for {rxn}')
            skip = True
    else:
        # bounds arrived in descending (or equal) order: assign swapped;
        # note the upper bound is set first in this branch
        try:
            model.reactions.get_by_id(rxn).upper_bound = round(bounds[0], 1)
            model.reactions.get_by_id(rxn).lower_bound = round(bounds[1], 1)
        except KeyError:
            print(f'Did not work for {rxn}')
            skip = True
    return model, skip | a2feb75a39995d662920bd218ebfe4c1126cc25d | 40,168 |
def simplifyData(DataofBAV, bestAttrSerl):
    """
    Remove the component at index `bestAttrSerl` (the best split attribute)
    from every sample, without mutating the input.

    Fix: the original used a shallow `list.copy()` and then `del`-ed from
    each row, which mutated the caller's inner lists. Rows are now copied
    before deletion, so the input data is left untouched.

    Args:
        DataofBAV: list of sample rows (each a list)
        bestAttrSerl: index of the attribute column to drop
    Returns:
        New list of new rows with the attribute removed.
    """
    simplified = []
    for sample in DataofBAV:
        reduced = list(sample)  # copy the row so the caller's data survives
        del reduced[bestAttrSerl]
        simplified.append(reduced)
    return simplified
def clean_orcid(value):
    """
    Minimal ORCID validation: strip any leading orcid.org/ URL prefix,
    otherwise return the value unchanged.
    """
    if 'orcid.org/' in value:
        return value.rsplit('/', 1)[-1]
    return value
def _checkObs(data, obs):
"""
data: OI dict
obs: list of observable in ['|V|', 'V2', 'DPHI', 'T3PHI', 'FLUX', 'NFLUX']
returns list of obs actually in data
"""
ext = {'|V|':'OI_VIS', 'DPHI':'OI_VIS', 'PHI':'OI_VIS',
'V2':'OI_VIS2',
'T3PHI':'OI_T3', 'T3AMP':'OI_T3',
'FLUX':'OI_FLUX',
'NFLUX':'OI_FLUX',
}
return [o for o in obs if o in ext and ext[o] in data] | cf9212a74259d2a5673dea0b8d417d9b30c50c8f | 40,173 |
def fix_country_names(data):
    """Match the contract country names with the format used in the map data.

    Adds a 'country_name_standardized' column derived from 'country';
    unknown names pass through unchanged.
    """
    replace_dict = { 'Yemen, Republic' : 'Yemen' ,
                'Venezuela, Repu' : 'Venezuela',
                'Syrian Arab Rep' : 'Syria',
                'Slovak Republic' : 'Slovakia',
                'Congo, Democrat' : 'Democratic Republic of Congo',
                'Congo, Republic' : 'Republic of Congo',
                'Egypt, Arab Rep' : 'Egypt',
                'Gambia, The' : 'The Gambia',
                'Iran, Islamic R' : 'Iran',
                'Kyrgyz Republic' : 'Kyrgyzstan',
                'Korea, Republic' : 'Republic of Korea',
                "Lao People's De" : "Lao PDR",
                "Macedonia, form" : "Macedonia",
                "Central African" : "Central African Republic"
                }
    standardized = data['country'].replace(replace_dict)
    data['country_name_standardized'] = standardized
    return data
import sys
def python_module_exists(module_name):
    """Return True when module_name is already loaded or can be imported."""
    if module_name in sys.modules:
        return True
    try:
        __import__(module_name)
    except ImportError:
        return False
    return True
def s3_bucket_exists(session, name):
    """Test for existence of an S3 bucket.
    Note that this method can only test for the existence of buckets owned by
    the user: it lists the caller's own buckets rather than probing the name.
    Args:
        session (Session): Boto3 session used to lookup information in AWS.
        name (string): Name of S3 bucket.
    Returns:
        (bool): True if bucket exists.
    """
    client = session.client('s3')
    resp = client.list_buckets()
    # linear scan over the owned buckets returned by ListBuckets
    for bucket in resp['Buckets']:
        if bucket['Name'] == name:
            return True
    return False | 8005ea3eb81a8ce34f2def9ea5430fda60cad65c | 40,176 |
def get_input(text):
    """Prompt with `text` and return the line typed by the user
    (no trailing newline)."""
    return input(text) | 0460eb23c349179c5607b86a1a647c3e1e95e856 | 40,177 |
import csv
def make_rows(f, attributes):
    """Append `attributes` rows to CSV file `f`, then return the file's
    full (text-mode) contents.

    (f, attributes) -> str
    """
    with open(f, 'a', newline='') as out_handle:
        csv.writer(out_handle).writerows(attributes)
    with open(f) as in_handle:
        return in_handle.read()
def dedupe_dictlist(x_in, keys):
    """
    De-duplicate a list of dicts by the joined string values of `keys`,
    keeping the LAST record seen for each signature (first-seen order of
    signatures is preserved).

    :param x_in: list of dicts
    :param keys: keys whose (stringified) values identify a record
    :return: de-duplicated list of dicts
    """
    latest = {}
    for record in x_in:
        signature = '_'.join(str(record.get(k, '')) for k in keys)
        latest[signature] = record
    return list(latest.values())
import requests
def get_user(id):
    """Fetch a user record from the JSONPlaceholder demo API.

    Args:
        id (int): employee/user id, sent as the `id` query parameter.
    Returns:
        The JSON-decoded response body — presumably a list of matching
        user objects for this list endpoint; confirm against the API.
    """
    url = 'https://jsonplaceholder.typicode.com/'
    user = requests.get(url + 'users', params={'id': id}).json()
    return user | f40ad8b32b8bcd1c3dba4858a6dcd30cd15d101b | 40,180 |
def get_free_indexdata(tensor_list, index_list):
    """Collect the numbers, dims, qims and dirs of all free (negative)
    indices of the network, sorted by index number in descending order.

    :param tensor_list: tensors with .shape, .qhape and .dirs attributes
    :param index_list: per-tensor index labels; negative labels are free
    :return: tuple of four tuples (inds, dims, qims, dirs)
    """
    entries = []
    for tensor, labels in zip(tensor_list, index_list):
        for position, label in enumerate(labels):
            if label >= 0:
                continue
            if tensor.qhape is not None:
                qim = tensor.qhape[position]
                direction = tensor.dirs[position]
            else:
                qim = None
                direction = None
            entries.append((label, tensor.shape[position], qim, direction))
    entries.sort(reverse=True)
    inds, dims, qims, dirs = zip(*entries)
    return inds, dims, qims, dirs
def get_positive_values(x):
    """Return a list of values v from x where v > 0.

    Fix: the original returned at the FIRST non-positive value, silently
    dropping any positive values after it — contradicting its own docstring,
    which promises a filter over all of x.
    """
    return [v for v in x if v > 0]
def JD_to_MJD(JD):
    """Convert Julian Day (JD) to Modified Julian Day (MJD = JD - 2400000.5)."""
    mjd_offset = 2400000.5
    return JD - mjd_offset
import torch
def log_sum_exp(inputs, keepdim=False, mask=None):
    """Numerically stable logsumexp on the last dim of `inputs`.
    reference: https://github.com/pytorch/pytorch/issues/2591
    Args:
        inputs: A Variable with any shape.
        keepdim: A boolean.
        mask: A mask variable of type float. It has the same shape as `inputs`.
            Positions where mask == 0 are excluded from the logsumexp.
    Returns:
        Equivalent of log(sum(exp(inputs), keepdim=keepdim)).
    """
    if mask is not None:
        mask = 1. - mask  # invert: 1 now marks the EXCLUDED positions
        # push excluded entries far down so they cannot win the max below
        max_offset = -1e7 * mask
    else:
        max_offset = 0.
    # subtract the per-row max for numerical stability
    s, _ = torch.max(inputs + max_offset, dim=-1, keepdim=True)
    inputs_offset = inputs - s
    if mask is not None:
        # NOTE(review): using a .byte() tensor as a fill mask is deprecated
        # in newer PyTorch (.bool() is the modern spelling) — confirm the
        # torch version this targets. Also note masked_fill_ is in-place.
        inputs_offset.masked_fill_(mask.byte(), -float('inf'))
    outputs = s + inputs_offset.exp().sum(dim=-1, keepdim=True).log()
    if not keepdim:
        outputs = outputs.squeeze(-1)
    return outputs | f4e417e6f1f408abf243b21ad644c736aed61343 | 40,189 |
def model_netradiationequivalentevaporation(lambdaV = 2.454,
         netRadiation = 1.566):
    """
     - Name: NetRadiationEquivalentEvaporation -Version: 1.0, -Time step: 1
     - Description:
                 * Title: NetRadiationEquivalentEvaporation Model
                 * Author: Pierre Martre
                 * Reference: Modelling energy balance in the wheat crop model SiriusQuality2:
             Evapotranspiration and canopy and soil temperature calculations
                 * Institution: INRA/LEPSE Montpellier
                 * Abstract: It is given by dividing net radiation by latent heat of vaporization of water
     - inputs:
                 * name: lambdaV
                               ** parametercategory : constant
                               ** min : 0
                               ** datatype : DOUBLE
                               ** max : 10
                               ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
                               ** default : 2.454
                               ** inputtype : parameter
                               ** unit : MJ kg-1
                               ** description : latent heat of vaporization of water
                 * name: netRadiation
                               ** min : 0
                               ** default : 1.566
                               ** max : 5000
                               ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
                               ** variablecategory : state
                               ** datatype : DOUBLE
                               ** inputtype : variable
                               ** unit : MJ m-2 d-1
                               ** description : net radiation
     - outputs:
                 * name: netRadiationEquivalentEvaporation
                               ** min : 0
                               ** variablecategory : auxiliary
                               ** max : 5000
                               ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
                               ** datatype : DOUBLE
                               ** unit : g m-2 d-1
                               ** description : net Radiation in Equivalent Evaporation
    """
    # MJ m-2 d-1 divided by MJ kg-1 gives kg m-2 d-1; x1000 converts kg to g
    netRadiationEquivalentEvaporation = netRadiation / lambdaV * 1000.0
    return netRadiationEquivalentEvaporation | ab1ab613bf9350a318b75e1a020663a0d5376dff | 40,190 |
def handle_extension(extensions, f):
    """
    Build a decoder handler for a space-separated list of extensions.

    Extensions may contain dots, in which case that many trailing key
    components must match. Comparisons are case insensitive.

    Examples:
        handle_extension("jpg jpeg", my_decode_jpg)  # any file.jpg
        handle_extension("seg.jpg", special_case_jpg)  # only file.seg.jpg
    """
    targets = [ext.split(".") for ext in extensions.lower().split()]

    def dispatch(key, data):
        parts = key.lower().split(".")
        for target in targets:
            if len(target) > len(parts):
                continue
            # match the trailing components of the key against the target
            if parts[-len(target):] == target:
                return f(data)
        return None

    return dispatch
def IsProcessAlive(pid, ppid=None):
    """Return True if the named process is alive and not a zombie.

    A PPID (parent PID) can be provided to be more specific about which
    process is being watched: if a process with the same PID exists but its
    PPID differs, it is assumed to be a different, newly started process and
    False is returned.

    Args:
        pid: process PID to check (looked up via /proc).
        ppid: optional expected parent PID; mismatch returns False.
    """
    try:
        with open('/proc/%d/stat' % pid) as stat_file:
            fields = stat_file.readline().split()
        if ppid is not None and int(fields[3]) != ppid:
            return False
        # field 2 is the process state; 'Z' marks a zombie
        return fields[2] != 'Z'
    except IOError:
        # no such /proc entry (or no procfs): treat as not alive
        return False
def selectivity_formula(counts):
    """Return the selectivity statistic.

    counts: dict of counts, containing at least TP, FP, and FP_COMP.
    Returns 0.0 when TP is zero and FP equals FP_COMP (degenerate case).
    """
    tp = counts['TP']
    fp = counts['FP']
    fp_comp = counts['FP_COMP']
    degenerate = not tp and fp == fp_comp
    if degenerate:
        return 0.0
    return tp / (tp + (fp - fp_comp))
def average_confidence(model):
    """Average confidence of the predictions obtained from running the
    training data through the model (count-weighted mean).

    Raises:
        AttributeError: for boosting models, where this is unavailable.

    Fix: despite its name and doc, the original returned the count-weighted
    SUM of confidences without dividing by the total count; the result is
    now divided by `total` to yield an actual average.
    """
    if model.boosting:
        raise AttributeError("This method is not available for boosting"
                             " models.")
    total = 0.0
    cumulative_confidence = 0
    groups = model.group_prediction()
    for _, predictions in list(groups.items()):
        for _, count, confidence in predictions['details']:
            cumulative_confidence += count * confidence
            total += count
    return float('nan') if total == 0.0 else cumulative_confidence / total
def des_deep_exposure_time(bands=''):
    """
    Sample from the DES deep field exposure time distribution.

    :param bands: comma-separated band letters, e.g. 'g,r,i'
    :return: list of exposure times, one per requested band

    Fix: the default (empty) `bands` previously raised KeyError('') because
    ''.split(',') yields ['']; an empty request now returns an empty list.
    """
    if not bands:
        return []
    # NOTE(review): original comment said these are the SHALLOW exposure
    # times despite the "deep field" doc — confirm the intended values.
    dist = {'g': 175, 'r': 150, 'i': 200, 'z': 400}
    return [dist[b] for b in bands.split(',')]
def forecast_lstm(model, batch_size, X):
    """
    Make a one-step forecast.

    :param model: model exposing .predict(X, batch_size=...)
    :param batch_size: batch size passed through to predict
    :param X: 1-D feature vector; reshaped to (1, 1, len(X)) for the model
    :return: the scalar prediction yhat[0, 0]
    """
    features = X.reshape(1, 1, len(X))
    prediction = model.predict(features, batch_size=batch_size)
    return prediction[0, 0]
import os
def name_file(file_csv):
    """Return the base name of a path without its extension.

    Example: name_file('Downloads/myfile.txt') -> 'myfile'
    """
    stem, _extension = os.path.splitext(os.path.basename(file_csv))
    return stem
import subprocess
def is_bigpicture(windowid):
    """Check the STEAM_BIGPICTURE X property of a window via `xprop`.

    :param windowid: X11 window id string passed to `xprop -id`
    :return: True when the property reads as CARDINAL value 1
    """
    # shells out to xprop; requires an X session and the xprop binary on PATH
    xprop_proc = subprocess.run(
        ['xprop', '-id', windowid, 'STEAM_BIGPICTURE'], stdout=subprocess.PIPE)
    return bool("STEAM_BIGPICTURE(CARDINAL) = 1" in xprop_proc.stdout.decode("utf-8")) | 6755a213e490f614123907e7babfe1772898f333 | 40,203 |
def ntime_func(dt, t_e):
    """Merit function for choosing the best exposure time.

    Sums, over all steps in dt, the distance of dt[j]/t_e from the nearest
    integer (so it is weighted by the number of objects in each step).

    :return: float merit value (smaller is better)
    """
    merit = 0
    for step in dt:
        ratio = step / t_e
        merit += abs(ratio - round(ratio))
    return merit
def parser(word, objconf, skip=False, **kwargs):
    """Extract a substring of the pipe content.

    Args:
        word (str): the string to slice
        objconf (obj): configuration with `start` and `length` attributes
        skip (bool): when True, return the original stream unparsed
        kwargs (dict): keyword arguments

    Kwargs:
        stream (dict): the original item (returned when skip is True)

    Returns:
        The substring word[start:start+length] (to the end when length is
        falsy), or kwargs['stream'] when skip is set.
    """
    stop = objconf.start + objconf.length if objconf.length else None
    if skip:
        return kwargs['stream']
    return word[objconf.start:stop]
def get_link_for_post(page):
    """
    Search page source for a link to POST data to.

    Stage 1 prefers a form declaring application/x-www-form-urlencoded;
    stage 2 falls back to any method="post" form.

    :param page: page source to search
    :return: the form's action URL, or None
    """
    def pick_action(line):
        # find an action="..." token and strip the 'action="' / '"' wrapper
        for token in line.strip().split(" "):
            if 'action' in token and len(token) > 10:
                return token.split(">")[0][8:-1]
        return None

    text = str(page).replace("\\n", "\n")
    lines = text.strip().split('\n')
    # stage 1 - search for form-urlencoded
    for line in lines:
        if 'application/x-www-form-urlencoded' in line:
            action = pick_action(line)
            if action is not None:
                return action
    # stage 2 - search for any post method
    for line in lines:
        if 'method="post"' in line.lower():
            action = pick_action(line)
            if action is not None:
                return action
    return None
def recursive_check(task, attr="rerun"):
    """Check whether a task, or any task in its recursive dependency tree,
    has the given attribute set to a truthy value."""
    found = getattr(task, attr, False)
    if found:
        return found
    for dependency in task.deps():
        found = getattr(dependency, attr, False) or recursive_check(dependency, attr)
        if found:
            return found
    return found
def interp_n2(t, x, y):
    """
    Linear interpolation for N-by-2 value arrays.

    Parameters
    ----------
    t : float
        Point at which the interpolation is evaluated.
    x : 1-d array with two values
        x-axis sample positions.
    y : 2-d array, N-by-2
        Values sampled at x[0] (column 0) and x[1] (column 1).

    Returns
    -------
    N-by-1 array
        Interpolated values at `t`.
    """
    base = y[:, 0]
    span = y[:, 1] - y[:, 0]
    # same operation order as classic lerp: base + (t - x0) * span / dx
    return base + (t - x[0]) * span / (x[1] - x[0])
def clean_up(entry, replace_newlines=True):
    """Clean up a field value.

    None passes through unchanged; lists are cleaned element-wise
    (recursively, with default options); strings are stripped of
    leading/trailing whitespace and, optionally, internal newlines
    are replaced with vertical bars.

    Limitations: the replace_newlines option only applies to string
    values, not to the elements of lists.

    Arguments:
        entry (str or list or None): value to be cleaned up
        replace_newlines (boolean, optional): replace newlines in entries
    Returns:
        (str or list or None): cleaned-up value
    """
    if entry is None:
        # empty field, read as None
        return None
    if type(entry) is list:
        # entries in trailing columns aggregated into a list: recurse
        return [clean_up(item) for item in entry]
    result = entry.strip()
    if replace_newlines:
        result = result.replace("\n", " | ")
    return result
def flat_multi(multidict):
    """
    Flatten any single-element list values in a multidict.

    Args:
        multidict: multidict (or plain dict) to be flattened.
    Returns:
        dict: values that were one-element lists are unwrapped; everything
        else is kept as-is.
    """
    flat = {}
    for key, values in multidict.items():
        # isinstance replaces the exact-type check, so list subclasses
        # (common in multidict implementations) are flattened too
        if isinstance(values, list) and len(values) == 1:
            flat[key] = values[0]
        else:
            flat[key] = values
    return flat
import struct
import hashlib
def PBEWithMD5AndTripleDES_derivation(password, salt, iterations):
"""Compute a key and iv from a password and salt according to PBEWithMD5AndTripleDES"""
# Derivate the key and iv from the password and salt, in a weird way
assert len(salt) == 8
salt_halves = [salt[:4], salt[4:]]
if salt_halves[0] == salt_halves[1]:
# Invert the first half of the salt, with a typo in the algorithm
# for (i=0; i<2; i++) {
# byte tmp = salt[i];
# salt[i] = salt[3-i];
# salt[3-1] = tmp; // <-- typo '1' instead of 'i'
# }
s0, s1, s2, s3 = struct.unpack('BBBB', salt_halves[0])
salt_halves[0] = struct.pack('BBBB', s3, s0, s1, s3)
password_bytes = password.encode('ascii')
key_and_iv = b''
for salt_half in salt_halves:
last_value = salt_half
for _ in range(iterations):
last_value = hashlib.md5(last_value + password_bytes).digest()
key_and_iv += last_value
assert len(key_and_iv) == 32
key = key_and_iv[:24]
iv = key_and_iv[24:]
return key, iv | 88f52c94aba562d2c30a5427c89bdd8cc273eb4a | 40,214 |
def magic_index(a):
    """Return the first index i with a[i] == i, or None if there is none.

    Linear O(n) scan.
    """
    for position, value in enumerate(a):
        if position == value:
            return position
    return None
import os
import glob
def prepare_data(dataset="Train",Input_img=""):
    """
    Collect image file paths for the chosen dataset.

    Args:
        dataset: "Train" selects ./Train/*.png under the current directory;
            any other value selects test data.
            For the train dataset, output looks like
            ['.../t1.png', '.../t2.png', ..., 't99.png'].
        Input_img: in test mode, a non-empty value selects that single image
            path (relative to the current working directory) instead of the
            default ./<dataset>/Set5/*.png glob.
    Returns:
        list of image file paths
    """
    if dataset == "Train":
        data_dir = os.path.join(os.getcwd(), dataset) # Join the Train dir to current directory
        data = glob.glob(os.path.join(data_dir, "*.png")) # "*.bmp")) # make set of all dataset file path
    else:
        if Input_img !="":
            data = [os.path.join(os.getcwd(),Input_img)]
        else:
            # default test set: ./<dataset>/Set5/*.png
            data_dir = os.path.join(os.path.join(os.getcwd(), dataset), "Set5")
            data = glob.glob(os.path.join(data_dir, "*.png")) #"*.bmp")) # make set of all dataset file path
    return data | 26bdc5d01e28e2f7727b712c3d88d1bc4235db50 | 40,217 |
def add(a: int, b, c=5, d=7., e=None):
    """Some cool addition: sum all five terms.

    Parameters
    ----------
    b : int
        This is the second complicated parameter
        super complicated
    e : int, optional
        Treated as 0 when omitted.
    """
    offset = 0 if e is None else e
    return a + b + c + d + offset
import pydoc
import logging
def load_by_path(path):
    """Load a function, module or class by dotted path.

    Args:
        path: dotted path such as "tf.nn.relu" ("tf" is rewritten to
            "tensorflow"); may be None or empty.
    Return:
        The located module/function/class, or None when path is empty,
        None, or fails to import.

    Fix: a None path previously crashed with AttributeError on `.strip()`
    before the `path is None` check could run; None is now handled first.
    """
    if path is None:
        return None
    path = path.strip()
    if path == '':
        return None
    components = path.split('.')
    if components[0] == 'tf':
        components[0] = 'tensorflow'
        path = '.'.join(components)
    try:
        return pydoc.locate(path)
    except pydoc.ErrorDuringImport:
        logging.error('load %s failed' % path)
        return None
import platform
import os
def manage_scan_directory():
    """
    Ensure a 'scanLogs' directory exists under the current working directory
    and return its path (with a trailing separator).

    :return: (str) The filepath of the scanLogs directory
    """
    # Checks the devices operating system and creates a formatted String
    if platform.system() == "Windows":
        scan_dir = "\\scanLogs\\"
    else:
        scan_dir = "/scanLogs/"
    # Checks if the directory already exists
    if 'scanLogs' not in os.listdir(os.getcwd()):
        # NOTE(review): os.path.join is called with a single, already
        # concatenated string — it works, but this is plain string
        # concatenation, not an actual path join.
        os.mkdir(os.path.join(os.getcwd() + scan_dir))
    # Returns the newly created directory
    return os.path.join(os.getcwd() + scan_dir) | 38b406e3d3f4cb2053d30710ef4cc09f9fd3110e | 40,221 |
def config_line(setting, value):
    """
    Generate a single configuration line from a setting and its value.

    Path-like settings (file/dir/socket names) have their value quoted
    via repr().

    Parameters
    ----------
    setting : str
        The configuration setting.
    value : str
        The value for the configuration setting.

    Returns
    -------
    str
        The rendered configuration line.
    """
    quoted_settings = (
        'appendfilename', 'dbfilename', 'dbdir', 'dir', 'pidfile', 'unixsocket'
    )
    if setting in quoted_settings:
        value = repr(value)
    return f'{setting} {value}'
def analysis_dataset_config_for_message(analysis_dataset_configs, message):
    """
    Get the analysis dataset configuration for a message, by finding the
    configuration whose engagement-db datasets include the message's
    `dataset` property.

    :param analysis_dataset_configs: configurations to search.
    :type analysis_dataset_configs: list of src.engagement_db_to_analysis.configuration.AnalysisDatasetConfiguration
    :param message: message to look up.
    :type message: engagement_database.data_models.Message
    :return: matching analysis dataset configuration.
    :rtype: src.engagement_db_to_analysis.configuration.AnalysisDatasetConfiguration
    :raises ValueError: when no configuration covers the message's dataset.
    """
    for config in analysis_dataset_configs:
        if message.dataset in config.engagement_db_datasets:
            return config
    # Fix: the original adjacent f-strings rendered "...engagementdb dataset"
    # (missing space between "engagement" and "db") in the error message.
    raise ValueError(f"No analysis dataset configuration found for message '{message.message_id}', which has "
                     f"engagement db dataset {message.dataset}")
def nxz(PAxz, PBxz, Npulse, P_times_Dj):
    """
    Number of events in the X or Z sifted basis per pulse intensity per
    time slot: nx[j,t] or nz[j,t]; j = {1:3}, t = {1:Nt}.

    Parameters
    ----------
    PAxz : float
        Probability of Alice preparing a state in the X/Z basis.
    PBxz : float
        Probability of Bob measuring a state in the X/Z basis.
    Npulse : integer/float
        Number of pulses sent by Alice.
    P_times_Dj : float, array
        Element-wise product of the intensity probability array P with the
        expected detection rate per time slot array Dj.

    Returns
    -------
    float, array
        The number of events in the sifted X/Z basis.
    """
    sift_probability = PAxz * PBxz
    return sift_probability * Npulse * P_times_Dj
import math
def gamma_0(m, rho):
    """
    See p124, immediately after Eq 2.16.

    :param m: int
        number of servers
    :param rho: float
        lambda / (mu * m)
    :return: float
    """
    cap = 0.24
    correction = (1 - rho) * (m - 1) * (math.sqrt(4 + 5 * m) - 2) / (16 * m * rho)
    return min(cap, correction)
def get_Q(Q_dash):
    """Heat loss coefficient (W/m2K).

    Args:
        Q_dash (float): heat loss coefficient excluding ventilation losses.
    Returns:
        float: heat loss coefficient including the fixed ventilation term.
    """
    ventilation_term = 0.35 * 0.5 * 2.4
    return Q_dash + ventilation_term
from datetime import datetime
import os
def file_creation_date(file):
    """Return the file's timestamp formatted as YYYY-MM-DD-HH.MM.SS.

    NOTE(review): despite the name and the original "creation date" doc,
    this reads os.path.getmtime — the MODIFICATION time — so on most
    platforms it is not the creation time.
    """
    return datetime.fromtimestamp(
        int(os.path.getmtime(file))).strftime('%Y-%m-%d-%H.%M.%S') | 27413ece58e03c9f26a05443ac1ac264d78e31d0 | 40,232 |
def convert_quat_wxyz_to_xyzw(quat):
    """Reorder quaternion components from (w, x, y, z) to (x, y, z, w)
    along the last axis (works on batched arrays)."""
    xyzw_order = [1, 2, 3, 0]
    return quat[..., xyzw_order]
def yaml(path, _pod):
    """Retrieve a yaml file from the pod (delegates to `_pod.read_yaml`).

    NOTE(review): this function shadows any imported `yaml` module at its
    call sites — confirm that is intentional.
    """
    return _pod.read_yaml(path) | ca610b1b916a83a535e578e3b6f102c36f0ed4af | 40,234 |
def repl_func(matchobj, group_selector: str) -> str:
    """Pick the selected group from a match object; empty overall matches
    yield the empty string."""
    if matchobj.group(0) == "":
        return ""
    return matchobj.group(group_selector)
def ParseNinjaDepsOutput(ninja_out, out_dir, skip_obj):
    """Parse ninja deps output into a {header: [object files]} mapping.

    Target lines name an object file and whether its deps are (VALID);
    indented lines list that target's dependency paths. Only .h/.hh deps of
    valid targets are kept; build/ (build_config.h, buildflag.h, system
    headers) and output-directory paths are skipped.
    """
    all_headers = {}
    # Ninja always uses "/", even on Windows.
    source_prefix = '../../'
    current_obj = ''
    current_valid = False
    for line in ninja_out:
        if not line.startswith(' '):
            # target line: "<obj>: ... (VALID|STALE)"
            current_valid = line.endswith('(VALID)')
            current_obj = line.split(':')[0]
            continue
        if not current_valid:
            continue
        if not (line.endswith('.h') or line.endswith('.hh')):
            continue
        path = line.strip()
        if path.startswith(source_prefix):
            path = path[6:]  # drop the '../../' prefix
        # skip generated headers under the output directory
        if path.startswith(out_dir) or path.startswith('out'):
            continue
        if path.startswith('build'):
            continue
        all_headers.setdefault(path, [])
        if not skip_obj:
            all_headers[path].append(current_obj)
    return all_headers
import time
def test_args(a, b= '2'):
    """ Dummy *args test function: sleeps a/2 seconds, then returns
    str(a) + str(b). """
    time.sleep(a/2)
    return str(a) + str(b) | 3d88b7302c5a9bf6dc05dec231126fdfdb59546c | 40,237 |
def snake_case_to_title(string):
    """Convert a snake_cased name into Title Case words.

    :param str string: a string encoded using snake case
    :return: the converted string
    """
    spaced = string.replace('_', ' ')
    return spaced.title()
def 取最大数(数值列表):
    """Return the largest number in the given sequence.

    :param 数值列表: sequence of comparable values, e.g. (1, 2, 3)
    :return: the maximum, e.g. 3
    """
    largest = max(数值列表)
    return largest
def q(ser, cmd, wait=True):
    """Send a query over the serial port.

    Flushes pending input, writes the encoded command, and (when `wait`)
    returns the decoded reply line; otherwise returns None.
    """
    ser.flushInput()
    ser.write(cmd.encode())
    if not wait:
        return None
    return ser.readline().decode()
import os
def is_hardlinked_replica(dir1: str, dir2: str) -> bool:
    """Return True when both trees have the same layout and hard-linked files.

    Walks both directories in lockstep; file and directory names must match
    at every level, and corresponding files must share the same inode.
    Assumes there are no traversal gaps within the hierarchy.
    """
    for (root1, dirs1, files1), (root2, dirs2, files2) in zip(os.walk(dir1),
                                                              os.walk(dir2)):
        files1.sort()
        files2.sort()
        if files1 != files2:
            return False
        # Sorting dirs in place also pins the traversal order for topdown
        # walks; otherwise it would depend on the file system.
        dirs1.sort()
        dirs2.sort()
        if dirs1 != dirs2:
            return False
        for name1, name2 in zip(files1, files2):
            inode1 = os.lstat(os.path.join(root1, name1)).st_ino
            inode2 = os.lstat(os.path.join(root2, name2)).st_ino
            if inode1 != inode2:
                return False
    return True
def _(n):
"""Single dispatch for Integrals."""
return f'{type(n)} numbers. Integral single dispatch.' | ceb66935608801b3204b1b31cba77b83ae4ea921 | 40,246 |
def ucc(graph):
    """Identify connected components in an undirected graph.

    Args:
        graph: adjacency mapping {vertex: iterable of neighbour vertices}.

    Returns:
        dict mapping a 1-based component number to the list of its vertices
        in BFS discovery order, e.g.
        {1: ['A', 'B', 'C', 'E', 'D', 'F', 'G'], 2: ['H', 'I', 'J'], 3: ['K']}
    """
    from collections import deque  # local import keeps module deps unchanged

    # A set gives O(1) membership tests; the original used a list, making
    # the overall traversal quadratic in the number of vertices.
    visited = set()
    # Group each vertex with a component.
    components = {}
    # Component marker
    num_cc = 0
    for v in graph:
        if v in visited:
            continue
        num_cc += 1
        components[num_cc] = [v]
        visited.add(v)
        # BFS; deque.popleft() is O(1) whereas list.pop(0) is O(n).
        q = deque([v])
        while q:
            current = q.popleft()
            for a in graph[current]:
                if a not in visited:
                    visited.add(a)
                    q.append(a)
                    components[num_cc].append(a)
    return components
import datetime
def epoch_to_datetime(value):
    """
    Convert an epoch (Unix) timestamp to a human-readable string.

    Note: despite the name, this returns a locale-dependent '%c'-formatted
    string in the local timezone, not a ``datetime`` object.

    :param value: seconds since the Unix epoch
    :return: formatted local date/time string, e.g. 'Mon Jan  2 03:04:05 2006'
    """
    # return datetime.datetime.fromtimestamp(value)
    return datetime.datetime.fromtimestamp(value).strftime('%c')
import random
def random_number(bits):
    """Return a random integer with a bit length of exactly *bits*.

    The result is drawn uniformly from [2**(bits-1), 2**bits - 1], i.e. the
    most significant bit is always set, so the value "cleanly fits" the
    requested width.

    :param bits: desired bit length (must be >= 1)
    :return: random integer whose bit_length() equals *bits*
    """
    # Renamed from max/min: the original shadowed the builtins.
    upper = 2 ** bits - 1
    lower = 2 ** (bits - 1)
    return random.randint(lower, upper)
def check_ALL_DS(DS_ES_X_Map):
    """Check that an ES used with the special DS "ALL" is used with no other DS.

    Args:
        DS_ES_X_Map: iterable of rows where row[0] is the DS name and
            row[1] is the ES name.

    Returns:
        bool: True if no ES paired with "ALL" also appears with another DS.
    """
    ES_with_ALL = {row[1] for row in DS_ES_X_Map if row[0] == "ALL"}
    # Bug fix: the original comprehension never compared ES names
    # (missing "row[1] == ES"), so it reported a violation whenever any
    # non-"ALL" row existed at all.
    violations = [row[1] for row in DS_ES_X_Map
                  if row[0] != "ALL" and row[1] in ES_with_ALL]
    return len(violations) == 0
def count_chars(text):
    """Return a dict mapping each character of *text* to its occurrence count.

    Example, for the string "Ana are mere.":
    {'A': 1, ' ': 2, 'n': 1, 'a': 2, 'r': 2, 'e': 3, 'm': 1, '.': 1}.
    """
    return {char: text.count(char) for char in set(text)}
def default_zero(input):
    """Round *input* to 2 decimal places, treating any falsy value
    (None, empty string, 0, ...) as 0.0.
    """
    # NOTE: the parameter name shadows the builtin input(); kept for
    # backward compatibility with keyword callers.
    return round(input or 0.0, 2)
def get_opacities(opacity):
    """
    Provide defaults for all supported opacity settings.

    :param opacity: either a dict of per-key opacities (missing keys are
        filled in from the defaults, mutating and returning that dict),
        or a single number used for every key, or None for all defaults.
    :return: dict with one opacity per supported setting.
    """
    defaults = {
        'wireframe' : 0.05,
        'scalar_cut_plane' : 0.5,
        'vector_cut_plane' : 0.5,
        'surface' : 1.0,
        'iso_surface' : 0.3,
        'arrows_surface' : 0.3,
        'glyphs' : 1.0
    }
    if isinstance(opacity, dict):
        # Fill in any missing settings in the caller's dict.
        opacities = opacity
        for key, value in defaults.items():
            opacities.setdefault(key, value)
    else:
        # A scalar (or None) applies uniformly to every setting.
        uniform = opacity
        opacities = {}
        for key, value in defaults.items():
            opacities[key] = value if uniform is None else uniform
    return opacities
def unquote_colors(context):
    """
    URL-unquote hex color values in *context* in place
    ("%23RRGGBB" -> "#RRGGBB") and return the same dict.
    """
    for key, value in context.items():
        # len check first: values are only inspected further when they look
        # like a quoted 6-digit hex color ("%23" + 6 chars == 9 chars).
        if len(value) == 9 and value.startswith("%23"):
            context[key] = "#" + value[3:]
    return context
def split_name(name):
    """
    Split a name into two names. If there is only one name, the last name
    will be empty. If there are more than two, the extra names are kept in
    the last name.

    Args:
        name (str): A name to split into first name, last name
    Returns:
        tuple: (first, last)
    """
    if name is None:
        return "", ""
    parts = name.split(maxsplit=1)
    if not parts:
        return "", ""
    first = parts[0]
    last = parts[1] if len(parts) > 1 else ""
    return first, last
import os
import json
def save_radarPass_object_json(radar_pass, data_path):
    """Save a radarPass instance as JSON using a standard filename format.

    The file is written under ``data_path/radar_passes/`` and named after the
    middle cloudsat timestamp of the pass.

    :param radar_pass: radarPass instance exposing ``cloudsat['timestamp']``
        and ``get_json_serializable_obj()``.
    :param data_path: base directory containing the 'radar_passes' folder.
    :return: the timestamp string used in the filename ('%Y%m%d_%H%M%S').
    """
    timestamps = radar_pass.cloudsat['timestamp']
    pass_timestamp = timestamps[len(timestamps) // 2].strftime('%Y%m%d_%H%M%S')
    radar_dict = radar_pass.get_json_serializable_obj()
    # Timestamps are not JSON-serializable; store them as formatted strings.
    radar_dict['timestamp'] = [
        tstamp.strftime('%Y%m%d_%H%M%S.%f') for tstamp in radar_dict['timestamp']
    ]
    fname = 'radarPass_plot_' + pass_timestamp + '.json'
    with open(os.path.join(data_path, 'radar_passes', fname), 'w') as f:
        json.dump(radar_dict, f)
    return pass_timestamp
def _GetAllBuildersInConfig(trybot_config):
"""Returns a list of all variable builders referenced in config."""
all_config_builders = set()
for builders in trybot_config.itervalues():
for builder in builders.itervalues():
waterfall_builder = builder.get('waterfall_trybot')
flake_builder = builder.get('flake_trybot')
all_config_builders.add(waterfall_builder)
all_config_builders.add(flake_builder)
return list(all_config_builders) | 1796a221aebcb9724568e49bde781bc568f867a1 | 40,262 |
def filter_process_by_name(name):
    """Return True when *name* is a process name that should be filtered out."""
    return name in ("cp", "tee", "date", "Null", "recon-all")
def net_longwave_up(tC_water, total_longwave_down):
    """
    Description:

        Calculates the net upward longwave radiation flux. Note, this
        routine by itself does not calculate the related L2 data product
        NETLIRR, which specifically requires input sea surface skin
        temperatures corrected for warmlayer and coolskin effects.

        With eps the blackbody emissivity and (1 - eps) the IR reflection
        coefficient (loose application of Kirchhoff's law):

            Rnl(net upward) = eps*sigma*T^4 + (1-eps)*IR - IR
                            = eps * (sigma*T^4 - IR)

    Usage:

        Rnl = net_longwave_up(tC_water, total_longwave_down)

            where

        Rnl = net longwave radiation [W/m^2]; positive upward
        tC_water = water temperature [deg_C]
        total_longwave_down = measured downward longwave radiation
            (LONGIRR_L1) [W/m^2]; positive downward.

    References:

        OOI (2014). Data Product Specification for L2 BULKFLX Data Products.
            Document Control Number 1341-00370.
            https://alfresco.oceanobservatories.org/ (See: Company Home >>
            OOI >> Controlled >> 1000 System Level >>
            1341-00370_Data_Product_Spec_BULKFLX_OOI.pdf)

    Implemented by:

        2014-09-01: Russell Desiderio. Initial code.
    """
    STEFAN_BOLTZMANN = 5.67e-8   # [W/(m^2 K^4)]
    EMISSIVITY = 0.97            # blackbody emissivity of the sea surface
    KELVIN_OFFSET = 273.15       # degC to kelvin conversion constant
    tK_water = tC_water + KELVIN_OFFSET
    return EMISSIVITY * (STEFAN_BOLTZMANN * tK_water ** 4 - total_longwave_down)
import os
def dropexts(filename, sep = os.path.sep, extsep = os.path.extsep):
    """Remove all chained extensions from a file name.

    Everything from the first extension separator in the final path
    component onward is dropped.

    >>> dropexts("/home/marcin.jan/ala.ma.kota.txt")
    '/home/marcin.jan/ala'
    >>> dropexts("/home/marcin.jan/ala")
    '/home/marcin.jan/ala'
    """
    basename = filename.rsplit(sep, 1)[-1]
    dot = basename.find(extsep)
    if dot < 0:
        return filename
    # Index of the first dot of the basename within the full path.
    # NOTE(review): a leading-dot file like ".bashrc" loses its whole
    # basename here — presumably out of scope; confirm with callers.
    return filename[:len(filename) - len(basename) + dot]
def insertIndexes(a, b):
    """Insert 0 into list *a* at each index listed in *b*, in order.

    Note: earlier insertions shift later elements, so indexes in *b* refer
    to the list as already modified by preceding insertions.
    Returns an empty tuple (kept for backward compatibility).
    """
    for idx in b:
        a.insert(idx, 0)
    return ()
import time
def _CalculatePastDate(days_ago, now=None):
"""Calculates the timestamp N days ago from now."""
if now is None:
now = int(time.time())
ts = now - days_ago * 24 * 60 * 60
return ts | 1a0affad807e1380dbb61a093cbc956dd3e86107 | 40,267 |
def assert_uniqueness_clause(property: str, node: str = 'node') -> str:
    """
    Return the *part* of a Neo4j statement that asserts a node property
    is unique.

    Parameters
    ----------
    property : str
        Name of the meant-to-be-unique property.
    node : str, optional
        Name of the node variable (bound by the surrounding statement).

    Returns
    -------
    out: str
        Neo4j statement fragment.
    """
    return "ASSERT {}.`{}` IS UNIQUE".format(node, property)
def findLinker(seq, linker):
    """
    Match the linker in the read sequence.

    Scans *seq* for the first 9-mer that is a prefix of *linker*.

    :param seq: read sequence to scan
    :param linker: linker sequence to match against
    :return: 0-based position of the first matching 9-mer, or -1 if none
    """
    SEED_LEN = 9
    # range(len(seq) - SEED_LEN + 1): the original used range(len(seq) - 9),
    # an off-by-one that never tested the final full-length window (and
    # could never match when len(seq) == 9).
    for i in range(len(seq) - SEED_LEN + 1):
        if linker.startswith(seq[i:i + SEED_LEN]):
            return i
    return -1
import re
def get_labels(labels_args):
    """
    Parse a comma-separated "name=value" list into a dict.

    Args:
        labels_args: string such as "a=1,b=2"; falsy input returns None.
            Entries without '=' are ignored.

    Returns:
        dict mapping label names to values, or None for empty input.
    """
    if not labels_args:
        return None
    labels = {}
    for label in labels_args.split(','):
        if re.search('=', label):
            # maxsplit=1: the original bare split('=') raised ValueError
            # ("too many values to unpack") when a value contained '='.
            (name, value) = label.split('=', 1)
            labels[name] = value
    return labels
def split_lengths_for_ratios(nitems, *ratios):
    """Return the sizes of the splits obtained when dividing *nitems* items
    by the given ratios.

    Each ratio is truncated to an integer count; any shortfall from the
    truncation is distributed one item at a time starting from the last
    split, working backwards.
    """
    lengths = [int(nitems * ratio) for ratio in ratios]
    offset = 1
    while sum(lengths) != nitems and offset < len(ratios):
        lengths[-offset] += 1
        offset += 1
    assert sum(lengths) == nitems, f'{sum(lengths)} != {nitems}\n{ratios}'
    return lengths
def doc_delete(col, cond):
    """
    Delete a single document matching *cond* from collection *col*.

    Only uses delete_one().

    :param col: Collection object
    :param cond: query/filter condition
    :return: tuple ("Deleted:", number of documents removed)
    """
    outcome = col.delete_one(cond)
    return "Deleted:", outcome.deleted_count
def filter_corpus_category(corpus, category):
    """Return only the corrections of a given category from the full corpus.

    Args:
        corpus (dict): corpus dictionary, loaded with 'load_enelvo_format_full'.
        category (str): selected category.

    Returns:
        list: (noisy_word, correction) tuples whose error category equals
        ``category``.
    """
    return [
        (err["word"], err["corr"])
        for entry in corpus
        for err in corpus[entry]["errs"]
        if err["cat"] == category
    ]
def to_date_obj(date):
    """Transform a datetime into a plain dict of its components.

    ``tzinfo`` is stored as its string representation (e.g. 'None' for
    naive datetimes).
    """
    return {
        'year': date.year,
        'month': date.month,
        'day': date.day,
        'hour': date.hour,
        'minute': date.minute,
        'second': date.second,
        'microsecond': date.microsecond,
        'tzinfo': str(date.tzinfo),
    }
def read(name):
    """
    Read file in local current working directory and return the contents

    :param name: The name of the file
    :type name: string
    :returns: string -- Contents of file
    """
    # Context manager closes the handle even on error; the original
    # leaked the file object returned by open().
    with open(name) as f:
        return f.read()
def get_range_score(weighted_node, biz_count, proj_count):
    """
    Compute range_score and normalized_range_score (0~99.9).

    The raw score is the rounded sum of the inputs; the normalized score
    averages each input capped at its reference maximum (32, 4, 7), scaled
    to a percentage and clamped below 100.

    :param weighted_node: weighted node value
    :param biz_count: business count
    :param proj_count: project count
    :return: (range_score, normalized_range_score)
    """
    range_score = round(weighted_node + biz_count + proj_count, 2)
    node_part = min(weighted_node / 32.0, 1)
    biz_part = min(biz_count / 4.0, 1)
    proj_part = min(proj_count / 7.0, 1)
    normalized = round((node_part + biz_part + proj_part) / 3.0 * 100, 2)
    if normalized >= 100:
        normalized = 99.9
    return range_score, normalized
import re
def clean_documentation_for_cli(doc, cleandoc):
    """
    Cleans the documentation before integrating
    into a command line documentation.

    @param doc documentation
    @param cleandoc a string which tells how to clean ('epkg' or 'link'),
                    a function which takes the documentation and returns
                    a string, or a list/tuple of such cleaners applied
                    in order
    @return cleaned documentation string

    The function removes everything after ``.. cmdref::`` and ``.. cmdreflist``
    as it creates an infinite loop of processus if this command
    is part of the documentation of the command line itself.
    """
    # Truncate the text at the first occurrence of any of these Sphinx
    # directives (only the part before the directive is kept).
    for st in ('.. versionchanged::', '.. versionadded::',
               '.. cmdref::', '.. cmdreflist::'):
        if st in doc:
            doc = doc.split(st)[0]
    if isinstance(cleandoc, (list, tuple)):
        # Apply each cleaner in sequence, recursing once per cleaner.
        for cl in cleandoc:
            doc = clean_documentation_for_cli(doc, cl)
        return doc
    else:
        if isinstance(cleandoc, str):
            if cleandoc == 'epkg':
                # Replace ':epkg:`a:b:c`' roles by '`a.b.c`'
                # (colons inside the backticks become dots).
                reg = re.compile('(:epkg:(`[0-9a-zA-Z_:.*]+`))')
                fall = reg.findall(doc)
                for c in fall:
                    doc = doc.replace(c[0], c[1].replace(':', '.'))
                return doc
            elif cleandoc == 'link':
                # Replace reST hyperlinks '`text <url>`_' by the bare text,
                # also mapping ':' to '.' in the kept text.
                reg = re.compile('(`(.+?) <.+?>`_)')
                fall = reg.findall(doc)
                for c in fall:
                    doc = doc.replace(c[0], c[1].replace(':', '.'))
                return doc
            else:
                raise ValueError(  # pragma: no cover
                    "cleandoc='{0}' is not implemented, only 'epkg'.".format(cleandoc))
        elif callable(cleandoc):
            # Custom cleaner: delegate entirely to the callable.
            return cleandoc(doc)
        else:
            raise ValueError(  # pragma: no cover
                "cleandoc is not a string or a callable object but {0}".format(type(cleandoc)))
import json
def _dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""为simplejson增加datetime, date类型转换支持
datetime类型转换为时间戳
date类型转换为'%Y-%m-%d'格式的字符串
"""
return json.dumps(obj, skipkeys=skipkeys, ensure_ascii=ensure_ascii, check_circular=check_circular,
allow_nan=allow_nan, cls=cls, indent=indent, separators=separators,
encoding=encoding, **kw) | 1ffa1effeafb9623f99bdefe1bf308a1c5a629a7 | 40,281 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.