content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
def get_service_status(dut, service):
    """
    @summary: Query systemctl on the DUT for the ActiveState and SubState of a service.
    @param dut: The AnsibleHost object of DUT. For interacting with DUT.
    @param service: Service name.
    @return: Dict of the parsed "systemctl show" properties, for example:
        {"ActiveState": "active", "SubState": "running"}
    """
    lines = dut.command("systemctl -p ActiveState -p SubState show %s" % service)["stdout_lines"]
    # each property line looks like "Key=Value"; anything else is skipped
    split_lines = [line.split("=") for line in lines]
    return {fields[0]: fields[1] for fields in split_lines if len(fields) >= 2}
|
ee7bfa6f2ea5f8bc9314d7f2462992bde8c1e675
| 140,032
|
from pathlib import Path
def get_directory(infile):
    """
    Return the directory of *infile* as a string with a trailing '/'.
    """
    return str(Path(infile).parent) + '/'
|
5fce103b04a5024a371d29212501d5dae407f60f
| 492,119
|
def heat_flux_to_temperature(heat_flux: float, exposed_temperature: float = 293.15):
    """Return the surface temperature of an emitter for a given heat flux.

    Inverts the black-body radiation model E = eps * sigma * (T1**4 - T0**4).

    :param heat_flux: [W/m2] heat flux of emitter.
    :param exposed_temperature: [K] ambient/receiver temperature, 20 deg.C by default.
    :return: [K] calculated emitter temperature.
    """
    view_factor = 1.0  # radiation view factor
    stefan_boltzmann = 5.67e-8  # [W/m2/K4]
    t_fourth = heat_flux / stefan_boltzmann / view_factor + exposed_temperature ** 4
    return t_fourth ** 0.25
|
bb6fc94e8468859d135de264754a1b41ccf8048d
| 110,746
|
def get_http_host(request):
    """Return the scheme plus domain, e.g. ``http(s)://example.com``.

    The domain comes from ``request.get_host()``.
    Args:
        request (HttpRequest)
    Returns:
        str: "<scheme>://<host>".
    """
    scheme = 'https' if request.is_secure() else 'http'
    return '{}://{}'.format(scheme, request.get_host())
|
5891b638fc1192eab366980b893636a3354ed2d0
| 487,231
|
def flattenDict(dict):
    """
    Invert a dict of aggregated values: each member of a value list becomes
    a key, mapped to the aggregation key it belonged to.
    """
    inverted = {}
    for group, members in dict.items():
        for member in members:
            inverted[member] = group
    return inverted
|
dca39ff54b6026c119a7adcc2314c33cffc91158
| 118,295
|
def dict2tsv(condDict):
    """Convert a dict (values are 2-tuples of floats) into TSV format."""
    rows = [key + "\t" + "{%f, %f}" % condDict[key] + "\n" for key in condDict]
    return "".join(rows)
|
c73f8e3158ade699cc4589d541f05397f559d190
| 11,129
|
import hashlib
def calculate_hash(file_bytes):
    """Return the raw SHA-256 digest of the bytes provided."""
    return hashlib.sha256(file_bytes).digest()
|
be1a8cad6aa8a5748013a76e731d75dfd5af504e
| 150,831
|
import re
def regex_token_replace(sentence: str, token: str, replacement: str) -> str:
    """
    Replace all whole-word occurrences of a target token with its replacement.

    Lower-case, Capitalized and UPPER-CASE variants of the token are each
    replaced by the matching variant of the replacement.

    :param sentence: input sentence to be modified
    :param token: target token to be replaced (treated as literal text)
    :param replacement: replacement word for the target token
    :return: sentence with all occurrences of the target token substituted
    """
    variants = [
        (token, replacement),
        (token.capitalize(), replacement.capitalize()),
        (token.upper(), replacement.upper()),
    ]
    for target, sub in variants:
        # re.escape guards against tokens containing regex metacharacters
        # (e.g. "c++"); \b marks word boundaries so partial words are kept.
        pattern = re.compile(r"\b{}\b".format(re.escape(target)))
        sentence = pattern.sub(sub, sentence)
    return sentence
|
ac63e2a85be1b4f05b98be8b6f42d1eb8f334961
| 685,515
|
import re
def _scientific_notation(value, sci_lims, fmt):
    """Converts value to scientific notation.

    Parameters
    ----------
    value : float
        Value to process
    sci_lims : (int, int)
        Lower/upper decade limits; values with
        10**sci_lims[0] <= |value| < 10**sci_lims[1] are left unformatted.
        See description in :py:func:`write_result`
    fmt : 'txt', 'pdf', or 'csv'
        File format.

    Returns
    -------
    str
        The (possibly reformatted) value. NOTE(review): a *fmt* other than
        'txt', 'pdf' or 'csv' falls off the end and returns None implicitly
        — confirm callers never pass other formats.
    """
    # If the user specified to never use scientific notation, or the value
    # does not have a sufficiently high exponent, or the file type is a csv,
    # simply return the value unedited, as a string.
    if (sci_lims is None or
        (abs(value) < 10 ** sci_lims[1] and
        abs(value) >= 10 ** (sci_lims[0])) or
        fmt == 'csv'):
        return str(value)
    # Convert to scientific notation:
    # f'{value:e}' yields e.g. '1.234500e+07'; the replaces strip the '+0'
    # (or turn '-0' into '-') in the exponent, and the regex removes
    # trailing zeros in the mantissa, ie: 12345000e8 -> 12345e8
    value = re.sub(
        r'0+e', 'e', f'{value:e}'.replace('+0', '').replace('-0', '-'),
    )
    if fmt == 'txt':
        return value
    # For LaTeX, convert to \num{value}
    # This is a siunitx macro which takes a value of the form
    # 1234e5 and converts to 1234x10⁵
    elif fmt == 'pdf':
        return f'\\num{{{value}}}'
|
b46986f677e4f9b6e8ce76fac2f4314e10cabc4d
| 477,317
|
from typing import List
import re
def filter_paths(paths: List[str], filter_regex: str) -> List[str]:
    """
    Filter paths, keeping those that match the given regex.

    Args:
        paths: List of paths to be filtered.
        filter_regex: Regex used for filtering (matched with ``re.search``).

    Returns:
        List of paths for which the regex finds a match.
    """
    pattern = re.compile(filter_regex)
    # NOTE: compiled patterns are always truthy, so the original
    # "not pattern" escape hatch was dead code and has been removed.
    return [path for path in paths if pattern.search(path)]
|
f690b75689b6f712f0a25c26bea0eefa158bde2d
| 334,742
|
def is_completions_display_value(x):
    """Enumerated values of ``$COMPLETIONS_DISPLAY``"""
    valid_values = frozenset(("none", "single", "multi"))
    return x in valid_values
|
caddb44cbd13de654ae4108f794c37e53f9e89ce
| 359,785
|
def validate_reponse_with_serializer(serialized_obj, response):
    """
    Check that a JSON response matches an expected serialization.

    Returns True only when both mappings have the same number of keys and
    every key/value pair of the serialization is present in the response.
    """
    if len(response) != len(serialized_obj):
        return False
    return all(
        key in response and response[key] == serialized_obj[key]
        for key in serialized_obj
    )
|
e98ec2108e9f2fa048579070cf71bb41e0cb620c
| 129,579
|
def get_optime(mc):
    """Get the optime of the primary in the replica set.

    Changed in version 3.2: with protocolVersion 1, 'optime' is a document
    containing 'ts' (Timestamp of the last operation applied from the oplog)
    and 't' (the term of that operation); with protocolVersion 0, 'optime'
    is the Timestamp itself.
    Refer to https://docs.mongodb.com/manual/reference/command/replSetGetStatus/
    """
    status = mc['admin'].command({'replSetGetStatus': 1})
    if not status.get('members'):
        raise Exception('no member in replica set')
    for member in status['members']:
        if member.get('stateStr') == 'PRIMARY':
            primary_optime = member.get('optime')
            # MongoDB >= 3.2 wraps the timestamp in a document
            if isinstance(primary_optime, dict) and 'ts' in primary_optime:
                return primary_optime['ts']
            return primary_optime
    raise Exception('no primary in replica set')
|
77b53dfb54fcda155232d14711c97c11cab888de
| 148,465
|
def mutate(df, **kwargs):
    """
    Create new variables (columns) in the DataFrame from keyword argument
    pairs: each key names a new column and its value supplies the column
    values. Returns a new DataFrame; the input is left untouched.

    Args:
        df (pandas.DataFrame): data passed in through the pipe.
    Kwargs:
        **kwargs: keys are the names of the new columns, values indicate
            what the new column values will be.
    Example:
        diamonds >> mutate(x_plus_y=X.x + X.y) >> select_from('x') >> head(3)
           x     y     z  x_plus_y
        0  3.95  3.98  2.43  7.93
        1  3.89  3.84  2.31  7.73
        2  4.05  4.07  2.31  8.12
    """
    # DataFrame.assign already implements exactly this contract
    return df.assign(**kwargs)
|
874fec9cc6d756c57b9002fcb7c5c3131a29a9a8
| 67,588
|
def import_cytoband(file, target, delimiter='\t'):
    """
    Return the genomic region from the cytoband composite file for a given
    target, as {"chrom": ..., "start": ..., "end": ...} spanning all
    matching bands.

    Note: lines are split on whitespace; the *delimiter* argument is kept
    for interface compatibility but is not used.
    """
    matched_chroms = []
    matched_coords = []
    with open(file, 'r') as handle:
        for raw in handle:
            tokens = raw.rstrip().split()
            if len(tokens) != 6:
                continue
            chrom, start, end, _, _, key = tokens
            start, end = int(start), int(end)
            # handle ambiguous regions: substring match on the band key
            if target in key:
                matched_chroms.append(chrom)
                matched_coords.append(start)
                matched_coords.append(end)
    matched_coords.sort()
    return {"chrom": matched_chroms[0],
            "start": matched_coords[0],
            "end": matched_coords[-1]}
|
200a56dbe6f3a1c92d8a78ecfec7f7a319bfb1f6
| 278,701
|
def get_bmap(net, m, bmap):
    """
    Update the B-map with the invisible transitions enabling marking m.

    Parameters
    --------------
    net
        Petri net
    m
        Marking
    bmap
        B-map (cache keyed by marking)

    Returns
    --------------
    trans_list
        List of invisible transitions that enable m
    """
    if m not in bmap:
        enabling = []
        for trans in net.transitions:
            # invisible transitions carry no label
            if trans.label is None and m <= trans.out_marking:
                enabling.append(trans)
        bmap[m] = enabling
    return bmap[m]
|
8b2ad3c8ddf49872773f7877630bb7616c95936b
| 317,087
|
from typing import List
from typing import Tuple
def compare_triplets(a: List[int], b: List[int]) -> Tuple[int, ...]:
    """
    Score two rating triplets point-by-point: alice scores when a[i] > b[i],
    bob when a[i] < b[i].

    >>> compare_triplets((5, 6, 7), (3, 6, 10))
    (1, 1)
    """
    alice = 0
    bob = 0
    for x, y in zip(a, b):
        if x > y:
            alice += 1
        elif x < y:
            bob += 1
    return alice, bob
|
1ce3d19c0eace387ed96d2f282e7144b4e225510
| 685,537
|
import functools
def newTab(func):
    """
    Decorator: run *func* in a freshly opened browser tab, then close that
    tab and switch back to the original window. The wrapped call returns
    None; *func*'s return value is discarded.
    """
    @functools.wraps(func)
    def wrapper(driver, *args, **kwargs):
        driver.execute_script("window.open('', '_blank')")
        original_window = driver.current_window_handle
        driver.switch_to_window(driver.window_handles[-1])
        func(driver, *args, **kwargs)
        driver.close()
        driver.switch_to_window(original_window)
    return wrapper
|
c9d31e485a9da38ee07a34c34b83ee9ed355c7b2
| 538,391
|
def apply(*args, func=None):
    """Call `func` with the unpacked args; when `func` is None, the first
    positional argument is treated as the callable."""
    if func is not None:
        return func(*args)
    return args[0](*args[1:])
|
d90cac8c75df1f0e6c29dc9d3cab29dbf4b37550
| 546,851
|
def get_lowercase_action_list(action_list):
    """
    Given a list of actions, return the list converted to lowercase.
    """
    return [str.lower(action) for action in action_list]
|
ee5e2e53409fb50919a573f9eee4b230372831a3
| 143,417
|
import re
def humanize_placeholders(msgid):
    """Convert placeholders to the (google translate) service friendly form.
    %(name)s -> __name__
    %s -> __item__
    %d -> __number__
    """
    def _friendly(match):
        named = match.group(1)
        if named:
            return '__{0}__'.format(named.lower())
        kind = 'number' if match.group(2) == 'd' else 'item'
        return '__{0}__'.format(kind)
    return re.sub(r'%(?:\((\w+)\))?([sd])', _friendly, msgid)
|
26802cbeac5bdce433f8b8e43d88a9d26a9cee35
| 615,143
|
def generate_independent_parameters(rng):
"""Return a dictionary with values for each independent parameter."""
ret = {}
ret["wiredTigerCursorCacheSize"] = rng.randint(-100, 100)
ret["wiredTigerSessionCloseIdleTimeSecs"] = rng.randint(0, 300)
ret["wiredTigerConcurrentWriteTransactions"] = rng.randint(16, 256)
ret["wiredTigerConcurrentReadTransactions"] = rng.randint(16, 256)
ret["wiredTigerStressConfig"] = rng.choice([True, False])
if rng.choice(3 * [True] + [False]):
# The old retryable writes format is used by other variants. Weight towards turning on the
# new retryable writes format on in this one.
ret["storeFindAndModifyImagesInSideCollection"] = True
return ret
|
54439a5b14811dd450c5151fb0e0c0bb5f0a83d2
| 506,814
|
import yaml
def make_dataconfig(input_dir, sub, ses, anat, func, acquisition='alt+z', tr=2.0):
    """Generate the data_config file needed by cpac.

    Arguments:
        input_dir {str} -- Path of directory containing input files
        sub {int} -- subject number
        ses {int} -- session number
        anat {str} -- Path of anatomical nifti file
        func {str} -- Path of functional nifti file
        acquisition {str} -- acquisition method for functional scan
        tr {float} -- TR (seconds) of functional scan
    Returns:
        str -- path of the written data_config.yaml file
    """
    entry = {
        'subject_id': sub,
        'unique_id': f'ses-{ses}',
        'anat': anat,
        'func': {
            'rest_run-1': {
                'scan': func,
                'scan_parameters': {
                    'acquisition': acquisition,
                    'tr': tr
                }
            }
        }
    }
    config_file = f'{input_dir}/data_config.yaml'
    with open(config_file, 'w', encoding='utf8') as outfile:
        yaml.dump([entry], outfile, default_flow_style=False)
    return config_file
|
1b1977a6ea9ecf530cb39185b48f970f1cc13f49
| 401,753
|
import re
def first_char_index(lines, regex):
    """Get the index of the first position for a description character.

    Use this to find where the first character after the ':' should be
    placed so that each comment can line up.

    :param lines: list of strings to find the index for
    :param regex: Regex to identify the line to compare with
    :returns: The index of the first description character,
        or None if no lines matched the given regex
    """
    for candidate in lines:
        if not re.match(regex, candidate):
            continue
        pieces = candidate.split(":")
        # leading-whitespace width of the segment after the first ':'
        pad = len(pieces[1]) - len(pieces[1].strip())
        return len(pieces[0]) + 1 + pad
    return None
|
e31794f46f67f79008e761267b42c6766b5dd370
| 60,098
|
from typing import List
from typing import Optional
from typing import Tuple
def find_by_key(
    input_list: List[dict], key: str, value: str
) -> Optional[Tuple[dict, int]]:
    """Find an element in a list of dictionaries by a specific key.

    :param input_list: dictionaries to search
    :param key: dictionary key to compare
    :param value: value to look for
    :return: tuple of (matching element, its index)
    :raises ValueError: if no element matches
    """
    # enumerate avoids the redundant O(n) list.index() re-scan the
    # original performed for every hit
    for index, element in enumerate(input_list):
        if element[key] == value:
            return element, index
    raise ValueError(f"No Element with {key}={value} found in the list")
|
776100cef499f59edc3ffa4b8aed645a2eaa52df
| 315,891
|
def _to_signed32(n):
"""Converts an integer to signed 32-bit format."""
n = n & 0xffffffff
return (n ^ 0x80000000) - 0x80000000
|
b5562063cc0467222f3d972eca9305dddbe4e05e
| 43,664
|
from typing import Dict
def format_error(status: int, description: str) -> Dict[str, str]:
    """
    Uniform error format for API responses.

    :param status: the error status code
    :param description: the error description
    :return: a dictionary describing the error
    """
    known = {
        400: 'Bad Request',
        401: 'Unauthorized',
        404: 'Not Found',
        500: 'Internal Server Error',
    }
    label = known.get(status) or '(undefined)'
    return {'error': label, 'error_description': description}
|
135058517a04c38f1321c02ce8ce29e447d1920f
| 585,781
|
def isempty(iterable):
    """
    Return True if *iterable* is an empty (falsy) object.

    iterable : list, tuple, str
        Iterable object
    """
    return not iterable
|
fa14ec3cc7c551c1655e6c6572699f8a16993548
| 316,217
|
import _struct
def from_native_uint32(raw_bytes, offset):
    """Read a native-endian 32-bit unsigned integer from a byte buffer.

    Returns a (value, new_offset) tuple where new_offset is offset + 4.
    """
    (value,) = _struct.unpack_from("=I", raw_bytes, offset)
    return value, offset + 4
|
5e846b9fcafa6bda9c9276609466235410e5d02b
| 448,238
|
def linear_annealing(n, total, p_initial, p_final):
    """Linearly interpolate a probability between p_initial and p_final.

    The current probability is based on the current step, n. Used to
    linearly anneal the exploration probability of the RLTuner.

    Args:
        n: The current step.
        total: The total number of steps that will be taken (usually the
            length of the exploration period).
        p_initial: The initial probability.
        p_final: The final probability.
    Returns:
        The current probability (between p_initial and p_final).
    """
    if n < total:
        return p_initial - (n * (p_initial - p_final)) / total
    return p_final
|
2f79b56efd11477a1f649e9b374891ff09632c7f
| 18,633
|
def LongestFromList(ls: list):
    """
    Determine the object with the longest length inside a list.

    Args:
        ls: A list containing an array of objects.
    Returns:
        The object with the longest length from the list, or None when the
        input is empty, not subscriptable, or holds objects without len().
    """
    try:
        longest = ls[0]
        for candidate in ls:
            if len(candidate) > len(longest):
                longest = candidate
        return longest
    # Narrowed from a bare `except:` that hid unrelated bugs (and caught
    # KeyboardInterrupt). IndexError: empty list; TypeError: ls not
    # subscriptable or an element without len() support.
    except (IndexError, TypeError):
        return None
|
5f7f801fa8761b4a1e2ff7c636c01a8c1e0ef47b
| 193,633
|
def remove_low_std(X, std_val=0.01):
    """
    Mark columns in the feature matrix based on their standard deviation.

    Parameters
    ----------
    X : pandas DF
        feature matrix
    std_val : float
        minimal standard deviation for one feature to not be marked

    Returns
    -------
    list
        feature names that do not meet the standard deviation requirement
    """
    column_std = X.std(axis=0)
    return list(column_std.index[column_std < std_val])
|
b8c2315dd708a911fdb5861cb5b485fe428dbc69
| 192,566
|
import pickle
import base64
def decode_object(string: str) -> object:
    """Convert a base64 string to an object.

    WARNING: uses pickle.loads, which can execute arbitrary code — never
    call this on untrusted input.

    :param string: a base64 string encoding a pickled object
    :return: the object upon success, None otherwise
    """
    try:
        return pickle.loads(base64.b64decode(string))
    # The original only caught TypeError, so malformed base64 or pickle
    # data crashed despite the documented "None otherwise" contract.
    # ValueError covers binascii.Error; EOFError covers truncated pickles.
    except (TypeError, ValueError, EOFError, pickle.UnpicklingError):
        return None
|
7f64f3f80e19bc5af1adec9d0ff7c39951e7b996
| 253,327
|
from typing import Counter
def count_lookup(text):
    """Return a lookup table mapping each character c of *text* to the
    number of characters in *text* lexically smaller than c."""
    char_counts = Counter(text)
    table = {}
    running_total = 0
    # iterate characters in lexical order, accumulating counts seen so far
    for ch in sorted(char_counts):
        table[ch] = running_total
        running_total += char_counts[ch]
    return table
|
c292b51436df703f4646f036b5a3cdaa1130363a
| 444,616
|
def fetch_url(date, country=None):
    """
    Build the URL of the JHU CSSE daily COVID-19 report for the given date.

    :param date: datetime object
    :param country: str; "US" selects the US-specific report series
    :return: str
    """
    stamp = date.date().strftime("%m-%d-%Y")
    base = ("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/"
            "csse_covid_19_data/")
    if country == "US":
        return base + "csse_covid_19_daily_reports_us/{}.csv".format(stamp)
    return base + "csse_covid_19_daily_reports/{}.csv".format(stamp)
|
d2e3b689aab45baba1bfc840d627673e5c9db569
| 304,362
|
def _matches_coupling_map(dag, coupling_map):
"""Iterate over circuit gates to check if all multi-qubit couplings
match the qubit coupling graph in the backend.
Parameters:
dag (DAGCircuit): DAG representation of circuit.
coupling_map (list): Backend coupling map, represented as an adjacency list.
Returns:
bool: True if all gates readily fit the backend coupling graph.
False if there's at least one gate that uses multiple qubits
which does not match the backend couplings.
"""
match = True
for _, data in dag.multi_graph.nodes(data=True):
if data['type'] == 'op':
gate_map = [qr[1] for qr in data['qargs']]
if len(gate_map) > 1:
if gate_map not in coupling_map:
match = False
break
return match
|
9ef80ba26262a461e850f841f8621cd35b32a1c0
| 469,982
|
def noop(value: float) -> float:
    """Identity function: return *value* unchanged."""
    return value
|
fe4db9988f299d3daa57907eda3e3456bf215377
| 473,588
|
from typing import Mapping
def parse_kv_list(params):
    """Create a dict from a "key=value" list.

    Parameters
    ----------
    params : sequence of str or mapping
        For a sequence, each item should have the form "<key>=<value>".
        If `params` is a mapping, it is returned as is.

    Returns
    -------
    A mapping from backend key to value.

    Raises
    ------
    ValueError if an item in `params` does not match the expected
    "key=value" format.
    """
    if isinstance(params, Mapping):
        return params
    if not params:
        return {}
    parsed = {}
    for item in params:
        if "=" not in item:
            raise ValueError(
                "Expected 'key=value' format but got '{}'"
                .format(item))
        key, value = item.split("=", 1)
        parsed[key] = value
    return parsed
|
083f0941f23a61d9a99ad5bf08749a50a0038248
| 637,414
|
import json
def json_to_string(js_dic, **kwargs):
    """Serialize a dictionary to a JSON string.

    Defaults to indent=2 and ensure_ascii=False; any other json.dumps
    keyword arguments (or overrides of those two) are passed through.
    """
    options = {"indent": 2, "ensure_ascii": False}
    options.update(kwargs)
    return json.dumps(js_dic, **options)
|
9b21cd3a5696f86949cb2c9ce3a3492859bbc696
| 185,764
|
import requests
def no_such_resource(url, request):
    """Indicate not found error, when invalid resource requested.

    NOTE(review): the (url, request) signature looks like an httmock-style
    request handler — confirm against the registration site. Neither
    argument is used; every call answers 404.

    :return: dict with 'status_code' set to requests.codes.NOT_FOUND (404).
    """
    return {'status_code': requests.codes.NOT_FOUND}
|
5d02e500975fe07938b1e74ce6ce702e51f72d16
| 211,899
|
def escape(st):
    """
    Escape the characters ``\\\\``, ``\\t``, ``\\n``, ``\\r`` and ``"`` in
    the given string ``st`` and return it.
    """
    # single-pass char->string translation is equivalent to the chained
    # replaces (backslash handled first) but touches each char only once
    table = str.maketrans({
        '\\': r'\\',
        '\t': r'\t',
        '\r': r'\r',
        '\n': r'\n',
        '"': r'\"',
    })
    return st.translate(table)
|
72d0c5875d8b7e9a4f5a94a2c19db1eba18af3f0
| 421,397
|
def to_iterable(item_or_iterable):
    """
    Ensure the input is iterable: objects with ``__iter__`` pass through
    unchanged, anything else is wrapped in a single-element list.

    >>> from rpw.utils.coerce import to_iterable
    >>> to_iterable(SomeElement)
    [SomeElement]

    Args:
        any (iterable, non-iterable)
    Returns:
        (`iterable`): Same as input
    """
    if not hasattr(item_or_iterable, '__iter__'):
        return [item_or_iterable]
    return item_or_iterable
|
1e482e21bc93d229a1010715749badf6369622de
| 386,828
|
def load_from_file(ids, data_path = "."):
"""
Takes an array of ids or single id and loads that srt file and returns
the srt files in a dictionary with IDs as keys.
"""
if type(ids) is int or type(ids) is str:
with open(data_path+str(ids)+".srt","r") as f:
data = f.read()
return {ids:data}
elif type(ids) is list:
all_data = []
for this_id in ids:
try:
with open(data_path+str(this_id)+".srt","r") as f:
this_data = f.read()
all_data.append((this_id,this_data))
except FileNotFoundError:
print("There wasn't a file for ", this_id)
return dict(all_data)
|
55b7eda06e6d7a2e928745e3ab3bba273bfb9723
| 648,490
|
def overlap_between(pair1, pair2):
    """Check whether two start/stop position pairs overlap.

    :param pair1: (start, stop) pair in either order
    :param pair2: (start, stop) pair in either order
    :return: 1 when the ranges overlap, 0 otherwise

    Uses sorted() copies so the caller's lists are no longer mutated
    (the original called list.sort() on its arguments in place).
    """
    start1, stop1 = sorted(pair1)
    start2, stop2 = sorted(pair2)
    if stop1 < start2 or stop2 < start1:
        return 0
    return 1
|
006037d746ccdfb77cca260c351ac07ef781f6bf
| 567,091
|
def _drop_geoms(gdf, geoms, series=False):
"""Drop a subset of geometries from a geopandas dataframe.
Parameters
----------
gdf : geopandas.GeoDataFrame
Dataframe of geometries to search.
geoms : list
Either a list or a list of lists.
series : bool
Search a geoseries. Default is ``False``.
Returns
-------
gdf : geopandas.GeoDataFrame
Retained geometries in a dataframe.
"""
if series:
drop_geoms = geoms
else:
drop_geoms = set([item for sublist in geoms for item in sublist])
if None in drop_geoms:
drop_geoms.remove(None)
gdf = gdf[~gdf.index.isin(drop_geoms)]
return gdf
|
4a1f7824fb6f518c455f16e313ab203666701440
| 573,908
|
from functools import reduce
def uniquify(iterable):
    """Remove duplicates from *iterable*, keeping first-seen order.

    >>> uniquify(([1, 1, 2, 3, 3, 3, 4, 5, 5]))
    [1, 2, 3, 4, 5]
    """
    unique = []
    # membership test on the result list keeps support for unhashable items
    for element in iterable:
        if element not in unique:
            unique.append(element)
    return unique
|
b1e8d06e9cbcc87d17c540f4baac94c95834d104
| 126,324
|
def get_multiple_model_method(model):
    """
    Return the name of the Multiple Model Chain element for the model.

    Parameters
    ----------
    model :
        A Scikit-learn model instance

    Returns
    -------
    The multiple-model method for a mining model, or None for unknown
    model classes.
    """
    method_by_class = {
        'GradientBoostingClassifier': 'modelChain',
        'GradientBoostingRegressor': 'sum',
        'RandomForestClassifier': 'majorityVote',
        'RandomForestRegressor': 'average',
        'IsolationForest': 'average',
    }
    return method_by_class.get(model.__class__.__name__)
|
a87525ab9eedfb46443319f771b2275756b7188e
| 42,542
|
def cli(ctx):
    """Get the list of all data tables.

    :param ctx: context object exposing a client as ``ctx.gi`` — presumably
        a Galaxy instance (only ``ctx.gi.tool_data`` is used here); verify
        against the CLI framework that builds ``ctx``.

    Output:
        A list of dicts with details on individual data tables.
        For example::
            [{"model_class": "TabularToolDataTable", "name": "fasta_indexes"},
            {"model_class": "TabularToolDataTable", "name": "bwa_indexes"}]
    """
    return ctx.gi.tool_data.get_data_tables()
|
f718231f7a1a852f52d3d77dea8325e8e9ae5af4
| 691,006
|
import re
def version_date(version):
    """Return the date from a version's dev component as an integer of
    digits `yyyymmdd`; return 0 if date information is not available."""
    if not version or not isinstance(version[-1], str):
        return 0
    matched = re.match(r'.*([12]\d\d\d[01]\d[0123]\d)', version[-1])
    if matched is None:
        return 0
    return int(matched.groups()[0])
|
29808b72a315f6dfb3a18c61cf8936f747694067
| 477,345
|
def get_sec_since_midnight(date):
    """
    Return seconds since midnight
    >>> from datetime import datetime
    >>> get_sec_since_midnight(datetime(year=2014, month=1, day=1, second=42))
    42
    """
    day_start = date.replace(hour=0, minute=0, second=0, microsecond=0)
    return (date - day_start).seconds
|
690b3cc215b46bf212cb5916f208053c38ad017b
| 154,882
|
def set_fba_name(source, year):
    """
    Generate the name of the FBA used when saving parquet.
    :param source: str, source
    :param year: str, year
    :return: str, name of parquet
    """
    if year is None:
        return source
    return f'{source}_{year}'
|
a7c5c484de1badad53c7cdb5ba6afb9bb81ec524
| 671,281
|
def find_text(text, parts,
              names=['CHARNOSNGLQUOTE', 'CHARNODBLQUOTE']):
    """
    Walk a parse tree and return the first text span whose tag is in *names*.

    *parts* is a sequence of (tag, begin, end, subparts) tuples as produced
    by an EBNF parser. A direct tag match appends its span and stops the
    scan; a result found by recursing into subparts is kept only when
    nothing was collected before it. Returns None when no match exists.

    By default, finds characters inside single or double quotes.
    """
    collected = ''
    if parts:
        for tag, begin, end, subparts in parts:
            if tag in names:
                collected += text[begin:end]
                break
            nested = find_text(text, subparts, names)
            if nested and not collected:
                collected += nested
    if collected:
        return collected
|
de7bff84bb378b3b0cc06bbce969a115dfcc73f4
| 387,578
|
import re
def load_txt_to_dict_kv(filepath, delimiter=','):
    """Load a key<delimiter>value text file into a {key: value} dictionary.

    Lines starting with '#' are ignored.

    Keyword arguments:
    filepath -- path to text file
    delimiter -- delimiter between key and value, default ','
    """
    result = {}
    with open(filepath) as handle:
        for raw in handle:
            if raw.startswith("#"):
                continue
            cleaned = raw.replace('\n', '')
            key, value = cleaned.split(delimiter)
            result[key] = value
    return result
|
e946f26634e821bca6337ce5e3d7cd19861ddb89
| 528,783
|
def get_review_permalink(review, anchor_pattern=None):
    """
    Get the permalink for a review, optionally specifying the format of the
    named anchor to be appended to the end of the URL.

    Example::
        {% get_review_permalink review "#r%(id)s-by-%(user_name)s" %}
    """
    # falsy patterns (None, '') fall back to the plain permalink
    if not anchor_pattern:
        return review.get_absolute_url()
    return review.get_absolute_url(anchor_pattern)
|
7d4672bc6a0cd957925d0368752c41c55f387dee
| 369,326
|
import re
def group_tag(string):
    """Extract the group tag embedded between '><!-- ' and ' <=< ACCEPT -->';
    return '' when no such span exists."""
    left_br = '><!-- '
    right_br = ' <=< ACCEPT -->'
    found = re.search(left_br + '.+' + right_br, string)
    if not found:
        return ''
    # peel the delimiters off the matched span
    return re.sub(left_br, '', re.sub(right_br, '', found.group(0)))
|
58fe8a9e49e0cdc66bb6081db4f58a416bae8d62
| 278,960
|
def salary_columns(html_text, context='current'):
    """Return the column names for the salary information.

    @param **html_text** (*str*): String of the HTML response from
        SALARY_URL
    @param **context** (*str*): 'current' signifies that current and
        future salaries are being pulled (6 columns); any other value
        returns historical values (2 columns)

    Returns:
        **html_text** (*str*): Truncated string of the HTML response with
        the column information removed
        **column_list** (*list*): List of column names for salary
        information
    """
    total_columns = 6 if context == 'current' else 2
    columns = []
    for _ in range(total_columns):
        begin = html_text.find('>') + 1
        finish = html_text.find('</td>')
        columns.append(html_text[begin:finish])
        # skip past the consumed cell (5 == len('</td>'))
        html_text = html_text[finish + 5:]
    return html_text, columns
|
6cbd3c922d9debae8db5318766d263d10b4c123d
| 126,040
|
import string
def process_line(line: str) -> list:
    """Split a line into lowercase words, removing punctuation, curly
    quotes, hyphens/em-dashes (turned into spaces) and surrounding
    whitespace.
    """
    removables = string.punctuation + '“' + '”' + "‘" + "’"
    strip_table = "".maketrans({ch: "" for ch in removables})
    cleaned = line.replace("-", " ").replace("—", " ")
    cleaned = cleaned.strip(string.whitespace)
    return cleaned.translate(strip_table).lower().split()
|
5fa5039b254351b87d637171081248551d5977e7
| 455,959
|
import re
def is_api_cloudflare_input_key(prog, inp, api):
    """Test input for a Cloudflare key and set it in the api object if so.

    If the input is a key input ('key:<word-chars>') then the key in the
    'api' object is set to it.

    Args:
        prog (State): don't remove me because this function is looped over
            with other functions that do take this argument and use it.
        inp (str): input to check.
        api (ApiCloudflare): the api object to set.

    Returns:
        bool: 'True' if key in 'api' was set to 'inp', 'False' if not.
    """
    if not re.match(r'key:\w+$', inp):
        return False
    api.key = inp[4:]
    return True
|
05e4a81d8727358bf91a800353b50d29d66b3d43
| 292,231
|
def scores(L):
    """Get the scores (1-based position * alphabet score) for all words in
    the list."""
    return [val * (idx + 1) for idx, val in enumerate(L)]
|
98327f6ff050b160deff683009750424bf60b35c
| 669,380
|
import socket
def ip_is_localhost(host_ip: str) -> bool:
    """
    Return True when *host_ip* resolves to the local host.

    >>> ip_is_localhost('127.0.0.1')
    True
    >>> ip_is_localhost('localhost')
    True
    >>> ip_is_localhost('192.168.168.17')
    False
    >>> ip_is_localhost('192.168.168.254')
    False
    """
    resolved = socket.gethostbyname(host_ip)
    localhost_addr = socket.gethostbyname('localhost')
    return resolved == localhost_addr or resolved.startswith('127.')
|
7b70e974d7392fa362779d13b883326c5f0c4652
| 244,702
|
def _GetElemByName(name, from_list):
"""Gets an element from the given list by its name field.
Raises an error if it doesn't find exactly one match.
"""
elems = [e for e in from_list if e.name == name]
if len(elems) != 1:
raise ValueError('Expected 1 item named %s, found %d' % (name, len(elems)))
return elems[0]
|
71a5dcdea6cd3f5f6f32accb7939a9b2fff38c73
| 58,220
|
def isJudge(contest_data, user):
    """ Check whether a user is a judge of the contest.

    :param contest_data: a Contest object
    :param user: a User object
    :return: True if the user is a judge (admin) of the contest, else False.
    """
    return any(user == judge for judge in contest_data.contest_admins.all())
|
13866c671bdb3512b4f1a138e8b794242dad22eb
| 295,883
|
def common_values(list1, list2):
    """
    Return the number of common values of two sorted arrays, counted with
    a two-pointer scan.
    """
    i = j = shared = 0
    while i < len(list1) and j < len(list2):
        if list1[i] > list2[j]:
            j += 1
        elif list1[i] < list2[j]:
            i += 1
        else:
            shared += 1
            i += 1
            j += 1
    return shared
|
99f0c487c26ecee48497459557cad4904d8b089f
| 85,087
|
def rotated_array_search(input_list, number):
    """
    Find the index of *number* in a rotated sorted array via a modified
    binary search (O(log n)).

    Args:
        input_list(array), number(int): Input array to search and the target
    Returns:
        int: Index, or -1 when the target is absent
    """
    lo, hi = 0, len(input_list) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if input_list[mid] == number:
            return mid
        if input_list[lo] > input_list[hi]:
            # The window [lo, hi] still spans the rotation point.
            if number <= input_list[hi]:
                # target lies among the smaller (wrapped-around) values
                lo = mid + 1
            else:
                # target lies among the larger (pre-rotation) values
                hi = mid - 1
        elif number > input_list[mid]:
            # window is plainly sorted: target is right of mid
            lo = mid + 1
        else:
            # window is plainly sorted: target is left of mid
            hi = mid - 1
    return -1
|
8aaeeefbf614a9a25fc698158fdf61cd2aef24f2
| 198,765
|
def get_term_name_from_rdf(class_, data):
    """Look up the label for *class_* in the rdf graph; return it as a
    string, or None when the graph/label is not available."""
    try:
        return data.rdfGraph.label(class_).__str__()
    except AttributeError:
        return None
|
a62905e634c9c898464f3bd4bb4ccf043a3c8057
| 452,992
|
def find_free_id(scenario: dict, find_min_free_id=True):
    """
    Find a free id for a new pedestrian/target.

    Bug fixes vs. the original:
    - the gap-scan never advanced ``prev_id``, so it could return an id
      that was already busy (e.g. busy = {1, 2, 5} returned 2);
    - ``find_min_free_id=False`` crashed with ValueError on an empty
      scenario (``max()`` of an empty set); now returns 1, matching the
      minimum-id branch;
    - ids below the smallest busy id were never considered (busy = {3}
      returned 4 instead of 1).

    :param scenario: dictionary containing a scenario's data
    :param find_min_free_id: if True, finds the minimum free id (less efficient), otherwise simply a free id (more efficient)
    :return: a free id (int)
    """
    busy_ids = set()
    topography = scenario['scenario']['topography']
    # Collect ids already used by pedestrians.
    for elem in topography['dynamicElements']:
        if elem['type'] == 'PEDESTRIAN':
            busy_ids.add(elem['attributes']['id'])
    # Collect ids already used by targets.
    for target in topography['targets']:
        busy_ids.add(target['id'])
    if not busy_ids:
        # Nothing is in use: 1 is the first free id.
        return 1
    if not find_min_free_id:
        return max(busy_ids) + 1  # always free, O(n) without sorting
    sorted_ids = sorted(busy_ids)
    if sorted_ids[0] > 1:
        # Ids start at 1; anything below the smallest busy id is free.
        return 1
    # Scan consecutive busy ids for the first gap.
    prev_id = sorted_ids[0]
    for busy_id in sorted_ids[1:]:
        if busy_id - prev_id > 1:
            return prev_id + 1
        prev_id = busy_id
    # No gap found: one past the largest busy id is free.
    return sorted_ids[-1] + 1
|
9dfce4a39983d7aecb1a4e2147640354c624834b
| 387,285
|
from datetime import datetime
def timestamp_zip(string: str) -> str:
    """
    Append the current date/time stamp and a ``.zip`` suffix to a name.

    Example:
        > timestamp("foo")
        "foo-1997-09-14-1253.zip"
    """
    stamp = datetime.today().strftime("%Y-%m-%d-%H%M")
    return "{}-{}.zip".format(string, stamp)
|
ceb20c46b0620d5f98714634f74d4b6759d7a274
| 670,733
|
def parse_prodtype(prodtype):
    """
    Split a comma-separated prodtype line into the set of product names,
    stripping surrounding whitespace from each entry.
    """
    return {product.strip() for product in prodtype.split(',')}
|
9b0d4b68243031f07a1460496da967faec7407d9
| 284,945
|
def _unescape_nl(text):
    """Convert escaped newline sequences (``\\n``) in ``text`` back into
    real newline characters; non-string input is stringified first."""
    escaped = str(text)
    return escaped.replace('\\n', '\n')
|
e46c8fe294315ff86eb5ebd9e8999d752253cdec
| 546,332
|
def _get_channel_rank(image_format):
    """Return the index of the channel ('C') dimension in a layout string.

    Raises ValueError for layouts other than HWCN / NHWC / NCHW.
    """
    supported = ('HWCN', 'NHWC', 'NCHW')
    if image_format not in supported:
        raise ValueError('Unimplemented images format: {:s}.'.format(image_format))
    return image_format.index('C')
|
c8a8163fd49a95f62baf109b97d57869e57d63c3
| 577,124
|
import math
def get_speed(vehicle, meters=False):
    """
    Compute the current speed of a vehicle.

    Parameters
    ----------
    vehicle : carla.vehicle
        The vehicle whose velocity vector is read.
    meters : bool
        Return m/s when True, km/h (default) otherwise.

    Returns
    -------
    speed : float
        The vehicle speed.
    """
    velocity = vehicle.get_velocity()
    # Magnitude of the 3D velocity vector, in m/s.
    speed_ms = math.sqrt(velocity.x ** 2 + velocity.y ** 2 + velocity.z ** 2)
    if meters:
        return speed_ms
    return speed_ms * 3.6  # m/s -> km/h
|
884e1e4e1fde1c9c6b0981ed7ae8973a9fe5a0e1
| 210,245
|
def representative_feature(path, values):
    """Helper function for TSEL filter. Returns the representative node of a
    given path.

    The representative is the node with the highest value; the virtual root
    node "VRN" is never eligible. Ties are broken in favor of the node that
    appears first on the path.

    Bug fix: the original initialized ``max_value = -1``, which silently
    rejected every node whose value is <= -1, so paths with all-negative
    values returned None instead of their best node.

    Args:
        path (list): Path containing some node names.
        values (dict): values containing nodes and their values.
    Returns:
        str: Name of most valuable/representative node of the given path,
            or None if the path has no eligible node.
    """
    max_value = float("-inf")
    rep_node = None
    for node in path:
        if node == "VRN":
            continue
        # Strict '>' keeps the first node on ties.
        if values[node] > max_value:
            max_value = values[node]
            rep_node = node
    return rep_node
|
6e8119e0f83106f2b023b46acd4d00b092e5e715
| 105,195
|
import math
def get_biorhythms(days_elapsed: int):
    """Return the classic biorhythm values for a given day.

    Args:
        days_elapsed: number of days elapsed since birth.
    Returns:
        Tuple of biorhythm percentages (physical, emotional, mental).
    """
    def cycle_value(period, days):
        """Biorhythm value (%) for a sine wave with the given period."""
        return math.sin(2 * math.pi * days / period) * 100
    # Standard periods: physical 23 days, emotional 28, mental 33.
    return (
        cycle_value(23, days_elapsed),
        cycle_value(28, days_elapsed),
        cycle_value(33, days_elapsed),
    )
|
5f284903d38d2e83595a488cda92194b4b6c8005
| 635,932
|
import re
def mmyy_valid_date(date_str):
    """
    Check if a date string is a valid month/year. Expected format is mmyy,
    with an optional '/' separator (e.g. "0921" or "09/21").

    :param date_str: a date string
    :return: date_str if valid, otherwise None
    """
    # Raw string: the previous pattern used "\/" inside a plain string
    # literal, an invalid escape sequence that raises a SyntaxWarning on
    # modern Python (and will become an error). '/' needs no escaping.
    if re.match(r"^(0[1-9]|1[0-2])/?([0-9]{2})$", date_str):
        return date_str
    return None
|
a915b9ef59f3e5d2e6b8b97ef033c81a2e34ff26
| 510,576
|
import json
def get_config(config_path):
    """Open a Tiler JSON config file and return its contents as a dict."""
    with open(config_path) as handle:
        return json.load(handle)
|
72a2133b44ffc553ad72d6c9515f1f218de6a08c
| 3,049
|
def split_path(path, assumed_namespace="A", heuristic_split=True):
    """
    split a path into the namespace and a URL
    when a namespace is missing this function returns a configurable default namespace
    as desired this function can apply a heuristic split to distinguish between what is likely a namespace and/or url
    :param path: the path to split into a namespace and a url
    :param assumed_namespace: the default namespace to return if no namespace is found
    :param heuristic_split: use heuristics to identify what is a namespace and what is part of a url
    :return: a pair consisting of the namespace and the url
    """
    splits = path.split("/")
    # str.split always returns at least one element, so the original
    # `len(splits) == 0` branch was unreachable and has been removed.
    if len(splits) == 1:
        # No separator: the whole path is the url.
        return assumed_namespace, splits[0]
    if not heuristic_split:
        return splits[0], "/".join(splits[1:])
    if len(splits[0]) == 1:
        # Single-character leading component: treat it as a namespace.
        return splits[0], "/".join(splits[1:])
    # Multi-character leading component: assume the whole path is the url
    # ("/".join(splits) reconstructs `path` exactly).
    return assumed_namespace, path
|
dd0dc3b95a9ee288d1cc92f3c70bc05e20fd4a5b
| 499,020
|
def input(feature: str, value, disabled: bool = False) -> str:
    """
    Build an HTML labelled text-input snippet for one feature.

    Args:
        feature (str): feature name, used as the label, id and name attributes.
        value (str): default value shown in the input.
        disabled (bool): render the input as disabled. Defaults to False.
    """
    disabled_attr = 'disabled' if disabled else ''
    return f"""
    <div style="display:flex;flex-direction:column;">
        <label for="{feature}">{feature}</label>
        <input id="{feature}" type="text" value="{value}" name="{feature}" {disabled_attr}>
    </div>
    """
|
6a6773474221c1f693700dfb1cf0af46d0591dff
| 396,310
|
def const(a, b):
    """``const :: a -> b -> a``

    The constant combinator: always returns ``a``, ignoring ``b``.
    """
    return a
|
1b3e03d98ab495d1795d3e89d0a57728b1dcef47
| 700,144
|
from typing import Tuple
def earliest_bus_at(timestamp: int, bus_ids: list) -> Tuple[int, int]:
    """Find the earliest bus that goes to the airport.

    Args:
        timestamp: the time of arrival at the stop.
        bus_ids: bus loop periods; bus ``b`` departs at every multiple of ``b``.

    Returns:
        (bus_id, wait) for the bus with the shortest wait time.

    Bug fix: the wait is now ``(-timestamp) % bus`` so a bus departing
    exactly at ``timestamp`` yields a wait of 0; the original
    ``bus - timestamp % bus`` returned a full period in that case. The
    minimum is also computed once instead of twice.
    """
    waits = [(-timestamp) % bus for bus in bus_ids]
    best_wait = min(waits)
    return bus_ids[waits.index(best_wait)], best_wait
|
5634817dc32e272dc710375c7b5f08a088cc39aa
| 382,480
|
import time
def format_date(date, date_format):
    """
    Format a time value according to a strftime-style format string.

    Common directives:
        %y  two-digit year      %Y  four-digit year
        %m  month               %d  day of month
        %H  hour (24h)          %I  hour (12h)
        %M  minute              %S  second
        %w  weekday
    More formats: https://www.runoob.com/python/python-date-time.html

    :param date: a struct_time / time tuple to format
    :param date_format: strftime format string
    :return: formatted date string
    """
    return time.strftime(date_format, date)
|
4123d137c7624ccd9553896fdba50ce8d61d02b8
| 376,665
|
def create_edge_adjacency(npoints: int):
    """Build the closed-loop edge connectivity of an airfoil outline.

    Each consecutive pair of points is joined, and the last point is joined
    back to the first, e.g. for 5 points:
        [[0, 1], [1, 2], [2, 3], [3, 4], [4, 0]]

    Args:
        npoints (int): Number of points in an airfoil
    Returns:
        List[(int,int)]: List of point connectivities
    """
    edges = [[i, i + 1] for i in range(npoints - 1)]
    # Close the loop: last point connects back to point 0.
    edges.append([len(edges), 0])
    return edges
|
9583322baff0d8d3ffaae7dd7a1457b9853e5009
| 64,806
|
def index_of_nth_base(gappedseq, n):
    """Return the index of the n-th non-gap character ('-' marks a gap) in
    gappedseq, counting from n=0; raise ValueError when there are not
    enough non-gap bases."""
    base_positions = (idx for idx, base in enumerate(gappedseq) if base != '-')
    for count, idx in enumerate(base_positions):
        if count == n:
            return idx
    raise ValueError(
        "Could not find {0}'th non-gapped base in sequence".format(n))
|
2552adbc19ad528c44a1c9ff013c04789581fb11
| 127,537
|
def flatten_opts(opts):
    """Flattens a multi-level addict.Dict or native dictionnary into a single
    level native dict with string keys representing the keys sequence to reach
    a value in the original argument.

    d = addict.Dict()
    d.a.b.c = 2
    d.a.b.d = 3
    d.a.e = 4
    d.f = 5
    flatten_opts(d)
    >>> {
        "a.b.c": 2,
        "a.b.d": 3,
        "a.e": 4,
        "f": 5,
    }

    Fixes vs. the original: the nested helper no longer uses a mutable
    default argument (``vals=[]``), and an empty list value no longer
    raises IndexError from ``v[0]`` (it is now stringified like any other
    list of non-dicts).

    Args:
        opts (addict.Dict or dict): addict dictionnary to flatten
    Returns:
        dict: flattened dictionnary
    """
    flat_pairs = []

    def _walk(node, prefix=""):
        """Append (dotted_key, value) pairs for every leaf under node."""
        for key, value in node.items():
            if isinstance(value, dict):
                _walk(value, prefix + key + ".")
            elif isinstance(value, list) and value and isinstance(value[0], dict):
                # List of dicts: index each element into the key path.
                for i, item in enumerate(value):
                    _walk(item, prefix + key + "." + str(i) + ".")
            elif isinstance(value, list):
                flat_pairs.append((prefix + key, str(value)))
            else:
                flat_pairs.append((prefix + key, value))

    _walk(opts)
    return dict(flat_pairs)
|
472dd4127cea358e3f8dded7ee365acfeb430804
| 300,521
|
def find_peak_linearly(ls: list) -> int:
    """Return the index of the first interior peak of ls, i.e. the first
    position i (with 0 < i < len(ls) - 1) where ls[i-1] <= ls[i] >= ls[i+1].
    Returns -1 when no interior peak exists (including lists with fewer
    than three elements).
    Time complexity: O(n), where len(ls) == n."""
    for left in range(len(ls) - 2):
        mid = left + 1
        if ls[left] <= ls[mid] and ls[mid] >= ls[mid + 1]:
            return mid
    return -1
|
7ab80b1067cb47687c830f28ac42d834eba9fa42
| 92,808
|
def critical_events_in_outputs(healthcheck_outputs):
    """Given a list of healthcheck pairs (output, healthy), return
    those whose final element (the healthy flag) is exactly False.
    """
    unhealthy = []
    for check in healthcheck_outputs:
        # Identity check: only the literal False counts, not falsy values.
        if check[-1] is False:
            unhealthy.append(check)
    return unhealthy
|
38a36b0f2d3d42fb5e86ebf0ab69efbe121b576a
| 634,814
|
def get_perfect_squares(num: int) -> list:
    """
    Get a minimum number of squares sum up to specific integer num.

    Bug fixes vs. the original greedy version:
    - the greedy choice was not minimal (12 decomposed as [9, 1, 1, 1]
      instead of the optimal [4, 4, 4]);
    - num == 1 returned [] because ``range(1, 1)`` never produced the
      square 1.
    This version uses dynamic programming (O(num * sqrt(num))).

    @param num: a positive integer target of squares sum.
    @return: a list of square numbers (largest first) summing to num,
        using the fewest possible terms; [] for num <= 0.
    """
    if num <= 0:
        return []
    # Squares not exceeding num.
    squares = []
    k = 1
    while k * k <= num:
        squares.append(k * k)
        k += 1
    # best[i]: minimal count of squares summing to i; choice[i]: square used.
    INF = float("inf")
    best = [0] + [INF] * num
    choice = [0] * (num + 1)
    for total in range(1, num + 1):
        for square in squares:
            if square > total:
                break
            if best[total - square] + 1 < best[total]:
                best[total] = best[total - square] + 1
                choice[total] = square
    # Reconstruct the actual squares from the choice table.
    results = []
    remaining = num
    while remaining > 0:
        results.append(choice[remaining])
        remaining -= choice[remaining]
    results.sort(reverse=True)  # match the original's largest-first order
    return results
|
def0af4f660a6734b6ce3883f1bebb7d1158d947
| 544,870
|
def replace_last(string, old, new):
    """
    Replace the last occurrence of a pattern in a string; the string is
    returned unchanged when the pattern does not occur.
    :param string:
        string
    :type string: ``str``
    :param old:
        string to find
    :type old: ``str``
    :param new:
        string to replace
    :type new: ``str``
    :returns: ``str``
    """
    position = string.rfind(old)
    if position == -1:
        return string
    return string[:position] + new + string[position + len(old):]
|
747597b43e3ae3286285d76acecd3fe871e43dc2
| 339,662
|
import operator
def getattrd(obj, name):
    """Dotted-path attribute lookup: like ``getattr()`` but ``name`` may be
    a path such as ``"a.b.c"``. Returns None when any step is missing."""
    getter = operator.attrgetter(name)
    try:
        return getter(obj)
    except AttributeError:
        # Some attribute along the path does not exist.
        return None
|
7cccb0da46c1bc7c9280ce38ac978bced385822a
| 105,386
|
def line(x, alpha=1, c=0):
    """Evaluate the straight line y = alpha * x + c at point x.

    alpha is the slope (steepness) and c the y-intercept (offset).
    """
    return c + alpha * x
|
4e98e08da1eea4fc0e57b79feed8900c542cc8c0
| 174,855
|
def show_toolbar(request):
    """Prevent DjDT from appearing in Django-CMS admin page iframes"""
    path = request.get_full_path()
    # Hide the toolbar on any CMS or admin URL.
    return not ('cms/' in path or 'admin/' in path)
|
63e0771f2c6b97395b56b2b945d79ea9b74abefc
| 59,236
|
def divide_into_chunks(array, chunk_size):
    """Divide a subscriptable container into consecutive pieces.

    Args:
        array (list or str or tuple): Subscriptable datatypes (containers)
        chunk_size (int): Size of each piece (except possibly the last one)
    Returns:
        list or str or tuple: List of chunks
    """
    chunks = []
    for start in range(0, len(array), chunk_size):
        chunks.append(array[start:start + chunk_size])
    return chunks
|
ed48ebcba3833e5d2923b6dcc6ff572c316873af
| 677,380
|
from functools import reduce
def invalid_line(row):
    """Return True when every cell in the row is the empty string
    (an empty row also counts as invalid)."""
    return all(cell == '' for cell in row)
|
735d3e50bf0f170f966286acacda447c45d8e0f5
| 456,159
|
def filename_from_url(url):
    """
    Get the filename component of the URL
    >>> filename_from_url('http://example.com/somefile.zip')
    'somefile.zip'
    >>> filename_from_url('http://oceandata.sci.gsfc.nasa.gov/Ancillary/LUTs/modis/utcpole.dat')
    'utcpole.dat'
    """
    # Everything after the last '/' (the whole url if there is none).
    return url.rsplit('/', 1)[-1]
|
e3038c4c71dc7d03b7b5ac36d570fcfe74ca744d
| 351,205
|
import math
def smoothedsigmoid(x, b=1):
    """
    Logistic sigmoid with adjustable smoothness.

    b controls the smoothness of the transition: lower values of b give a
    smoother (more gradual) curve.
    """
    return 1 / (1 + math.exp(-b * x))
|
014bec11a761fcf19c9e5885a1fa870115b90a00
| 41,192
|
import random
def rand_x_digit_num(x, leading_zeroes=True):
    """Generate a random X-digit number.

    With leading_zeroes (the default) the result is a zero-padded string of
    exactly x digits; otherwise it is an int in [10**(x-1), 10**x - 1].
    """
    if not leading_zeroes:
        # Int result: the first digit is never zero.
        return random.randint(10 ** (x - 1), 10 ** x - 1)
    if x > 6000:
        # Very long results: draw digit by digit to avoid formatting a huge int.
        return ''.join([str(random.randint(0, 9)) for i in range(x)])
    return '{0:0{x}d}'.format(random.randint(0, 10 ** x - 1), x=x)
|
e2ee30f1316eaa2320c46f67378c99e9674c8830
| 276,618
|
import pickle
def get_stop_words(k=200):
    """Load corpus-specific stop words from the cached frequency list.

    :param k: number of top-frequency candidates to treat as stop words.
    :return: a set of stop words.
    """
    # NOTE: pickle.load must only be used on this trusted, project-generated file.
    with open('./data/stop_words_candidates.pkl', 'rb') as handle:
        candidates = pickle.load(handle)
    return {word for word, _ in candidates[:k]}
|
ff1bb74526977de1a4954b25e80ab7ccef57daad
| 86,793
|
import torch
def intersect_centered(wh_a: torch.Tensor, wh_b: torch.Tensor) -> torch.Tensor:
    """Calculates pairwise intersection areas of same-centered boxes.

    For co-centered boxes the intersection is simply
    min(width) * min(height) for every (a, b) pair.

    Args:
        wh_a (torch.Tensor): torch.Tensor(A,2) as width,height
        wh_b (torch.Tensor): torch.Tensor(B,2) as width,height
    Returns:
        torch.Tensor: torch.Tensor(A,B)
    """
    # Broadcast (A,1) against (1,B) directly. The original expanded both
    # operands to (A,B,2) and then immediately discarded half of each
    # result via [:, :, 0] — twice the memory traffic for the same values.
    min_w = torch.min(wh_a[:, 0].unsqueeze(1), wh_b[:, 0].unsqueeze(0))  # (A,B)
    min_h = torch.min(wh_a[:, 1].unsqueeze(1), wh_b[:, 1].unsqueeze(0))  # (A,B)
    return min_w * min_h
|
6c4aa2daa7fdad8ace57df3f6abd75b622a996f8
| 368,666
|
from pathlib import Path
def BrukerListFiles(PATH, Recursive=False):
    """List all SPC (EMX) and DTA (Elixys) files in the PATH, sorted.

    Args:
        PATH ([str]): path of the folder containing the Bruker files.
        Recursive (bool, optional): [to check the folder and subfolders]. Defaults to False.
    """
    extensions = {".DTA", ".dta", ".spc", ".SPC"}
    folder = Path(PATH)
    candidates = folder.rglob('*') if Recursive else folder.iterdir()
    return sorted(p for p in candidates if p.suffix in extensions)
|
75f34662972f6bf4d3ef26f1a20e8a4c44e1edca
| 289,101
|
import math
def _parseSeconds(data):
    """
    Format a timedelta as HH:MM:SS[.fraction].

    The fractional part (rounded to 8 decimal places) is appended only when
    non-zero, with trailing zeros stripped.

    Bug fix: the original built the fraction with ``str(round(...))``,
    which switches to scientific notation for small remainders — e.g. a
    1-microsecond delta produced "00:00:00.-06" ("1e-06"[2:]). Fixed-point
    formatting is used instead.

    :param data: a datetime.timedelta (anything with ``total_seconds()``).
    :return: formatted duration string.
    """
    total_seconds = data.total_seconds()
    hours = math.floor(total_seconds / 3600)
    minutes = math.floor((total_seconds - hours * 3600) / 60)
    seconds = math.floor(total_seconds - hours * 3600 - minutes * 60)
    remainder = round(total_seconds - 3600 * hours - 60 * minutes - seconds, 8)
    if remainder > 0:
        # Fixed-point, trailing zeros stripped; [2:] drops the leading "0.".
        fraction = "." + "{:.8f}".format(remainder).rstrip("0")[2:]
    else:
        fraction = ""
    return "%02d:%02d:%02d%s" % (hours, minutes, seconds, fraction)
|
b01a66f66dc3cdc930aff29069c865cab5278d08
| 13,695
|
from typing import Type
from typing import Iterable
from typing import Optional
from textwrap import dedent
def get_docstring(
    cls: Type,
    *,
    flatten: bool = False,
    fallback_to_ancestors: bool = False,
    ignored_ancestors: Iterable[Type] = (object,),
) -> Optional[str]:
    """Get the docstring for a class with properly handled indentation.

    :param cls: the class, e.g. MyClass.
    :param flatten: collapse the docstring to a single line by replacing all
        newlines with spaces and stripping leading indentation.
    :param fallback_to_ancestors: if the class itself has no docstring, walk
        the MRO (direct parent first, then grandparent, ultimately `object()`)
        and use the first ancestor docstring found.
    :param ignored_ancestors: if `fallback_to_ancestors` is True, never use
        the docstring from these ancestors.
    """
    docstring = None
    if cls.__doc__ is not None:
        docstring = cls.__doc__.strip()
    elif fallback_to_ancestors:
        # Walk the MRO (skipping cls itself) for the first usable docstring.
        for ancestor in cls.mro()[1:]:
            if ancestor in ignored_ancestors or ancestor.__doc__ is None:
                continue
            docstring = ancestor.__doc__.strip()
            break
    if docstring is None:
        return None
    first_newline = docstring.find("\n")
    if first_newline == -1:
        return docstring
    # Re-indent everything after the first line relative to the first line.
    head = docstring[:first_newline]
    tail = dedent(docstring[first_newline + 1:]).splitlines()
    lines = [head, *tail]
    if flatten:
        return " ".join(part.strip() for part in lines if part).strip()
    return "\n".join(lines)
|
402d7365749f8eb687a145de50ab4330c2a129fc
| 590,278
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.