content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def home():
    """Render and return the landing page listing all supported metric names."""
    page = flask.render_template('index.html', metrics=SUPPORTED_METRICS.keys())
    # Flask accepts a (body, status) tuple; be explicit about the 200.
    return page, 200
def data_to_segments_uniform(x, n_segments, segment_ranges=True):
    """Split data into segments of equal size (number of observations).

    Thin wrapper around ``split_equal_bins``.

    NOTE(review): ``segment_ranges`` is accepted but never used here —
    confirm whether it was meant to be forwarded to ``split_equal_bins``.
    """
    return split_equal_bins(x, n_segments)
def create_waf_sensor(data, name, comment):
    """Create a WAF sensor by mutating *data* in place.

    Sets the sensor's identifier and comment attributes on the passed
    object; returns None.
    """
    data.id_or_name = name
    data.comment = comment
def load_all_sheets(file_name):
    """
    Load from a xls(x) file all its sheets to a pandas.DataFrame as values to sheet_names as keys in a dictionary.

    Parameters
    ----------
    file_name : str, Path
        file_name to load from

    Returns
    -------
    dict
        dictionary containing the sheet_names as keys and pandas.DataFrame representing the xls(x) sheets
        ``{sheet_name: pandas.DataFrame}``
    """
    file_name = Path(file_name)
    # Open the workbook only to enumerate its sheet names, and close the
    # handle explicitly — the original left the ExcelFile open (leaked fd).
    excel_file = ExcelFile(file_name)
    try:
        sheet_names = list(excel_file.sheet_names)
    finally:
        excel_file.close()
    return load_these_sheets(file_name, sheet_names)
def rate_limit(limit=1000, interval=60):
    """Rate limit for API endpoints.

    If the caller (keyed by remote address) has exceeded *limit* requests,
    return an HTTP 429 response; otherwise count the request and call through.

    NOTE(review): the cache key is bucketed by the current wall-clock
    *minute*, so the effective window is always ~60s regardless of
    *interval*; *interval* only controls the key's expiry. Confirm intended.
    """
    def rate_limit_decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # One counter per client address per wall-clock minute.
            key: str = f"Limit::{request.remote_addr}:{datetime.datetime.now().minute}"
            current_request_count = cache.get(key=key)
            if current_request_count and int(current_request_count) >= limit:
                return {
                    "message": f"Too many requests. Limit {limit} in {interval} seconds",
                }, HTTPStatus.TOO_MANY_REQUESTS
            else:
                # Increment the counter and (re)arm its expiry in one round trip.
                pipe = cache.pipeline()
                pipe.incr(key, 1)
                pipe.expire(key, interval + 1)
                pipe.execute()
                return func(*args, **kwargs)
        return wrapper
    return rate_limit_decorator
def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy.

    Local (per-section) settings override the global paste.deploy config.
    """
    merged_conf = dict(global_conf)
    merged_conf.update(local_conf)

    def auth_filter(filteredapp):
        return AuthProtocol(filteredapp, merged_conf)

    return auth_filter
def parse(stylesheet):
    """Parse a stylesheet using tinycss2 and return a StyleSheet instance.

    :param stylesheet: A string of an existing stylesheet.
    """
    rules = tinycss2.parse_stylesheet(
        stylesheet, skip_comments=True, skip_whitespace=True
    )
    result = qstylizer.style.StyleSheet()
    for rule in rules:
        if rule.type == "error":
            raise ValueError("Cannot parse Stylesheet: " + rule.message)
        selector = tinycss2.serialize(rule.prelude).strip()
        declarations = tinycss2.parse_declaration_list(
            rule.content, skip_comments=True, skip_whitespace=True
        )
        for decl in declarations:
            if decl.type != "declaration":
                continue
            prop = decl.name.strip()
            result[selector][prop] = tinycss2.serialize(decl.value).strip()
    return result
def test_suite():
    """Returns a test suite of all the tests in this module."""
    loader = unittest.defaultTestLoader
    suite = unittest.TestSuite()
    for test_class in (TestNetCDFPointUtilsConstructor,
                       TestNetCDFPointUtilsFunctions1,
                       TestNetCDFPointUtilsGridFunctions):
        suite.addTest(loader.loadTestsFromTestCase(test_class))
    return suite
def compute_max_cut(n: int, nodes: List[int]) -> int:
    """Compute (inefficiently) the max cut, exhaustively.

    Enumerates all 2^n bit strings; each is an in/out partition of the
    n vertices, scored by summing the weight of every edge crossing the cut.

    Parameters
    ----------
    n : number of vertices.
    nodes : sequence of (u, v, weight) edges.

    Returns
    -------
    int : integer encoding of the best bit string found (helper.bits2val).
    """
    # -inf is safe for any edge weights; the original -1000 sentinel would
    # be wrong for graphs whose every cut is below -1000.
    max_cut = float('-inf')
    max_cut_in = max_cut_out = max_bits = None
    for bits in helper.bitprod(n):
        # Collect in/out sets.
        iset = []
        oset = []
        for idx, val in enumerate(bits):
            iset.append(idx) if val == 0 else oset.append(idx)
        # Compute the cost of this cut; an edge counts when its endpoints
        # lie on opposite sides.
        cut = 0
        for node in nodes:
            if node[0] in iset and node[1] in oset:
                cut += node[2]
            if node[1] in iset and node[0] in oset:
                cut += node[2]
        if cut > max_cut:
            max_cut_in, max_cut_out = iset.copy(), oset.copy()
            max_cut = cut
            max_bits = bits
    state = bin(helper.bits2val(max_bits))[2:].zfill(n)
    print('Max Cut. N: {}, Max: {:.1f}, {}-{}, |{}>'
          .format(n, np.real(max_cut), max_cut_in, max_cut_out,
                  state))
    return helper.bits2val(max_bits)
def get_capacity_potential_from_enspreso(tech: str) -> pd.Series:
    """
    Return capacity potential (in GW) per NUTS2 region for a given technology, based on the ENSPRESO dataset.

    Parameters
    ----------
    tech : str
        Technology name among 'wind_onshore', 'wind_offshore', 'wind_floating', 'pv_utility' and 'pv_residential'

    Returns
    -------
    nuts2_capacity_potentials: pd.Series
        Series storing technical potential per NUTS2 region.
    """
    accepted_techs = ['wind_onshore', 'wind_offshore', 'wind_floating', 'pv_utility', 'pv_residential']
    assert tech in accepted_techs, f"Error: tech {tech} is not in {accepted_techs}"
    path_potential_data = f"{data_path}generation/vres/potentials/source/ENSPRESO"
    # For wind, summing over all wind conditions is similar to considering taking all available land and a capacity per
    # area of 5MW/km2
    if tech == 'wind_onshore':
        cap_potential_file = pd.read_excel(join(path_potential_data, 'ENSPRESO_WIND_ONSHORE_OFFSHORE.XLSX'),
                                           sheet_name='Raw data', index_col=1, skiprows=5)
        # High-restriction scenario with a 2000m setback distance.
        onshore_wind = cap_potential_file[
            (cap_potential_file['ONOFF'] == 'Onshore') &
            (cap_potential_file['Scenario'] == 'EU-Wide high restrictions') &
            (cap_potential_file['Subscenario - not cumulative'] == '2000m setback distance')]
        nuts2_capacity_potentials_ds = onshore_wind['GW_Morethan25%_2030_100m_ALLTIMESLICESAVERAGE_V112'].copy()
    elif tech == 'wind_offshore':
        # Fixed-bottom offshore: shallow waters (<60m), inside and outside the 12nm zone.
        offshore_categories = ['12nm zone, water depth 0-30m', '12nm zone, water depth 30-60m',
                               'Water depth 0-30m', 'Water depth 30-60m']
        cap_potential_file = pd.read_excel(join(path_potential_data, 'ENSPRESO_WIND_ONSHORE_OFFSHORE.XLSX'),
                                           sheet_name='Wind Potential EU28 Full', index_col=1)
        offshore_wind = cap_potential_file[
            (cap_potential_file['Unit'] == 'GWe') &
            (cap_potential_file['Onshore Offshore'] == 'Offshore') &
            (cap_potential_file['Scenario'] == 'EU-Wide low restrictions') &
            (cap_potential_file['Wind condition'] == 'CF > 25%') &
            (cap_potential_file['Offshore categories'].isin(offshore_categories))]
        # Several categories can map to the same NUTS2 index -> sum them.
        nuts2_capacity_potentials_ds = offshore_wind.groupby(offshore_wind.index)['Value'].sum()
    elif tech == 'wind_floating':
        # Floating offshore: deep waters (60-1000m).
        floating_categories = ['12nm zone, water depth 60-100m Floating',
                               'Water depth 60-100m Floating', 'Water depth 100-1000m Floating']
        cap_potential_file = pd.read_excel(join(path_potential_data, 'ENSPRESO_WIND_ONSHORE_OFFSHORE.XLSX'),
                                           sheet_name='Wind Potential EU28 Full', index_col=1)
        offshore_wind = cap_potential_file[
            (cap_potential_file['Unit'] == 'GWe') &
            (cap_potential_file['Onshore Offshore'] == 'Offshore') &
            (cap_potential_file['Scenario'] == 'EU-Wide low restrictions') &
            (cap_potential_file['Wind condition'] == 'CF > 25%') &
            (cap_potential_file['Offshore categories'].isin(floating_categories))]
        nuts2_capacity_potentials_ds = offshore_wind.groupby(offshore_wind.index)['Value'].sum()
    elif tech == 'pv_utility':
        # TODO: maybe parametrize this, if we decide to stick with it
        land_use_high_irradiance_potential = 0.05
        land_use_low_irradiance_potential = 0.00
        cap_potential_file = pd.read_excel(join(path_potential_data, 'ENSPRESO_SOLAR_PV_CSP_85W.XLSX'),
                                           sheet_name='Raw Data Available Areas', index_col=0,
                                           skiprows=[0, 1, 2, 3], usecols=[1, 43, 44, 45, 46],
                                           names=["NUTS2", "Agricultural HI", "Agricultural LI",
                                                  "Non-Agricultural HI", "Non-Agricultural LI"])
        # Weight the available areas by the assumed usable-land fractions.
        capacity_potential_high = cap_potential_file[["Agricultural HI", "Non-Agricultural HI"]].sum(axis=1)
        capacity_potential_low = cap_potential_file[["Agricultural LI", "Non-Agricultural LI"]].sum(axis=1)
        nuts2_capacity_potentials_ds = capacity_potential_high * land_use_high_irradiance_potential + \
            capacity_potential_low * land_use_low_irradiance_potential
    else:  # 'pv_residential'
        cap_potential_file = pd.read_excel(join(path_potential_data, 'ENSPRESO_SOLAR_PV_CSP.XLSX'),
                                           sheet_name='NUTS2 170 W per m2 and 3%', skiprows=2, index_col=2)
        nuts2_capacity_potentials_ds = cap_potential_file['PV - roof/facades']
    updated_potential_per_tech = update_enspreso_capacity_potential(nuts2_capacity_potentials_ds, tech).round(6)
    return updated_potential_per_tech
def use_nearby_search(url, next_page=False, request_count=0):
    """Call nearby search API request.

    Parameters
    ----------
    url: str
        URL to use to send a Nearby Search Request in Google Maps Place Search API
    next_page: boolean, optional(default=False)
        whether or not the URL is to request next page using next_page_token
    request_count: int, optional(default=0)
        the count of the previously-sent same requests; used only when next_page=True

    Returns
    -------
    data: dict
        returned API response
        check https://developers.google.com/places/web-service/search#find-place-responses for its structure
    status: str
        status of the API response
        check https://developers.google.com/places/web-service/search#PlaceSearchStatusCodes for details
    """
    while True:
        if next_page:
            # A next_page_token needs a short delay before it becomes valid.
            time.sleep(3)
        try:
            # get API response
            print("API request made.")
            response = urllib.request.urlopen(url)
        except IOError:
            # Transient network failure: back off briefly before retrying.
            # (The original retried immediately in a tight busy loop.)
            time.sleep(1)
        else:  # if no IOError occurs
            data = json.loads(response.read().decode('utf-8'))
            status = data['status']
            if status == "OK":
                break
            elif status == "INVALID_REQUEST" and next_page:  # next_page_token not valid yet
                if request_count >= 3:
                    print(f"Failed to receive a valid API response for 3 times for {url}.")
                    break  # stop requesting after 3 trials
                else:
                    print("...Key is not valid yet.")
                    request_count += 1
                    # Append a dummy query param so the retried URL differs.
                    data, status = use_nearby_search(url + "&request_count=" + str(request_count), next_page,
                                                     request_count)
                    break
            else:
                break
    return data, status
def _GetGoogleAuthtoken(account_type, user, password, service, source):
    """This function authenticates the user in the specified service using
    the provided authentication data.

    NOTE(review): Python 2 only — relies on ``urllib.urlencode`` and
    ``urllib2``; the ClientLogin endpoint it targets has long been
    deprecated by Google.

    Args:
      account_type: Type of the account to login, could be GOOGLE or any other
          string if the account is external.
      user: Name of the user to be logged in.
      password: Password of the user to be logged in.
      service: Service where the user wants to log in, for example, 'ah'.
      source: Name of the application requesting the user authentication.

    Returns:
      The authentication token for the user if the supplied data is correct.

    Raises:
      lib.AuthenticationError: This exception is raised if the HTTP response is
          403 - Forbidden, in this case the error is parsed and returned to the
          user in the exception.
      urllib2.HTTPError: This exception is raised for any other HTTP error.
    """
    # Create a request for Google's Client login, with the specified data.
    auth_request_data_map = {
        'accountType': account_type,
        'Email': user,
        'Passwd': password,
        'service': service,
        'source': source
    }
    auth_request_data = urllib.urlencode(auth_request_data_map)
    auth_url = 'https://www.google.com/accounts/ClientLogin'
    auth_request = urllib2.Request(auth_url, auth_request_data)
    try:
        # Create a custom opener, make the request and extract the body.
        http_opener = _GetHTTPOpener()
        auth_response = http_opener.open(auth_request)
        auth_response_body = auth_response.read()
        # Parse the response data as a dictionary and return the 'Auth' key.
        auth_response_data = _ParseBodyAsDict(auth_response_body)
        return auth_response_data['Auth']
    except urllib2.HTTPError as e:
        # Check if the error was a 403 - Forbidden. In that case, forward the
        # exception as an authentication error. Otherwise, just forward the
        # exception.
        if e.code == 403:
            # Parse the error body as a dictionary and forward the exception as an
            # authentication error.
            response_dict = _ParseBodyAsDict(e.read())
            raise AuthenticationError(auth_request.get_full_url(), e.code, e.msg,
                                      e.headers, response_dict)
        else:
            raise
def trim_filters(response):
    """Trim the leading and trailing zeros from a 1-D array or sequence, leaving
    one zero on each side. This is a modified version of numpy.trim_zeros.

    Parameters
    ----------
    response : 1-D array or sequence
        Input array.

    Returns
    -------
    first : int
        Index of the last leading zero.
    last : int
        Index of the first trailing zero.
    """
    # Count leading zeros. If the very first element is already non-zero
    # (first == 0 at break time), bump the index once so the unconditional
    # `first -= 1` below cannot go negative.
    first = 0
    for i in response:
        if i != 0.:
            if first == 0:
                first += 1  # to avoid filters with non-zero edges
            break
        else:
            first = first + 1
    # Mirror scan from the end for trailing zeros.
    last = len(response)
    for i in response[::-1]:
        if i != 0.:
            if last == len(response):
                last -= 1  # to avoid filters with non-zero edges
            break
        else:
            last = last - 1
    # Step one zero back on each side so exactly one zero is retained.
    first -= 1
    last += 1
    return first, last
def main():
    """
    Does the database exist? Create if not, then
    continue the script by calling check_booking()
    """
    # Create the SQLite database file on first run only.
    if os.path.isfile(database):
        pass
    else:
        create_db()
    conn = connect_db(database)
    logger.info('Beginning run...')
    # Connection-as-context-manager: commits on success, rolls back on error.
    with conn:
        is_bookable(conn)
        check_booking(conn)
    # NOTE(review): `driver` looks like a module-level webdriver instance —
    # confirm teardown is safe if the driver failed to start.
    teardown(driver)
    conn.close()
def print_metrics(met_dict):
    """Pretty-print standard evaluation metrics from *met_dict*.

    Expects the keys: Loss, Accuracy, Precision, Recall, F1_binary,
    F1_macro. Accuracy is shown as a percentage.
    """
    simple = "{}: {:.3f}".format
    print(simple("Loss", met_dict['Loss']))
    print("Accuracy: {:.3f} %".format(met_dict['Accuracy'] * 100))
    print(simple("Precision", met_dict['Precision']))
    print(simple("Recall", met_dict['Recall']))
    print("F1 binary: {:.3f}".format(met_dict['F1_binary']))
    print("F1 macro: {:.3f}".format(met_dict['F1_macro']))
def is_spanning(graph, subgraph):
    """
    Determine whether *subgraph* spans *graph*.

    A subgraph is spanning when it uses every vertex of the original
    graph, i.e. the vertex sets returned by ``V`` are equal.

    Parameters
    ----------
    graph = A networkx graph.
    subgraph = A networkx subgraph of 'graph'.

    Returns
    -------
    True if the subgraph is spanning, False otherwise.
    """
    graph_vertices = V(graph)
    subgraph_vertices = V(subgraph)
    return graph_vertices == subgraph_vertices
def get_commit_msg() -> str:
    """
    Return the last commit message (subject line only).
    """
    proc = subprocess.run(
        ['git', 'show', '-s', '--format=%s'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True)
    # Any stderr output means git could not show a commit (e.g. empty repo).
    if proc.stderr:
        return '\n'
    return proc.stdout
def get_class_membership(alpaca, class_id, membership):
    """Given a class ID, return a set of IDs of all classes of which that
    class is a member.

    NOTE(review): the result is accumulated into *membership* in place via
    ``get_membership``; this function itself returns None — confirm the
    "return" wording above against callers.
    """
    class_ast = find_class_defn(alpaca, class_id)
    get_membership(alpaca, class_ast.classes, membership)
def relative_paths(root: Path, paths: list) -> List[str]:
    """
    Normalises paths from incoming configuration and ensures
    they are all strings relative to root.

    A leading "!" on a string marks an exclusion and is preserved on the
    output. Path objects are resolved relative to *root*; any other type
    raises NotImplementedError.
    """
    result = []
    for path in paths:
        # more hacks for exclusions I'm not happy about
        # maybe we should subclass Path to make this cleaner?
        exclusion = False
        if isinstance(path, Path):
            # BUGFIX: the original called path.startswith("!") before the
            # type check, which raises AttributeError for Path objects.
            inp = str(path.relative_to(root))
        elif isinstance(path, str):
            exclusion = path.startswith("!")
            if exclusion:
                path = path[1:]
            inp = path
            # make sure paths are relative!
            if os.path.isabs(path):
                inp = os.path.relpath(path, root)
        else:
            raise NotImplementedError()
        if exclusion:
            inp = "!" + inp
        result.append(inp)
    return result
def domain_loss_roi(pred, domain_label):
    """
    ROI-level domain adversarial loss.

    Builds a constant target tensor filled with *domain_label* on the same
    CUDA device as *pred* and scores *pred* against it with BCE-with-logits.
    A NaN loss is replaced by zero so one bad batch cannot poison training.
    """
    if DEBUG:
        print('\tDA-ROI loss')
    device_id = pred.get_device()
    target_label = Variable(
        torch.FloatTensor(pred.data.size()).fill_(float(domain_label))
    ).cuda(device_id)
    loss_da_roi = F.binary_cross_entropy_with_logits(pred, target_label)
    if net_utils.is_nan_loss(loss_da_roi):
        # BUGFIX: replace, don't scale — NaN * 0 is still NaN, so the
        # original `loss_da_roi *= 0` left the loss as NaN. A fresh zero
        # tensor contributes nothing to the backward pass.
        loss_da_roi = torch.zeros_like(loss_da_roi)
    return loss_da_roi
def qm9():
    """Configuration for the QM9 dataset.

    NOTE(review): this looks like a sacred/ingredient-style config function —
    the local assignments are captured by the framework and the function
    intentionally returns None. Confirm it is registered via a decorator at
    the definition site.
    """
    dbpath = './data/qm9.db'  # local database file holding the QM9 structures
    dataset = 'QM9'
    # Map generic property names onto the QM9 column identifiers.
    property_mapping = {Properties.energy: QM9.U0,
                        Properties.dipole_moment: QM9.mu,
                        Properties.iso_polarizability: QM9.alpha}
def clean_text(text):
    """
    A function to pre-process text: strip HTML, remove @mentions and URLs,
    keep letters only, lowercase, and re-tokenize.

    Parameters
    ----------
    text : string
        the string to be processed

    Returns
    -------
    text : string
        a clean string of space-separated lowercase words
    """
    tok = WordPunctTokenizer()
    pat1 = r'@[A-Za-z0-9]+'          # @mentions
    pat2 = r'https?://[A-Za-z0-9./]+'  # URLs
    combined_pat = r'|'.join((pat1, pat2))
    soup = BeautifulSoup(text, 'lxml')
    souped = soup.get_text()
    stripped = re.sub(combined_pat, '', souped)
    try:
        # Python 2 leftover: only meaningful for bytes input. On str this
        # raises AttributeError and the text is kept as-is.
        clean = stripped.decode("utf-8-sig").replace(u"\ufffd", "?")
    except (AttributeError, UnicodeDecodeError):
        # Narrowed from a bare `except:` which would also hide real bugs.
        clean = stripped
    letters_only = re.sub("[^a-zA-Z]", " ", clean)
    lower_case = letters_only.lower()
    words = tok.tokenize(lower_case)
    return (" ".join(words)).strip()
def train_one_epoch(dataloader, model, optimizer, device, writer, epoch, cfg):
    """ Trains the model for one epoch.

    Runs BCE-with-logits segmentation training with gradient accumulation
    (cfg.optim.batch_accumulation), periodic tensorboard scalar logging
    (cfg.optim.log_every) and optional image dumps (cfg.optim.debug /
    cfg.optim.debug_freq).

    Returns
    -------
    dict mapping metric name -> {'value': epoch mean, 'threshold': None}.
    """
    model.train()
    optimizer.zero_grad()
    metrics = []
    n_batches = len(dataloader)
    progress = tqdm(dataloader, desc='TRAIN', leave=False)
    for i, sample in enumerate(progress):
        input_and_target = sample[0]
        input_and_target = input_and_target.to(device)
        # split channels to get input, target, and loss weights
        # (the last two channels are the target mask and per-pixel weights)
        n_channels = input_and_target.shape[1]
        images, targets, weights = input_and_target.split((n_channels - 2, 1, 1), dim=1)
        logits = model(images)
        loss = F.binary_cross_entropy_with_logits(logits, targets, weights)
        predictions = torch.sigmoid(logits)
        soft_dice, soft_jaccard = dice_jaccard(targets, predictions)
        # Gradients accumulate; optimizer.step() fires every
        # batch_accumulation iterations below.
        loss.backward()
        batch_metrics = {
            'loss': loss.item(),
            'soft_dice': soft_dice.item(),
            'soft_jaccard': soft_jaccard.item()
        }
        metrics.append(batch_metrics)
        postfix = {metric: f'{value:.3f}' for metric, value in batch_metrics.items()}
        progress.set_postfix(postfix)
        if (i + 1) % cfg.optim.batch_accumulation == 0:
            optimizer.step()
            optimizer.zero_grad()
        if (i + 1) % cfg.optim.log_every == 0:
            batch_metrics.update({'lr': optimizer.param_groups[0]['lr']})
            n_iter = epoch * n_batches + i  # global step for tensorboard
            for metric, value in batch_metrics.items():
                writer.add_scalar(f'train/{metric}', value, n_iter)
            if cfg.optim.debug and (i + 1) % cfg.optim.debug_freq == 0:
                writer.add_images('train/inputs', images, n_iter)
                writer.add_images('train/targets', targets, n_iter)
                writer.add_images('train/predictions', predictions, n_iter)
    # Average the per-batch metrics over the epoch.
    metrics = pd.DataFrame(metrics).mean(axis=0).to_dict()
    metrics = {k: {'value': v, 'threshold': None} for k, v in metrics.items()}
    return metrics
def test_alias_function():
    """Test 4: Generate markup based on an element using an (alias w/function) parameter to explicitly correlate data and elements"""
    template = get_template('contacts-alias')

    def alias_name(p, e, k, v):
        # The alias callback should only ever be invoked for the 'name' key.
        eq_(k, 'name')
        return 'foo'

    # 'name' is aliased through the callback above, 'title' by plain string.
    weld(template('.contact')[0], data, dict(alias=dict(name=alias_name,
                                                        title='title')))
    check_contacts(template)
def baseurl(request):
    """
    Return a BASE_URL template context for the current request.

    The scheme mirrors whether the incoming request was made over TLS.
    """
    scheme = 'https://' if request.is_secure() else 'http://'
    return {'BASE_URL': scheme + request.get_host(), }
def geolocate(address, bounds=None, country=None, administrative_area=None, sensor=False):
    """
    Resolves address using Google Maps API, and performs some massaging to the output result.
    Provided for convenience, as Uber relies on this heavily, and the desire to give a simple 'batteries included' experience.
    See https://developers.google.com/maps/documentation/geocoding/ for more details

    *bounds* is an iterable of objects exposing .latitude/.longitude;
    *country* and *administrative_area* become component filters. Returns
    the list of result dicts, each augmented with flat 'latitude' and
    'longitude' keys copied from geometry.location.

    Raises GeolocationExcetion (sic — the exception class name is misspelled
    at its definition site) on HTTP failure or an unexpected API status.
    """
    params = {
        'address': address,
        'sensor': str(sensor).lower()
    }
    components = []
    if country:
        components.append('country:' + country)
    if administrative_area:
        components.append('administrative_area:' + administrative_area)
    if bounds:
        params['bounds'] = '|'.join(['{},{}'.format(x.latitude, x.longitude) for x in bounds])
    if components:
        params['components'] = '|'.join(components)
    # NOTE(review): plain HTTP endpoint; Google's geocoding API expects
    # HTTPS (and an API key) nowadays — confirm before relying on this.
    response = requests.get('http://maps.googleapis.com/maps/api/geocode/json', params=params)
    if not response.ok:
        raise GeolocationExcetion(response.text)
    data = response.json()
    if data['status'] not in ['OK', 'ZERO_RESULTS']:
        raise GeolocationExcetion(data)
    all_results = data.get('results', [])
    # Flatten the nested geometry.location into top-level lat/lng keys.
    for result in all_results:
        coords = result.get('geometry', {}).get('location')
        if coords:
            result['latitude'] = coords['lat']
            result['longitude'] = coords['lng']
    return all_results
def _cmdy_hook_class(cls):
    """Put hooks into the original class for extending.

    Installs on *cls*: a per-class registry of plugin-overridden functions
    (``_plugin_stacks``), an ``_original`` accessor that walks that stack,
    and a wrapped ``__init__`` (plus ``reset`` for CmdyHolding) that
    re-initialises the per-instance call-frame bookkeeping.
    """
    # Store the functions with the same name that are defined by different
    # plugins. Note that current (most recently added) is not in the stack.
    cls._plugin_stacks = {}

    def _original(self, fname):
        """Get the original function of self, if it is overridden."""
        # The callframe starts at -1; each lookup walks one level deeper
        # into the stack of overridden implementations.
        frame = self._plugin_callframe.setdefault(fname, -1)
        frame += 1
        self._plugin_callframe[fname] = frame
        return cls._plugin_stacks[fname][frame]
    cls._original = _original

    orig_init = cls.__init__

    def __init__(self, *args, **kwargs):
        # Fresh per-instance callframe bookkeeping before the real init runs.
        self._plugin_callframe = {}
        orig_init(self, *args, **kwargs)
    cls.__init__ = __init__

    if cls.__name__ == "CmdyHolding":
        orig_reset = cls.reset

        @wraps(orig_reset)
        def reset(self, *args, **kwargs):
            # Clear the callframes as well, then delegate to the original.
            self._plugin_callframe = {}
            orig_reset(self, *args, **kwargs)
            return self
        cls.reset = reset
    # This is not a decorator, so we intentionally do not return cls.
def pair_verify(
    credentials: HapCredentials, connection: HttpConnection
) -> PairVerifyProcedure:
    """Return procedure object used for Pair-Verify.

    Dispatches on the credential type: Null -> no-op procedure,
    Legacy -> legacy SRP, HAP -> persistent HAP pairing, anything
    else -> transient HAP pairing.
    """
    _LOGGER.debug(
        "Setting up new AirPlay Pair-Verify procedure with type %s", credentials.type
    )
    auth_type = credentials.type
    if auth_type == AuthenticationType.Null:
        return NullPairVerifyProcedure()
    if auth_type == AuthenticationType.Legacy:
        legacy_srp = LegacySRPAuthHandler(credentials)
        legacy_srp.initialize()
        return AirPlayLegacyPairVerifyProcedure(connection, legacy_srp)
    srp = SRPAuthHandler()
    srp.initialize()
    if auth_type == AuthenticationType.HAP:
        return AirPlayHapPairVerifyProcedure(connection, srp, credentials)
    return AirPlayHapTransientPairVerifyProcedure(connection, srp)
def get_file_type_and_ext(filename):
    """
    Return a (file type, extension) pair for files that can be previewed
    online; the type is 'Unknown' for unrecognised extensions.
    """
    ext = os.path.splitext(filename)[1][1:].lower()
    if ext in get_conf_text_ext():
        return (TEXT, ext)
    mapped_type = FILEEXT_TYPE_MAP.get(ext)
    return (mapped_type, ext) if mapped_type else ('Unknown', ext)
def load_data(
    datapath=None,
    minstorms=3,
    minbmps=3,
    combine_nox=True,
    combine_WB_RP=True,
    remove_grabs=True,
    grab_ok_bmps="default",
    balanced_only=True,
    fix_PFCs=True,
    excluded_bmps=None,
    excluded_params=None,
    as_dataframe=False,
    **dc_kwargs
):
    """Prepare data for categorical summaries.

    Parameters
    ----------
    datapath : Path-like, optional
        Path to the raw data CSV. If not provided, the latest data will be
        downloaded.
    minstorms : int (default = 3)
        Minimum number of storms (monitoring events) for a BMP study to be included
    minbmps : int (default = 3)
        Minimum number of BMP studies for a parameter to be included
    combine_nox : bool (default = True)
        Toggles combining NO3 and NO2+NO3 into as new parameter NOx, giving
        preference to NO2+NO3 when both parameters are observed for an event.
        The underlying assumption is that NO2 concentrations are typically much
        smaller than NO3, thus NO2+NO3 ~ NO3.
    combine_WB_RP : bool (default = True)
        Toggles combining Retention Pond and Wetland Basin data into a new
        BMP category: Retention Pond/Wetland Basin.
    remove_grabs : bool (default = True)
        Toggles removing grab samples from the dataset except for:
          - biological parameters
          - BMPs categories that are whitelisted via *grab_ok_bmps*
    grab_ok_bmps : sequence of str, optional
        BMP categories for which grab data should be included. By default, this
        includes Retention Ponds, Wetland Basins, and the combined
        Retention Pond/Wetland Basin category created when *combine_WB_RP* is
        True.
    balanced_only : bool (default = True)
        Toggles removing BMP studies which have only influent or effluent data,
        exclusively.
    fix_PFCs : bool (default = True)
        Makes correction to the category of Permeable Friction Course BMPs
    excluded_bmps, excluded_params : sequence of str, optional
        List of BMPs studies and parameters to exclude from the data.
    as_dataframe : bool (default = False)
        When False, a wqio.DataCollection is returned

    Additional Parameters
    ---------------------
    Any additional keyword arguments will be passed to wqio.DataCollection.

    Returns
    -------
    bmp : pandas.DataFrame or wqio.DataCollection
    """
    # Pop DataCollection-specific options out of dc_kwargs, leaving the rest
    # to be forwarded verbatim below.
    othergroups = dc_kwargs.pop("othergroups", ["category", "units"])
    pairgroups = dc_kwargs.pop("pairgroups", ["category", "units", "bmp_id", "site_id", "storm"])
    rescol = dc_kwargs.pop("rescol", "res")
    qualcol = dc_kwargs.pop("qualcol", "qual")
    ndval = dc_kwargs.pop("ndval", ["ND", "<"])
    stationcol = dc_kwargs.pop("stationcol", "station")
    paramcol = dc_kwargs.pop("paramcol", "parameter")
    # Load -> clean -> filter/summarise pipeline.
    bmp = (
        _load_raw_data(datapath)
        .pipe(_clean_raw_data)
        .pipe(
            _prepare_for_summary,
            minstorms=minstorms,
            minbmps=minbmps,
            combine_nox=combine_nox,
            combine_WB_RP=combine_WB_RP,
            remove_grabs=remove_grabs,
            grab_ok_bmps=grab_ok_bmps,
            balanced_only=balanced_only,
            fix_PFCs=fix_PFCs,
            excluded_bmps=excluded_bmps,
            excluded_params=excluded_params,
        )
    )
    if as_dataframe:
        return bmp
    return wqio.DataCollection(
        bmp,
        rescol=rescol,
        qualcol=qualcol,
        ndval=ndval,
        stationcol=stationcol,
        paramcol=paramcol,
        othergroups=othergroups,
        pairgroups=pairgroups,
        **dc_kwargs
    )
def sign(request):
    """
    Returns a signed URL (for file upload) and an OTP.

    Reads an optional JSON body with 'content-md5', 'content-sha256',
    'user-id' and 'headers' entries, then produces a V4 signed PUT URL for
    a freshly generated object name plus a one-time password bound to the
    file name, user id and content hash.
    """
    credentials, project_id = auth.default()
    if credentials.token is None:
        # Perform a refresh request to populate the access token of the
        # current credentials.
        credentials.refresh(requests.Request())
    # Connecting to the bucket
    client = Client()
    bucket = client.get_bucket(BUCKET)
    file_name = generate_filename()
    object = bucket.blob(file_name)
    # Mandatory header: limits the upload file size server-side.
    headers = {
        "X-Goog-Content-Length-Range": f"{MIN_SIZE},{MAX_SIZE}"
    }
    # Mandatory fields for the V4 signing request.
    sign_request = {
        "version": "v4",
        "expiration": timedelta(seconds=EXPIRE_AFTER_SECONDS),
        "service_account_email": credentials.service_account_email,
        "access_token": credentials.token,
        "method": "PUT",
        "virtual_hosted_style": True
    }
    request_json = request.get_json()
    # Content MD5 is a standard integrity check in GCS.
    content_md5 = ''
    # If the use-case requires stronger checks a stronger hashing algorithm
    # such as SHA-256 should be used, but the check has to be done after the
    # object has landed in the bucket as Google Cloud Storage does not support
    # SHA256 as an integrity checking mechanism as of Jan 2022.
    if request_json and 'content-md5' in request_json:
        content_md5 = request_json['content-md5']
        sign_request['content_md5'] = content_md5
    content_sha256 = ""
    # GCS will not validate the SHA256 hash on PUT; ideally this is stored
    # elsewhere (e.g. a database) so the object's content can be verified
    # after it lands in the bucket. This PoC skips that step.
    if request_json and 'content-sha256' in request_json:
        content_sha256 = request_json['content-sha256']
        headers['x-content-sha256'] = content_sha256
    uid = 0
    if request_json and 'user-id' in request_json:
        uid = int(request_json['user-id'])
    # Adding custom headers in the request.
    # BUGFIX: guard against a missing JSON body and use .items() —
    # dict.iteritems() does not exist in Python 3, so the original loop
    # always raised inside a bare except and silently dropped the headers.
    if request_json and "headers" in request_json:
        try:
            for key, val in request_json['headers'].items():
                headers[key] = str(val)
        except (AttributeError, TypeError):
            # 'headers' was not a mapping; ignore it (best-effort, PoC).
            pass
    # Generate the OTP, preferring the SHA256 hash when present.
    OTP = generate_otp(
        file_name,
        user_id=uid,
        # (sic) the keyword spelling matches generate_otp's signature
        contet_hash=content_sha256 if len(content_sha256) > 0 else content_md5
    )
    headers['x-otp'] = OTP
    # Attach all headers to the signing request.
    sign_request['headers'] = headers
    return json.dumps({
        'url': object.generate_signed_url(**sign_request),
        'otp': OTP,
    })
def _parse_squeue_state(squeue_out, job_id):
    """Parse "state" column from squeue output for given job_id.

    Returns state for the *first* job matching job_id. Returns 'u' if
    `squeue` output is empty or job_id is not found.
    """
    if "Invalid job id specified" in squeue_out:
        return "u"
    for line in squeue_out.split('\n'):
        # Skip the header row and blank lines; the first remaining line is
        # the job record we report on.
        if "JOBID" in line or not line.strip():
            continue
        fields = line.split()
        returned_id = fields[0]
        state = fields[4]
        logger.debug("Squeue for job %i returned ID: %s, State: %s" % (job_id, returned_id, state))
        return state
    return "u"
def test_parse_url_opaque_path_state(caplog):
    """Validation error in opaque path state."""
    caplog.set_level(logging.INFO)
    # A backslash in an opaque path plus a malformed percent-escape ("%GH"):
    # parsing should succeed leniently but log a validation message.
    urlstring = "sc:\\../%GH"
    base = "about:blank"
    _ = parse_url(urlstring, base)
    # The last record must be an INFO from this module flagging the bad
    # percent-encoding at offset 7 of the input.
    assert len(caplog.record_tuples) > 0
    assert caplog.record_tuples[-1][0].startswith(_MODULE_NAME)
    assert caplog.record_tuples[-1][1] == logging.INFO
    assert caplog.record_tuples[-1][2].startswith(
        "Found incorrect percent-encoding in"
    )
    assert caplog.record_tuples[-1][2].endswith("at position 7")
def parser_IBP_Descriptor(data,i,length,end):
    """\
    parser_IBP_Descriptor(data,i,length,end) -> dict(parsed descriptor elements).

    This descriptor is not parsed at the moment. The dict returned is:
    { "type": "IBP", "contents" : unparsed_descriptor_contents }

    (Defined in ISO 13818-1 specification)
    """
    # Skip the 2-byte descriptor tag/length header and keep the raw payload.
    payload = data[i + 2:end]
    return {"type": "IBP", "contents": payload}
def test_next_turn():
    """Test a boards `next_turn` function.

    Check if the function's behavior is correct.
    To do so initialize an instance of the Board class
    and assert the function's output with different setups:
    the active player must alternate white -> black -> white.
    """
    board = Board()
    # A fresh board starts with white to move.
    assert_obj_attr(board, "player", "white")
    assert_obj_func(board, "next_turn", None, None)
    assert_obj_attr(board, "player", "black")
    assert_obj_func(board, "next_turn", None, None)
    assert_obj_attr(board, "player", "white")
def build_county_list(state):
    """
    Build and return the list of county FIPS codes for *state*,
    restricted to whitelisted counties flagged OK for inference.
    """
    state_obj = us.states.lookup(state)
    logging.info(f"Get fips list for state {state_obj.name}")
    df_whitelist = load_data.load_whitelist()
    # Keep only counties flagged as suitable for inference.
    df_whitelist = df_whitelist[df_whitelist["inference_ok"] == True]
    all_fips = df_whitelist[
        df_whitelist["state"].str.lower() == state_obj.name.lower()
    ].fips.tolist()
    return all_fips
def vw_train(data_file,
             l2=None,
             l1=None,
             keep=None,
             ignore=None,
             quadratic=None,
             passes=None,
             model_file=None,
             learn_rate=0.1,
             holdout=False,
             other=None):
    """
    Programmatically calls VW (Vowpal Wabbit) for training.
    Optionally writes out the model file containing the learned weights.

    Params:
        data_file  - the training data in VW's input format
        l2         - (optional) the L2 regularization parameter
        l1         - (optional) the L1 regularization parameter
                     NOTE: In VW, this is per row, so it should be small ~ 1e-7
        keep       - (optional) string with the first letters of namespaces to use,
                     others are ignored. Default is use all namespaces.
        ignore     - (optional) string with the first letters of the namespaces to
                     ignore. VW uses all of the others. Default is ignore none.
                     NOTE: At most one of keep and ignore can be specified.
        quadratic  - string with first letters of all namespaces that should be
                     crossed to make quadratic terms. Uses all pairs of these.
        passes     - (optional) the number of passes to use. Default is 1.
        model_file - (optional) a file to write the final learned model to.
        learn_rate - learning rate, default 0.1 (matches the signature; the
                     original docstring incorrectly said 0.5)
        holdout    - default False: pass --holdout_off. True uses VW defaults.
        other      - a list of strings to pass to VW as extra CLI arguments

    Returns:
        nothing, but writes out the final regressor at <model_file>

    Raises:
        ValueError if both keep and ignore are given.
    """
    # Fail fast on invalid argument combinations before building anything.
    if keep is not None and ignore is not None:
        raise ValueError('At most one of --keep and --ignore can be specified.')
    cmdline = ['vw',
               '-d', data_file,
               '-b', '26',
               '-l', str(learn_rate),
               '--loss_function', 'logistic',
               '--progress', '1000000']
    if l2 is not None:
        cmdline.extend(['--l2', str(l2)])
    if l1 is not None:
        cmdline.extend(['--l1', str(l1)])
    if passes is not None:
        # Multiple passes require VW's example cache (-c).
        cmdline.extend(['--passes', str(passes), '-c'])
    if model_file is not None:
        cmdline.extend(['-f', model_file])
    # One --keep/--ignore flag per namespace letter.
    if keep is not None:
        for namespace in keep:
            cmdline.extend(['--keep', namespace])
    if ignore is not None:
        for namespace in ignore:
            cmdline.extend(['--ignore', namespace])
    if quadratic is not None:
        # All unordered pairs of the given namespace letters.
        for (a, b) in itertools.combinations(quadratic, 2):
            cmdline.extend(['-q', a + b])
    if not holdout:
        cmdline.append('--holdout_off')
    if other is not None:
        cmdline.extend(other)
    subprocess.call(cmdline)
def num_materials_per_bin(config_path, database_path, generation=None, output_path="-"):
    """Output per-bin material counts (CSV) for the cover visualization script in blender.

    Parameters
    ----------
    config_path : path to the run configuration file.
    database_path : NOTE(review) — currently unused; the DB connection string
        comes from the config file. Confirm whether it should take precedence.
    generation : generation to load; defaults to 500 when not given.
        (BUGFIX: the original unconditionally overwrote this parameter
        with 500, ignoring the caller's value.)
    output_path : destination file, "-" for stdout (click semantics).
    """
    config = load_config_file(config_path)
    prop1range = config['prop1range']
    prop2range = config['prop2range']
    num_bins = config['number_of_convergence_bins']
    VoidFraction.set_column_for_void_fraction(config['void_fraction_subtype'])
    engine, session = db.init_database(config["database_connection_string"])
    if generation is None:
        generation = 500
    _, _, bin_counts, _, _, _ = load_restart_db(generation, num_bins, prop1range, prop2range, session)
    with click.open_file(output_path, 'w') as f:
        np.savetxt(f, bin_counts, "%d", delimiter=",")
def is_comment(obj):
    """Return True when *obj* is a BeautifulSoup Comment node."""
    import bs4
    comment_type = bs4.Comment
    return isinstance(obj, comment_type)
def insert_user(username: str) -> Tuple[int, str]:
    """
    Inserts a new user. If the desired username is already taken,
    appends integers incrementally until an open name is found.
    :param username: The desired username for the new user.
    :return: A tuple containing the id and name of the new user.
    """
    db = get_db()
    count = 0
    while True:
        # First attempt uses the name verbatim; later attempts append a counter.
        temp_name = username if count == 0 else username + str(count)
        try:
            cur = db.execute('INSERT INTO user (username) VALUES (?)', [temp_name])
        except sqlite3.IntegrityError:
            # Name collision: try the next suffix.  (Previously the
            # commit/close below also ran on this path and raised NameError
            # because `cur` was never bound when the first attempt failed.)
            count += 1
            continue
        new_user_id = cur.lastrowid
        # Commit and release the cursor only once the insert succeeded.
        db.commit()
        cur.close()
        return new_user_id, temp_name
def test_tanium_simulate_bad_input_file(tmp_path):
    """Simulate must report failure (and write nothing) for a bad input dir."""
    cfg = configparser.ConfigParser()
    cfg.read(pathlib.Path('tests/data/tasks/tanium/demo-tanium-to-oscal.config'))
    # Point the task at a deliberately broken input directory.
    cfg.remove_option('task.tanium-to-oscal', 'input-dir')
    cfg.set('task.tanium-to-oscal', 'input-dir', 'tests/data/tasks/tanium/input-bad')
    section = cfg['task.tanium-to-oscal']
    section['output-dir'] = str(tmp_path)
    task = tanium_to_oscal.TaniumToOscal(section)
    assert task.simulate() == TaskOutcome.SIM_FAILURE
    assert os.listdir(str(tmp_path)) == []
def gather(tensor, tensor_list=None, root=0, group=None):
    """
    Sends tensor to root process, which store it in tensor_list.
    """
    if group is None:
        group = distributed.group.WORLD
    # Only the root rank supplies a gather_list; other ranks just send.
    if distributed.get_rank() == root:
        assert tensor_list is not None
        distributed.gather(tensor, gather_list=tensor_list, group=group)
    else:
        distributed.gather(tensor, dst=root, group=group)
def main():
    """Lists as piles
    """
    # Create a stack
    my_stack = [1, 2, 3, 4]
    print("my_stack", my_stack)
    # Push values on the stack
    for value in (5, 6, 7):
        my_stack.append(value)
    print("my_stack", my_stack)
    # Pop values from the stack, showing the stack after each pop
    for _ in range(4):
        print("Poped value", my_stack.pop())
        print("my_stack", my_stack)
def get_gateway(ctx, name):
    """Fetch the SDK gateway resource named *name*.

    Restores the session if it expired, resolves the VDC href stored in the
    click context, and asks that VDC for the gateway record.
    """
    restore_session(ctx, vdc_required=True)
    client = ctx.obj['client']
    vdc = VDC(client, href=ctx.obj['profiles'].get('vdc_href'))
    gateway_record = vdc.get_gateway(name)
    return Gateway(client, href=gateway_record.get('href'))
def fn_getdatetime(fn):
    """Extract the first datetime found in input filename *fn*, or None."""
    matches = fn_getdatetime_list(fn)
    return matches[0] if matches else None
def ai_check_mate(board, checkmate_moves, enemy_color, computer_turn):
    """in the case that ai does not have any move that gives it material advantage, it attempts to check mate by
    choosing moves that leave the enemy king with as few possible moves as possible
    :param board: the logical board
    :param checkmate_moves: the moves given by the main ai that are do not result in material loss
    :param enemy_color: the color of the enemy pieces
    :param computer_turn: to know if it is the computer's turn or not
    """
    # Offsets for the 8 squares around the king (plus the king's own square).
    possible_moves_semi_coordinates = (-1, 0, 1)
    enemy_king_possible_moves = 9  # 8 is the actual maximum number of moves a king can perform
    initial_enemy_king_moves = 0
    best_move = (-1, -1, -1, -1)
    best_center_distance_punishment = -40
    enemy_king_coordinates = logic.find_king(board, enemy_color)
    king_row, king_column = enemy_king_coordinates[0], enemy_king_coordinates[1]
    # Count how many squares the enemy king can currently move to.
    # NOTE(review): initial_enemy_king_moves is computed but never used below.
    for row in possible_moves_semi_coordinates:
        for column in possible_moves_semi_coordinates:
            if logic.verify_move_final(board, king_row + row, king_column + column,
                                       king_row, king_column, True, False):
                initial_enemy_king_moves += 1
    # Evaluate each candidate move by how much it restricts the enemy king
    # and how far it pushes the king away from the board center.
    for move in checkmate_moves:
        potential_board = logic.verify_move_final(board, move[0], move[1], move[2], move[3], True, False)
        king_moves = 0
        enemy_king_coordinates = logic.find_king(potential_board, enemy_color)
        king_row, king_column = enemy_king_coordinates[0], enemy_king_coordinates[1]
        # Manhattan distance of the king from the 8x8 board's center (3.5, 3.5).
        enemy_king_center_distance = (abs(3.5 - king_row) + abs(3.5 - king_column))
        center_distance_punishment = 0
        for row in possible_moves_semi_coordinates:
            for column in possible_moves_semi_coordinates:
                if logic.verify_move_final(potential_board, king_row + row, king_column + column,
                                           king_row, king_column, True, False):
                    king_moves += 1
                    enemy_king_new_center_distance = abs(3.5 - (king_row + row)) + abs(3.5 - (king_column + column))
                    # Reward moves whose remaining king escapes lead outward;
                    # penalise escapes toward the center.
                    if enemy_king_new_center_distance > enemy_king_center_distance:
                        center_distance_punishment += enemy_king_new_center_distance/10
                    else:
                        center_distance_punishment -= 5
        # Prefer the move leaving the fewest king escapes; break ties (and
        # possibly override) with the center-distance score.
        if king_moves < enemy_king_possible_moves:
            enemy_king_possible_moves = king_moves
            best_move = move
        if center_distance_punishment >= best_center_distance_punishment:
            best_center_distance_punishment = center_distance_punishment
            best_move = move
    if best_move[0] == -1:
        print("checkmate ai did not find any moves")
    # Execute the chosen move through the game's selection/move API.
    logic.select_piece(board, best_move[2], best_move[3])
    logic.move_or_select_piece(board, (best_move[1], best_move[0]), logic.square_size, logic.piece_selected, computer_turn, True)
def test_monitoring_after_ocp_upgrade(pre_upgrade_monitoring_pvc):
    """
    After ocp upgrade validate all monitoring pods are up and running,
    its health is OK and also confirm no new monitoring
    pvc created instead using previous one.

    :param pre_upgrade_monitoring_pvc: fixture providing the monitoring PVC
        objects captured before the upgrade.
    """
    # All monitoring pods must reach Running state within the timeout.
    pod_obj_list = pod.get_all_pods(namespace=defaults.OCS_MONITORING_NAMESPACE)
    POD.wait_for_resource(
        condition=constants.STATUS_RUNNING,
        resource_count=len(pod_obj_list),
        timeout=180,
    )
    # The upgrade must not have created or removed any monitoring PVCs.
    post_upgrade_monitoring_pvc = get_list_pvc_objs_created_on_monitoring_pods()
    assert len(pre_upgrade_monitoring_pvc) == len(post_upgrade_monitoring_pvc), (
        "Before and after ocp upgrade pvc are not matching"
        f"pre_upgrade_monitoring_pvc are {[pvc_obj.name for pvc_obj in pre_upgrade_monitoring_pvc]}."
        f"post_upgrade_monitoring_pvc are {[pvc_obj.name for pvc_obj in post_upgrade_monitoring_pvc]}"
    )
    # Compare the backing PVs by name: the same volumes must still be bound.
    before_upgrade_pv_list = []
    after_upgrade_pv_list = []
    for before_upgrade_pvc_obj in pre_upgrade_monitoring_pvc:
        before_upgrade_pv_list.append(
            before_upgrade_pvc_obj.get().get("spec").get("volumeName")
        )
    for after_upgrade_pvc_obj in post_upgrade_monitoring_pvc:
        after_upgrade_pv_list.append(
            after_upgrade_pvc_obj.get().get("spec").get("volumeName")
        )
        assert after_upgrade_pvc_obj.get().get("status").get("phase") == "Bound"
    assert set(before_upgrade_pv_list) == set(
        after_upgrade_pv_list
    ), "Before and after ocp upgrade pv list are not matching"
    # Finally, Prometheus itself must report healthy.
    assert prometheus_health_check(), "Prometheus health is degraded"
def greenplum_kill_process(process_id):
    """Cancel and then terminate the Greenplum backend with the given pid.

    :param process_id: int
    :return: result of greenplum_read for the combined statement
    """
    statements = """
    select pg_cancel_backend({0});
    select pg_terminate_backend({0});
    """.format(process_id)
    return greenplum_read(statements)
def test_signature():
    """check svs_search"""
    prod = product.TextProduct(get_test_file("TOR.txt"))
    sig = prod.get_signature()
    assert sig == "CBD"
async def rauch_lancet_de_novos(result, limiter):
    """ get de novo data for Rauch et al. intellectual disability exome study

    De novo mutation data sourced from supplementary tables 2 and 3 from
    Rauch et al. (2012) Lancet 380:1674-1682
    doi: 10.1016/S0140-6736(12)61480-9

    Returns:
        nothing; appends a set of DeNovo objects (gene symbol, chromosome,
        nucleotide position, alleles) to *result*.
    """
    logging.info('getting Rauch et al Lancet 2012 de novos')
    # obtain the supplementary material (url is a module-level constant)
    temp = tempfile.NamedTemporaryFile()
    download_file(url, temp.name)
    s2 = extract_table_s2(temp)
    # table S2's consequence column is dropped; consequences are re-derived later
    del s2['cq']
    s3 = extract_table_s3(temp)
    # NOTE(review): DataFrame.append is deprecated in modern pandas; this
    # relies on an older pandas API.
    data = s2.append(s3, ignore_index=True)
    # convert HGVS genomic descriptions to chrom/pos/ref/alt coordinates
    coords = await fix_hgvs_coordinates(limiter, data['hgvs_genomic'])
    data['chrom'], data['pos'], data['ref'], data['alt'] = coords
    # define the study details
    data['person_id'] += '|rauch'
    # normalise unicode hyphens (U+2010) in IDs to ASCII '-'
    data['person_id'] = data['person_id'].str.replace('‐', '-')
    data['study'] = "10.1016/S0140-6736(12)61480-9"
    data['confidence'] = 'high'
    vars = set()
    for i, row in data.iterrows():
        var = DeNovo(row.person_id, row.chrom, row.pos, row.ref, row.alt,
            row.study, row.confidence, 'grch37')
        vars.add(var)
    result.append(vars)
def check_app_auth(headers):
    """Authenticate an application from the Authorization HTTP header.

    Only HTTP Basic authentication is supported: the header must be
    "Basic <base64(user:password)>" and the credentials must match the
    module-level `app_auth` mapping.

    Returns True on success, False on any parse or credential failure.
    """
    import base64
    try:
        auth_header = headers["Authorization"]
    except KeyError:
        return False
    # Only handle HTTP Basic authentication.  BUG FIX: the previous pattern
    # "Basic (\\w+==)" rejected most valid base64 payloads (no '+', '/', or
    # other padding lengths); accept the full base64 alphabet instead.
    m = re.match(r"Basic ([A-Za-z0-9+/=]+)", auth_header)
    if not m:
        return False
    encoded = m.groups()[0].encode('ascii')
    # base64.decodestring was deprecated and removed in Python 3.9;
    # decodebytes is the supported equivalent.
    decoded = base64.decodebytes(encoded).decode('ascii')
    m = re.match("([^:]+):(.+)", decoded)
    if not m:
        # Invalid authorization format
        return False
    app_user, app_pass = m.groups()
    global app_auth
    try:
        if app_auth[app_user] == app_pass:
            return True
    except KeyError:
        # No such user, fall through
        pass
    return False
def print_title(title_text):
    """Print inputted text between a series of dashes to mark important text.

    Used for outputting information to the user while the program is scraping
    data.

    Keyword argument:
    title_text -- the text to be printed in a title format
    """
    # Converted from Python 2 print statements, which are a SyntaxError on
    # Python 3; the emitted output is unchanged.
    print("\n-----------------------------------------------------")
    print(title_text)
    print("-----------------------------------------------------\n")
def setupConnection():
    """
    Create connection to database, to be shared by table classes. The file
    will be created if it does not exist.
    """
    db_path = conf.get('db', 'path')
    make_connection = builder()
    return make_connection(db_path)
def max_pool(ip):
    """2x2 max pooling over the spatial dims; a trailing odd row/col is cropped.

    ip is DxHxW, output is DxH//2xW//2.
    """
    rows = (ip.shape[1] // 2) * 2
    cols = (ip.shape[2] // 2) * 2
    # Reduce pairs of rows first, then pairs of columns.
    row_max = np.maximum(ip[:, 0:rows:2, :], ip[:, 1:rows:2, :])
    return np.maximum(row_max[:, :, 0:cols:2], row_max[:, :, 1:cols:2])
def test_wrong_cli_type(runner: Runner) -> None:
    """Loading command with a cli attribute that is not a click command raises an error"""
    outcome = runner.invoke(['wrong_cli_type'])
    expected = (
        'Expected command module attribute tests.test_cli.commands.wrong_cli_type.cli '
        "to be a <class 'click.core.Command'> instance but got <class 'function'> "
        'instead'
    )
    assert isinstance(outcome.exception, AssertionError)
    assert str(outcome.exception) == expected
def register(linter):
    """Register the reporter classes with the linter."""
    # Pylint plugin entry point: expose JSONReporter as an output format.
    linter.register_reporter(JSONReporter)
def get_bin_vals(global_config):
    """
    Build the grasp-width bin value tensor from the bounds in the config.

    Arguments:
        global_config {dict} -- config

    Returns:
        torch.Tensor -- float32 bin value tensor
    """
    bounds = np.array(global_config['DATA']['labels']['offset_bins'])
    mode = global_config['TEST']['bin_vals']
    if mode == 'max':
        # Midpoints of each bin, except the last which uses its upper bound.
        vals = (bounds[1:] + bounds[:-1]) / 2
        vals[-1] = bounds[-1]
    elif mode == 'mean':
        vals = bounds[1:]
    else:
        raise NotImplementedError
    if not global_config['TEST']['allow_zero_margin']:
        # Clamp so the gripper always keeps some opening margin.
        max_width = global_config['DATA']['gripper_width'] - global_config['TEST']['extra_opening']
        vals = np.minimum(vals, max_width)
    return torch.tensor(vals, dtype=torch.float32)
def dphi_dop(t, profile, r0_vec, v_vec, d_hat, use_form=False, form_fun=None,
             interp_table=None):
    """
    Returns the phase shift due to the Doppler delay for a population of
    subhalos, summed over all events.

    TODO: add use_closest option

    Parameters (assumptions to confirm against callers):
        t           -- array of sample times in years
        profile     -- dict describing the subhalo profile; either a point
                       mass ('M', optionally with 'c' when use_form=True) or
                       an extended profile ('rhos', 'rs')
        r0_vec      -- (N, 3) initial positions, kpc
        v_vec       -- (N, 3) velocities, kpc/yr
        d_hat       -- unit line-of-sight vector
        use_form    -- apply an NFW-like form factor (requires 'M' and 'c')
        form_fun    -- custom form-factor function of (r, rs, rhos)
        interp_table-- optional precomputed lookup for the b/v terms
    """
    v_mag = np.linalg.norm(v_vec, axis=1)
    r0_v = np.einsum("ij, ij -> i", r0_vec, v_vec) # kpc^2/yr
    # t0 is the time of closest approach; b_vec the impact-parameter vector.
    t0 = -r0_v / np.square(v_mag) # year
    b_vec = r0_vec + v_vec * t0[:, np.newaxis] # (N, 3), kpc
    b_mag = np.linalg.norm(b_vec, axis=1) # (N)
    tau = b_mag / v_mag # year
    b_hat = b_vec / b_mag[:, np.newaxis]
    v_hat = v_vec / v_mag[:, np.newaxis]
    # Projections of the impact-parameter and velocity directions onto the
    # line of sight.
    b_d = np.dot(b_hat, d_hat)
    v_d = np.dot(v_hat, d_hat)
    # Dimensionless time relative to closest approach, per event.
    x = np.subtract.outer(t, t0) / tau
    x0 = -t0 / tau
    prefactor = (
        const.yr_to_s
        * const.GN
        / (const.km_s_to_kpc_yr * const.c_light * np.square(v_mag))
    )
    if interp_table is None:
        # Closed-form b and v terms, referenced to their value at t = 0.
        bd_term = (np.sqrt(1 + x ** 2) + x) - (np.sqrt(1 + x0 ** 2) + x0)
        vd_term = np.arcsinh(x) - np.arcsinh(x0)
        sig = bd_term * b_d - vd_term * v_d
        if 'M' in list(profile):
            # Point-mass description.
            prefactor *= profile['M']
            if use_form:
                # Evaluate the form factor at the closest approach within the
                # observation window, clipped to [0, t[-1]].
                t_cl = np.maximum(np.minimum(t0, t[-1]), 0)
                x_cl = (t_cl - t0) / tau
                r_cl = tau * v_mag * np.sqrt(1 + x_cl ** 2)
                # Virial radius from M = (4/3) pi rv^3 * 200 rho_crit.
                rv = ((3 * profile['M'] / (4 * np.pi)) * (1 / 200) * (1 / const.rho_crit)) ** (1 / 3)
                form_func = np.where(r_cl<rv, form(r_cl / rv, profile['c']), 1) # (N)
                sig = form_func * sig
        else:
            if form_fun is not None:
                t_cl = np.maximum(np.minimum(t0, t[-1]), 0)
                x_cl = (t_cl - t0) / tau
                r_cl = tau * v_mag * np.sqrt(1 + x_cl ** 2)
                form_func = form_fun(r_cl, profile['rs'], profile['rhos'])
                sig = form_func * sig
            else:
                raise ValueError('rho_s, r_s halo description currently requires custom density profile ("USE_FORMTAB")')
    else:
        # Table-interpolated b and v terms for extended (rhos, rs) profiles.
        y = b_mag / profile['rs']
        bd_term0, vd_term0 = interp_table.bd_vd_terms(x0, y)
        y.shape = (1,-1)
        y = np.broadcast_to(y,x.shape)
        bd_term, vd_term = interp_table.bd_vd_terms(x, y)
        bd_term -= bd_term0
        vd_term -= vd_term0
        sig = profile['rhos'] * profile['rs']**3 * (bd_term * b_d + vd_term * v_d)
    sig = prefactor * sig
    # sum the signal over all the events
    return np.sum(sig, axis=-1)
def calculate_Hubble_flow_velocity_from_cMpc(cMpc, cosmology="Planck15"):
    """
    Calculates the Hubble flow recession velocity from comoving distance.

    Parameters
    ----------
    cMpc : array-like, shape (N, )
        The distance in units of comoving megaparsecs. Must be 1D or scalar.
    cosmology : string or astropy.cosmology.core.FLRW
        The cosmology to assume whilst calculating distance. Default: Planck15.

    Returns
    -------
    velocity : array-like, shape (N, )
        The Hubble flow recession velocity (proper distance times H0).
    """
    cosmo = utils.get_cosmology_from_name(cosmology)
    scale_factor = utils.calculate_scale_factor_from_cMpc(cMpc, cosmology=cosmology)
    # Convert comoving -> proper distance, then apply Hubble's law v = H0 * d.
    proper_dist = cMpc * apu.Mpc / scale_factor
    return proper_dist * cosmo.H0
def CollapseDictionary(mapping):
    """
    Remove auto-allocated prefixes from a prefix -> URI mapping.

    A prefix is considered auto-allocated when it begins with '_'.  For every
    URI that has both an originally-named prefix and one or more '_'-prefixed
    duplicates, the duplicates are dropped; URIs that only ever had '_'
    prefixes are kept untouched.  The input mapping is not modified.
    """
    # Invert the mapping: URI -> set of prefixes pointing at it.
    by_uri = {}
    for prefix, uri in list(mapping.items()):
        by_uri.setdefault(uri, set()).add(prefix)
    doomed = []
    for uri, prefixes in list(by_uri.items()):
        auto = [p for p in prefixes if p.startswith('_')]
        named = [p for p in prefixes if not p.startswith('_')]
        if named and len(prefixes) > 1 and auto:
            # There are allocated prefixes for URIs that were originally
            # given a prefix
            assert len(named) == 1
            doomed.extend(auto)
    return dict([(k, v) for k, v in list(mapping.items()) if k not in doomed])
def do_pre_context(PreContextSmToBeReversedList, PreContextSmIdList, dial_db):
    """Pre-context detecting state machine (backward).
    ---------------------------------------------------------------------------
    Micro actions are: pre-context fullfilled_f

            DropOut     --> Begin of 'main' state machine.
            BLC         --> ReloadStateBackward
            EndOfStream --> 'error'

    Variables (potentially) required:

            pre_context_fulfilled_f[N] --> Array of flags for pre-context
                                           indication.
    RETURNS: [0] generated code text
             [1] reload state BACKWARD, to be generated later.
    """
    # Nothing to do when no pre-contexts exist.
    if not PreContextSmToBeReversedList: return [], None
    # Generate the backward analyzer over the reversed state machines.
    analyzer_txt, \
    analyzer   = __do_state_machine(PreContextSmToBeReversedList, engine.BACKWARD_PRE_CONTEXT,
                                    dial_db, ReverseF=True)
    epilog_txt = _get_pre_context_epilog_definition(dial_db)
    txt = analyzer_txt
    txt.extend(epilog_txt)
    # Each pre-context id needs its own 'fulfilled' flag variable.
    for sm_id in PreContextSmIdList:
        variable_db.require("pre_context_%i_fulfilled_f", Index = sm_id)
    return txt, analyzer
async def check_user_cooldown(ctx: Context, config: Config, cooldown: dict):
    """Check if command is on cooldown.

    Returns True (and records the use) when the command may run, or False
    when the per-period budget is exhausted.  *cooldown* is mutated in place.
    """
    command = ctx.command.qualified_name
    entry = cooldown[command]
    last, rate, per, uses = entry["last"], entry["rate"], entry["per"], entry["uses"]
    now = utc_timestamp(datetime.utcnow())
    if now >= last + per:
        # The window has elapsed: start a fresh one with this use counted.
        cooldown[command] = {
            "last": utc_timestamp(datetime.utcnow()),
            "rate": rate,
            "per": per,
            "uses": 1
        }
        return True
    if uses < rate:
        # Still inside the window with budget left: count this use.
        cooldown[command] = {
            "last": last,
            "rate": rate,
            "per": per,
            "uses": uses + 1
        }
        return True
    return False
def apply_keymaps(window):
    """
    Given a window, check it to see if it it contains a project, and if so
    activate any key bindings present in it.

    This will remove the key bindings for a project that used to have them
    if it no longer does, which could happen if the project is changed by
    some external factor, such as source control.
    """
    project = project_name(window)
    if project is None:
        return
    data = window.project_data() or {}
    keys = data.get("keys", [])
    keymap = []
    for key in keys:
        # Pop "platform" so it is not written into the generated binding.
        platform = key.pop("platform", None)
        # NOTE(review): this condition is True for every value except
        # exactly '!' + _platform_name — so bindings scoped to a *different*
        # platform (e.g. "osx" on linux) are still included.  Confirm whether
        # only the negation syntax ('!name') is meant to exclude.
        if platform != '!' + _platform_name or platform == _platform_name:
            keymap.append(add_context(key, project))
    try:
        folder = keymap_dir(project)
        if keymap and not os.path.isdir(folder):
            os.mkdir(folder)
        filename = keymap_file_path(project)
        if keymap:
            # Write (or overwrite) the generated project keymap file.
            with open(filename, "w") as file:
                file.write(json.dumps(keymap, indent=4))
        else:
            # Project no longer defines keys: remove any stale keymap file.
            cleanup_keymap(filename, folder)
    except Exception as e:
        sublime.status_message("Error generating project specific bindings")
        raise e
def round_robin(units, sets=None):
    """Generate a schedule of "fair" pairings from a list of units.

    Uses the circle method: the first unit stays fixed while the others
    rotate one position per round.  With an odd number of units a None "bye"
    marker is appended and pairings involving it are omitted.

    NOTE: the input list is rotated (and possibly padded) in place; pass a
    copy if the caller needs the original order preserved.

    :param units: list of participants.
    :param sets: number of rounds; defaults to len(units) - 1 (after padding).
    :return: list of rounds, each a list of (unit_a, unit_b) tuples.
    """
    if len(units) % 2:
        units.append(None)
    count = len(units)
    sets = sets or (count - 1)
    # BUG FIX: `count / 2` yields a float on Python 3, making range() raise
    # TypeError; integer division restores the intended behaviour.
    half = count // 2
    schedule = []
    for turn in range(sets):
        pairings = []
        for i in range(half):
            # Skip any pairing that involves the "bye" placeholder.
            if units[i] is None or units[count - i - 1] is None:
                continue
            pairings.append((units[i], units[count - i - 1]))
        # Rotate: the last unit moves to position 1, units[0] stays fixed.
        units.insert(1, units.pop())
        schedule.append(pairings)
    return schedule
def test_pipeline_load(testbed: SparkETLTests):
    """Test pipeline.load method using the mocked spark session and introspect the calling pattern\
    to make sure spark methods were called with intended arguments
    .. seealso:: :class:`SparkETLTests`
    """
    # Run load with the mocked dataframe and test config.
    pipeline.load(df=testbed.mock_df, config=testbed.config, logger=testbed.config)
    # Verify the save call hit the expected path and mode.
    expected_path = '/user/stabsumalam/pyspark-tdd-template/output/user_pageviews'
    testbed.mock_df.write.save.assert_called_once_with(path=expected_path, mode='overwrite')
    testbed.mock_df.reset_mock()
def filter_df(p_df:pd.DataFrame, col_name:str, value, keep:bool=True, period=None):
    """
    Keep or drop the rows of a dataframe whose column matches a value.

    Parameters :
        p_df : pandas.DataFrame
            The original dataframe
        col_name : str
            The dataframe column name the filter is applied to
        value : item or list
            The value (or list of values) used to filter the column
        keep : bool
            Whether to keep (True) or drop (False) the matching rows
        period : unused; kept for interface compatibility
    Return : pandas.DataFrame
        The filtered dataframe
    """
    if type(value) is list:
        operator = 'in' if keep else 'not in'
    else:
        operator = '==' if keep else '!='
    # @value lets pandas.query reference the local variable safely.
    return p_df.query(f"{col_name} {operator} @value")
def _VerifyOptions(options):
"""Verify the passed-in options.
Args:
options: The parsed options to verify.
Returns:
Boolean, True if verification passes, False otherwise.
"""
if options.endpoints_service and not options.openapi_template:
logging.error('Please specify openAPI template with --openapi_template '
'in deploying endpoints.')
return False
if options.openapi_template and not options.endpoints_service:
logging.error('Please specify endpoints service with --endpoints_service '
'in deploying endpoints.')
return False
if (options.endpoints_service and
options.project_id not in options.endpoints_service):
logging.error('The project "%s" is not matched to the endpoints service '
'"%s".', options.project_id, options.endpoints_service)
return False
return True | 36,966 |
def program_cancel(app, view:View, context):
    """Exit the program"""
    # Log the exit through the app's message channel, then terminate cleanly.
    app.msg_info("exit")
    raise SystemExit(0)
def force_delegate(func: _F) -> _F:
    """
    A decorator to allow delegation for the specified method even if cls.delegate = False
    """
    # Mark the function so the delegation machinery honours it regardless of
    # the class-level `delegate` flag.
    setattr(func, '_force_delegate', True)  # type: ignore[attr-defined]
    return func
def add_dataframe_to_bq_as_str_values(frame, dataset, table_name,
                                      column_types=None, col_modes=None,
                                      project=None, overwrite=True):
    """Adds (either overwrites or appends) the provided DataFrame to the table
    specified by `dataset.table_name`. Automatically adds an ingestion time
    column and converts every other value to a string before upload.

    frame: pandas.DataFrame representing the data to add.
    dataset: The BigQuery dataset to write to.
    table_name: The BigQuery table to write to.
    column_types: Optional dict of column name to BigQuery data type. If
                  present, the column names must match the columns in the
                  DataFrame. Otherwise, table schema is inferred.
    col_modes: Optional dict of modes for each field. Possible values include
               NULLABLE, REQUIRED, and REPEATED. Must also specify
               column_types to specify col_modes.
    project: Optional project override passed through to the uploader.
    overwrite: Whether to overwrite or append to the BigQuery table."""
    __add_ingestion_ts(frame, column_types)
    json_data = __convert_frame_to_json(frame)
    # Stringify every value so BigQuery receives uniform STRING payloads.
    for record in json_data:
        for key, val in record.items():
            record[key] = str(val)
    __dataframe_to_bq(frame, dataset, table_name, column_types, col_modes,
                      project, json_data, overwrite)
def create_signature(key_dict, data):
  """
  <Purpose>
    Sign 'data' with the private key in key_dict['keyval']['private'] and
    return a signature dictionary of the form:
    {'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
     'sig': '...'}.

    Supported key types / schemes: 'rsa' (schemes listed in
    RSA_SIGNATURE_SCHEMES, e.g. RSASSA-PSS per RFC 3447), 'ed25519'
    (http://ed25519.cr.yp.to/), and the ECDSA variants 'ecdsa',
    'ecdsa-sha2-nistp256' and 'ecdsa-sha2-nistp384'.  Which backend performs
    the signing is determined by the key type and the cryptography library
    configured in 'settings'.

    >>> ed25519_key = generate_ed25519_key()
    >>> data = 'The quick brown fox jumps over the lazy dog'
    >>> signature = create_signature(ed25519_key, data)
    >>> securesystemslib.formats.SIGNATURE_SCHEMA.matches(signature)
    True

  <Arguments>
    key_dict:
      A key dictionary conformant to ANYKEY_SCHEMA, e.g. for RSA:
      {'keytype': 'rsa',
       'scheme': 'rsassa-pss-sha256',
       'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
       'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...',
                  'private': '-----BEGIN RSA PRIVATE KEY----- ...'}}
      The public and private keys are strings in PEM format.

    data:
      Bytes to be signed; encode/serialize before passing.  The same value
      can later be given to securesystemslib.verify_signature() together
      with the public key to verify the signature.

  <Exceptions>
    securesystemslib.exceptions.FormatError, if 'key_dict' is improperly
    formatted.

    securesystemslib.exceptions.UnsupportedAlgorithmError, if 'key_dict'
    specifies an unsupported key type or signing scheme.

    TypeError, if 'key_dict' contains an invalid keytype.

  <Side Effects>
    Calls into the configured cryptography library to perform the signing.

  <Returns>
    A signature dictionary conformant to
    'securesystemslib_format.SIGNATURE_SCHEMA'.
  """

  # Validate structure, field names and key type up front; raises
  # securesystemslib.exceptions.FormatError on mismatch.
  securesystemslib.formats.ANYKEY_SCHEMA.check_match(key_dict)

  keytype = key_dict['keytype']
  scheme = key_dict['scheme']
  public = key_dict['keyval']['public']
  private = key_dict['keyval']['private']

  if keytype == 'rsa':
    if scheme not in RSA_SIGNATURE_SCHEMES:
      raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported'
          ' RSA signature scheme specified: ' + repr(scheme))
    # Normalise PEM line endings before handing the key to the RSA backend.
    sig, scheme = securesystemslib.rsa_keys.create_rsa_signature(
        private.replace('\r\n', '\n'), data, scheme)

  elif keytype == 'ed25519':
    # Hex-encoded key material -> raw bytes for the ed25519 backend.
    sig, scheme = securesystemslib.ed25519_keys.create_signature(
        binascii.unhexlify(public.encode('utf-8')),
        binascii.unhexlify(private.encode('utf-8')),
        data, scheme)

  # The spelled-out nistp keytypes remain supported for backwards
  # compatibility with older securesystemslib releases.
  elif keytype in ['ecdsa', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384']:
    sig, scheme = securesystemslib.ecdsa_keys.create_signature(
        public, private, data, scheme)

  else:  # pragma: no cover
    # ANYKEY_SCHEMA should already have rejected unknown key types; this is
    # a defensive check.
    raise TypeError('Invalid key type.')

  # The signature value is stored hex-encoded alongside the keyid.
  return {'keyid': key_dict['keyid'], 'sig': binascii.hexlify(sig).decode()}
def test_laplacian(deriv_1d_data):
    """Test laplacian with simple 1D data."""
    result = laplacian(deriv_1d_data.values, coordinates=(deriv_1d_data.x,))
    # Expected value worked by hand.
    expected = np.ones_like(deriv_1d_data.values) * 0.2133333 * units('delta_degC/cm**2')
    assert_array_almost_equal(result, expected, 5)
def parse_line(line):
    """Parse a queue trace line into a dict.

    Returns an empty dict when the line has fewer than 12 fields.
    """
    fields = line.split()
    if len(fields) < 12:
        return {}
    return {
        "event": fields[0],
        "time": float(fields[1]),
        "from": int(fields[2]),
        "to": int(fields[3]),
        "type": fields[4],
        "size": int(fields[5]),
        "flags": fields[6],
        "fid": int(fields[7]),
        "src": fields[8],
        "dst": fields[9],
        "seqnum": int(fields[10]),
        "pktid": int(fields[11]),
    }
def gaussian_linear_combination(distributions_and_weights: Dict):
    """Compute the distribution of a weighted sum of independent Gaussians.

    The mean is the weighted sum of the means; the covariance is the sum of
    the covariances scaled by the squared weights.
    """
    assert isinstance(distributions_and_weights, dict)
    assert all(
        isinstance(dist, MultivariateNormal)
        for dist in distributions_and_weights.keys()
    )
    mean = sum(
        dist.loc * weight
        for dist, weight in distributions_and_weights.items()
    )
    covariance = sum(
        dist.covariance_matrix * (weight ** 2)
        for dist, weight in distributions_and_weights.items()
    )
    return MultivariateNormal(loc=mean, covariance_matrix=covariance)
def open():
    """Open the HTML documentation in a browser"""
    # Resolve the built index page and hand its file:// URI to the browser.
    index_path = (cwd / '_build/html/index.html').absolute()
    webbrowser.open(index_path.as_uri())
def check_pattern_startswith_slash(pattern):
    """
    Check that the pattern does not begin with a forward slash.
    """
    regex_pattern = pattern.regex.pattern
    # A leading '/' (optionally after the '^' anchor) is redundant in URLconfs.
    if regex_pattern.startswith(('/', '^/')):
        return [Warning(
            "Your URL pattern {} has a regex beginning with a '/'. "
            "Remove this slash as it is unnecessary.".format(describe_pattern(pattern)),
            id="urls.W002",
        )]
    return []
def attack_images(cores, prob_cutoff):
    """
    Build the image-based attack dataset for the target users.

    :param cores: how many cores to use for multiprocessing
    :param prob_cutoff: user's image belongs to a certain category if the output of the last FC layer of the resnet model for the category > prob_cutoff
    :return: path of the generated image dataset CSV
    """
    # DATAPATH and city are module-level globals used throughout this pipeline.
    mediaFile = "target_media"
    slice_files(mediaFile, DATAPATH, cores)
    # NOTE(review): subprocess argv entries must be strings; `cores` appears
    # to be an int here — confirm callers pass a str (or cast with str()).
    subprocess.call(['./parallelize_im2proba.sh', cores,
                     city])  # downloads images and converts to embeddings, shell script calls im2proba.py
    # Merge the per-core probability shards, then threshold and count.
    prob_file = combine_files(DATAPATH, cores)
    clean_file = clean_trim(prob_cutoff, DATAPATH, prob_file)
    counts_file = count_cats(DATAPATH, clean_file, countsFile="proba_cut_01_counts.csv" )
    # Build user pairs (friends plus synthetic strangers) and score them.
    allPairs = make_allPairs("avg_pairs.csv", u_list_file=counts_file, DATAPATH=DATAPATH,
                             friendFile=city + ".target_friends", makeStrangers=True)
    data_file = DATAPATH + "im_dataset.csv"
    dataset = make_features_counts(DATAPATH, clean_file, data_file, counts_file,
                                   allPairs)
    score(dataset, name="mini-counts, cosine, entropy of max cat", classifiers=classifiers)
    print ("Created image dataset at", data_file)
    return data_file
def ht(x):
    """ht(x)
    Evaluates the Heaviside step function over the domain x.

    Args:
        x: Domain points (array-like)
    Returns:
        ht(x): array with 0.0 where x < 0, 1.0 where x > 0 and 0.5 at x == 0
    """
    x = np.asarray(x)
    # BUG FIX: the original loop iterated over range(np.size(x) - 1) and
    # never classified the final element (it kept the ones_like() default),
    # and integer inputs truncated the 0.5 value at x == 0.  The vectorised
    # form covers every element and always produces a float result.
    return np.where(x > 0, 1.0, np.where(x < 0, 0.0, 0.5))
def Scan(client, table, callback):
    """Scans an entire table.

    Tornado-style coroutine: pages through the DynamoDB-style table 50 items
    at a time, counts items whose 't' attribute starts with one of the
    configured hash-key prefixes, optionally deletes them when --delete is
    set, and finally invokes *callback*.
    """
    # Per-prefix match counters.
    found_entries = {}
    for prefix in options.options.hash_key_prefixes:
        found_entries[prefix] = 0
    deleted = 0
    last_key = None
    count = 0
    while True:
        # Page through the table using the exclusive start key.
        result = yield gen.Task(client.Scan, table.name, attributes=None, limit=50, excl_start_key=last_key)
        count += len(result.items)
        for item in result.items:
            # 't' is the hash-key value, 'k' the sort key; skip incomplete rows.
            value = item.get('t', None)
            sort_key = item.get('k', None)
            if value is None or sort_key is None:
                continue
            for prefix in options.options.hash_key_prefixes:
                if value.startswith(prefix):
                    logging.info('matching item: %r' % item)
                    found_entries[prefix] += 1
                    if options.options.delete:
                        logging.info('deleting item: %r' % item)
                        yield gen.Task(client.DeleteItem, table=table.name, key=db_client.DBKey(value, sort_key))
                        deleted += 1
        if result.last_key:
            last_key = result.last_key
        else:
            # No more pages.
            break
    logging.info('Found entries: %r' % found_entries)
    logging.info('scanned %d items, deleted %d' % (count, deleted))
    callback()
def xoGkuXokhXpZ():
    """Package link to class."""
    # Resolve Foo through the package's attribute chain.
    package = Package("pkg")
    return package.circles.simple_class.Foo
def create_all_pts_within_observation_window(observation_window_hours) -> str:
    """
    create a view of all patients within observation window
    return the view name

    Selects MIMIC-III admissions where the patient neither died nor was
    discharged within the observation window.  Uses the module-level
    `cursor` (a live psycopg2-style cursor) to run the DDL.

    :param observation_window_hours: window length in hours after admission
    :return: fully-qualified name of the created/replaced view
    """
    view_name = f"default.all_pts_{observation_window_hours}_hours"
    query = f"""
    CREATE OR REPLACE VIEW {view_name} AS
    WITH admits AS (
    SELECT
        admits.subject_id,
        admits.hadm_id,
        admits.admittime,
        admits.admittime + interval %(time_window_hours)s hour index_date,
        CASE WHEN admits.deathtime <= (admits.admittime + interval %(time_window_hours)s hour) THEN 1 ELSE 0 END AS death_during_obs_win,
        CASE WHEN admits.dischtime <= (admits.admittime + interval %(time_window_hours)s hour) THEN 1 ELSE 0 END AS disch_during_obs_win
    FROM mimiciii.admissions admits
    )
    SELECT
        admits.subject_id,
        admits.hadm_id,
        admits.index_date,
        admits.admittime
    FROM admits
    WHERE
        admits.death_during_obs_win != 1
        and admits.disch_during_obs_win != 1
    order by random()
    --limit 1000
    """
    # The interval length is bound as a query parameter; the view name itself
    # is interpolated via the f-string above.
    params = {
        'time_window_hours': str(observation_window_hours)
    }
    cursor.execute(query, params)
    return view_name
def get_signature(data, raw_labels):
    """
    Compute a 4 x z* signature matrix, where z* is the number of distinct
    classes in ``raw_labels``.

    Rows 0-1 hold the per-class means of a 2-component PCA embedding of
    ``data``; rows 2-3 hold the per-class means of a 2-component LLE
    embedding.
    """
    labels = raw_labels.reset_index()
    pca = decomposition.PCA(n_components=2)
    lle = manifold.LocallyLinearEmbedding(n_components=2)
    embedded_pca = pd.DataFrame(pca.fit_transform(data))
    embedded_lle = pd.DataFrame(lle.fit_transform(data))
    n_classes = np.shape(labels[0].unique())[0]
    signature = np.zeros([4, n_classes])
    for cls in labels[0].unique():
        # Rows of the embeddings belonging to this class.
        idx = labels.loc[labels[0] == cls].index
        cls_pca = embedded_pca.loc[idx]
        cls_lle = embedded_lle.loc[idx]
        signature[0, cls] = cls_pca[0].mean()
        signature[1, cls] = cls_pca[1].mean()
        signature[2, cls] = cls_lle[0].mean()
        signature[3, cls] = cls_lle[1].mean()
    return signature
def get_xyz_where(Z, Cond):
    """
    Select entries of an MxN matrix by a boolean mask.

    ``Z`` and ``Cond`` are MxN arrays; ``Cond`` is True where some
    condition holds.  Returns three 1D arrays ``x, y, z``: ``x``/``y``
    are the row/column indices into ``Z`` where the mask is True and
    ``z`` holds the corresponding values of ``Z``.
    """
    rows, cols = np.indices(Z.shape)
    return rows[Cond], cols[Cond], Z[Cond]
def retrieve_seq_length(data):
    """Compute the length of each sequence in a batch; all-zero timesteps
    are treated as padding (masked).

    Args:
        data: input sequence tensor (batch, time, features)
    Returns:
        an int32 tensor with one length per sequence
    """
    with tf.name_scope('GetLength'):
        # A timestep counts as "used" iff any feature at that step is nonzero.
        used_mask = tf.sign(tf.reduce_max(tf.abs(data), axis=2))
        seq_lengths = tf.cast(tf.reduce_sum(used_mask, axis=1), tf.int32)
        return seq_lengths
def get_census_params(variable_ids, county_level=False):
    """Build the URL params for a census API call.

    variable_ids: ids of the variables to request; "NAME" is always added.
    county_level: request county-level data when True, state-level otherwise.
    """
    # Copy so the caller's list is never mutated.
    requested = list(variable_ids) + ["NAME"]
    geography = "county:*" if county_level else "state:*"
    return {"get": ",".join(requested), "for": geography}
def lookupName(n, names):
    """Check if name is in list of names

    Parameters
    ----------
    n : str
        Name to check
    names : list
        List of names to check in

    Returns
    -------
    bool
        Flag denoting if name has been found in list (True) or not (False)
    """
    # `in` already yields a bool — no need for an explicit if/else.
    return n in names
def calculate_appointments(new_set, old_set):
    """
    Split the current appointment locations into useful categories for the
    email message.

    ``new_set`` is the fresh set of all available appointments at a given
    interval; ``old_set`` is the set from the previous check.

    Returns ``(new_appointments, old_appointments)`` where
    ``new_appointments`` are locations present now but not before, and
    ``old_appointments`` are locations present in both checks.

    Ex1: addition of HONEOYE
        new_set = {'LIVERPOOL', 'BROOKLYN', 'HONEOYE', 'KINGSTON'}
        old_set = {'LIVERPOOL', 'BROOKLYN', 'KINGSTON'}
        -> new_appointments == {'HONEOYE'}

    Ex2: no changes
        new_set == old_set -> new_appointments == set()
    """
    freshly_added = new_set - old_set      # in the new check but not the old
    still_available = new_set & old_set    # overlap of both checks
    return freshly_added, still_available
def SystemPropertiesDir(dirname='system-properties'):
    """
    SystemProperty

    Prints the configuration for a system-properties dir.

    dirname: name of the system-properties directory
        (default 'system-properties').
    """
    # Parenthesized call works on both Python 2 and 3; the bare
    # `print x` statement form is a SyntaxError on Python 3.
    print(data.system_properties_dir.format(domain={'system-properties-dir': dirname}))
def clean():
    """Delete generated JavaScript files, the build folder, and all caches."""
    session.clean()
    file_manager = Profile.Profile(session).getFileManager()
    for directory in ("build", "source/script"):
        file_manager.removeDir(directory)
def log(func: Callable[..., RT]) -> Callable[..., RT]:
    """Decorator that logs entering and exiting the wrapped function
    (debug level) — useful for tracing call flow."""
    module_logger = logging.getLogger(func.__module__)

    @wraps(func)
    def wrapper(*args, **kwargs) -> RT:
        module_logger.debug("Entering: %s", func.__name__)
        value = func(*args, **kwargs)
        module_logger.debug("Exiting: %s", func.__name__)
        return value

    return wrapper
def chunks(l, n):
    """Yield successive `n`-sized chunks from list `l` (last chunk may be shorter)."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def transaksi_hari_ini():
    """
    Count today's transactions.

    used in: app_kasir/statistik.html
    """
    # Take a single timestamp so year/month/day are guaranteed consistent;
    # calling timezone.now() three times could straddle a date boundary and
    # build a filter that matches nothing.
    now = timezone.now()
    return Transaksi.objects.filter(
        tanggal_transaksi__year=now.year,
        tanggal_transaksi__month=now.month,
        tanggal_transaksi__day=now.day
    ).count()
def save_files(assembly_dict: Dict[str, bytes], output_dir: str):
    """
    Save every assembly in ``assembly_dict`` into ``output_dir``.

    :param assembly_dict: a dictionary that maps an assembly name to its contents
    :param output_dir: destination directory (created if missing)
    :return: None
    """
    os.makedirs(output_dir, exist_ok=True)
    for name, blob in assembly_dict.items():
        # Only the basename is kept, so path components in the key cannot
        # escape output_dir.
        target = os.path.join(output_dir, os.path.basename(name))
        with open(target, 'wb') as out:
            out.write(blob)
def extractWindows(signal, window_size=10, return_window_indices=False):
    """ Reshape a signal into a series of non-overlapping windows.

    If the signal length is not a multiple of ``window_size``, the final
    window is padded with NaN.

    Parameters
    ----------
    signal : numpy array, shape (num_samples,)
    window_size : int, optional
    return_window_indices : bool, optional

    Returns
    -------
    windows : numpy array, shape (num_windows, window_size)
    window_indices : numpy array of int, shape (num_windows, window_size)
        Only returned when ``return_window_indices`` is True.
    """
    tail_len = signal.shape[0] % window_size
    if tail_len:
        # Pad the tail so the signal reshapes evenly.
        pad_arr = m.np.full(window_size - tail_len, m.np.nan)
        signal_padded = m.np.concatenate((signal, pad_arr))
    else:
        # BUGFIX: previously `window_size - 0` NaNs were appended here,
        # producing a spurious all-NaN window whenever the signal length
        # was already an exact multiple of window_size.
        signal_padded = signal
    windows = signal_padded.reshape((-1, window_size))
    if not return_window_indices:
        return windows
    indices = m.np.arange(signal_padded.shape[0])
    window_indices = indices.reshape((-1, window_size))
    return windows, window_indices
def add(left: int, right: int):
    """
    Print the sum of two numbers; always returns 0 (CLI-style exit code).
    """
    total = left + right
    print(total)
    return 0
def parse_args():
    """
    Parse command-line arguments
    """
    arg_parser = argparse.ArgumentParser(
        description='Move retrieval to a different directory.')
    arg_parser.add_argument(
        'retrieval_id', type=int,
        help='the id of the retrieval to move')
    arg_parser.add_argument(
        '-d', '--directory', default=NEW_BASE_OUTPUT_DIR,
        help='the new top-level directory  for the DRS structure '
             '(default: %(default)s)')
    arg_parser.add_argument(
        '-l', '--log-level',
        help='set logging level to one of debug, info, warn (the default), '
             'or error')
    arg_parser.add_argument(
        '--version', action='version',
        version='%(prog)s {}'.format(__version__))
    return arg_parser.parse_args()
def rand_x_digit_num(x):
    """Return a random ``x``-digit number as a zero-padded string.

    The result is always a string of exactly ``x`` characters, e.g.
    ``'00042'`` for ``x == 5``.  (The old docstring referred to a
    ``leading_zeroes`` parameter and an int return mode, neither of
    which ever existed.)
    """
    return '{0:0{x}d}'.format(random.randint(0, 10**x - 1), x=x)
def gen_workflow_steps(step_list):
    """Generate a two-column table of a workflow's steps.

    Assumes ``step_list`` is a list of dictionaries with 'task_id' and
    'state' keys; an empty/missing list renders a single 'None' row.
    """
    table = format_utils.table_factory(field_names=['Steps', 'State'])
    if not step_list:
        table.add_row(['None', ''])
    else:
        for entry in step_list:
            table.add_row([entry.get('task_id'), entry.get('state')])
    return format_utils.table_get_string(table)
def commonprefix(items):
    """Get common prefix for completions

    Return the longest common prefix of a list of strings, but with special
    treatment of escape characters that might precede commands in IPython,
    such as %magic functions. Used in tab completion.

    For a more general function, see os.path.commonprefix
    """
    # min/max give the alphabetically first/last item; the last item always
    # has the fewest leading % symbols, so these two bound the escape runs.
    lo_match = ESCAPE_RE.match(min(items))
    hi_match = ESCAPE_RE.match(max(items))
    escape_prefix = ''
    if lo_match and hi_match:
        # Common suffix == reversed common prefix of the reversed strings.
        reversed_common = os.path.commonprefix(
            (lo_match.group(0)[::-1], hi_match.group(0)[::-1]))
        escape_prefix = reversed_common[::-1]
    stripped = [s.lstrip(ESCAPE_CHARS) for s in items]
    return escape_prefix + os.path.commonprefix(stripped)
def sround(x: Union[np.ndarray, float, list, tuple], digits: int = 1) -> Any:
    """ 'smart' round: keep ``digits`` digits past the most significant one.

    Args
        x (int, float, list, tuple, ndarray): value(s) to round
        digits (int [1]): number of digits beyond the highest digit

    Returns
        the rounded scalar, or a container of the same kind as ``x``

    Examples
        >>> sround(0.0212343, 2)  # result 0.0212
    """
    def _scalar_digits(v):
        # Treat 0 as magnitude 0 to avoid log10(0) == -inf.
        mag = 0.0 if not v else np.log10(np.abs(v))
        return max(-int(np.floor(mag)) + digits, 0)

    # Scalar path (generalized: plain ints / numpy integers now accepted
    # instead of falling through and raising).  bool excluded on purpose.
    if isinstance(x, (int, float, np.floating, np.integer)) and not isinstance(x, bool):
        return np.round(x, _scalar_digits(x))

    as_tuple = isinstance(x, tuple)
    if as_tuple:
        # BUGFIX: tuples previously set x = list(x) but then skipped the
        # `elif` that computes per-element digits, so `digits[i]` below
        # raised TypeError ('int' object is not subscriptable).
        x = list(x)
    if isinstance(x, (list, np.ndarray)):
        mags = np.log10(np.abs(x))
        mags[np.abs(mags) == np.inf] = 0  # zeros -> magnitude 0
        per_elem = np.maximum(-np.floor(mags).astype(int) + digits, 0)
        for i in range(len(x)):
            x[i] = np.round(x[i], per_elem[i])
    if as_tuple:
        x = tuple(x)
    return x
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.