| content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M) |
|---|---|---|
def _is_class(s):
"""Imports from a class/object like import DefaultJsonProtocol._"""
return s.startswith('import ') and len(s) > 7 and s[7].isupper()
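# Example (illustrative sketch; the second import string is made up):
assert _is_class('import DefaultJsonProtocol._')
assert not _is_class('import scala.collection.mutable')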
|
deee946066b5b5fc548275dd2cce7ebc7023626d
| 3,646,713
|
def evaluate(vsm, wordsim_dataset_path):
"""Extract Correlation, P-Value for specified vector space mapper."""
return evaluation.extract_correlation_coefficient(
score_data_path=wordsim_dataset_path, vsm=vsm
)
|
2e12b16eee43aef50b5a6de7d0d9fc5b9c806536
| 3,646,714
|
def longest_substring_using_lists(s: str) -> int:
"""
find the longest substring without repeating characters
644 ms 14.3 MB
>>> longest_substring_using_lists("abac")
3
>>> longest_substring_using_lists("abcabcbb")
3
>>> longest_substring_using_lists("bbbbb")
1
>>> longest_substring_using_lists("pwwkew")
3
"""
words = list()
longest = 0
for char in s:
# for each character
removals = []
for word_idx in range(len(words)):
# check all found words for the char
word = words[word_idx]
if char in word:
# if it exists then set its length to longest if it is the longest
longest = max(longest, len(word))
removals.append(word)
else:
# else add char to word
words[word_idx] += char
for remove in removals:
words.remove(remove)
# add char into words
words.append(char)
return max(longest, *[len(word) for word in words])
|
4292af29c59ea6210cde28745f91f1e9573b7104
| 3,646,715
|
def getuserobj(user_id=None):
"""
Dedicated interface function used at login to check whether a user exists.
:param user_id: user id (username)
:return: the user object if it exists, otherwise None
"""
dbobj = connectMysql.connectMysql()
if user_id is None or user_id == '':
dbobj.close_db()
return None
else:
userdata = dbobj.select_db(sql="select * from secret where ID = %s " % user_id)
if not userdata:
# print("No data found for ID = %s and password = %s" % (user_id, password))
dbobj.close_db()
return None
else:
dbobj.close_db()
return userdata[0]
|
78f5cd9edd72b1ee838fb4b7cde73b3c960be0df
| 3,646,716
|
def _parse_track_df(df: pd.DataFrame, track_id: int, track_name: str, track_comment: str,
data_year: int) -> dict:
"""
parses track data
:param df: data representing a track
:param track_id: track id
:param track_name: track name
:param track_comment: track comment
:param data_year: year to which the data is relevant
:return: parsed data
"""
must = from_list = choice = corner_stones = complementary = minor = additional_hug = 0
point_columns = [i for i, c in enumerate(df.columns) if 'כ נקודות' in c]
for i, r in df.iterrows():
category = r[0]
if 'סה\"כ' in category:
continue
raw_points = [r[i] for i in point_columns]
for raw_point in raw_points:
if not raw_point or pd.isnull(raw_point): # no need to take Nan or 0 value
continue
try:
points = float(raw_point)
except ValueError:
match = RE_RANGE.match(raw_point) or RE_MIN.match(raw_point)
if match:
points = float(match[1] or match[2])
else:
continue
if category in (MUST, MUST_IN_HUG, MUST_PROGRAMMING, MUST_SAFETY_LIBRARY) \
or MUST in category:
must += points
elif category in CHOICE_FROM_LIST or 'במסגרת האשכול' in category:
from_list += points
elif category == CHOICE_IN_HUG:
choice += points
elif CORNER_STONES in category:
corner_stones += points
elif category == COMPLEMENTARY:
complementary += points
elif category == MINOR:
minor += points
elif category == ADDITIONAL_HUG:
additional_hug += points
else:
# print(f'Could not identify {category}={raw_point}, defaulting to MUST')
must += points
return {'track_number': track_id,
'data_year': data_year,
'name': track_name,
'points_must': must,
'points_from_list': from_list,
'points_choice': choice,
'points_complementary': complementary,
'points_corner_stones': corner_stones,
'points_minor': minor,
'points_additional_hug': additional_hug,
'comment': track_comment or ''}
|
a6ccd068829ebd0355d4d13ee327255c09615a16
| 3,646,717
|
from typing import Tuple
from typing import Mapping
def parse_tileset(
tileset: TileSet
) -> Tuple[Mapping[Axes, int], TileCollectionData]:
"""
Parse a :py:class:`slicedimage.TileSet` for formatting into an
:py:class:`starfish.imagestack.ImageStack`.
Parameters:
-----------
tileset : TileSet
The tileset to parse.
Returns:
--------
Tuple[Tuple[int, int], TileSetData] :
A tuple consisting of the following:
1. The (y, x) size of each tile.
2. A :py:class:`starfish.imagestack.tileset.TileSetData` that can be queried to obtain
the image data and extras metadata of each tile, as well as the extras metadata of
the entire :py:class:`slicedimage.TileSet`.
"""
tile_data = TileSetData(tileset)
tile_shape = tileset.default_tile_shape
# if we don't have the tile shape, then we peek at the first tile and get its shape.
if tile_shape is None:
tile_key = next(iter(tile_data.keys()))
tile = tile_data.get_tile_by_key(tile_key)
tile_shape = tile.tile_shape
return (
tile_shape,
tile_data,
)
|
d75b121e91d47424704de671c716d1fbf6b02e86
| 3,646,718
|
def pad_sents(sents, pad_token):
""" Pad list of sentences(SMILES) according to the longest sentence in the batch.
@param sents (list[list[str]]): list of SMILES, where each sentence
is represented as a list of tokens
@param pad_token (str): padding token
@returns sents_padded (list[list[str]]): list of SMILES where SMILES shorter
than the max length SMILES are padded out with the pad_token, such that
each SMILES in the batch now has equal length.
"""
sents_padded = []
max_length = max([len(sentence) for sentence in sents])
sents_padded = [sentence+(max_length-len(sentence))*[pad_token] for sentence in sents]
return sents_padded
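# Example (illustrative sketch with toy tokens): pad to the longest sentence.
print(pad_sents([['C', 'C', 'O'], ['C']], '<pad>'))
# [['C', 'C', 'O'], ['C', '<pad>', '<pad>']]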
|
8f0eabfaaa18eafa84366a2f20ed2ddd633dacc6
| 3,646,719
|
def bicubic_interpolation_filter(sr):
"""Creates a bicubic interpolation filter."""
return _interpolation_filter(sr, cv2.INTER_CUBIC)
|
772ee384b90ae6b1e9fe875374441b3d59f86326
| 3,646,720
|
def is_receive_waiting():
"""Check to see if a payload is waiting in the receive buffer"""
#extern RADIO_RESULT radio_is_receive_waiting(void);
res = radio_is_receive_waiting_fn()
# this is RADIO_RESULT_OK_TRUE or RADIO_RESULT_OK_FALSE
# so it is safe to evaluate it as a boolean number.
return (res != 0)
|
8b2a8d1a003f89c3a9b8df7db0729e15a96fdfcc
| 3,646,721
|
import typing
def residual_block(
x,
filters: int,
weight_decay: float,
*,
strides: typing.Union[int, typing.Tuple[int, int]],
dilation: typing.Union[int, typing.Tuple[int, int]],
groups: int,
base_width: int,
downsample,
use_basic_block: bool,
use_cbam: bool,
cbam_channel_reduction: int,
activation: str,
pre_activation: bool,
small_input: bool,
name: str,
):
""" Residual block.
Design follows [2], where strides=2 is used in the 3x3 convolution instead of the
first 1x1 convolution of the bottleneck block. This increases Top-1 accuracy by ~0.5%,
with a slight performance drawback of ~5% images/sec. The last BN in each residual
branch is zero-initialized following [3], so that the residual branch starts with zeros
and each residual block behaves like an identity. This improves the model by 0.2~0.3%.
- Attention Layers
- CBAM: Convolutional Block Attention Module
[1] Deep Residual Learning for Image Recognition
https://arxiv.org/abs/1512.03385
[2] resnet_50_v1_5_for_pytorch
https://ngc.nvidia.com/catalog/model-scripts/nvidia
[3] Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour
https://arxiv.org/abs/1706.02677
[4] Identity Mappings in Deep Residual Networks
https://arxiv.org/abs/1603.05027
"""
x = (basic if use_basic_block else bottleneck)(
x,
filters,
weight_decay,
strides=strides,
dilation=dilation,
groups=groups,
base_width=base_width,
downsample=downsample,
use_cbam=use_cbam,
cbam_channel_reduction=cbam_channel_reduction,
activation=activation,
pre_activation=pre_activation,
small_input=small_input,
name=name,
)
return x
|
f2021a89e2d737e73bfef3fb7dc127c3bbb5d0b7
| 3,646,722
|
def vacancy_based_on_freq(service,duration,frequency,earliest,latest,local_timezone):
"""
Check for vacant timeslots of the user-specified duration, up to the user-specified frequency per week.
service: get authentication from Google
duration: the length of the new event (int)
frequency: number of days in a week (int)
earliest: earliest time for timeframe (int)
latest: latest time for timeframe (int)
local_timezone: assigned timezone
"""
result = {}
week = 7
for i in range(week):
vacancy = check_vacancy(service,duration,i+1,earliest,latest,local_timezone)
if vacancy is None:
print(f'No slots left on this date. Still {frequency} spots left in the week to fill.')
else:
result[i+1] = vacancy
frequency -= 1
print(f'Yes! There is a timeslot! Now {frequency} spots left in the week.')
if frequency == 0:
break
return result
|
ba3d7b688170a7e03d849070eaae69ed718257d6
| 3,646,723
|
def byte_list_to_nbit_le_list(data, bitwidth, pad=0x00):
"""! @brief Convert a list of bytes to a list of n-bit integers (little endian)
If the length of the data list is not a multiple of `bitwidth` // 8, then the pad value is used
for the additional required bytes.
@param data List of bytes.
@param bitwidth Width in bits of the resulting values.
@param pad Optional value used to pad input data if not aligned to the bitwidth.
@result List of integer values that are `bitwidth` bits wide.
"""
bytewidth = bitwidth // 8
datalen = len(data) // bytewidth * bytewidth
res = [sum((data[offset + i] << (i * 8)) for i in range(bytewidth))
for offset in range(0, datalen, bytewidth)
]
remainder = len(data) % bytewidth
if remainder != 0:
pad_count = bytewidth - remainder
padded_data = list(data[-remainder:]) + [pad] * pad_count
res.append(sum((padded_data[i] << (i * 8)) for i in range(bytewidth)))
return res
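# Example (illustrative sketch): pack little-endian bytes into 16-bit values;
# the trailing odd byte is padded with 0x00.
print([hex(v) for v in byte_list_to_nbit_le_list([0x34, 0x12, 0x78], 16)])
# ['0x1234', '0x78']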
|
b92bbc28cc2ffd59ae9ca2e459842d7f4b284d18
| 3,646,725
|
def admin_not_need_apply_check(func):
"""
The admin user does not need the permission application check
"""
@wraps(func)
def wrapper(view, request, *args, **kwargs):
if request.user.username == ADMIN_USER:
raise error_codes.INVALID_ARGS.format(_("用户admin默认拥有任意权限, 无需申请"))
return func(view, request, *args, **kwargs)
return wrapper
|
9592d3a4691761be1f58c8a404d7cbef9bd01116
| 3,646,726
|
def parse_headers(header_list):
"""
Convert headers from our serialized dict with lists for keys to a
HTTPMessage
"""
header_string = b""
for key, values in header_list.items():
for v in values:
header_string += \
key.encode('utf-8') + b":" + v.encode('utf-8') + b"\r\n"
return compat.get_httpmessage(header_string)
|
8a387a7a60044115c61838f1da853e4608e3840d
| 3,646,727
|
def _rrv_add_ ( s , o ) :
"""Addition of RooRealVar and ``number''
>>> var = ...
>>> num = ...
>>> res = var + num
"""
if not isinstance ( o , val_types ) : return NotImplemented
if isinstance ( o , _RRV_ ) and not o.isConstant() : o = o.ve ()
elif hasattr ( o , 'getVal' ) : o = o.getVal ()
#
v = s.getVal() if s.isConstant() else s.ve()
#
return v + o
|
e3e41fe3ae53379f0b49a4a2aa14e3a401bae6b3
| 3,646,728
|
from haversine import haversine #import haversine function from library
def stations_by_distance(stations, p):
"""This module sorts stations by distance and returns a
list of (station, town, distance) tupules."""
list_station_dist = [] #initiates list to store stations and distance
#iterate through stations and calculate distances
for station in stations:
distance = haversine(station.coord, p) #use haversine function to calculate distance between station and p
list_station_dist.append((station.name, station.town, distance)) #add data to list
sorted_list = sorted_by_key(list_station_dist, 2) #use sorting module to sort by distance
return sorted_list
|
4a378090803b061b8ea9b17d6255038235c1b1ca
| 3,646,729
|
import hashlib
def create_SHA_256_hash_of_file(file):
"""
Function that returns the SHA 256 hash of 'file'.\n
Logic taken from https://www.quickprogrammingtips.com/python/how-to-calculate-sha256-hash-of-a-file-in-python.html
"""
sha256_hash = hashlib.sha256()
with open(file, "rb") as f:
# Read and update hash string value in blocks of 4K
for byte_block in iter(lambda: f.read(4096), b""):
sha256_hash.update(byte_block)
# Converting to upper case because that's what is required by the policy
# service. See their code:
# https://dev.azure.com/msasg/Bing_and_IPG/_git/Aether?path=/src/aether/platform/backendV2/BlueBox/PolicyService/Microsoft.MachineLearning.PolicyService/Workers/CatalogValidation.cs
return sha256_hash.hexdigest().upper()
|
14f62a49ea54f5fceb719c4df601fde165f5e55c
| 3,646,730
|
def partition_average(partition):
"""Given a partition, calculates the expected number of words sharing the same hint"""
score = 0
total = 0
for hint in partition:
score += len(partition[hint])**2
total += len(partition[hint])
return score / total
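# Example (illustrative sketch with a made-up partition of guesses by hint):
# expected value = (2**2 + 1**2) / 3 = 5/3 words sharing the same hint.
print(partition_average({'GYB': ['crane', 'crate'], 'BBB': ['pious']}))
# 1.6666666666666667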
|
944f514e925a86f3be431bd4d56970d92d16f570
| 3,646,731
|
import queue
def set_params(config):
"""Configure parameters based on loaded configuration"""
params = {
'path': None,
'minio': None,
'minio_access_key': None,
'minio_secret_key': None,
'minio_secure': True,
'minio_ca_certs': None,
'minio_bucket': 'catalogue',
'minio_path': '',
'url': None,
'client': None,
'instance': None,
'timeout': DEFAULT_TIMEOUT,
'verify': False,
'cert': None,
'thread_cnt': DEFAULT_THREAD_COUNT,
'wsdl_replaces': DEFAULT_WSDL_REPLACES,
'excluded_member_codes': [],
'excluded_subsystem_codes': [],
'filtered_hours': 24,
'filtered_days': 30,
'filtered_months': 12,
'cleanup_interval': 7,
'days_to_keep': 30,
'work_queue': queue.Queue(),
'results': {},
'results_lock': Lock(),
'shutdown': Event()
}
if 'output_path' in config:
params['path'] = config['output_path']
LOGGER.info('Configuring "path": %s', params['path'])
if 'minio_url' in config:
params['minio'] = config['minio_url']
LOGGER.info('Configuring "minio_url": %s', params['minio'])
if 'minio_access_key' in config:
params['minio_access_key'] = config['minio_access_key']
LOGGER.info('Configuring "minio_access_key": %s', params['minio_access_key'])
if 'minio_secret_key' in config:
params['minio_secret_key'] = config['minio_secret_key']
LOGGER.info('Configuring "minio_secret_key": <password hidden>')
if 'minio_secure' in config:
params['minio_secure'] = config['minio_secure']
LOGGER.info('Configuring "minio_secure": %s', params['minio_secure'])
if 'minio_ca_certs' in config:
params['minio_ca_certs'] = config['minio_ca_certs']
LOGGER.info('Configuring "minio_ca_certs": %s', params['minio_ca_certs'])
if 'minio_bucket' in config:
params['minio_bucket'] = config['minio_bucket']
LOGGER.info('Configuring "minio_bucket": %s', params['minio_bucket'])
if 'minio_path' in config:
params['minio_path'] = config['minio_path']
params['minio_path'] = params['minio_path'].strip('/')
if params['minio_path']:
params['minio_path'] += '/'
LOGGER.info('Configuring "minio_path": %s', params['minio_path'])
if params['path'] is None and params['minio'] is None:
LOGGER.error('Configuration error: No output path or MinIO URL are provided')
return None
if 'server_url' in config:
params['url'] = config['server_url']
LOGGER.info('Configuring "url": %s', params['url'])
else:
LOGGER.error('Configuration error: Local Security Server URL is not provided')
return None
if 'client' in config and len(config['client']) in (3, 4):
params['client'] = config['client']
LOGGER.info('Configuring "client": %s', params['client'])
else:
LOGGER.error(
'Configuration error: Client identifier is incorrect. Expecting list of identifiers. '
'Example: ["INST", "CLASS", "MEMBER_CODE", "MEMBER_CLASS"])')
return None
if 'instance' in config and config['instance']:
params['instance'] = config['instance']
LOGGER.info('Configuring "instance": %s', params['instance'])
if 'timeout' in config and config['timeout'] > 0.0:
params['timeout'] = config['timeout']
LOGGER.info('Configuring "timeout": %s', params['timeout'])
if 'server_cert' in config and config['server_cert']:
params['verify'] = config['server_cert']
LOGGER.info('Configuring "verify": %s', params['verify'])
if 'client_cert' in config and 'client_key' in config \
and config['client_cert'] and config['client_key']:
params['cert'] = (config['client_cert'], config['client_key'])
LOGGER.info('Configuring "cert": %s', params['cert'])
if 'thread_count' in config and config['thread_count'] > 0:
params['thread_cnt'] = config['thread_count']
LOGGER.info('Configuring "thread_cnt": %s', params['thread_cnt'])
if 'wsdl_replaces' in config:
params['wsdl_replaces'] = config['wsdl_replaces']
LOGGER.info('Configuring "wsdl_replaces": %s', params['wsdl_replaces'])
if 'excluded_member_codes' in config:
params['excluded_member_codes'] = config['excluded_member_codes']
LOGGER.info('Configuring "excluded_member_codes": %s', params['excluded_member_codes'])
if 'excluded_subsystem_codes' in config:
params['excluded_subsystem_codes'] = config['excluded_subsystem_codes']
LOGGER.info(
'Configuring "excluded_subsystem_codes": %s', params['excluded_subsystem_codes'])
if 'filtered_hours' in config and config['filtered_hours'] > 0:
params['filtered_hours'] = config['filtered_hours']
LOGGER.info('Configuring "filtered_hours": %s', params['filtered_hours'])
if 'filtered_days' in config and config['filtered_days'] > 0:
params['filtered_days'] = config['filtered_days']
LOGGER.info('Configuring "filtered_days": %s', params['filtered_days'])
if 'filtered_months' in config and config['filtered_months'] > 0:
params['filtered_months'] = config['filtered_months']
LOGGER.info('Configuring "filtered_months": %s', params['filtered_months'])
if 'cleanup_interval' in config and config['cleanup_interval'] > 0:
params['cleanup_interval'] = config['cleanup_interval']
LOGGER.info('Configuring "cleanup_interval": %s', params['cleanup_interval'])
if 'days_to_keep' in config and config['days_to_keep'] > 0:
params['days_to_keep'] = config['days_to_keep']
LOGGER.info('Configuring "days_to_keep": %s', params['days_to_keep'])
if params['path'] is not None and params['minio'] is not None:
LOGGER.warning('Saving to both local and MinIO storage is not supported')
if params['minio']:
LOGGER.info('Using MinIO storage')
else:
LOGGER.info('Using local storage')
LOGGER.info('Configuration done')
return params
|
b1cc87d88b656cb6d57dcb0579276de8f0d744e8
| 3,646,732
|
def post_stop_watch():
"""
This method stops the watcher and returns -> "watching": false
"""
url = common.combine_url(
config.INGESTION_AGENT_URL,
config.INGESTION_WATCHER_STATUS,
config.INGESTION_STOP_WATCHER,
)
resp = base_requests.send_post_request(url)
return resp
|
2b7634c78bf46c6365aac89f4be6908d8baf1bcf
| 3,646,733
|
def combine_grad_fields(field1, field2):
"""
Combines two gradient fields by summing the gradients at every point.
The absolute values of each pixel are not interesting.
Inputs:
- field1: np.array(N, M) of Pixels.
- field2: np.array(N, M) of Pixels.
Output:
- out_field: np.array(N, M) of Pixels.
"""
assert field1.shape[0] == field2.shape[0], "field1.shape[0] != field2.shape[0]"
assert field1.shape[1] == field2.shape[1], "field1.shape[1] != field2.shape[1]"
out_field = np.ndarray(field1.shape, dtype=object)
N, M = field1.shape
for i in range(N):
for j in range(M):
grad = field1[i, j].grad + field2[i, j].grad
out_field[i, j] = Pixel(i, j, 0, grad)
out_field[i, j].normalize_grad()
return out_field
|
7cbe02280c33d9ed077a5b39f3df347c08c11417
| 3,646,734
|
def edit_module_form(request, module_id):
"""
Only the instructor who is the creator of the course to which this module belongs can access this.
"""
course = Module.objects.get(moduleID=module_id).getCourse()
if request.user.role != 1 or (course.instructorID.userID != request.user.userID):
context={
'message': "You do not have access to this page."
}
return render(request, 'ICE/message.html', context)
instructor_id = request.user.userID
if request.method == 'POST':
module=Module.objects.get(moduleID=module_id)
ordNum = 0
for key, value in request.POST.items():
if key=='orderNumber':
ordNum = value
course = module.getCourse()
modules = Module.objects.filter(courseID=course.courseID)
maxOrd = 0
sameOrd = 0
for m in modules:
if m.orderNumber > maxOrd:
maxOrd = m.orderNumber
if int(maxOrd) < int(ordNum):
for m in modules:
if m.orderNumber > module.orderNumber:
mod = Module.objects.get(moduleID = m.moduleID)
mod.orderNumber -= 1
mod.save()
module.orderNumber=course.numOfModules
module.save()
elif int(ordNum) == 0:
for m in modules:
if m.orderNumber < module.orderNumber:
mod = Module.objects.get(moduleID = m.moduleID)
mod.orderNumber += 1
mod.save()
module.orderNumber = 1
module.save()
else:
for m in modules:
if int(m.orderNumber) == int(ordNum):
sameOrd = m.orderNumber
if int(sameOrd) != 0 and int(sameOrd) > int(module.orderNumber):
for m in modules:
if int(m.orderNumber) <= int(sameOrd) and int(m.orderNumber) > int(module.orderNumber):
mod = Module.objects.get(moduleID=m.moduleID)
mod.orderNumber = mod.orderNumber - 1
mod.save()
module.orderNumber = ordNum
module.save()
elif int(sameOrd) != 0 and int(sameOrd) < int(module.orderNumber):
for m in modules:
if int(m.orderNumber) >= int(sameOrd) and int(m.orderNumber) < int(module.orderNumber):
mod = Module.objects.get(moduleID=m.moduleID)
mod.orderNumber = mod.orderNumber + 1
mod.save()
module.orderNumber = ordNum
module.save()
return redirect('../../instructorCourse/courseID='+str(course.courseID)+'&moduleID=1/')
form = EditModuleForm()
return render(request, 'edit_module.html', {'moduleform': form})
|
ba1dc405c9249fb6227d0ed0d1cd0fc5b80caa78
| 3,646,736
|
def r2(y_true, y_pred):
"""
:math:`R^2` (coefficient of determination) regression score function.
Best possible score is 1.0, lower values are worse.
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: R2
"""
return r2_score(y_true, y_pred)
|
3962c83a022cbab416a2914c0be749cf5b66d51e
| 3,646,737
|
def passstore(config, name):
"""Get password file"""
return config.passroot / name
|
d0ca8c71650bd98dacd7d6ff9ed061aba3f2c43a
| 3,646,738
|
def coord_shell_array(nvt_run, func, li_atoms, species_dict, select_dict,
run_start, run_end):
"""
Args:
nvt_run: MDAnalysis Universe
func: One of the neighbor statistical method (num_of_neighbor_one_li,
num_of_neighbor_one_li_simple)
li_atoms: Atom group of the Li atoms.
species_dict (dict): A dict of coordination cutoff distance
of the interested species.
select_dict: A dictionary of species selection.
run_start (int): Start time step.
run_end (int): End time step.
"""
num_array = func(
nvt_run, li_atoms[0], species_dict, select_dict, run_start, run_end
)
for li in tqdm_notebook(li_atoms[1::]):
this_li = func(
nvt_run, li, species_dict, select_dict, run_start, run_end
)
for kw in num_array.keys():
num_array[kw] = np.concatenate((num_array.get(kw),
this_li.get(kw)), axis=0)
return num_array
|
4a0b5f1417cb2184c866cf5ca5c138ed051f44a6
| 3,646,740
|
def plot_transactions_ts(transactional_df, frequency="M", aggregation="n_purchases", reg=False, black_friday_dates=None, plot_black_friday=False, plot_normal_only=False, **kwargs):
"""
Plots the evolution of purchases over time.
black_friday_dates:: list of datetime.date
"""
# preventing unwanted modifications to original df
transactional_df = transactional_df.copy().rename(columns={"data": "date", "receita": "revenue", "id_cliente": "customer_id"})
transactional_df = transactional_df[["date", "revenue", "customer_id"] if not 'black_friday' in transactional_df.columns else ["date", "revenue", "customer_id", "black_friday"]]
transactional_df.index = transactional_df['date']
# if black friday dates are explicity given, a new column is added to the dataframe flagging the relevant purchases
if black_friday_dates:
transactional_df["black_friday"] = transactional_df["date"].dt.date.isin(black_friday_dates).astype(np.int8)
# level of aggregation
assert frequency not in ('Y',), "invalid frequency - use plot_transactions_y"
grouper = transactional_df.resample(frequency)
# aggregating data
if aggregation == "n_purchases":
df = grouper.size().rename(aggregation).to_frame()
elif aggregation == "revenue":
df = grouper["revenue"].sum().rename(aggregation).to_frame()
elif aggregation == "mean_ticket":
df = grouper["revenue"].mean().rename(aggregation).to_frame()
elif aggregation == "n_customers":
df = grouper["customer_id"].nunique().rename(aggregation).to_frame()
else:
raise ValueError(f"unknown aggregation {aggregation} - available agregations: n_purchases, revenue, mean_ticket, n_customers")
# for frequency grouping troubleshooting
# if kwargs.get("troubleshoot_frequency", False):
df = df.join(grouper["date"].max().rename("date_max"))
df = df.join(grouper["date"].min().rename("date_min"))
df["n_days"] = (df["date_max"] - df["date_min"]).dt.days + 1
if kwargs.get("full_intervals_only", False):
if frequency == "M":
df = df[df["n_days"] >= kwargs.get("full_interval_m", 28)].copy()
elif frequency == "W":
df = df[df["n_days"] >= kwargs.get("full_interval_m", 7)].copy()
if "black_friday" in transactional_df.columns:
if frequency != 'Y':
df = df.join(grouper["black_friday"].max())
if plot_black_friday or plot_normal_only:
assert "black_friday" in df.columns, "No Black Friday Information Available"
# n_purchases on normal days
df[f"{aggregation}_normal"] = df[aggregation]
df.loc[df["black_friday"] == 1, f"{aggregation}_normal"] = np.nan
df[f"{aggregation}_normal"] = df[f"{aggregation}_normal"].interpolate(method="linear")
# for plotting reasons, considering "neighbor" rows as black_friday == 1
try:
bf_idx = [(i-1, i, i+1) for i in df.reset_index()[df.reset_index()["black_friday"] == 1].index]
bf_idx = list(set(list(sum(bf_idx, ()))))
df.iloc[bf_idx, (df.columns == "black_friday").argmax()] = 1
except IndexError:
pass
# n_purchases on black friday days
df[f"{aggregation}_bf"] = df[aggregation]
df.loc[df["black_friday"] != 1, f"{aggregation}_bf"] = np.nan
# plot!
ax = kwargs.get("ax")
if not ax:
fig, ax = plt.subplots(figsize=kwargs.get("figsize", (18,4)))
if plot_black_friday:
(df[f'{aggregation}_normal']).rolling(kwargs.get("rolling_window", 1)).mean().plot(ax=ax, label=kwargs.get("label_normal", "Normal"))
(df[f'{aggregation}_bf']).rolling(kwargs.get("rolling_window", 1)).mean().plot(ax=ax, label=kwargs.get("label_bf", "Black Friday"))
# simple linear regression - WARNING: simplistic treatment of timeseries data
if reg:
f = np.poly1d(np.polyfit(range(df.shape[0]), (df[f'{aggregation}_normal']).values, 1))
df["fitted_line"] = f(np.arange(df.shape[0]))
df["fitted_line"].plot(ax=ax, lw=2, ls='--', alpha=.5, label="Eq_normal: " + f"{f}".strip())
elif plot_normal_only:
(df[f'{aggregation}_normal']).rolling(kwargs.get("rolling_window", 1)).mean().plot(ax=ax, label=kwargs.get("label_normal", "Normal"))
# simple linear regression - WARNING: simplistic treatment of timeseries data
if reg:
f = np.poly1d(np.polyfit(range(df.shape[0]), (df[f'{aggregation}_normal']).values, 1))
df["fitted_line"] = f(np.arange(df.shape[0]))
df["fitted_line"].plot(ax=ax, lw=2, ls='--', alpha=.5, label="Eq_normal: " + f"{f}".strip())
else:
(df[aggregation]).rolling(kwargs.get("rolling_window", 1)).mean().plot(ax=ax, label=kwargs.get("label"))
# simple linear regression - WARNING: simplistic treatment of timeseries data
if reg:
f = np.poly1d(np.polyfit(range(df.shape[0]), (df[aggregation]).values, 1))
df["fitted_line"] = f(np.arange(df.shape[0]))
df["fitted_line"].plot(ax=ax, lw=2, ls='--', alpha=.5, label="Eq_normal: " + f"{f}".strip())
if kwargs.get("legend", False):
ax.legend()
ax.set_title(kwargs.get("title", f"{aggregation.upper()} - {frequency}"), size=kwargs.get("title_size", 14))
ax.set_xlabel(kwargs.get("xlabel",""))
return ax
|
ba4764f91a37f4b88f63e52d7aa02660d8296d11
| 3,646,742
|
import time
def generate_token(public_id):
"""
Simple token generator returning encoded JWT
:param public_id: unique string user identification
:return JWT: authorization token for given public_id
"""
# if User.query.filter_by(public_id=public_id).one_or_none() is None:
# return jsonify(404, "ID unverified")
# else:
timestamp = int(time.time())
payload = {
"iss": JWT_ISSUER,
"iat": int(timestamp),
"exp": int(timestamp + JWT_LIFETIME_SECONDS),
"sub": str(public_id),
}
return jwt.encode(payload, JWT_SECRET, algorithm=JWT_ALGORITHM)
|
88bbaabfeb8ba666daf532cae22f7486349a9a9d
| 3,646,743
|
def plot_predicted_data(training_actual_df, predicted_df, date_col, actual_col,
pred_col=PredictionKeys.PREDICTION.value, prediction_percentiles=None,
title="", test_actual_df=None, is_visible=True,
figsize=None, path=None, fontsize=None,
line_plot=False, markersize=70, lw=2, linestyle='-'):
"""
plot training actual response together with predicted data; if the actual response for the
predicted period is available, plot it too.
Parameters
----------
training_actual_df : pd.DataFrame
training actual response data frame. two columns required: actual_col and date_col
predicted_df : pd.DataFrame
predicted data response data frame. two columns required: actual_col and pred_col. If
user provide prediction_percentiles, it needs to include them as well in such
`prediction_{x}` where x is the correspondent percentiles
prediction_percentiles : list
list of two elements indicates the lower and upper percentiles
date_col : str
the date column name
actual_col : str
pred_col : str
title : str
title of the plot
test_actual_df : pd.DataFrame
test actual response dataframe. two columns required: actual_col and date_col
is_visible : boolean
whether we want to show the plot. If called from unittest, is_visible might = False.
figsize : tuple
figsize pass through to `matplotlib.pyplot.figure()`
path : str
path to save the figure
fontsize : int; optional
fontsize of the title
line_plot : bool; default False
if True, make line plot for observations; otherwise, make scatter plot for observations
markersize : int; optional
point marker size
lw : int; optional
out-of-sample prediction line width
linestyle : str
linestyle of prediction plot
Returns
-------
matplotlib axes object
"""
if is_empty_dataframe(training_actual_df) or is_empty_dataframe(predicted_df):
raise ValueError("No prediction data or training response to plot.")
if not is_ordered_datetime(predicted_df[date_col]):
raise ValueError("Prediction df dates is not ordered.")
plot_confid = False
if prediction_percentiles is None:
_pred_percentiles = [5, 95]
else:
_pred_percentiles = prediction_percentiles
if len(_pred_percentiles) != 2:
raise ValueError("prediction_percentiles has to be None or a list with length=2.")
confid_cols = ['prediction_{}'.format(_pred_percentiles[0]),
'prediction_{}'.format(_pred_percentiles[1])]
if set(confid_cols).issubset(predicted_df.columns):
plot_confid = True
if not figsize:
figsize = (16, 8)
if not fontsize:
fontsize = 16
_training_actual_df = training_actual_df.copy()
_predicted_df = predicted_df.copy()
_training_actual_df[date_col] = pd.to_datetime(_training_actual_df[date_col])
_predicted_df[date_col] = pd.to_datetime(_predicted_df[date_col])
fig, ax = plt.subplots(facecolor='w', figsize=figsize)
if line_plot:
ax.plot(_training_actual_df[date_col].values,
_training_actual_df[actual_col].values,
marker=None, color='black', lw=lw, label='train response', linestyle=linestyle)
else:
ax.scatter(_training_actual_df[date_col].values,
_training_actual_df[actual_col].values,
marker='.', color='black', alpha=0.8, s=markersize,
label='train response')
ax.plot(_predicted_df[date_col].values,
_predicted_df[pred_col].values,
marker=None, color='#12939A', lw=lw, label=PredictionKeys.PREDICTION.value, linestyle=linestyle)
# vertical line separate training and prediction
if _training_actual_df[date_col].values[-1] < _predicted_df[date_col].values[-1]:
ax.axvline(x=_training_actual_df[date_col].values[-1], color='#1f77b4', linestyle='--')
if test_actual_df is not None:
test_actual_df = test_actual_df.copy()
test_actual_df[date_col] = pd.to_datetime(test_actual_df[date_col])
if line_plot:
ax.plot(test_actual_df[date_col].values,
test_actual_df[actual_col].values,
marker=None, color='#FF8C00', lw=lw, label='test response', linestyle=linestyle)
else:
ax.scatter(test_actual_df[date_col].values,
test_actual_df[actual_col].values,
marker='.', color='#FF8C00', alpha=0.8, s=markersize,
label='test response')
# prediction intervals
if plot_confid:
ax.fill_between(_predicted_df[date_col].values,
_predicted_df[confid_cols[0]],
_predicted_df[confid_cols[1]],
facecolor='#42999E', alpha=0.5)
ax.set_title(title, fontsize=fontsize)
ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.5)
ax.legend()
if path:
fig.savefig(path)
if is_visible:
plt.show()
else:
plt.close()
return ax
|
36e0fe88c664df1b93a7d96f217d4d2c94b96ad2
| 3,646,745
|
def CheckTreeIsOpen(input_api, output_api, url, closed, url_text):
"""Similar to the one in presubmit_canned_checks except it shows an helpful
status text instead.
"""
assert(input_api.is_committing)
try:
connection = input_api.urllib2.urlopen(url)
status = connection.read()
connection.close()
if input_api.re.match(closed, status):
long_text = status + '\n' + url
try:
connection = input_api.urllib2.urlopen(url_text)
text = connection.read()
connection.close()
match = input_api.re.search(r"\<div class\=\"Notice\"\>(.*)\<\/div\>",
text)
if match:
long_text = match.group(1).strip()
except IOError:
pass
return [output_api.PresubmitPromptWarning("The tree is closed.",
long_text=long_text)]
except IOError:
pass
return []
|
540dd0ceb9c305907b0439b678a6444ca24c3f76
| 3,646,746
|
def tnr_ecma_st(signal, fs, prominence=True):
"""Computation of tone-to-noise ration according to ECMA-74, annex D.9
for a stationary signal.
The T-TNR value is calculated according to ECMA-TR/108
Parameters
----------
signal :numpy.array
A stationary signal in [Pa].
fs : integer
Sampling frequency.
prominence : boolean
If True, the algorithm only returns the prominent tones, if False it returns all tones detected.
Default is True.
Output
------
t_tnr : array of float
global TNR value, along time if is_stationary = False
tnr : array of float
TNR values for each detected tone
prom : array of bool
prominence criterion for each detected tone
tones_freqs : array of float
frequency of the detected tones
"""
# Compute db spectrum
spectrum_db, freq_axis = spectrum(signal, fs, db=True)
# Compute tnr values
tones_freqs, tnr, prom, t_tnr = _tnr_main_calc(spectrum_db, freq_axis)
prom = prom.astype(bool)
if prominence == False:
return t_tnr, tnr, prom, tones_freqs
else:
return t_tnr, tnr[prom], prom[prom], tones_freqs[prom]
|
4dcb740899de5a9411fddb8ed41b0b1628a438f4
| 3,646,747
|
def pop_legacy_palette(kwds, *color_defaults):
"""
Older animations in BPA and other areas use all sorts of different names for
what we are now representing with palettes.
This function mutates a kwds dictionary to remove these legacy fields and
extract a palette from it, which it returns.
"""
palette = kwds.pop('palette', None)
if palette:
legacy = [k for k, _ in color_defaults if k in kwds]
if legacy:
raise ValueError('Cannot set palette and ' + ', '.join(legacy))
return palette
values = [kwds.pop(k, v) for k, v in color_defaults]
if values and color_defaults[0][0] in ('colors', 'palette'):
values = values[0]
return make.colors(values or None)
|
438ff6bb0f1300c614c724535d2215b2419fbb84
| 3,646,748
|
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T).
Parameters
----------
X : array-like
First matrix
Y : array-like
Second matrix
"""
return np.dot(X.ravel(), Y.ravel())
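# Example (illustrative sketch; assumes numpy is imported as np, as the
# function itself does): trace(X @ Y.T) equals the dot of the flattened arrays.
import numpy as np
X = np.array([[1., 2.], [3., 4.]])
Y = np.array([[5., 6.], [7., 8.]])
print(trace_dot(X, Y), np.trace(X @ Y.T))  # 70.0 70.0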
|
9c8d601144507cdbfb2d738af61a0016ea808d4a
| 3,646,749
|
import gc
from datetime import datetime
async def handle_waste_view(ack, body, client, view):
"""Process input from waste form"""
logger.info("Processing waste input...")
logger.info(body)
raw_leaders = view['state']['values']['input_a']['leader_names']['selected_options']
leader_list = [" - " + n['value'] for n in raw_leaders]
regulars = float(view['state']['values']['input_b']['regulars']['value'])
spicy = float(view['state']['values']['input_c']['spicy']['value'])
nuggets = float(view['state']['values']['input_d']['nuggets']['value'])
strips = float(view['state']['values']['input_e']['strips']['value'])
g_filets = float(view['state']['values']['input_f']['grilled1']['value'])
g_nuggets = float(view['state']['values']['input_g']['grilled2']['value'])
# Check that input is numeric when it needs to be
chicken_list = [regulars, spicy, nuggets, strips, g_filets, g_nuggets]
# for item in chicken_list:
# if not isinstance(item, float):
# payload = {
# "response_action": "errors",
# "errors": {
# "block_id": "error_message"
# }
# }
# Store data
total_weight = sum(chicken_list)
sh = gc.open_by_key(creds.waste_id)
goal_sheet = sh.worksheet("Goals")
goal_values = goal_sheet.get_all_values()
goals = {}
for row in goal_values:
if row[0] == "Type":
continue
goals[row[0]] = float(row[1])
user = await client.users_info(user=body['user']['id'])
user_name = user['user']['real_name']
new_line = "\n"
block1 = {
"type": "section",
"text": {"type": "mrkdwn", "text": f"*Submitted by:* {user_name}"}
}
block2 = {
"type": "section",
"text": {"type": "mrkdwn",
"text": (f"*Leaders on:*\n"
f"{new_line.join(leader_list)}\n")
}
}
block3_text = "*Weights:*\n"
if total_weight > 0:
if regulars:
if regulars >= goals['Filets']:
block3_text += f"_Regulars: {regulars} lbs._\n"
else:
block3_text += f"Regulars: {regulars} lbs.\n"
if spicy:
if spicy >= goals['Spicy']:
block3_text += f"_Spicy: {spicy} lbs._\n"
else:
block3_text += f"Spicy: {spicy} lbs.\n"
if nuggets:
if nuggets >= goals['Nuggets']:
block3_text += f"_Nuggets: {nuggets} lbs._\n"
else:
block3_text += f"Nuggets: {nuggets} lbs.\n"
if strips:
if strips >= goals['Strips']:
block3_text += f"_Strips: {strips} lbs._\n"
else:
block3_text += f"Strips: {strips} lbs.\n"
if g_filets:
if g_filets >= goals['Grilled Filets']:
block3_text += f"_Grilled Filets: {g_filets} lbs._\n"
else:
block3_text += f"Grilled Filets: {g_filets} lbs.\n"
if g_nuggets:
if g_nuggets >= goals['Grilled Nuggets']:
block3_text += f"_Grilled Nuggets: {g_nuggets} lbs._\n"
else:
block3_text += f"Grilled Nuggets: {g_nuggets} lbs.\n"
to_post = [str(datetime.now()), regulars, spicy, nuggets, strips, g_filets, g_nuggets]
# Handle breakfast items
if datetime.now().hour < 13:
breakfast = float(view['state']['values']['input_h']['breakfast']['value'])
to_post.append(breakfast)
g_breakfast = float(view['state']['values']['input_i']['grilled3']['value'])
to_post.append(g_breakfast)
if sum([breakfast, g_breakfast]) > 0:
total_weight += sum([breakfast, g_breakfast])
if breakfast:
if breakfast >= goals['Breakfast Filets']:
block3_text += f"_Breakfast Filets: {breakfast} lbs._\n"
else:
block3_text += f"Breakfast Filets: {breakfast} lbs.\n"
if g_breakfast:
if g_breakfast >= goals['Grilled Breakfast']:
block3_text += f"_Grilled Breakfast: {g_breakfast} lbs._\n"
else:
block3_text += f"Grilled Breakfast: {g_breakfast} lbs.\n"
block3 = {
"type": "section",
"text": {"type": "mrkdwn", "text": block3_text}
}
blocks = [block1, block2, block3]
other = view['state']['values']['input_j']['other']['value']
if other:
block4 = {
"type": "section",
"text": {"type": "mrkdwn", "text": f"*Notes:*\n{other}"}
}
blocks.append(block4)
block5 = {
"type": "section",
"text": {"type": "mrkdwn", "text": "Please remember to replace stickers on all waste containers."}
}
blocks.append(block5)
await ack()
# Send data to Google Sheet
try:
sheet = sh.worksheet("Data")
sheet.append_row(to_post, value_input_option='USER_ENTERED')
except gspread.exceptions.GSpreadException as e:
return await client.chat_postMessage(channel=body['user']['id'],
text=e)
except Exception as e:
await client.chat_postMessage(channel=body['user']['id'],
text=f"There was an error while storing the message to the Google Sheet.\n{e}")
await client.chat_postMessage(channel=creds.pj_user_id,
text=f"There was an error while storing the message to the Google Sheet.\n{e}")
return
await client.chat_postMessage(channel=creds.boh_channel,
blocks=blocks,
text="New waste report posted.")
|
101b85b947cc148176f2f4d067cb73c0386b56cd
| 3,646,750
|
def random_superposition(dim: int) -> np.ndarray:
"""
Args:
dim: Length of the returned state vector (e.g., 2**n_qubits for n qubits).
Returns:
Normalized random array.
"""
state_vector = np.random.standard_normal(dim).astype(complex)
state_vector += 1j * np.random.standard_normal(dim)
state_vector /= np.linalg.norm(state_vector)
return state_vector
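# Example (illustrative sketch; assumes numpy is imported as np, as the
# function itself does): the returned vector is unit-norm.
vec = random_superposition(8)
print(vec.shape, np.linalg.norm(vec))  # (8,) 1.0 (up to rounding)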
|
f33867247a64c09571fcfc29c10e45e8e9921271
| 3,646,751
|
def predict(dag_model: Dag, test_data: Tensor) -> MultitaskMultivariateNormal:
"""
Can use this little helper function to predict from a Dag without
wrapping it in a DagGPyTorchModel.
"""
dag_model.eval()
with no_grad(), fast_pred_var():
return dag_model(test_data)
|
d4c0e2d48e2edf3f4e2b07b26024d92390efe4dc
| 3,646,752
|
def edit_skill():
"""Edit a skill entry in the skills table for a certain user. """
id = request.form['id']
skill_level = request.form['skill_level']
skills.update({'skill_level': skill_level}, id=id)
return good_json_response('success')
|
9b314aa51f990e1bbbd6bf75874e410e937fa595
| 3,646,753
|
def is_catalogue_link(link):
"""check whether the specified link points to a catalogue"""
return link['type'] == 'application/atom+xml' and 'rel' not in link
|
bc6e2e7f5c34f6ea198036cf1404fef8f7e7b214
| 3,646,754
|
def morlet_window(width: int, sigma: float) -> np.ndarray:
"""
Unadjusted Morlet window function.
Parameters
----------
width : integer (positive power of 2)
Window width to use - a power of two, since a window of two corresponds to the Nyquist rate.
sigma : float
Corresponds to the frequency of the wavelet.
Returns
-------
output : real ndarray
Normalised Morlet wavelet vector.
Notes
-----
https://en.wikipedia.org/wiki/Morlet_wavelet
"""
# fixed width wavelet translates to a fixed width Fourier transformed wavelet in frequency spectrum
# Definition - https://en.wikipedia.org/wiki/Morlet_wavelet
c_pi = (1 + np.exp(- sigma ** 2) - 2 * np.exp(- 0.75 * sigma ** 2)) ** (-1 / 2)
t = (np.arange(width + 1) - (width / 2)) * (10 / width)
wavelet = c_pi * (np.pi ** (-1 / 4)) * (np.exp(1j * sigma * t) - np.exp(- (1 / 2) * sigma ** 2))
output = np.exp(- (1 / 2) * t ** 2) * wavelet.real
return output
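# Example (illustrative sketch; assumes numpy is imported as np, as the
# function itself does): the window has width + 1 samples.
w = morlet_window(256, 2 * np.pi)
print(w.shape)  # (257,)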
|
2f0d6ff644a078b50bb510835a9bb2d2028ea143
| 3,646,755
|
def tfidfvec():
"""
Feature extraction (vectorization) for Chinese text
:return: None
"""
c1, c2, c3 = cutword()
print(c1, c2, c3)
tf = TfidfVectorizer()
data = tf.fit_transform([c1, c2, c3])
print(tf.get_feature_names())
print(data.toarray())
return None
|
1a0fe0e4a28e6d963f49156476ed720df492718b
| 3,646,756
|
import http
def resolve_guid(guid, suffix=None):
"""Resolve GUID to corresponding URL and return result of appropriate
view function. This effectively yields a redirect without changing the
displayed URL of the page.
:param guid: GUID value (not the object)
:param suffix: String to append to GUID route
:return: Werkzeug response
"""
# Get prefix; handles API routes
prefix = request.path.split(guid)[0].rstrip('/')
# Look up GUID
guid_object = Guid.load(guid)
if guid_object:
# verify that the object is a GuidStoredObject descendant. If a model
# was once a descendant but that relationship has changed, it's
# possible to have referents that are instances of classes that don't
# have a redirect_mode attribute or otherwise don't behave as
# expected.
if not isinstance(guid_object.referent, GuidStoredObject):
sentry.log_message(
'Guid `{}` resolved to non-guid object'.format(guid)
)
raise HTTPError(http.NOT_FOUND)
referent = guid_object.referent
if referent is None:
logger.error('Referent of GUID {0} not found'.format(guid))
raise HTTPError(http.NOT_FOUND)
mode = referent.redirect_mode
if mode is None:
raise HTTPError(http.NOT_FOUND)
url = referent.deep_url if mode == 'proxy' else referent.url
url = _build_guid_url(url, prefix, suffix)
# Always redirect API URLs; URL should identify endpoint being called
if prefix or mode == 'redirect':
if request.query_string:
url += '?' + request.query_string
return redirect(url)
return proxy_url(url)
# GUID not found; try lower-cased and redirect if exists
guid_object_lower = Guid.load(guid.lower())
if guid_object_lower:
return redirect(
_build_guid_url(
guid.lower(), prefix, suffix
)
)
# GUID not found
raise HTTPError(http.NOT_FOUND)
|
85a422f1c5709a44050595be4bf91ce1b937c0ef
| 3,646,757
|
from typing import Any
def _is_array(obj: Any) -> bool:
"""Whether the object is a numpy array."""
return isinstance(obj, np.ndarray)
|
4852e045fcef142c23a9370efbe3b5ffe7a9f8a3
| 3,646,758
|
def has_ao_1e_int_overlap(trexio_file) -> bool:
"""Check that ao_1e_int_overlap variable exists in the TREXIO file.
Parameter is a ~TREXIO File~ object that has been created by a call to ~open~ function.
Returns:
True if the variable exists, False otherwise
Raises:
- Exception from trexio.Error class if TREXIO return code ~rc~ is TREXIO_FAILURE and prints the error message using string_of_error.
- Exception from some other error (e.g. RuntimeError).
"""
try:
rc = pytr.trexio_has_ao_1e_int_overlap(trexio_file.pytrexio_s)
if rc == TREXIO_FAILURE:
raise Error(rc)
except:
raise
return rc == TREXIO_SUCCESS
|
53b5ec13ff7c32af5692972d586921a4d5bc07ef
| 3,646,759
|
from typing import Sequence
from typing import Set
async def get_non_existent_ids(collection, id_list: Sequence[str]) -> Set[str]:
"""
Return the IDs that are in `id_list`, but don't exist in the specified `collection`.
:param collection: the database collection to check
:param id_list: a list of document IDs to check for existence
:return: a list of non-existent IDs
"""
existing_group_ids = await collection.distinct("_id", {"_id": {"$in": id_list}})
return set(id_list) - set(existing_group_ids)
|
b13c61f4528c36a9d78a3687ce84c39158399142
| 3,646,760
|
def create_source_fc(header):
"""
Creates :class:`parser.file_configuration_t` instance, configured to
contain path to C++ source file
:param header: path to C++ source file
:type header: str
:rtype: :class:`parser.file_configuration_t`
"""
return file_configuration_t(
data=header,
content_type=file_configuration_t.CONTENT_TYPE.STANDARD_SOURCE_FILE)
|
29cd1112b9f59091b286f5222e62e9bec309bd36
| 3,646,761
|
def StorageFlatten(cache_line_size, create_bound_attribute=False):
"""Flatten the multi-dimensional read/write to 1D.
Parameters
----------
cache_line_size: int
The size of CPU cache line.
create_bound_attribute:
Whether to create bound attributes.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.StorageFlatten(cache_line_size, create_bound_attribute)
|
cb535450d3a503632f66e015555148d211f3e6f9
| 3,646,762
|
def wrap(node):
"""Stringify the parse tree node and wrap it in parentheses if it might be
ambiguous.
"""
if isinstance(node, (IntNode, CallNode, SymbolNode)):
return str(node)
else:
return "(" + str(node) + ")"
|
9ac5d9a7d5e6d6539231ba6897a44e2787d92809
| 3,646,763
|
def _ParseProjectNameMatch(project_name):
"""Process the passed project name and determine the best representation.
Args:
project_name: a string with the project name matched in a regex
Returns:
A minimal representation of the project name, None if no valid content.
"""
if not project_name:
return None
return project_name.lstrip().rstrip('#: \t\n')
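# Example (illustrative sketch with a made-up match string):
print(_ParseProjectNameMatch('  chromium: '))  # chromium
print(_ParseProjectNameMatch(''))              # None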
|
cb9f92a26c7157a5125fbdb5dd8badd7ffd23055
| 3,646,764
|
import io
import zipfile
def getCharts(dmldata: bytearray) -> list:
"""Get DrawingML object from clipboard"""
stream = io.BytesIO(dmldata)
with zipfile.ZipFile(stream, "r") as z:
with z.open("[Content_Types].xml") as f:
tree = ET.fromstring(f.read())
part_names = []
for link in tree.findall(Override):
content_type = link.attrib["ContentType"]
if content_type == ChartType:
part_name = link.attrib["PartName"]
part_names.append(part_name)
charts = []
for part_name in part_names:
with io.TextIOWrapper(z.open(part_name.strip("/"), "r"), encoding='utf-8') as f:
xmltext = f.read()
chartfile = ChartFile(xmltext)
charts.append(chartfile.chart)
return charts
|
402bf7e45be03ccda31563c2fc10afe2d4d09077
| 3,646,766
|
def explore_validation_time_gap_threshold_segments(participant_list, time_gap_list = [100, 200, 300, 400, 500, 1000, 2000], prune_length = None,
auto_partition_low_quality_segments = False):
"""Explores different threshiold values for the invalid time gaps in the Segments for all Participants in the list
"""
seglen = 0
segs = 0
participants = []
for p in participant_list:
print("pid:", p.pid)
if p.require_valid_segments == True:
raise Exception("explore_validation_threshold_segments should be called with a list of Participants with require_valid_segments = False")
tvalidity = []
for seg in p.segments:
seglen += seg.completion_time
segs += len(p.segments)
for tresh in time_gap_list: ##time-gap
invc = 0
invsegs=[]
for seg in p.segments:
if seg.calc_validity2(tresh) == False:
invc +=1
if len(invsegs)>0:
print("seg:",invsegs)
tvalidity.append((tresh, invc))
participants.append( (p.pid,tvalidity, len(p.segments) ) )
print ( (tvalidity, len(p.segments)) )
print("average seg len",seglen/float(segs))
return participants
|
bd88f292a00986212ae36e383c4bb4e3cd94067c
| 3,646,767
|
def convolve_design(X, hrf, opt=None):
"""convolve each column of a 2d design matrix with hrf
Args:
X ([2D design matrix]): time by cond, or list of onsets
hrf ([1D hrf function]): hrf
opt: if onset case, provides n_times and tr for
interpolation
Returns:
[convdes]: 2D: Samples by cond
"""
# if onset-time case
if type(X) is list:
errmsg = 'n_times needs to be in opt'
np.testing.assert_equal(
'n_times' in opt,
True,
err_msg=errmsg)
n_times = opt['n_times']
tr = opt['tr']
# calc
n_conditions = len(X)
convdes = np.zeros((n_times, n_conditions))
all_times = np.linspace(0, tr*(n_times-1), n_times)
hrf_times = np.linspace(0, tr*(len(hrf)-1), len(hrf))
for q in range(n_conditions):
# onset times for qth condition in run p
otimes = X[q]
# initialize
yvals = np.zeros((n_times))
# loop over onset times
for r in otimes:
# interpolate to find values at the
# data sampling time points
f = pchip(
r + hrf_times,
hrf,
extrapolate=False)(all_times)
yvals = yvals + np.nan_to_num(f)
# record
convdes[:, q] = yvals
# normal vector or matrix cases
else:
ndims = X.ndim
if ndims == 1:
ntime = X.shape[0]
convdes = np.convolve(X, hrf)
convdes = convdes[range(ntime)]
else:
ntime, ncond = X.shape
convdes = np.asarray(
[np.convolve(X[:, x], hrf, ) for x in range(ncond)]).T
convdes = convdes[range(ntime), :]
return convdes
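# Example (illustrative sketch of the matrix branch only; assumes numpy is
# imported as np): a single impulse column convolved with a short HRF and
# truncated back to the original number of samples.
import numpy as np
X = np.zeros((5, 1)); X[0, 0] = 1.0
hrf = np.array([1.0, 0.5, 0.25])
print(convolve_design(X, hrf)[:, 0])  # approximately [1. 0.5 0.25 0. 0.]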
|
02f2473ff18a78759c87884cd0f7fc94db6e0e2d
| 3,646,768
|
from typing import List
def relax_incr_dimensions(iet, **kwargs):
"""
Recast Iterations over IncrDimensions as ElementalFunctions; insert
ElementalCalls to iterate over the "main" and "remainder" regions induced
by the IncrDimensions.
"""
sregistry = kwargs['sregistry']
efuncs = []
mapper = {}
for tree in retrieve_iteration_tree(iet):
iterations = [i for i in tree if i.dim.is_Incr]
if not iterations:
continue
root = iterations[0]
if root in mapper:
continue
outer, inner = split(iterations, lambda i: not i.dim.parent.is_Incr)
# Compute the iteration ranges
ranges = []
for i in outer:
maxb = i.symbolic_max - (i.symbolic_size % i.dim.step)
ranges.append(((i.symbolic_min, maxb, i.dim.step),
(maxb + 1, i.symbolic_max, i.symbolic_max - maxb)))
# Remove any offsets
# E.g., `x = x_m + 2 to x_M - 2` --> `x = x_m to x_M`
outer = [i._rebuild(limits=(i.dim.root.symbolic_min, i.dim.root.symbolic_max,
i.step))
for i in outer]
# Create the ElementalFunction
name = sregistry.make_name(prefix="bf")
body = compose_nodes(outer)
dynamic_parameters = flatten((i.symbolic_bounds, i.step) for i in outer)
dynamic_parameters.extend([i.step for i in inner if not is_integer(i.step)])
efunc = make_efunc(name, body, dynamic_parameters)
efuncs.append(efunc)
# Create the ElementalCalls
calls = []
for p in product(*ranges):
dynamic_args_mapper = {}
for i, (m, M, b) in zip(outer, p):
dynamic_args_mapper[i.symbolic_min] = m
dynamic_args_mapper[i.symbolic_max] = M
dynamic_args_mapper[i.step] = b
for j in inner:
if j.dim.root is i.dim.root and not is_integer(j.step):
value = j.step if b is i.step else b
dynamic_args_mapper[j.step] = (value,)
calls.append(efunc.make_call(dynamic_args_mapper))
mapper[root] = List(body=calls)
iet = Transformer(mapper).visit(iet)
return iet, {'efuncs': efuncs}
|
0d7af62309d427c6477329ed6b7a5dccab390a51
| 3,646,769
|
def _get_lspci_name(line):
"""Reads and returns a 'name' from a line of `lspci` output."""
hush = line.split('[')
return '['.join(hush[0:-1]).strip()
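# Example (illustrative sketch with a made-up lspci line): the name is
# everything before the final '[...]' block.
line = '00:02.0 VGA compatible controller: Intel Corporation HD Graphics [8086:5916]'
print(_get_lspci_name(line))
# 00:02.0 VGA compatible controller: Intel Corporation HD Graphics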
|
92910d0f4d9dce1689ed22a963932fb85d8e2677
| 3,646,770
|
def dumps_bytes(obj):
"""
Serialize ``obj`` to JSON formatted ``bytes``.
"""
b = dumps(obj)
if isinstance(b, unicode):
b = b.encode("ascii")
return b
|
5ee94b2bd5a8bcd2f8586578e6d86084a030d93a
| 3,646,771
|
def get_child_right_position(position: int) -> int:
"""
heap helper function get the position of the right child of the current node
>>> get_child_right_position(0)
2
"""
return (2 * position) + 2
|
2a5128a89ac35fe846d296d6b92c608e50b80a45
| 3,646,772
|
def get_feature_set_details(shape_file_path):
""" This function gets the shape type of the shapefile and make a list
of fields to be added to output summary table based on that shape type """
try:
# Checking for geometry type
feat_desc = arcpy.Describe(shape_file_path)
arcpy.AddMessage(("Shapefile is of '{0}' type.")
.format(str(feat_desc.shapeType)))
# According to the shape type, make a list of fields to be added to the
# summary table
list_of_fields = ["summaryfield", "summaryvalue"]
if feat_desc.shapeType.upper() == "POLYGON":
list_of_fields += ["area_acres", "area_sqkm"]
elif feat_desc.shapeType.upper() == "POLYLINE":
list_of_fields += ["length_Miles", "length_Km"]
elif feat_desc.shapeType.upper() == "POINT":
list_of_fields += ["Count"]
return [feat_desc.shapeType, list_of_fields]
except Exception as error:
arcpy.AddError("Error occurred during execution:" + str(error))
|
9c4eddd9963751d195f4165ecc862d2753bb2067
| 3,646,774
|
def get_label_parts(label):
"""returns the parts of an absolute label as a list"""
return label[2:].replace(":", "/").split("/")
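# Example (illustrative sketch with a made-up Bazel-style absolute label):
print(get_label_parts('//foo/bar:baz'))  # ['foo', 'bar', 'baz']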
|
44998aad262f04fdb4da9e7d96d2a2b3afb27502
| 3,646,775
|
import pandas
import numpy
def combined_spID(*species_identifiers):
"""Return a single column unique species identifier
Creates a unique species identifier based on one or more columns of a
data frame that represent the unique species ID.
Args:
species_identifiers: A tuple containing one or more pieces of a unique
species identifier or lists of these pieces.
Returns:
A single unique species identifier or a list of single identifiers
"""
# Make standard input data types capable of element wise summation
input_type = type(species_identifiers[0])
assert input_type in [list, tuple, str, pandas.core.series.Series, numpy.ndarray]
if input_type is not str:
species_identifiers = [pandas.Series(identifier) for identifier in species_identifiers]
single_identifier = species_identifiers[0]
if len(species_identifiers) > 1:
for identifier in species_identifiers[1:]:
single_identifier += identifier
if input_type == numpy.ndarray:
single_identifier = numpy.array(single_identifier)
else:
single_identifier = input_type(single_identifier)
return single_identifier
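# Example (illustrative sketch with made-up genus/species columns):
print(combined_spID(['Quercus', 'Acer'], ['alba', 'rubrum']))
# ['Quercusalba', 'Acerrubrum']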
|
d50abeccc6fb0235fd8a58dfadbd7c6bdb72825d
| 3,646,777
|
import copy
from math import sqrt
def qr(A, prec=1e-10):
"""
computes a faster and economic qr decomposition similar to:
http://www.iaa.ncku.edu.tw/~dychiang/lab/program/mohr3d/source/Jama%5CQRDecomposition.html
"""
m = len(A)
if m <= 0:
return [], A
n = len(A[0])
Rdiag = [0] * n;
QR = copy.deepcopy(A)
for k in range(n):
# Compute 2-norm of k-th column without under/overflow.
nrm = 0.0
for i in range(k, m):
nrm = sqrt(nrm ** 2 + QR[i][k] ** 2)
if abs(nrm) > prec:
# Form k-th Householder vector.
if k < m and QR[k][k] < 0:
nrm = -nrm
for i in range(k, m):
QR[i][k] /= nrm
if k < m:
QR[k][k] += 1.0
# Apply transformation to remaining columns.
for j in range(k + 1, n):
s = 0.0
for i in range(k, m):
s += QR[i][k] * QR[i][j]
if k < m:
s = -s / QR[k][k]
for i in range(k, m):
QR[i][j] += s * QR[i][k]
Rdiag[k] = -nrm;
# compute R
R = [[0] * n for z in range(min(m, n))]
for i in range(m):
for j in range(i, n):
if i < j:
R[i][j] = QR[i][j]
if i == j:
R[i][i] = Rdiag[i]
# compute Q
w = min(m, n)
Q = [[0] * w for i in range(m)]
for k in range(w - 1, -1, -1):
if k < w:
Q[k][k] = 1.0;
for j in range(k, w):
if k < m and abs(QR[k][k]) > prec:
s = 0.0
for i in range(k, m):
s += QR[i][k] * Q[i][j]
s = -s / QR[k][k]
for i in range(k, m):
Q[i][j] += s * QR[i][k]
return Q, R
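# Example (illustrative sketch; numpy is used here only to check the product):
# the economic factors reconstruct A, up to the routine's sign convention.
import numpy as np
A = [[3.0, 0.0], [4.0, 0.0], [0.0, 5.0]]
Q, R = qr(A)
print(np.allclose(np.array(Q) @ np.array(R), np.array(A)))  # True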
|
fe1ffa20b5ad44a76837bd816ca07c3388ebcb4d
| 3,646,778
|
def split_range(r, n):
"""
Computes the indices of segments after splitting a range of r values
into n segments.
Parameters
----------
r : int
Size of the range vector.
n : int
The number of splits.
Returns
-------
segments : list
The list of lists of first and last indices of segments.
Example
-------
>>> split_range(8, 2)
[[0, 4], [4, 8]]
"""
step = int(r / n)
segments = []
for i in range(n):
new_segment = [step * i, step * (i + 1)]
segments.append(new_segment)
# correct the gap in the missing index due to the truncated step
segments[-1][-1] = r
return segments
|
34f570933a5eb8772dc4b2e80936887280ff47a4
| 3,646,779
|
def is_connected_to_mongo():
"""
Make sure user is connected to mongo; returns True if connected, False otherwise.
Check below url to make sure you are looking for the right port.
"""
maxSevSelDelay = 1 # how long to spend looking for mongo
try: # make sure this address is running
url = "mongodb://127.0.0.1:27017" # standard mongo port
client = pymongo.MongoClient(url, serverSelectionTimeoutMS=maxSevSelDelay) # check the url for specified amt of time
client.admin.command("serverStatus") # connect via serverStatus (will not cause error if connected)
except pymongo.errors.ServerSelectionTimeoutError as err: # error if serverStatus does not go through
return False # not connected
return True
|
2bf28835ae192d41836f2335ce5c1e152b0e0838
| 3,646,780
|
def _fill_three_digit_hex_color_code(*, hex_color_code: str) -> str:
"""
Fill 3 digits hexadecimal color code until it becomes 6 digits.
Parameters
----------
hex_color_code : str
        Three-digit hexadecimal color code (not including '#').
e.g., 'aaa', 'fff'
Returns
-------
filled_color_code : str
Result color code. e.g., 'aaaaaa', 'ffffff'
"""
filled_color_code: str = ''
for char in hex_color_code:
filled_color_code += char * 2
return filled_color_code
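# Hedged usage sketch: each of the three hex digits is simply doubled.
assert _fill_three_digit_hex_color_code(hex_color_code='a3f') == 'aa33ff'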
|
d91df947fcc5f0718bbd9b3b4f69f1ad68ebeff4
| 3,646,781
|
import re
def normalize(text: str, convert_digits=True) -> str:
"""
Summary:
Arguments:
text [type:string]
Returns:
normalized text [type:string]
"""
# replacing all spaces,hyphens,... with white space
space_pattern = (
r"[\xad\ufeff\u200e\u200d\u200b\x7f\u202a\u2003\xa0\u206e\u200c\x9d]"
)
space_pattern = re.compile(space_pattern)
text = space_pattern.sub(" ", text)
# remove keshide,
text = re.sub(r"[ـ\r]", "", text)
# remove Aarab
text = re.sub(r"[\u064B\u064C\u064D\u064E\u064F\u0650\u0651\u0652]", "", text)
# replace arabic alphabets with equivalent persian alphabet
regex_list = [
(r"ء", r"ئ"),
(r"ﺁ|آ", r"آ"),
(r"ٲ|ٱ|إ|ﺍ|أ", r"ا"),
(r"ﺐ|ﺏ|ﺑ", r"ب"),
(r"ﭖ|ﭗ|ﭙ|ﺒ|ﭘ", r"پ"),
(r"ﭡ|ٺ|ٹ|ﭞ|ٿ|ټ|ﺕ|ﺗ|ﺖ|ﺘ", r"ت"),
(r"ﺙ|ﺛ", r"ث"),
(r"ﺝ|ڃ|ﺠ|ﺟ", r"ج"),
(r"ڃ|ﭽ|ﭼ", r"چ"),
(r"ﺢ|ﺤ|څ|ځ|ﺣ", r"ح"),
(r"ﺥ|ﺦ|ﺨ|ﺧ", r"خ"),
(r"ڏ|ډ|ﺪ|ﺩ", r"د"),
(r"ﺫ|ﺬ|ﻧ", r"ذ"),
(r"ڙ|ڗ|ڒ|ڑ|ڕ|ﺭ|ﺮ", r"ر"),
(r"ﺰ|ﺯ", r"ز"),
(r"ﮊ", r"ژ"),
(r"ݭ|ݜ|ﺱ|ﺲ|ښ|ﺴ|ﺳ", r"س"),
(r"ﺵ|ﺶ|ﺸ|ﺷ", r"ش"),
(r"ﺺ|ﺼ|ﺻ", r"ص"),
(r"ﺽ|ﺾ|ﺿ|ﻀ", r"ض"),
(r"ﻁ|ﻂ|ﻃ|ﻄ", r"ط"),
(r"ﻆ|ﻇ|ﻈ", r"ظ"),
(r"ڠ|ﻉ|ﻊ|ﻋ", r"ع"),
(r"ﻎ|ۼ|ﻍ|ﻐ|ﻏ", r"غ"),
(r"ﻒ|ﻑ|ﻔ|ﻓ", r"ف"),
(r"ﻕ|ڤ|ﻖ|ﻗ", r"ق"),
(r"ڭ|ﻚ|ﮎ|ﻜ|ﮏ|ګ|ﻛ|ﮑ|ﮐ|ڪ|ك", r"ک"),
(r"ﮚ|ﮒ|ﮓ|ﮕ|ﮔ", r"گ"),
(r"ﻝ|ﻞ|ﻠ|ڵ", r"ل"),
(r"ﻡ|ﻤ|ﻢ|ﻣ", r"م"),
(r"ڼ|ﻦ|ﻥ|ﻨ", r"ن"),
(r"ވ|ﯙ|ۈ|ۋ|ﺆ|ۊ|ۇ|ۏ|ۅ|ۉ|ﻭ|ﻮ|ؤ", r"و"),
(r"ﺔ|ﻬ|ھ|ﻩ|ﻫ|ﻪ|ۀ|ە|ة|ہ", r"ه"),
(r"ﭛ|ﻯ|ۍ|ﻰ|ﻱ|ﻲ|ں|ﻳ|ﻴ|ﯼ|ې|ﯽ|ﯾ|ﯿ|ێ|ے|ى|ي", r"ی"),
(r"¬", r""),
(r"•|·|●|·|・|∙|。|ⴰ", r"."),
(r",|٬|٫|‚|,", r"،"),
(r"ʕ|\?", r"؟"),
(r"|ِ||ُ||َ||ٍ||ٌ||ً", r""),
]
for pattern, replac in regex_list:
text = re.sub(pattern, replac, text)
# replace arabic and english digits with equivalent persian digits
num_dict = dict()
if convert_digits:
num_dict[u"0"] = u"۰"
num_dict[u"1"] = u"۱"
num_dict[u"2"] = u"۲"
num_dict[u"3"] = u"۳"
num_dict[u"4"] = u"۴"
num_dict[u"5"] = u"۵"
num_dict[u"6"] = u"۶"
num_dict[u"7"] = u"۷"
num_dict[u"8"] = u"۸"
num_dict[u"9"] = u"۹"
num_dict[u"%"] = u"٪"
num_dict[u"٠"] = u"۰"
num_dict[u"١"] = u"۱"
num_dict[u"٢"] = u"۲"
num_dict[u"٣"] = u"۳"
num_dict[u"٤"] = u"۴"
num_dict[u"٥"] = u"۵"
num_dict[u"٦"] = u"۶"
num_dict[u"٧"] = u"۷"
num_dict[u"٨"] = u"۸"
num_dict[u"٩"] = u"۹"
num_pattern = re.compile(r"(" + "|".join(num_dict.keys()) + r")")
text = num_pattern.sub(lambda x: num_dict[x.group()], text)
punctuation_after, punctuation_before = r"\.:!،؛؟»\]\)\}", r"«\[\(\{"
regex_list = [
# replace quotation with «»
('"([^\n"]+)"', r"«\1»"),
# replace single quotation with «»
("'([^\n\"]+)'", r"«\1»"),
# replace ٬ with «»
('٬([^\n"]+)٬', r"«\1»"),
# replace Double Angle Bracket with «»
('《([^\n"]+)》', r"«\1»"),
# replace dot with momayez
("([\d+])\.([\d+])", r"\1٫\2"),
# replace 3 dots
(r" ?\.\.\.", " … "),
# fix ی space
(r"([^ ]ه) ی ", r"\1ی "),
# put zwnj after می, نمی
(r"(^| )(ن?می) ", r"\1\2"),
# put zwnj before تر, تری, ترین, گر, گری, ها, های
(
r"(?<=[^\n\d "
+ punctuation_after
+ punctuation_before
+ "]{2}) (تر(ین?)?|گری?|های?)(?=[ \n"
+ punctuation_after
+ punctuation_before
+ "]|$)",
r"\1",
),
# join ام, ایم, اش, اند, ای, اید, ات
(
r"([^ ]ه) (ا(م|یم|ش|ند|ی|ید|ت))(?=[ \n" + punctuation_after + "]|$)",
r"\1\2",
),
# remove space before and after quotation
('" ([^\n"]+) "', r'"\1"'),
# remove space before punctuations
(" ([" + punctuation_after + "])", r"\1"),
# remove space after punctuations
("([" + punctuation_before + "]) ", r"\1"),
# put space after . and :
(
"(["
+ punctuation_after[:3]
+ "])([^ "
+ punctuation_after
+ "\w\d\\/۰۱۲۳۴۵۶۷۸۹])",
r"\1 \2",
),
# put space after punctuation
(
"([" + punctuation_after[3:] + "])([^ " + punctuation_after + "])",
r"\1 \2",
),
# put space before punctuations
(
"([^ " + punctuation_before + "])([" + punctuation_before + "])",
r"\1 \2",
),
# Remove repeating characters (keep 2 repeats)
(r"(ئآابپتثجچحخدذرزژسشصضطظعغفقکگلمنوهیچ)\1+", r"\1\1"),
]
for pattern, replac in regex_list:
text = re.sub(pattern, replac, text)
# fix "؟ " in links
text = re.sub(r"([a-zA-z]+)(؟ )", r"\1?", text)
# fix "، " in English numbers
text = re.sub(r"([0-9+])، ([0-9+])", r"\1,\2", text)
# fix "٫" in English numbers
text = re.sub(r"([0-9+])٫([0-9+])", r"\1.\2", text)
# fix "، " in farsi digits
text = re.sub(r"([۰-۹+])، ([۰-۹+])", r"\1٫\2", text)
return text
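# Hedged usage sketch: Arabic letter forms are mapped to their Persian
# equivalents and Western digits become Persian digits (expected output shown,
# not verified against the original author's tests).
_example = normalize("كتاب 123")
# _example is expected to be 'کتاب ۱۲۳'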
|
43bddc9c78615ab4cc58a732c164ad81b6848dc1
| 3,646,782
|
def register_name_for(entity):
"""
gets the admin page register name for given entity class.
it raises an error if the given entity does not have an admin page.
:param type[pyrin.database.model.base.BaseEntity] entity: the entity class of
admin page to get its
register name.
:raises AdminPageNotFoundError: admin page not found error.
:rtype: str
"""
return get_component(AdminPackage.COMPONENT_NAME).register_name_for(entity)
|
c43ab1a1e08058435c6680886d0e7c1dd81b1bef
| 3,646,783
|
def index():
"""Show all the posts, most recent first."""
db = get_db()
posts = db.execute(
# "SELECT p.id, title, body, created, author_id, username"
# " FROM post p"
# " JOIN user u ON p.author_id = u.id"
# " ORDER BY created DESC"
"SELECT *, l.author_id as love_author, count(distinct l.id) as likes"
" FROM post p"
" LEFT JOIN user u ON p.author_id = u.id"
" LEFT JOIN love l ON p.id = l.post_id"
" GROUP BY p.id"
" ORDER BY created DESC"
# "SELECT p.id, title, body, created, author_id, username, count(distinct love.id)"
# " FROM post p"
# " LEFT JOIN love on p.id=love.post_id"
# " JOIN user u ON p.author_id = u.id"
# " GROUP BY p.id"
).fetchall()
return render_template("blog/index.html", posts=posts)
|
b40a302ef1278f3fb0227c40db4764c22f5f0cb8
| 3,646,784
|
import requests
def get_user_information(fbid, extra_fields=None):
""" Gets user basic information: first_name, last_name, gender,
profile_pic, locale, timezone
:usage:
>>> # Set the user fbid you want the information
>>> fbid = "<user fbid>"
>>> # Call the function passing the fbid of user.
>>> user_information = fbbotw.get_user_information(fbid=fbid)
:param str fbid: User id to get the information.
:param list extra_fields: Extra fields that your app is allowed to \
request. eg. 'locale', 'timezone', 'gender'
:return dict:
>>> user_information = {
"id": "user_id",
"name": "User Full Name",
"first_name": "User First Name",
"last_name": "User Last Name",
"profile_pic": "https://cdn_to_pic.com/123",
}
:facebook docs: `/user-profile <https://developers.facebook.com/docs/\
messenger-platform/user-profile>`_
"""
user_info_url = GRAPH_URL.format(fbid=fbid)
payload = dict()
    extra_fields = extra_fields or []  # avoid a mutable default argument
    fields = [
        'name', 'first_name', 'last_name', 'profile_pic'
    ] + extra_fields
payload['fields'] = (
",".join(fields)
)
payload['access_token'] = PAGE_ACCESS_TOKEN
user_info = requests.get(user_info_url, payload).json()
return user_info
|
7765fec33fc70a67c3249e6417d0f75acba0ba2e
| 3,646,785
|
import re
def parseTeam(teamString):
"""Parse strings for data from official Pokemon Showdown format.
Keyword arguemnts:\n
teamString -- a team string, copied from pokepaste or pokemon showdown
"""
pokemonList = teamString.split('\n\n')
teamList = []
#print(pokemonList)
for pokemon in pokemonList:
currentPokemonDict = {}
moveCounter = 1
currentPokemon = pokemon.split('\n')
if 'Ability' not in pokemon:
continue
for attribute in currentPokemon:
            if any(tag in attribute for tag in ('Happiness:', 'IVs:', 'Shiny:')):
                # these lines are not tracked by this parser
                continue
if '@' in attribute:
attribute = attribute.split('@')
currentPokemonDict['Species'] = attribute[0].strip().replace(' ','')
if '(' in currentPokemonDict['Species']:
currentPokemonDict['Species'] = re.search(r'\(([^)]+)', currentPokemonDict['Species']).group(1)
if len(currentPokemonDict['Species']) == 1:
temp = attribute[0].split('(')[0]
currentPokemonDict['Species'] = temp.strip()
currentPokemonDict['Item'] = attribute[1].strip().replace(' ','')
if 'Nature' in attribute:
attribute = attribute.strip()
attribute = attribute.split(' ')
currentPokemonDict['Nature'] = attribute[0].strip()
if '- ' in attribute:
currentPokemonDict['Move'+str(moveCounter)] = attribute.split('- ')[1].strip().replace(' ','')
moveCounter += 1
if 'EVs' in attribute:
currentPokemonDict['HPEVs'] = 0
currentPokemonDict['AtkEVs'] = 0
currentPokemonDict['DefEVs'] = 0
currentPokemonDict['SpAEVs'] = 0
currentPokemonDict['SpDEVs'] = 0
currentPokemonDict['SpeEVs'] = 0
attribute = attribute.split(':')
attribute = attribute[1].split('/')
for item in attribute:
item = item.strip()
item = item.split(' ')
currentPokemonDict[item[1]+'EVs'] = int(item[0])
teamList.append(currentPokemonDict)
return teamList
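# Hedged usage sketch with a minimal Showdown-style export (illustrative data):
_team_str = (
    "Pikachu @ Light Ball\n"
    "Ability: Static\n"
    "EVs: 252 SpA / 4 SpD / 252 Spe\n"
    "Timid Nature\n"
    "- Thunderbolt\n"
    "- Volt Switch"
)
_team = parseTeam(_team_str)
# _team[0] is expected to include Species='Pikachu', Item='LightBall',
# Nature='Timid', Move1='Thunderbolt', Move2='VoltSwitch',
# SpAEVs=252, SpDEVs=4, SpeEVs=252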
|
70680cf1eca50a4738b8afc7ab12fcd86b48d01d
| 3,646,786
|
import numpy as np
def mix_style(style_codes,
content_codes,
num_layers=1,
mix_layers=None,
is_style_layerwise=True,
is_content_layerwise=True):
"""Mixes styles from style codes to those of content codes.
Each style code or content code consists of `num_layers` codes, each of which
is typically fed into a particular layer of the generator. This function mixes
styles by partially replacing the codes of `content_codes` from some certain
layers with those of `style_codes`.
For example, if both style code and content code are with shape [10, 512],
meaning to have 10 layers and each employs a 512-dimensional latent code. And
the 1st, 2nd, and 3rd layers are the target layers to perform style mixing.
Then the top half of the content code (with shape [3, 512]) will be replaced
by the top half of the style code (also with shape [3, 512]).
NOTE: This function also supports taking single-layer latent codes as inputs,
i.e., setting `is_style_layerwise` or `is_content_layerwise` as False. In this
case, the corresponding code will be first repeated for `num_layers` before
performing style mixing.
Args:
style_codes: Style codes, with shape [num_styles, *code_shape] or
[num_styles, num_layers, *code_shape].
content_codes: Content codes, with shape [num_contents, *code_shape] or
[num_contents, num_layers, *code_shape].
num_layers: Total number of layers in the generative model. (default: 1)
mix_layers: Indices of the layers to perform style mixing. `None` means to
replace all layers, in which case the content code will be completely
replaced by style code. (default: None)
is_style_layerwise: Indicating whether the input `style_codes` are
layer-wise codes. (default: True)
is_content_layerwise: Indicating whether the input `content_codes` are
layer-wise codes. (default: True)
Returns:
Codes after style mixing, with shape [num_styles, num_contents, num_layers,
*code_shape].
Raises:
ValueError: If input `content_codes` or `style_codes` is with invalid shape.
"""
if not is_style_layerwise:
style_codes = style_codes[:, np.newaxis]
style_codes = np.tile(
style_codes,
[num_layers if axis == 1 else 1 for axis in range(style_codes.ndim)])
if not is_content_layerwise:
content_codes = content_codes[:, np.newaxis]
content_codes = np.tile(
content_codes,
[num_layers if axis == 1 else 1 for axis in range(content_codes.ndim)])
if not (style_codes.ndim >= 3 and style_codes.shape[1] == num_layers and
style_codes.shape[1:] == content_codes.shape[1:]):
raise ValueError(f'Shapes of style codes and content codes should be '
f'[num_styles, num_layers, *code_shape] and '
f'[num_contents, num_layers, *code_shape] respectively, '
f'but {style_codes.shape} and {content_codes.shape} are '
f'received!')
layer_indices = parse_indices(mix_layers, min_val=0, max_val=num_layers - 1)
if not layer_indices:
layer_indices = list(range(num_layers))
num_styles = style_codes.shape[0]
num_contents = content_codes.shape[0]
code_shape = content_codes.shape[2:]
s = style_codes[:, np.newaxis]
s = np.tile(s, [num_contents if axis == 1 else 1 for axis in range(s.ndim)])
c = content_codes[np.newaxis]
c = np.tile(c, [num_styles if axis == 0 else 1 for axis in range(c.ndim)])
from_style = np.zeros(s.shape, dtype=bool)
from_style[:, :, layer_indices] = True
results = np.where(from_style, s, c)
assert results.shape == (num_styles, num_contents, num_layers, *code_shape)
return results
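# Hedged usage sketch with random layer-wise codes; assumes the external helper
# parse_indices accepts a list of layer indices (shapes are illustrative).
_styles = np.random.randn(2, 10, 512)    # 2 styles, 10 layers, 512-dim codes
_contents = np.random.randn(3, 10, 512)  # 3 contents
_mixed = mix_style(_styles, _contents, num_layers=10, mix_layers=[0, 1, 2])
# _mixed.shape == (2, 3, 10, 512); layers 0-2 are taken from the style codes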
|
377a0638d84ed084a91eb6dfb20302e56ab85647
| 3,646,788
|
import collections
import tensorflow as tf
def _build_client_update(model: model_lib.Model,
use_experimental_simulation_loop: bool = False):
"""Creates client update logic for FedSGD.
Args:
model: A `tff.learning.Model` used to compute gradients.
use_experimental_simulation_loop: Controls the reduce loop function for
input dataset. An experimental reduce loop is used for simulation.
Returns:
A `tf.function`.
"""
dataset_reduce_fn = dataset_reduce.build_dataset_reduce_fn(
use_experimental_simulation_loop)
@tf.function
def client_update(initial_weights, dataset):
model_weights = model_utils.ModelWeights.from_model(model)
tf.nest.map_structure(lambda a, b: a.assign(b), model_weights,
initial_weights)
def reduce_fn(state, batch):
"""Runs forward_pass on batch and sums the weighted gradients."""
accumulated_gradients, num_examples_sum = state
with tf.GradientTape() as tape:
output = model.forward_pass(batch)
gradients = tape.gradient(output.loss, model_weights.trainable)
num_examples = tf.cast(output.num_examples, tf.float32)
accumulated_gradients = tuple(
accumulator + num_examples * gradient
for accumulator, gradient in zip(accumulated_gradients, gradients))
# We may be able to optimize the reduce function to avoid doubling the
# number of required variables here (e.g. keeping two copies of all
# gradients). If you're looking to optimize memory usage this might be a
# place to look.
return (accumulated_gradients, num_examples_sum + num_examples)
def _zero_initial_state():
"""Create a tuple of (gradient accumulators, num examples)."""
return tuple(
tf.nest.map_structure(tf.zeros_like,
model_weights.trainable)), tf.constant(
0, dtype=tf.float32)
gradient_sums, num_examples_sum = dataset_reduce_fn(
reduce_fn=reduce_fn,
dataset=dataset,
initial_state_fn=_zero_initial_state)
# We now normalize to compute the average gradient over all examples.
average_gradient = tf.nest.map_structure(
lambda gradient: gradient / num_examples_sum, gradient_sums)
model_output = model.report_local_unfinalized_metrics()
stat_output = collections.OrderedDict(num_examples=num_examples_sum)
average_gradient, has_non_finite_delta = (
tensor_utils.zero_all_if_any_non_finite(average_gradient))
if has_non_finite_delta > 0:
client_weight = tf.constant(0.0)
else:
client_weight = num_examples_sum
return client_works.ClientResult(
update=average_gradient,
update_weight=client_weight), model_output, stat_output
return client_update
|
8ba68ebba2c6520e9c7a89975d16dc20debf52eb
| 3,646,791
|
import ipaddress
def ipv4_addr_check():
"""Prompt user for IPv4 address, then validate. Re-prompt if invalid."""
while True:
try:
return ipaddress.IPv4Address(input('Enter valid IPv4 address: '))
        except ValueError:
            print('Bad value, try again.')
|
e85681cdcedb605f47240b27e8e2bce077a39273
| 3,646,792
|
import numpy as np
from numpy import linspace, zeros, cumsum, mean
from matplotlib import pyplot
import pylab
def energybalance_erg(ratio,crew,erg,w0=4.3801,dt=0.03,doplot=1,doprint=0,theconst=1.0):
"""
calculates one stroke with ratio as input, using force profile in time domain
"""
# w0 = initial flywheel angular velo
# initialising output values
dv = 100.
vavg = 0.0
vend = 0.0
power = 0.0
# stroke parameters
tempo = crew.tempo
mc = crew.mc
recprofile = crew.recprofile
d = crew.strokelength
Nrowers = 1
drag = erg.drag
inertia = erg.inertia
cord = erg.cord
cordlength = erg.cordlength
r = erg.r # sprocket radius
# nr of time steps
aantal = 1+int(round(60./(tempo*dt)))
time = linspace(0,60./tempo,aantal)
# flywheel angular velo
wf = zeros(len(time))+w0
wfdot = zeros(len(time))
# crew velo
vc = zeros(len(time))
vpull = zeros(len(time))
Fhandle = zeros(len(time))
Fres = zeros(len(time))
Fleg = zeros(len(time))
ydotdot = zeros(len(time))
ydot = zeros(len(time)) # +wf[0]*r
Pf = zeros(len(time))
Phandle = zeros(len(time))
Ebungee = zeros(len(time))
Pbungee = zeros(len(time))
handlepos = 0
vhand = ydot[0]
# initial handle and boat velocities
vc[0] = ydot[0]
# calculate average drive speed
tdrive = ratio*max(time)
vdriveavg = crew.strokelength/tdrive
idrivemax = int(round(tdrive/dt))
    ## powerconst = 2.58153699 # with sin^(1/3)
    ## powerconst = 2 # with sin
    # powerconst = 1.5708 # with sin^2
# macht = 2.
# vhandmax = np.pi*d/(powerconst*tdrive)
# vhand = vhandmax*(np.sin(np.pi*(time)/tdrive))**(macht)
# powerconst = 3.1733259127
# vhandmax = np.pi*d/(powerconst*tdrive)
# vhand = vhandmax*(1-np.cos(2*np.pi*(time)/tdrive))
macht = 0.5
x = np.linspace(0,1,100)
y = (x-x**2)**(macht)
s = np.cumsum(np.diff(x)*y[1:])[-1]
powerconst = 1/s
vhandmax = powerconst*d/tdrive
vhand = vhandmax*((time/tdrive)-(time/tdrive)**2)**macht
# stroke
for i in range(1,idrivemax):
now = dt*i
timerel = now/tdrive
time2 = (dt*(i+1))/tdrive
vi = vhand[i-1]
vj = vhand[i]
vpull[i] = vhand[i]
Tdrag = drag*wf[i-1]**2
handlepos += dt*vi
ydot[i] = crew.vcm(vi, handlepos)
# ydot[i] = vi*(1-timerel)
# ydot[i] = vi
ydotdot[i] = (ydot[i]-ydot[i-1])/dt
wnext = vj/r
wnext2 = wf[i-1]-dt*Tdrag/inertia
# if wnext > 0.99*wf[i-1]:
if wnext > wnext2:
wf[i] = wnext
Tacceler = inertia*(wnext-wf[i-1])/dt
else:
wf[i] = wf[i-1]-dt*Tdrag/inertia
Tacceler = 0
Tdrag = 0
wfdot[i] = (wf[i]-wf[i-1])/dt
Fhandle[i] = ((Tdrag+Tacceler)/r)+cord*(cordlength+handlepos)
Fres[i] = Nrowers*mc*ydotdot[i]
Fleg[i] = Fres[i]+Fhandle[i]
Ebungee[i] = 0.5*(cord*(cordlength+handlepos)**2 - cord*cordlength**2)
Pbungee[i] = (Ebungee[i]-Ebungee[i-1])/dt
vc[i] = ydot[i]
# recovery
trecovery = max(time)-time[idrivemax]
ratio = time[idrivemax]/max(time)
aantalstroke = idrivemax
    if (recprofile == 1): # old method (sine)
vhandmax = -np.pi*d/(2*trecovery)
vhand = vhandmax*np.sin(np.pi*(time-time[i])/trecovery)
for k in range(idrivemax,aantal):
Tdrag = drag*wf[k-1]**2 # drag torque
wf[k] = wf[k-1]-dt*Tdrag/inertia
ydot[k] = crew.vcm(vhand, handlepos)
# ydot[k] = vhand
vc[k] = ydot[k]
ydotdot[k] = (ydot[k]-ydot[k-1])/dt
handlepos = handlepos+vhand[k]*dt
Ebungee[k] = 0.5*(cord*(cordlength+handlepos)**2 - cord*cordlength**2)
Pbungee[k] = (Ebungee[k]-Ebungee[k-1])/dt
else:
vavgrec = d/trecovery
vcrecovery = zeros(aantal)
for k in range(idrivemax,aantal):
vhand = crew.vhandle(vavgrec,trecovery,time[k]-time[idrivemax])
vpull[k] = vhand
vcrecovery[k] = crew.vcm(vhand, handlepos)
# vcrecovery[k] = vhand
Tdrag = drag*wf[k-1]**2 # drag torque
wf[k] = wf[k-1]-dt*Tdrag/inertia
wfdot[k] = (wf[k]-wf[k-1])/dt
ydot[k] = vcrecovery[k]
vc[k] = ydot[k]
ydotdot[k] = (ydot[k]-ydot[k-1])/dt
handlepos = d+d*crew.dxhandle(vavgrec,trecovery,time[k]-time[idrivemax])
Fhandle[k] = cord*(cordlength+handlepos)
Fres[k] = Nrowers*mc*ydotdot[k]
Fleg[k] = Fres[k]+Fhandle[k]
Ebungee[k] = 0.5*(cord*(cordlength+handlepos)**2 - cord*cordlength**2)
Pbungee[k] = (Ebungee[k]-Ebungee[k-1])/dt
ydot[0] = ydot[0]/2.
ydotdot[1]=(ydot[1]-ydot[0])/dt
Pq = (Nrowers*mc)*ydotdot*ydot
Pleg = Fleg*ydot
Phandle = Fhandle*vpull
Parm = Phandle-Fhandle*ydot
Plegdiss = 0.5*theconst*(abs(Pleg)-Pleg)
Plegsource = abs(Pleg)
Parmdiss = 0.5*theconst*(abs(Parm)-Parm)
Parmsource = abs(Parm)
# sources
Elegsource = cumsum(Plegsource)*dt
Earmsource = cumsum(Parmsource)*dt
Eleg = cumsum(Pleg)*dt
Earm = cumsum(Parm)*dt
Ehandle = cumsum(Phandle)*dt
# sinks
# drag power
Pw = drag*wf**3.
Ew = cumsum(Pw)*dt
Elegdiss = cumsum(Plegdiss)*dt
Earmdiss = cumsum(Parmdiss)*dt
# storage
Pwheel = inertia*wf*wfdot
Ewheel = cumsum(Pwheel)*dt
Ewheel = Ewheel - Ewheel[0]
Ebungee = cumsum(Pbungee)*dt
Pqrower = abs(Pq)
Pdiss = 0.5*theconst*(Pqrower-Pq)
Eq = cumsum(Pq)*dt
Eqrower = cumsum(Pqrower)*dt
Ediss = cumsum(Pdiss)*dt
# printing
if (doprint==1):
print(("Ediss rower ",Ediss[aantal-1]))
print(("E drag ",Ew[aantal-1]))
print(("Eleg ",Eqrower[aantal-1]))
print(("Ehandle ",Ehandle[aantal-1]))
print(("Ebungee ",Ebungee[aantal-1]))
print("")
print(("P handle ",Ehandle[aantal-1]/time[aantal-1]))
print(("P drag ",Ew[aantal-1]/time[aantal-1]))
print("")
# plotting
if (doplot==1):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, ydot,'r-',label = 'Crew velocity')
pyplot.plot(time, vpull,'k-',label = 'Handle velocity')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel('v (m/s)')
pyplot.show()
if (doplot==2):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Fhandle,'r-',label = 'Handle force')
pyplot.plot(time, Fleg,'b-',label = 'Leg force')
pyplot.plot(time, Fres,'g-',label = 'Accelerating force')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel('force (N)')
pyplot.show()
if (doplot==3):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Phandle, 'r-', label = 'Handle Power')
pyplot.plot(time, Pleg,'b-',label = 'Leg power')
pyplot.plot(time, Pq,'k-',label = 'Kinetic power')
pyplot.plot(time, Parm,'y-',label = 'Arm power')
pyplot.plot(time, Pq+Phandle-Parm-Pleg,'b+', label = 'should be zero')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel('power (W)')
pyplot.show()
if (doplot==4):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Ewheel,'g-',label = 'Flywheel energy stored')
pyplot.plot(time, Eq+Ebungee,'k-',label = 'Kinetic energy')
pyplot.plot(time, Ew,'r-',label = 'Drag dissipation')
pyplot.plot(time, Ediss,'b-',label = 'Rower body dissipation')
pyplot.plot(time, Ewheel+Eq+Ew+Ediss+Ebungee, 'b+', label = 'Sinks+Kinetic')
pyplot.plot(time, Ew+Ediss, 'r+', label = 'Sinks')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel('Energy (J)')
pyplot.show()
if (doplot==5):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Pleg, 'y-', label = 'Leg power')
pyplot.plot(time, Plegdiss,'g-',label = 'Leg dissipation')
pyplot.plot(time, Plegsource,'g+',label = 'Leg source')
pyplot.plot(time, Parm, 'r-', label = 'Arm power')
pyplot.plot(time, Parmdiss,'k-',label = 'Arm dissipation')
pyplot.plot(time, Parmsource,'k+',label = 'Arm source')
pylab.legend(loc='upper left')
pyplot.xlabel("time (s)")
pyplot.ylabel('power (W)')
pyplot.show()
if (doplot==6):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Elegsource+Ehandle, 'bo', label = 'Leg power')
pyplot.plot(time, Elegdiss,'g-',label = 'Leg dissipation')
pyplot.plot(time, Earm, 'r-', label = 'Arm power')
pyplot.plot(time, Ehandle, 'k+', label = 'Handle power')
pyplot.plot(time, Earmdiss,'k-',label = 'Arm dissipation')
pyplot.plot(time, Eqrower+Ewheel+Ebungee, 'y+', label = 'Eqrower+Ewheel+Ecord')
pyplot.plot(time, Elegsource+Earmsource,'b+', label = 'Sources')
pylab.legend(loc='upper left')
pyplot.xlabel("time (s)")
pyplot.ylabel('energy (J)')
pyplot.show()
if (doplot==7):
pyplot.clf()
pyplot.plot(time, Ew+Ediss, 'r-', label = 'Total Sinks')
# pyplot.plot(time, Elegsource+Earmsource,'go',label = 'Total Sources')
pyplot.plot(time, Eqrower+Ehandle,'y-',label = 'Total Sources 2')
pyplot.plot(time, Ewheel+Eq+Ew+Ediss+Ebungee, 'b+', label = 'Sinks+Kinetic')
pylab.legend(loc='lower right')
pyplot.xlabel("time (s)")
pyplot.ylabel('energy (J)')
pyplot.show()
if (doplot==8):
pyplot.clf()
pyplot.plot(time, ydot, 'r-', label = 'Crew velocity')
pylab.legend(loc='lower right')
pyplot.xlabel("time (s)")
pyplot.ylabel("v (m/s)")
pyplot.show()
if (doplot==9):
pyplot.clf()
wref = wf
pyplot.plot(time,wref,'r-',label='flywheel speed')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel("Flywheel speed (rad/sec)")
pyplot.show()
dw = wf[len(time)-1]-wf[0]
wavg = mean(wf)
wend = wf[len(time)-1]
energy = max(Ew+Ediss)
energyd = max(Ew)
energy = energy/Nrowers
energyd = energyd/Nrowers
power = energy*tempo/60.
powerd = energyd*tempo/60.
return [dw,wend,wavg,ratio,energy,power,powerd]
|
a742b88394f194afee3ef89bc4e878027fcac8b5
| 3,646,794
|
import numpy as np
def get_EXP3_policy(Q, eta, G_previous):
"""
Obtain EXP-3 policy based on a given Q-function. Also, return updated
values of G, to be used in future calls to this function.
Inputs:
1) Q: a num_states x num_actions matrix, in which Q[s][a] specifies
the Q-function in state s and action a.
2) eta: a scalar; this is the eta parameter defined in the EPMC algorithm.
3) G_previous: num_states x num_actions matrix; this is a matrix of the
G-values defined in the EPMC algorithm. These values are from the
previous iteration.
Outputs:
1) policy: a policy, specified by a num_states x num_actions matrix, in
which policy[s][a] is the probability of taking action a in state s.
2) G: num_states x num_actions updated G matrix, as defined in the EPMC
algorithm.
"""
num_actions = Q.shape[1]
# Update the policy:
policy = np.exp((eta / num_actions) * G_previous)
policy = (policy.T / policy.sum(axis=1)).T
policy = eta / num_actions + (1 - eta) * policy
# Update G:
G = G_previous + Q / policy
return policy, G
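# Hedged usage sketch: start with G = 0 and feed each call's G back into the next.
_num_states, _num_actions = 4, 3
_Q = np.random.rand(_num_states, _num_actions)
_G = np.zeros((_num_states, _num_actions))
_policy, _G = get_EXP3_policy(_Q, eta=0.1, G_previous=_G)
# each row of _policy sums to 1 and every entry is at least eta / num_actions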
|
80e886142fff398801e4f767ed09392d7f8b398c
| 3,646,796
|
def table_content(db, table):
"""
    return a 2-dimensional array containing all table values
========================================================
>>> table_content("sys", "host_ip")
[[1, 2, 3],
[2, 3, 4],
[3, 4, 5]]
========================================================
"""
#XXX: uses : `select * from table`
return execute_and_fetch(_SELECT_TABLE.format(db, table))
|
51b973f442131a157aa13ae0c0ed9f1d07a4115d
| 3,646,797
|
def process_sort_params(sort_keys, sort_dirs, default_keys=None,
default_dir='asc'):
"""Process the sort parameters to include default keys.
Creates a list of sort keys and a list of sort directions. Adds the default
keys to the end of the list if they are not already included.
When adding the default keys to the sort keys list, the associated
direction is:
1) The first element in the 'sort_dirs' list (if specified), else
2) 'default_dir' value (Note that 'asc' is the default value since this is
the default in sqlalchemy.utils.paginate_query)
:param sort_keys: List of sort keys to include in the processed list
:param sort_dirs: List of sort directions to include in the processed list
:param default_keys: List of sort keys that need to be included in the
processed list, they are added at the end of the list
if not already specified.
:param default_dir: Sort direction associated with each of the default
keys that are not supplied, used when they are added
to the processed list
:returns: list of sort keys, list of sort directions
:raise exception.InvalidInput: If more sort directions than sort keys
are specified or if an invalid sort
direction is specified
"""
if default_keys is None:
default_keys = ['created_at', 'id']
# Determine direction to use for when adding default keys
if sort_dirs and len(sort_dirs):
default_dir_value = sort_dirs[0]
else:
default_dir_value = default_dir
# Create list of keys (do not modify the input list)
if sort_keys:
result_keys = list(sort_keys)
else:
result_keys = []
# If a list of directions is not provided, use the default sort direction
# for all provided keys.
if sort_dirs:
result_dirs = []
# Verify sort direction
for sort_dir in sort_dirs:
if sort_dir not in ('asc', 'desc'):
msg = _("Unknown sort direction, must be 'desc' or 'asc'.")
raise exception.InvalidInput(reason=msg)
result_dirs.append(sort_dir)
else:
result_dirs = [default_dir_value for _sort_key in result_keys]
# Ensure that the key and direction length match
while len(result_dirs) < len(result_keys):
result_dirs.append(default_dir_value)
# Unless more direction are specified, which is an error
if len(result_dirs) > len(result_keys):
msg = _("Sort direction array size exceeds sort key array size.")
raise exception.InvalidInput(reason=msg)
# Ensure defaults are included
for key in default_keys:
if key not in result_keys:
result_keys.append(key)
result_dirs.append(default_dir_value)
return result_keys, result_dirs
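# Hedged usage sketch (the `_` translator and `exception` module come from the
# host project): default keys are appended using the first supplied direction.
_keys, _dirs = process_sort_params(['name'], ['desc'])
# _keys == ['name', 'created_at', 'id']; _dirs == ['desc', 'desc', 'desc']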
|
1f11f5d7d4fbe5c1864ace120c1dbde7b6023acb
| 3,646,798
|
def namify(idx):
"""
Helper function that pads a given file number and return it as per the dataset image name format.
"""
    len_data = 6  # ILSVRC images are named in the form 000000.JPEG
len_ = len(str(idx))
need = len_data - len_
assert len_data >= len_, "Error! Image idx being fetched is incorrect. Invalid value."
pad = '0'*need
return pad+str(idx)
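# Hedged usage sketch:
assert namify(42) == '000042'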
|
069ff7a297f944e9e0e51e5e100276a54fa51618
| 3,646,799
|
def delete_meeting(request, club_name, meeting_id):
"""Meeting is deleted by the host"""
meeting = Meeting.objects.get(id=meeting_id)
MeetingAttendance.objects.filter(user=request.user, meeting=meeting).delete()
meeting.delete()
return redirect('meeting_list', club_name)
|
1a81e001c2cb6175ec4a7693f745f4090c06a8e3
| 3,646,800
|
def mock_environ():
"""Mock for `os.environ.copy`"""
return {"SOME_ENV_VAR": "42"}
|
d68d44d793847f46354a8cf2503b654a40eed92a
| 3,646,802
|
def get_bedtools_coverage_cmd(bam_filename, gff_filename,
output_filename,
require_paired=False):
"""
Get bedtools command for getting the number of reads
from the BAM filename that are strictly contained within
each interval of the GFF.
"""
args = {"bam_filename": bam_filename,
"gff_filename": gff_filename}
# Do not include strandedness flag since that doesn't handle
# paired-end cases
intersect_cmd = "bedtools intersect -abam %(bam_filename)s " \
"-b %(gff_filename)s -f 1 -ubam " %(args)
coverage_cmd = "%s | bedtools coverage -abam - -b %s -counts > %s" \
%(intersect_cmd, gff_filename, output_filename)
return coverage_cmd
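# Hedged usage sketch (placeholder file names): the returned string is a shell
# pipeline of `bedtools intersect` into `bedtools coverage`.
_cmd = get_bedtools_coverage_cmd("sample.bam", "events.gff", "counts.txt")
# _cmd starts with "bedtools intersect -abam sample.bam -b events.gff -f 1 -ubam"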
|
e4d6da3e3e7fe611c3bc3023bea3a76a0003a1f2
| 3,646,803
|
from typing import List
from typing import Tuple
from typing import Dict
import numpy as np
def get_notes_mapping_dict(notes_list: List) -> Tuple[Dict, np.array]:
"""
Function get list of midi notes and returns mapping for each note
:param notes_list:
:return:
"""
assert len(notes_list) > 0, 'Empty notes list !!'
full_list = sorted(set(notes_list))
notes2idx = {note_e: i for i, note_e in enumerate(full_list)}
idx2note = np.array(full_list)
return notes2idx, idx2note
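# Hedged usage sketch with MIDI pitch numbers:
_notes2idx, _idx2note = get_notes_mapping_dict([60, 62, 60, 64])
# _notes2idx == {60: 0, 62: 1, 64: 2}; _idx2note == array([60, 62, 64])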
|
8ea85f83f6d048587ed762272ae19e4176c7d4f3
| 3,646,804
|
import numpy as np
def p_y_given_x(X, mean_x, variance_x):
    """
    Calculates the probability of class
    value being y, given label is x.
PARAMETERS
==========
X: list
Input of unknown class values
given by user.
mean_x: ndarray(dtype=int,ndim=1,axis=1)
Mean for given label.
variance_x: ndarray(dtype=int,ndim=1,axis=1)
Variance for given label.
RETURNS
=======
p: float
Probability, according to gaussian
distribution, for given mean and variance.
"""
p = 1 / (np.sqrt(2 * np.pi * variance_x)) * \
np.exp((-(X - mean_x)**2) / (2 * variance_x))
return p
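# Hedged usage sketch: the density of x = 1.0 under a Gaussian with mean 0 and
# variance 1 is roughly 0.2420.
_p = p_y_given_x(np.array([1.0]), np.array([0.0]), np.array([1.0]))
# _p ~= array([0.24197072])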
|
64fc1e5e5c81affdad1cc02f5a62d5ba186a1129
| 3,646,805
|
def run_train(cfg, wandb):
"""Train function starts here
Args:
cfg (obj `DictConfig`): This is the config from hydra.
"""
data_directory = cfg.data.data_directory
train_batch_size = cfg.data.train_batch_size
max_seq_len = cfg.task.max_seq_len # Maximum length per sequence
max_predictions_per_seq = cfg.task.max_predictions_per_seq # Maximum predictions (Mask) per sequence
dtype = cfg.trainer.dtype
is_training = cfg.model.is_training
use_dropout = cfg.model.use_dropout
loss_type = cfg.optimizer.loss_type
use_constant_lr = cfg.optimizer.use_constant_lr
num_layers = cfg.model.num_layers
return_all_layer_outputs = False
training_loss_names = None
if loss_type and loss_type == 'joint':
return_all_layer_outputs = True
training_loss_names = {'loss_{}'.format(i + 1) for i in range(num_layers)}
learning_rate = cfg.optimizer.learning_rate
warmup_rate = cfg.optimizer.warmup_rate
decay_function = cfg.optimizer.decay_function
steps_per_epoch = cfg.trainer.steps_per_epoch
epochs = cfg.trainer.epochs
distribution_strategy = cfg.trainer.strategy
num_gpus = cfg.trainer.num_gpus
tpu_address = cfg.trainer.tpu_address
model_checkpoint_dir = cfg.trainer.model_checkpoint_dir
# Get dataset and tokenizer
tokenizer_layer = get_tokenizer()
# We split text by words (whitespace), inside MLM function.
masked_lm_map_fn = mlm_fn(tokenizer_layer, max_seq_len, max_predictions_per_seq)
train_dataset = get_dataset(data_directory, masked_lm_map_fn, train_batch_size)
# validation_dataset = get_validation_data(all_questions, eval_batch_size, tokenizer_layer, max_seq_len)
# Get Model
model_fn = get_model(return_all_layer_outputs, is_training, use_dropout, tokenizer_layer.vocab_size.numpy())
# Get Optimizer
# steps_per_epoch is number of examples seen during one epoch (with batch size)
# total examples per epoch = steps_per_epoch * batch_size
examples_per_epoch = steps_per_epoch # Assume steps_per_epoch = 100000, and epochs = 5, examples = 500000
optimizer_fn = get_optimizer(
learning_rate, examples_per_epoch, epochs, warmup_rate, decay_function, use_constant_lr
)
# Get loss
loss_fn = get_loss(loss_type)
# Get trainer
trainer = get_trainer(
distribution_strategy=distribution_strategy, num_gpus=num_gpus, tpu_address=tpu_address, dtype=dtype
)
# Train
history = trainer.run(
model_fn=model_fn,
optimizer_fn=optimizer_fn,
train_dataset=train_dataset,
train_loss_fn=loss_fn,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
model_checkpoint_dir=model_checkpoint_dir,
batch_size=train_batch_size,
training_loss_names=training_loss_names,
repeat_dataset=True,
wandb=wandb,
)
return history
|
1b454fd28b9c308700fde67336a65e0e54be41ca
| 3,646,806
|
def wrf_ll_to_ij(lon, lat, map_proj, truelat1=-999.,truelat2=-999.,stand_lon=999., \
ref_lat=-999,ref_lon=-999,pole_lat=90,pole_lon=0,knowni=-999,\
knownj=-999,dx=-999, dy=-999, latinc=-999., loninc=-999):
"""
Converts lon/lat values to i/j index values.
lon,lat - lat,lon values to convert
map_proj -- map projection
"""
lon2 = _promote_scalar(lon)
lat2 = _promote_scalar(lat)
map_proj2 = _promote_scalar(map_proj)
truelat12 = _promote_scalar(truelat1)
truelat22 = _promote_scalar(truelat2)
stand_lon2 = _promote_scalar(stand_lon)
ref_lat2 = _promote_scalar(ref_lat)
ref_lon2 = _promote_scalar(ref_lon)
pole_lat2 = _promote_scalar(pole_lat)
pole_lon2 = _promote_scalar(pole_lon)
knowni2 = _promote_scalar(knowni)
knownj2 = _promote_scalar(knownj)
dx2 = _promote_scalar(dx)
dy2 = _promote_scalar(dy)
latinc2 = _promote_scalar(latinc)
loninc2 = _promote_scalar(loninc)
return fplib.wrf_ll_to_ij(lon2,lat2,map_proj2,truelat12,truelat22,stand_lon2, \
ref_lat2,ref_lon2,pole_lat2,pole_lon2,knowni2, knownj2,\
dx2, dy2, latinc2,loninc2)
|
9f1cbfa535584d0de1a7e1736fa08def0bb52f71
| 3,646,807
|
from typing import List
def create_specimen_resource(specimen_identifier: List[dict],
patient_reference: dict,
specimen_type: str,
received_datetime: str = None,
collection_datetime: str = None,
note: str = None) -> dict:
"""
Create specimen resource following the FHIR format
(http://www.hl7.org/implement/standards/fhir/specimen.html)
"""
specimen_type_system = 'http://terminology.hl7.org/CodeSystem/v2-0487'
specimen_resource = {
"resourceType": "Specimen",
"identifier": specimen_identifier,
"subject": patient_reference,
"type": create_codeable_concept(specimen_type_system, specimen_type)
}
if received_datetime:
specimen_resource["receivedTime"] = received_datetime
if collection_datetime:
specimen_resource["collection"] = {
"collectedDateTime": collection_datetime
}
if note:
specimen_resource["note"] = [{"text": note}]
return specimen_resource
|
05f8c314bae1c160e05b7d7c22343d3d195eb262
| 3,646,808
|
from typing import List
from typing import Dict
def get_attribute_slots(
tracker: "Tracker", object_attributes: List[Text]
) -> List[Dict[Text, Text]]:
"""
Copied from rasa_sdk.knowledge_base.utils and overridden
as we also need to return the entity role for range queries.
If the user mentioned one or multiple attributes of the provided object_type in
an utterance, we extract all attribute values from the tracker and put them
in a list. The list is used later on to filter a list of objects.
For example: The user says 'What Italian restaurants do you know?'.
The NER should detect 'Italian' as 'cuisine'.
We know that 'cuisine' is an attribute of the object type 'restaurant'.
Thus, this method returns [{'name': 'cuisine', 'value': 'Italian'}] as
list of attributes for the object type 'restaurant'.
Args:
tracker: the tracker
object_attributes: list of potential attributes of object
Returns: a list of attributes
"""
attributes = []
for attr in object_attributes:
attr_val = tracker.get_slot(attr) if attr in tracker.slots else None
if attr_val is not None:
entities = tracker.latest_message.get("entities", [])
role = [e['role'] for e in entities if e['entity'] == attr and e['value'] == attr_val and 'role' in e]
role = role[0] if len(role) else None
attributes.append({"name": attr, "value": attr_val, "role": role})
return attributes
|
59876d36adc362c074f6be267d0ccb65735256dd
| 3,646,809
|
from math import sqrt
def pearson_correlation(self, preferences):
"""
    Returns the Pearson correlation of two users, A and B, by
performing the PPMC calculation on the scatter plot of (a, b)
ratings on the shared set of critiqued titles.
"""
# Store the length to save traversals of the len computation.
# If they have no rankings in common, return 0.
length = len(preferences)
if length == 0:
return 0
    # Loop through the preferences of each user once and compute the
    # various summations that are required for our final calculation.
sumA = sumB = sumSquareA = sumSquareB = sumProducts = 0
for a, b in preferences.values():
sumA += a
sumB += b
sumSquareA += pow(a, 2)
sumSquareB += pow(b, 2)
sumProducts += a*b
# Calculate Pearson Score
numerator = (sumProducts*length) - (sumA*sumB)
denominator = sqrt(((sumSquareA*length) - pow(sumA, 2)) * ((sumSquareB*length) - pow(sumB, 2)))
# Prevent division by zero.
if denominator == 0:
return 0
return abs(numerator / denominator)
|
290bad340c7745883d41f0a6cde809b4ae8c987f
| 3,646,810
|
def update_stakeholder(id: int, name: str = None, company: str = None, role: str = None,
attitude: str = None, archived: bool = None) -> Stakeholder or None:
"""
Provide a POST API endpoint for updating a specific stakeholder.
:param id: ID of the stakeholder.
:param name: Name of the stakeholder.
:param company: Company of the stakeholder.
:param role: Role of the stakeholder.
    :param attitude: Attitude of the stakeholder.
    :param archived: Whether the stakeholder is archived.
    :return:
"""
try:
stakeholder = Stakeholder.query.get(id)
if not name:
raise KeyError('Name must not be empty')
stakeholder.name = name
stakeholder.company = company if company is not None else stakeholder.company
stakeholder.role = role if role is not None else stakeholder.role
stakeholder.attitude = attitude if attitude is not None else stakeholder.attitude
stakeholder.archived = archived if archived is not None else stakeholder.archived
db.session.commit()
return stakeholder
except AttributeError:
raise OperationalError(f"Could not load stakeholder with id {id}", {}, '')
except TypeError:
return None
|
fd13119e1435535c2ada786e207ab7c5acd9f2ab
| 3,646,811
|
def register():
"""Sign up user."""
if current_user.is_authenticated:
return redirect(url_for("homepage"))
form = RegistrationForm()
if form.validate_on_submit():
user = User(
username=form.username.data,
name=form.name.data,
email=form.email.data,
)
user.set_password(form.password.data)
user.set_is_admin()
db.session.add(user)
db.session.commit()
flash("Your account has been created, you are now able to log in.")
return redirect(url_for("users.login"))
return render_template("register.html", title="Register", form=form)
|
155365d20fd5838784d4380ae1bc02373ca11bf5
| 3,646,812
|
from itertools import islice
def txt_as_matrix(buff, border):
"""\
Returns the text QR code as list of [0,1] lists.
:param io.StringIO buff: Buffer to read the matrix from.
"""
res = []
code = buff.getvalue().splitlines()
len_without_border = len(code) - border
for l in islice(code, border, len_without_border):
res.append([int(clr) for clr in islice(l, border, len_without_border)])
return res
|
086e6c0b4f3831288e1ca2c37047b5c0fb6f00e0
| 3,646,813
|
def create_logismosb_node(name="LOGISMOSB"):
"""
    Creates a LOGISMOS-B node and sets its inputs from the machine learning config.
    :param name: name of the node.
    :return: configured LOGISMOS-B node.
"""
node = Node(LOGISMOSB(), name=name)
config = read_machine_learning_config()
return set_inputs(node, config)
|
8a1fa419ae94df09802f38badb68b97660d35987
| 3,646,814
|
import numpy as np
import networkx as nx
import osmnx as ox
def calc_tract_accessibility(tracts, pois, G, weight='length',
func=acc_cumulative_gaussian,k=5,
random_seed=None, func_kws={},
pois_weight_column=None,iter_cap=1_000):
"""
Calculate accessibility by census tract using given accessibility function.
Parameters
----------
tracts : GeoDataframe
Area GeoDataFrame containing census tract information
pois : GeoDataFrame
Point GeoDataFrame containing points of interest
G : NetworkX graph structure
Network Graph.
weight : string
        Graph's weight attribute for shortest paths (such as length or travel time)
func : function
Access score function to use. Options are: acc_cumulative,
acc_soft_threshold, and acc_cumulative_gaussian
func_kws : dictionary
arguments for the access score function
k : int
number of sampled points per tract
pois_weight_column : string
Column in the pois GeoDataFrame with location weights.
random_seed : int
random seed.
iter_cap : int
Parameter to limit memory usage. If the code raises memory error, lowering this
parameter might help.
Returns
-------
Dictionary in the form {tract index: average accessibility score}
"""
assert 0<k and type(k)==int, '"k" must be a positive integer'
# get places on the gdf
X = np.array([n.coords[0][0] for n in pois['geometry']])
Y = np.array([n.coords[0][1] for n in pois['geometry']])
#set places to nodes
nodes = ox.get_nearest_nodes(G,X,Y, method='balltree')
attrs = {}.fromkeys(G.nodes,0)
if pois_weight_column is None:
pois_weight_column = 'temp'
pois = pois.copy()
pois[pois_weight_column] = 1
for node, val in zip(nodes,pois[pois_weight_column]):
attrs[node] += val
nx.set_node_attributes(G,attrs,pois_weight_column)
# get igraph object for fast computations
Gig = get_full_igraph(G)
#create a dictionary for cross-references
node_dict = {}
for node in Gig.vs:
node_dict[int(node['osmid'])] = node
#get nodes to target (for faster shortest paths)
n_targets = [n for n in G.nodes if G.nodes[n][pois_weight_column]>0]
nig_targets = [node_dict[n] for n in n_targets]
vals = [G.nodes[n][pois_weight_column] for n in n_targets]
loop = tracts.iterrows()
X,Y = [],[]
for tract in tracts.iterrows():
tract = tract[1]
poly = tract['geometry']
# get k points within the polygon
X_,Y_ = random_points_in_polygon(k,poly,seed=random_seed)
#match points to graph
X+=X_
Y+=Y_
X = np.array(X)
Y = np.array(Y)
trackt_ns = ox.get_nearest_nodes(G,X,Y,method='balltree')
ig_nodes = [node_dict[n] for n in trackt_ns]
#initiate total accessibility as zero
#calc distances to nodes
acc=[]
if len(ig_nodes)>=iter_cap*k:
loop = list(tracts.iterrows())
loop = [_[1] for _ in loop]
sects = [ig_nodes[x:x+iter_cap*k] for x in range(0,int((len(ig_nodes)//(iter_cap*k)+1)*(iter_cap*k))+1,iter_cap*k)]
loops = [loop[x:x+iter_cap] for x in range(0,int((len(loop)//(iter_cap)+1)*iter_cap)+1,iter_cap)]
# print(len(loops),len(sects))
for section,l in zip(sects,loops):
distances = Gig.shortest_paths_dijkstra(source=section, target=nig_targets, weights=weight)
n=0
for tract in l:
total_acc=0
for ds in distances[n:n+k]:
new = np.array(vals)*func(np.array(ds), **func_kws)
total_acc += new.sum()
acc.append(total_acc/k)
n+=k
else:
distances = Gig.shortest_paths_dijkstra(source=ig_nodes, target=nig_targets, weights=weight)
n=0
for tract in loop:
total_acc=0
for ds in distances[n:n+k]:
new = np.array(vals)*func(np.array(ds), **func_kws)
total_acc += new.sum()
acc.append(total_acc/k)
n+=k
return {i:a for i,a in zip(tracts.index,acc)}
|
8c3abdf1da08d74926892bdf597d33066296ba38
| 3,646,815
|
import numpy as np
def _exp_func(x, a, b, c):
"""Exponential function of a single variable, x.
Parameters
----------
x : float or numpy.ndarray
Input data.
a : float
First parameter.
b : float
Second parameter.
c : float
Third parameter.
Returns
-------
float or numpy.ndarray
a * exp(b * x) + c
"""
return a * np.exp(b * x) + c
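# Hedged usage sketch: fitting noise-free synthetic data with scipy's curve_fit
# should recover parameters close to (2.0, 1.5, 0.5).
from scipy.optimize import curve_fit
_x = np.linspace(0, 1, 50)
_y = _exp_func(_x, 2.0, 1.5, 0.5)
(_a, _b, _c), _ = curve_fit(_exp_func, _x, _y, p0=(1.0, 1.0, 0.0))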
|
41b6299561162b41189efcfa14820eb8e12396eb
| 3,646,816
|
def seek_inactive(x, start, length, direction=-1, abstol=0):
""" Seek inactive region to the left of start
Example
-------
>>> # _______ |
>>> seek_inactive([3, 2, 1, 1, 1, 2, 3, 4, 2], start=7, length=3)
(1, slice(2, 4))
When no sufficiently long sequence is found we return the end
>>> # _ |
>>> seek_inactive([3, 2, 1, 1, 1, 2, 3, 4, 2], start=7, length=5)
(3, slice(0, 0))
"""
end = -1 if direction == -1 else len(x)
ind = start
for i in range(start, end, direction):
if abs(x[i] - x[ind]) > abstol:
ind = i
if abs(ind - i) >= length - 1:
return x[ind], slice(ind, i, direction)
if direction == 1:
return x[-1], slice(-1, -1)
else:
return x[0], slice(0, 0)
|
a0029e0c145381b2acf57f77107d75d89c909b39
| 3,646,817
|
from itertools import product
from typing import Counter
def word_cross_product_phi(t1, t2):
"""Basis for cross-product features. This tends to produce pretty
dense representations.
Parameters
----------
t1, t2 : `nltk.tree.Tree`
As given by `str2tree`.
Returns
-------
defaultdict
Maps each (w1, w2) in the cross-product of `t1.leaves()` and
`t2.leaves()` to its count. This is a multi-set cross-product
(repetitions matter).
"""
return Counter([(w1, w2) for w1, w2 in product(t1.leaves(), t2.leaves())])
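# Hedged usage sketch: trees built directly with nltk's Tree.fromstring instead
# of the str2tree helper mentioned in the docstring.
from nltk.tree import Tree
_t1 = Tree.fromstring("(S (N dogs) (V bark))")
_t2 = Tree.fromstring("(S (N cats) (V meow))")
_phi = word_cross_product_phi(_t1, _t2)
# _phi == Counter({('dogs', 'cats'): 1, ('dogs', 'meow'): 1,
#                  ('bark', 'cats'): 1, ('bark', 'meow'): 1})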
|
dd5ab36d48abce087afa99b98a05c97a0ee30a76
| 3,646,818
|
import numpy as np
def cube_filter_highpass(array, mode='laplacian', verbose=True, **kwargs):
"""
Apply ``frame_filter_highpass`` to the frames of a 3d or 4d cube.
Parameters
----------
array : numpy ndarray
Input cube, 3d or 4d.
mode : str, optional
``mode`` parameter to the ``frame_filter_highpass`` function. Defaults
to a Laplacian high-pass filter.
verbose : bool, optional
If ``True`` timing and progress bar are shown.
**kwargs : dict
Passed through to the ``frame_filter_highpass`` function.
Returns
-------
filtered : numpy ndarray
High-pass filtered cube.
"""
array_out = np.empty_like(array)
if array.ndim == 3:
for i in Progressbar(range(array.shape[0]), verbose=verbose):
array_out[i] = frame_filter_highpass(array[i], mode=mode, **kwargs)
elif array.ndim == 4:
for i in Progressbar(range(array.shape[1]), verbose=verbose):
for lam in range(array.shape[0]):
array_out[lam][i] = frame_filter_highpass(array[lam][i],
mode=mode, **kwargs)
else:
raise TypeError('Input array is not a 3d or 4d cube')
return array_out
|
21c689249ad32919dbb410b2b2b9e221ce31f4df
| 3,646,819
|
import requests
import json
def translate_text(text: str, url: str, model_id) -> TranslatedObject:
"""Translates a text with the url of a translation server. The url is the url that comes up when you start the
translation model"""
assert type(text) == str, "Text has to be of type string"
assert type(url) == str, "Url has to be of type string"
model_ids = get_valid_model_ids()
if model_id not in model_ids:
raise ModelIDNotFoundException(model_id, model_ids)
# text = re.sub(r"([?.!,:;¿])", r" \1 ", text)
# text = re.sub(r'[" "]+', " ", text)
text = mt_en.tokenize(text, return_str=True)
url = f"{url}/translator/translate"
headers = {"Content-Type": "application/json"}
data = [{"src": text, "id": model_id}]
response = requests.post(url, json=data, headers=headers)
translation = response.text
jsn = json.loads(translation)
tokens = jsn[0][0]['tgt']
input_text = jsn[0][0]['src']
score = jsn[0][0]['pred_score']
# text = re.sub(r" ([?.!,:،؛؟¿])", r"\1", text)
# text = mt_nl.detokenize(tokens)
text = tokens
return TranslatedObject(input_text, text, score)
|
54d7f1e93f6452edf140e845795bfc9bfd9bb092
| 3,646,820
|
def quantized_avg_pool_run(shape, dtype1, shape_list, dtype2, ksize, strides,
padding, data_format, quant_algo,
scale_mode, scale_sqrt, attrs):
"""run function"""
if not isinstance(shape_list, (list, tuple, type(None))):
raise RuntimeError("shape_list should be a list, tuple or None!")
op_attrs = [ksize, strides, padding, data_format,
quant_algo, scale_mode, scale_sqrt]
if shape_list is None:
mod = utils.op_build_test(quantized_avg_pool, [shape], [dtype1],
op_attrs=[None] + op_attrs,
kernel_name='quantized_avgpool', attrs=attrs)
else:
mod = utils.op_build_test(quantized_avg_pool,
[shape, shape_list], [dtype1, dtype2],
op_attrs=op_attrs,
kernel_name='quantized_avgpool', attrs=attrs)
expect, inputs, out_buf = gen_data(shape, dtype1, shape_list, dtype2, ksize,
strides, padding, data_format, quant_algo,
scale_mode, scale_sqrt)
output = utils.mod_launch(mod, (*inputs, *out_buf), expect=expect)
rtol, atol = get_rtol_atol("quantized_avgpool", dtype1)
if expect.dtype in ("int8", "uint8"):
cmp_res = compare_int(output, expect)
else:
cmp_res = compare_tensor(output, expect, rtol=rtol, atol=atol)
return inputs, output, expect, cmp_res
|
d704d90a3124607c31e2470cdd1b2fafe967e05e
| 3,646,821
|
def dry(message, func, *args, **kw):
"""Wraps a function that performs a destructive operation, so that
nothing will happen when a dry run is requested.
Runs func with the given arguments and keyword arguments. If this
is a dry run, print the message rather than running the function."""
if message is not None:
info(message)
if tasks.environment.dry_run:
return
return func(*args, **kw)
|
4dd73f2640b5f5a063db6b13ec970c309e753f78
| 3,646,822
|
def move_cups(current: int, cups: CircularLinkedList) -> int: # return the new current cup
"""
1. The crab picks up the three cups that are immediately clockwise of the
current cup. They are removed from the circle; cup spacing is adjusted
as necessary to maintain the circle.
2. The crab selects a destination cup: the cup with a label equal to the
current cup's label minus one. If this would select one of the cups that
was just picked up, the crab will keep subtracting one until it finds a
cup that wasn't just picked up. If at any point in this process the value
goes below the lowest value on any cup's label, it wraps around to the
highest value on any cup's label instead.
3. The crab places the cups it just picked up so that they are immediately
clockwise of the destination cup. They keep the same order as when they
were picked up.
4. The crab selects a new current cup: the cup which is immediately
clockwise of the current cup.
Note that the current cup is specified by its label.
"""
# Pick up some cups from the next available location...
adjacent = cups.next(current)
picked_up = cups.to_list(location=adjacent, length=3)
# find the destination cup...
target = current - 1
counter = 0
while (target in picked_up) or (target not in cups):
target -= 1
counter += 1
if target < 0:
target = max(cups)
if counter > len(cups):
raise AssertionError("Stuck!")
# move the cups...
cups.move(dst=target, src=adjacent, length=3)
# return the new current cup...
return cups.next(current)
|
5f66d5066c29c05bb264bedc6ac4f27ee30e4488
| 3,646,823
|
def get_colormap(n=18, randomize=True):
""" "Get expanded colormap"""
n_colors = np.ceil(n / 6) + 1
cols = []
for col in COLORS:
pal = sns.light_palette(col, n_colors=n_colors)
for rgb in pal[1:]:
cols.append(rgb)
if randomize:
shuffle(cols) # shuffle to break grouping
return ListedColormap(cols)
|
f31ffd3e3667b947e1034617e0165516b942be5a
| 3,646,825
|
def partition2(n):
""" Coin partitions. Let partition(n) represent the number of different ways in which n coins can be separated into piles.
For example, five coins can be separated into piles in exactly seven different ways, so partition(5)=7. """
# dynamic programming table, table cell (i,j), parition size = i + 1, target n = i + 1, cell value = partition(n)
dp = {} # using dict as dynamic programming table is really slow
for i in range(n):
dp[(0,i)] = 1 # One way to partition any n using piles of size 1
dp[(i,0)] = 1 # One way to partition n=1
for i in range(1,n):
for j in range(1,n):
value = dp[(i-1,j)] # Include ways to partition n using piles <i
if i == j:
value += 1 # One way to make n using piles of the same size
elif j > i:
value += dp[(i,j-i-1)] # Include ways to make j-i using piles of size <i
dp[(i,j)] = value
if i == j:
print(i+1,value)
if value % N == 0:
print('result',i+1,value)
return value
return dp[(n-1,n-1)]
|
3537d9eadeb4ba9265c9d9bbe7016f41aecc009e
| 3,646,826
|
import torch
import torch.distributed as dist
def all_gather_batch(tensors):
"""
Performs all_gather operation on the provided tensors.
"""
# Queue the gathered tensors
world_size = get_world_size()
# There is no need for reduction in the single-proc case
if world_size == 1:
return tensors
tensor_list = []
output_tensor = []
for tensor in tensors:
tensor_all = [torch.ones_like(tensor) for _ in range(world_size)]
dist.all_gather(
tensor_all,
tensor,
async_op=False # performance opt
)
tensor_list.append(tensor_all)
for tensor_all in tensor_list:
output_tensor.append(torch.cat(tensor_all, dim=0))
return output_tensor
|
640f737f9daf9a934cc97673dcec033caf784c62
| 3,646,827
|