| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
import numpy as np
import pandas as pd
def preprocess_data_for_clustering(df):
"""Prepare data in order to apply a clustering algorithm
Parameters
----------
df : pandas.DataFrame
Input data, *i.e.* city-related timeseries, supposed to have
`station_id`, `ts` and `nb_bikes` columns
Returns
-------
pandas.DataFrame
        Simplified version of `df`, ready to be used for clustering
"""
    # Filter out inactive stations
    max_bikes = df.groupby("station_id")["nb_bikes"].max()
    inactive_stations = max_bikes[max_bikes == 0].index.tolist()
    active_station_mask = np.logical_not(df['station_id'].isin(inactive_stations))
df = df[active_station_mask]
# Set timestamps as the DataFrame index and resample it with 5-minute periods
df = (df.set_index("ts")
.groupby("station_id")["nb_bikes"]
.resample("5T")
.mean()
.bfill())
df = df.unstack(0)
# Drop week-end records
df = df[df.index.weekday < 5]
# Gather data regarding hour of the day
df['hour'] = df.index.hour
df = df.groupby("hour").mean()
return df / df.max()
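# Usage sketch (added; not from the original source): a minimal synthetic input
# with the expected `station_id`, `ts` and `nb_bikes` columns.
if __name__ == "__main__":
    rng = pd.date_range("2021-03-01", periods=288, freq="5T")  # one Monday of 5-minute slots
    demo = pd.DataFrame({
        "ts": list(rng) * 2,
        "station_id": [1] * 288 + [2] * 288,
        "nb_bikes": [5] * 288 + [0] * 144 + [10] * 144,
    })
    profiles = preprocess_data_for_clustering(demo)
    print(profiles.shape)  # (24, 2): one hourly profile per active station, scaled to [0, 1]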
|
144c701b3be12aed2a1488a08eb05c65c6d704c5
| 3,646,700
|
def chars(line):
"""Returns the chars in a TerminalBuffer line.
"""
return "".join(c for (c, _) in notVoids(line))
|
1ffad7e9d0cc800f8de579fa30aeb108a12bd8d2
| 3,646,701
|
def map_is_finite(query_points: tf.Tensor, observations: tf.Tensor) -> Dataset:
"""
:param query_points: A tensor.
:param observations: A tensor.
:return: A :class:`~trieste.data.Dataset` containing all the rows in ``query_points``,
along with the tensor result of mapping the elements of ``observations`` to: `1` if they are
a finite number, else `0`, with dtype `tf.uint8`.
:raise ValueError or InvalidArgumentError: If ``query_points`` and ``observations`` do not
satisfy the shape constraints of :class:`~trieste.data.Dataset`.
"""
return Dataset(query_points, tf.cast(_is_finite(observations), tf.uint8))
|
e22571a179acfb8261924eaf71dec40d17adc47d
| 3,646,702
|
def docker_image_exists(args, image): # type: (EnvironmentConfig, str) -> bool
"""Return True if the image exists, otherwise False."""
try:
docker_command(args, ['image', 'inspect', image], capture=True)
except SubprocessError:
return False
return True
|
ba2eedacef0179b25d203f00cf42fb4f4e4f9b72
| 3,646,703
|
import os
import shutil
def remove_local(path):
"""Remove a local file or directory.
Arguments:
path (str): Absolute path to the file or directory.
Returns:
Boolean indicating result.
"""
if os.path.isfile(path):
# Regular file
remover = os.remove
elif os.path.isdir(path):
# Directory
remover = shutil.rmtree
else:
# What?
cprint(m.PATH_NOEXIST % path, 'red')
return False
try:
remover(path)
except Exception as e:
# Something failed
cprint(m.RM_ERR % (path, e), 'red')
return False
return True
|
7267d37bab380b7a9dc103ee3002f9c454cf7bf2
| 3,646,704
|
def get_g2_fit_general_two_steps(
g2,
taus,
function="simple_exponential",
second_fit_range=[0, 20],
sequential_fit=False,
*argv,
**kwargs,
):
"""
Fit g2 in two steps,
i) Using the "function" to fit whole g2 to get baseline and beta (contrast)
ii) Then using the obtained baseline and beta to fit g2 in a "second_fit_range" by using simple_exponential function
"""
g2_fit_result, taus_fit, g2_fit = get_g2_fit_general(
g2, taus, function, sequential_fit, *argv, **kwargs
)
guess_values = {}
for k in list(g2_fit_result[0].params.keys()):
guess_values[k] = np.array(
[g2_fit_result[i].params[k].value for i in range(g2.shape[1])]
)
if "guess_limits" in kwargs:
guess_limits = kwargs["guess_limits"]
else:
guess_limits = dict(
baseline=[1, 1.8],
alpha=[0, 2],
beta=[0.0, 1],
relaxation_rate=[0.001, 10000],
)
g2_fit_result, taus_fit, g2_fit = get_g2_fit_general(
g2,
taus,
function="simple_exponential",
sequential_fit=sequential_fit,
fit_range=second_fit_range,
fit_variables={
"baseline": False,
"beta": False,
"alpha": False,
"relaxation_rate": True,
},
guess_values=guess_values,
guess_limits=guess_limits,
)
return g2_fit_result, taus_fit, g2_fit
|
d02fbf1796e00b8f490a97a7f7274bab1233a823
| 3,646,705
|
import doctest
import sys
def run_doctest(module, verbosity=None):
"""Run doctest on the given module. Return (#failures, #tests).
If optional argument verbosity is not specified (or is None), pass
test_support's belief about verbosity on to doctest. Else doctest's
usual behavior is used (it searches sys.argv for -v).
"""
if verbosity is None:
verbosity = verbose
else:
verbosity = None
# Direct doctest output (normally just errors) to real stdout; doctest
# output shouldn't be compared by regrtest.
save_stdout = sys.stdout
sys.stdout = get_original_stdout()
try:
f, t = doctest.testmod(module, verbose=verbosity)
if f:
raise TestFailed("%d of %d doctests failed" % (f, t))
finally:
sys.stdout = save_stdout
    if verbose:
        print('doctest (%s) ... %d tests with zero failures' %
              (module.__name__, t))
return f, t
|
cc50538cf8cf50959a4c67eb1c37010d2eab45a9
| 3,646,706
|
def rate_matrix_arrhenius_time_segmented(energies, barriers, segment_temperatures, segment_start_times, t_range):
"""
Compute the rate matrix for each time ``t`` in ``t_range``, where the bath temperature is a piecewise constant
function of time.
The bath temperature function, by which the rate matrices are calculated, is a piecewise constant function where
    each piece is a segment described by its temperature and the time at which it starts.
First, the temperature for every time, denoted by ``T(t)``, is calculated as follows:
``T(t) = Ti`` where ``t = segment_start_times[i]`` and ``Ti = segment_temperatures[i]``.
Then, for every time ``t`` in ``t_range``, a rate matrix is calculated with the corresponding temperature ``T(t)``.
    From the last segment start time ``segment_start_times[-1]`` onward, the bath temperature is set to the last
    given temperature ``segment_temperatures[-1]`` and stays at this value until the last time ``t`` in ``t_range``.
Parameters
----------
energies : (N,) array or sequence of float
        Energies of the states of the Arrhenius system, ordered in ascending order.
barriers : (N, N) array
Energy barriers between states. Must be given as matrix.
segment_temperatures : (K,) array
Temperature sequence where each temperature corresponds to each segment.
segment_start_times : (K,) array
Start time sequence where each time corresponds to each segment.
t_range : (M,) array
Time sequence.
Returns
-------
rate_matrix_time : (N, N, M)
Rate matrices stacked in the depth dimension (axis=2).
Raises
    ------
ValueError
If the first segment start time ``segment_start_times[0]`` is not equal to ``t_range[0]``.
"""
if segment_start_times[0] != t_range[0]:
raise ValueError('The first segment start time `segment_start_times[0]` must be equal to `t_range[0]`.')
temperature_array = temperature_array_from_segments(segment_temperatures, segment_start_times, t_range)
return rate_matrix_arrhenius(energies, barriers, temperature_array)
|
78245c7452a41af91be5f57f0acc72cc67c2e2e0
| 3,646,707
|
def div_tensor(tensor, coords=(x, y, z), h_vec=(1, 1, 1)):
"""
Divergence of a (second order) tensor
Parameters
----------
tensor : Matrix (3, 3)
        Tensor function to compute the divergence from.
    coords : Tuple (3), optional
        Coordinates for the new reference system. This is an optional
        parameter; it takes (x, y, z) as default.
h_vec : Tuple (3), optional
Scale coefficients for the new coordinate system. It takes
(1, 1, 1), as default.
Returns
-------
divergence: Matrix
Divergence of tensor.
References
----------
.. [RICHARDS] Rowland Richards. Principles of Solids Mechanics.
CRC Press, 2011.
"""
h1, h2, h3 = h_vec
u1, u2, u3 = coords
div1 = diff(h2*h3*tensor[0, 0], u1) + diff(h1*h3*tensor[0, 1], u2) \
+ diff(h1*h2*tensor[0, 2], u3) + h3*tensor[0, 1]*diff(h1, u2) \
+ h2*tensor[0, 2]*diff(h1, u3) - h3*tensor[1, 1]*diff(h2, u1) \
- h2*tensor[2, 2]*diff(h3, u1)
div2 = diff(h2*h3*tensor[1, 0], u1) + diff(h1*h3*tensor[1, 1], u2) \
+ diff(h1*h2*tensor[1, 2], u3) + h1*tensor[1, 2]*diff(h2, u3) \
+ h3*tensor[1, 0]*diff(h2, u1) - h1*tensor[2, 2]*diff(h3, u2) \
- h3*tensor[2, 2]*diff(h1, u2)
div3 = diff(h2*h3*tensor[2, 0], u1) + diff(h1*h3*tensor[2, 1], u2) \
+ diff(h1*h2*tensor[2, 2], u3) + h2*tensor[2, 0]*diff(h1, u1) \
+ h1*tensor[2, 1]*diff(h1, u2) - h1*tensor[1, 1]*diff(h2, u3) \
+ h2*tensor[2, 2]*diff(h1, u3)
return Matrix([div1, div2, div3])/(h1*h2*h3)
|
8159dfc8b330f9184c336ba4cecabe2fdf0d7d55
| 3,646,708
|
import ast
def convert_path_to_repr_exp(path, with_end=False):
"""
Generate a representative expression for the given path
"""
exp = ""
#print("Path: {}".format(path))
for i in range(len(path)):
        if not with_end and (i == 0 or i == len(path) - 1):
continue
nd_idx = path[i]
if nd_idx == start_state:
exp += "BOS"
continue
if nd_idx == end_state:
exp += "EOS"
continue
node_content = idx_to_node[nd_idx]
#print("Node content: {}".format(node_content))
node_dic = ast.literal_eval(str(node_content))
text = ""
for key, value in node_dic.items():
text = value[1]
break
exp += ' ' + text
return exp
|
68dd8ea13ecdef6f2c3947a1e9341ff5e10ccf78
| 3,646,709
|
def cluster_create(context, values):
"""Create a cluster from the values dictionary."""
return IMPL.cluster_create(context, values)
|
593577a3d912a6a24e8f6d3b66d66e1d8f39a681
| 3,646,710
|
def compute_heading_error(est, gt):
"""
Args:
est: the estimated heading as sin, cos values
gt: the ground truth heading as sin, cos values
Returns:
MSE error and angle difference from dot product
"""
mse_error = np.mean((est-gt)**2)
dot_prod = np.sum(est * gt, axis=1)
angle = np.arccos(np.clip(dot_prod, a_min=-1, a_max=1))
return mse_error, angle
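# Added sanity check (assumes numpy): identical headings give zero angular error,
# orthogonal headings give pi/2.
import numpy as np
est = np.array([[0.0, 1.0], [1.0, 0.0]])  # headings 0 deg and 90 deg as (sin, cos)
gt = np.array([[0.0, 1.0], [0.0, 1.0]])   # ground truth: 0 deg for both
mse, angle = compute_heading_error(est, gt)
print(mse, angle)  # 0.5, [0.0, pi/2]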
|
b015a5b904994372c6ca207388ee3db0ef477f0a
| 3,646,711
|
def _get_count_bid(soup: bs4.BeautifulSoup) -> int:
""" Return bidding count from `soup`.
Parameters
----------
soup : bs4.BeautifulSoup
Soup of a Yahoo Auction page.
Returns
-------
int
Count of total bidding.
"""
tags = soup.find_all('dt', text='入札件数')
if len(tags) > 0:
tag = tags[0]
if isinstance(tag, bs4.element.Tag):
tag = tag.find_next_sibling('dd', {'class': 'Count__number'})
return int(tag.text[:-4])
return 0
|
066b1dcb519db10276dfce35ba04b04f5efdbe66
| 3,646,712
|
def _is_class(s):
"""Imports from a class/object like import DefaultJsonProtocol._"""
return s.startswith('import ') and len(s) > 7 and s[7].isupper()
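# Added examples (not in the original): the character right after 'import ' decides.
assert _is_class('import DefaultJsonProtocol._')
assert not _is_class('import scala.collection.mutable')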
|
deee946066b5b5fc548275dd2cce7ebc7023626d
| 3,646,713
|
def evaluate(vsm, wordsim_dataset_path):
"""Extract Correlation, P-Value for specified vector space mapper."""
return evaluation.extract_correlation_coefficient(
score_data_path=wordsim_dataset_path, vsm=vsm
)
|
2e12b16eee43aef50b5a6de7d0d9fc5b9c806536
| 3,646,714
|
def longest_substring_using_lists(s: str) -> int:
"""
find the longest substring without repeating characters
644 ms 14.3 MB
>>> longest_substring_using_lists("abac")
3
>>> longest_substring_using_lists("abcabcbb")
3
>>> longest_substring_using_lists("bbbbb")
1
>>> longest_substring_using_lists("pwwkew")
3
"""
words = list()
longest = 0
for char in s:
# for each character
removals = []
for word_idx in range(len(words)):
# check all found words for the char
word = words[word_idx]
if char in word:
# if it exists then set its length to longest if it is the longest
longest = max(longest, len(word))
removals.append(word)
else:
# else add char to word
words[word_idx] += char
for remove in removals:
words.remove(remove)
# add char into words
words.append(char)
    return max([longest] + [len(word) for word in words])  # also handles an empty input string
|
4292af29c59ea6210cde28745f91f1e9573b7104
| 3,646,715
|
def getuserobj(user_id=None):
    """
    Dedicated interface function used at login to check whether a user exists
    :param user_id: user id (username)
    :return: the user object if it exists,
             otherwise None
    """
    dbobj = connectMysql.connectMysql()
    if not user_id:
        dbobj.close_db()
        return None
    else:
        userdata = dbobj.select_db(sql="select * from secret where ID = %s " % user_id)
        if not userdata:
            # print("No data found for ID = %s and password = %s" % (user_id, password))
            dbobj.close_db()
            return None
        else:
            dbobj.close_db()
            return userdata[0]
|
78f5cd9edd72b1ee838fb4b7cde73b3c960be0df
| 3,646,716
|
def _parse_track_df(df: pd.DataFrame, track_id: int, track_name: str, track_comment: str,
data_year: int) -> dict:
"""
parses track data
:param df: data representing a track
:param track_id: track id
:param track_name: track name
:param track_comment: track comment
:param data_year: year to which the data is relevant
:return: parsed data
"""
must = from_list = choice = corner_stones = complementary = minor = additional_hug = 0
point_columns = [i for i, c in enumerate(df.columns) if 'כ נקודות' in c]
for i, r in df.iterrows():
category = r[0]
if 'סה\"כ' in category:
continue
raw_points = [r[i] for i in point_columns]
for raw_point in raw_points:
if not raw_point or pd.isnull(raw_point): # no need to take Nan or 0 value
continue
try:
points = float(raw_point)
except ValueError:
match = RE_RANGE.match(raw_point) or RE_MIN.match(raw_point)
if match:
points = float(match[1] or match[2])
else:
continue
if category in (MUST, MUST_IN_HUG, MUST_PROGRAMMING, MUST_SAFETY_LIBRARY) \
or MUST in category:
must += points
elif category in CHOICE_FROM_LIST or 'במסגרת האשכול' in category:
from_list += points
elif category == CHOICE_IN_HUG:
choice += points
elif CORNER_STONES in category:
corner_stones += points
elif category == COMPLEMENTARY:
complementary += points
elif category == MINOR:
minor += points
elif category == ADDITIONAL_HUG:
additional_hug += points
else:
# print(f'Could not identify {category}={raw_point}, defaulting to MUST')
must += points
return {'track_number': track_id,
'data_year': data_year,
'name': track_name,
'points_must': must,
'points_from_list': from_list,
'points_choice': choice,
'points_complementary': complementary,
'points_corner_stones': corner_stones,
'points_minor': minor,
'points_additional_hug': additional_hug,
'comment': track_comment or ''}
|
a6ccd068829ebd0355d4d13ee327255c09615a16
| 3,646,717
|
from typing import Tuple
from typing import Mapping
def parse_tileset(
tileset: TileSet
) -> Tuple[Mapping[Axes, int], TileCollectionData]:
"""
Parse a :py:class:`slicedimage.TileSet` for formatting into an
:py:class:`starfish.imagestack.ImageStack`.
    Parameters
    ----------
    tileset : TileSet
        The tileset to parse.
    Returns
    -------
    Tuple[Mapping[Axes, int], TileCollectionData] :
A tuple consisting of the following:
1. The (y, x) size of each tile.
2. A :py:class:`starfish.imagestack.tileset.TileSetData` that can be queried to obtain
the image data and extras metadata of each tile, as well as the extras metadata of
the entire :py:class:`slicedimage.TileSet`.
"""
tile_data = TileSetData(tileset)
tile_shape = tileset.default_tile_shape
# if we don't have the tile shape, then we peek at the first tile and get its shape.
if tile_shape is None:
tile_key = next(iter(tile_data.keys()))
tile = tile_data.get_tile_by_key(tile_key)
tile_shape = tile.tile_shape
return (
tile_shape,
tile_data,
)
|
d75b121e91d47424704de671c716d1fbf6b02e86
| 3,646,718
|
def pad_sents(sents, pad_token):
""" Pad list of sentences(SMILES) according to the longest sentence in the batch.
@param sents (list[list[str]]): list of SMILES, where each sentence
is represented as a list of tokens
@param pad_token (str): padding token
@returns sents_padded (list[list[str]]): list of SMILES where SMILES shorter
than the max length SMILES are padded out with the pad_token, such that
each SMILES in the batch now has equal length.
"""
    max_length = max(len(sentence) for sentence in sents)
    sents_padded = [sentence + (max_length - len(sentence)) * [pad_token] for sentence in sents]
return sents_padded
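# Added example: token lists padded to the longest length in the batch.
assert pad_sents([['C', 'C', 'O'], ['N']], '<pad>') == \
    [['C', 'C', 'O'], ['N', '<pad>', '<pad>']]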
|
8f0eabfaaa18eafa84366a2f20ed2ddd633dacc6
| 3,646,719
|
def bicubic_interpolation_filter(sr):
"""Creates a bicubic interpolation filter."""
return _interpolation_filter(sr, cv2.INTER_CUBIC)
|
772ee384b90ae6b1e9fe875374441b3d59f86326
| 3,646,720
|
def is_receive_waiting():
"""Check to see if a payload is waiting in the receive buffer"""
#extern RADIO_RESULT radio_is_receive_waiting(void);
res = radio_is_receive_waiting_fn()
# this is RADIO_RESULT_OK_TRUE or RADIO_RESULT_OK_FALSE
# so it is safe to evaluate it as a boolean number.
return (res != 0)
|
8b2a8d1a003f89c3a9b8df7db0729e15a96fdfcc
| 3,646,721
|
import typing
def residual_block(
x,
filters: int,
weight_decay: float,
*,
strides: typing.Union[int, typing.Tuple[int, int]],
dilation: typing.Union[int, typing.Tuple[int, int]],
groups: int,
base_width: int,
downsample,
use_basic_block: bool,
use_cbam: bool,
cbam_channel_reduction: int,
activation: str,
pre_activation: bool,
small_input: bool,
name: str,
):
""" Residual block.
Design follows [2] where Strides=2 in the 3x3 convolution instead of the first 1x1
    convolution for the bottleneck block. This increases Top-1 accuracy by ~0.5%, with a slight
    performance drawback of ~5% images/sec. The last BN in each residual branch is
    zero-initialized following [3] so that the residual branch starts with zeros and
    each residual block behaves like an identity. This improves the model by 0.2~0.3%.
- Attention Layers
- CBAM: Convolutional Block Attention Module
[1] Deep Residual Learning for Image Recognition
https://arxiv.org/abs/1512.03385
[2] resnet_50_v1_5_for_pytorch
https://ngc.nvidia.com/catalog/model-scripts/nvidia
[3] Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour
https://arxiv.org/abs/1706.02677
[4] Identity Mappings in Deep Residual Networks
https://arxiv.org/abs/1603.05027
"""
    block_fn = basic if use_basic_block else bottleneck  # direct reference instead of eval()
    x = block_fn(
x,
filters,
weight_decay,
strides=strides,
dilation=dilation,
groups=groups,
base_width=base_width,
downsample=downsample,
use_cbam=use_cbam,
cbam_channel_reduction=cbam_channel_reduction,
activation=activation,
pre_activation=pre_activation,
small_input=small_input,
name=name,
)
return x
|
f2021a89e2d737e73bfef3fb7dc127c3bbb5d0b7
| 3,646,722
|
def vacancy_based_on_freq(service,duration,frequency,earliest,latest,local_timezone):
"""
    Check for vacant timeslots of the requested duration, up to `frequency` days per week.
service: get authentication from Google
duration: the length of the new event (int)
frequency: number of days in a week (int)
earliest: earliest time for timeframe (int)
latest: latest time for timeframe (int)
local_timezone: assigned timezone
"""
    result = {}
    week = 7
    for i in range(week):
        slot = check_vacancy(service, duration, i + 1, earliest, latest, local_timezone)
        if slot is None:
            print(f'No slots left on this date. Still {frequency} spots left in the week to fill.')
        else:
            result[i + 1] = slot
            frequency -= 1
            print(f'Yes! There is a timeslot! Now {frequency} spots left in the week.')
            if frequency == 0:
                break
    return result
|
ba3d7b688170a7e03d849070eaae69ed718257d6
| 3,646,723
|
import os
import importlib
def load_extensions():
"""
NOTE: This code is a copy of the code in econ_platform_core.extensions.__init__.py.
I will need to figure out how to make this function not use the current directory.
TODO: Merge this function with the one in econ_platform_core.
Imports all *.py files in this directory (in alphabetical order).
    Since the order of import will eventually matter, something will need to be added to force an order of import operations.
    For now, not an issue (can just use the alphabetical order rule to fix problems).
    All errors are caught and largely ignored (other than listing the module that failed, and a text dump on the
    console).
Returns [loaded_extensions, failed_extensions]
The operations on import of an extension:
(1) The import itself. If you wish, you can just put a script that is executed.
(2) If the module has a variable (hopefully a string) with the name 'extension_name', that is used as the extension
name for display, otherwise it is the name of the text file.
(3) If the module has a main() function, it is called.
Since logging is not yet initialised, things are dumped to console rather than logged. (If you really need logging
for debugging purposes, you could turn on logging in the extension.)
:return: list
"""
# There might be some iteration tools in importlib, but no time to read documentation...
this_dir = os.path.dirname(__file__)
flist = os.listdir(this_dir)
# Do alphabetical order
flist.sort()
exclusion_list = ['__init__']
loaded_extensions = []
failed_extensions = []
decorated_fails = []
use_monkey_example = econ_platform_core.PlatformConfiguration['Options'].getboolean('UseMonkeyPatchExample')
if not use_monkey_example:
exclusion_list.append('monkey_patch_example')
for fname in flist:
fname = fname.lower()
if not fname.endswith('.py'):
continue
fname = fname[:-3]
if fname in exclusion_list:
continue
# Import it!
try:
mod = importlib.import_module('econ_platform.extensions.' + fname)
if hasattr(mod, 'extension_name'):
fname = str(mod.extension_name)
# Try running main()
if hasattr(mod, 'main'):
mod.main()
print('Extension {0} loaded.'.format(fname))
loaded_extensions.append(fname)
except Exception as ex:
print('Failure loading extension:', fname)
print(type(ex), str(ex))
failed_extensions.append(fname)
decorated_fails.append((fname, str(ex)))
return (loaded_extensions, failed_extensions, decorated_fails)
|
89c490d973d06621ac0c371a4204e4352462185b
| 3,646,724
|
def byte_list_to_nbit_le_list(data, bitwidth, pad=0x00):
"""! @brief Convert a list of bytes to a list of n-bit integers (little endian)
If the length of the data list is not a multiple of `bitwidth` // 8, then the pad value is used
for the additional required bytes.
@param data List of bytes.
@param bitwidth Width in bits of the resulting values.
@param pad Optional value used to pad input data if not aligned to the bitwidth.
@result List of integer values that are `bitwidth` bits wide.
"""
bytewidth = bitwidth // 8
datalen = len(data) // bytewidth * bytewidth
res = [sum((data[offset + i] << (i * 8)) for i in range(bytewidth))
for offset in range(0, datalen, bytewidth)
]
remainder = len(data) % bytewidth
if remainder != 0:
pad_count = bytewidth - remainder
padded_data = list(data[-remainder:]) + [pad] * pad_count
res.append(sum((padded_data[i] << (i * 8)) for i in range(bytewidth)))
return res
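# Added example: five bytes packed into little-endian 32-bit words; the short
# final word is padded with 0xFF.
assert byte_list_to_nbit_le_list([0x01, 0x02, 0x03, 0x04, 0x05], 32, pad=0xFF) == \
    [0x04030201, 0xFFFFFF05]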
|
b92bbc28cc2ffd59ae9ca2e459842d7f4b284d18
| 3,646,725
|
def admin_not_need_apply_check(func):
"""
    The admin user is exempt from the permission-application check
"""
@wraps(func)
def wrapper(view, request, *args, **kwargs):
if request.user.username == ADMIN_USER:
raise error_codes.INVALID_ARGS.format(_("用户admin默认拥有任意权限, 无需申请"))
return func(view, request, *args, **kwargs)
return wrapper
|
9592d3a4691761be1f58c8a404d7cbef9bd01116
| 3,646,726
|
def parse_headers(header_list):
"""
    Convert headers from our serialized dict (mapping each key to a list of
    values) to an HTTPMessage
"""
header_string = b""
for key, values in header_list.items():
for v in values:
header_string += \
key.encode('utf-8') + b":" + v.encode('utf-8') + b"\r\n"
return compat.get_httpmessage(header_string)
|
8a387a7a60044115c61838f1da853e4608e3840d
| 3,646,727
|
def _rrv_add_ ( s , o ) :
"""Addition of RooRealVar and ``number''
>>> var = ...
>>> num = ...
>>> res = var + num
"""
if not isinstance ( o , val_types ) : return NotImplemented
if isinstance ( o , _RRV_ ) and not o.isConstant() : o = o.ve ()
elif hasattr ( o , 'getVal' ) : o = o.getVal ()
#
v = s.getVal() if s.isConstant() else s.ve()
#
return v + o
|
e3e41fe3ae53379f0b49a4a2aa14e3a401bae6b3
| 3,646,728
|
from haversine import haversine #import haversine function from library
def stations_by_distance(stations, p):
"""This module sorts stations by distance and returns a
list of (station, town, distance) tupules."""
list_station_dist = [] #initiates list to store stations and distance
#iterate through stations and calculate distamces
for station in stations:
distance = haversine(station.coord, p) #use haversine function to calculate distance between station and p
list_station_dist.append((station.name, station.town, distance)) #add data to list
sorted_list = sorted_by_key(list_station_dist, 2) #use sorting module to sort by distance
return sorted_list
|
4a378090803b061b8ea9b17d6255038235c1b1ca
| 3,646,729
|
import hashlib
def create_SHA_256_hash_of_file(file):
"""
Function that returns the SHA 256 hash of 'file'.\n
Logic taken from https://www.quickprogrammingtips.com/python/how-to-calculate-sha256-hash-of-a-file-in-python.html
"""
sha256_hash = hashlib.sha256()
with open(file, "rb") as f:
# Read and update hash string value in blocks of 4K
for byte_block in iter(lambda: f.read(4096), b""):
sha256_hash.update(byte_block)
# Converting to upper case because that's what is required by the policy
# service. See their code:
# https://dev.azure.com/msasg/Bing_and_IPG/_git/Aether?path=/src/aether/platform/backendV2/BlueBox/PolicyService/Microsoft.MachineLearning.PolicyService/Workers/CatalogValidation.cs
return sha256_hash.hexdigest().upper()
|
14f62a49ea54f5fceb719c4df601fde165f5e55c
| 3,646,730
|
def partition_average(partition):
"""Given a partition, calculates the expected number of words sharing the same hint"""
score = 0
total = 0
for hint in partition:
score += len(partition[hint])**2
total += len(partition[hint])
return score / total
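# Added example: hints covering 3 words and 1 word give (3*3 + 1*1) / 4 = 2.5,
# i.e. a random word is expected to share its hint with 2.5 words (itself included).
assert partition_average({'green': ['a', 'b', 'c'], 'yellow': ['d']}) == 2.5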
|
944f514e925a86f3be431bd4d56970d92d16f570
| 3,646,731
|
import queue
from threading import Event, Lock  # assumed source of the Lock()/Event() used below
def set_params(config):
"""Configure parameters based on loaded configuration"""
params = {
'path': None,
'minio': None,
'minio_access_key': None,
'minio_secret_key': None,
'minio_secure': True,
'minio_ca_certs': None,
'minio_bucket': 'catalogue',
'minio_path': '',
'url': None,
'client': None,
'instance': None,
'timeout': DEFAULT_TIMEOUT,
'verify': False,
'cert': None,
'thread_cnt': DEFAULT_THREAD_COUNT,
'wsdl_replaces': DEFAULT_WSDL_REPLACES,
'excluded_member_codes': [],
'excluded_subsystem_codes': [],
'filtered_hours': 24,
'filtered_days': 30,
'filtered_months': 12,
'cleanup_interval': 7,
'days_to_keep': 30,
'work_queue': queue.Queue(),
'results': {},
'results_lock': Lock(),
'shutdown': Event()
}
if 'output_path' in config:
params['path'] = config['output_path']
LOGGER.info('Configuring "path": %s', params['path'])
if 'minio_url' in config:
params['minio'] = config['minio_url']
LOGGER.info('Configuring "minio_url": %s', params['minio'])
if 'minio_access_key' in config:
params['minio_access_key'] = config['minio_access_key']
LOGGER.info('Configuring "minio_access_key": %s', params['minio_access_key'])
if 'minio_secret_key' in config:
params['minio_secret_key'] = config['minio_secret_key']
LOGGER.info('Configuring "minio_secret_key": <password hidden>')
if 'minio_secure' in config:
params['minio_secure'] = config['minio_secure']
LOGGER.info('Configuring "minio_secure": %s', params['minio_secure'])
if 'minio_ca_certs' in config:
params['minio_ca_certs'] = config['minio_ca_certs']
LOGGER.info('Configuring "minio_ca_certs": %s', params['minio_ca_certs'])
if 'minio_bucket' in config:
params['minio_bucket'] = config['minio_bucket']
LOGGER.info('Configuring "minio_bucket": %s', params['minio_bucket'])
if 'minio_path' in config:
        params['minio_path'] = config['minio_path'].strip('/')
if params['minio_path']:
params['minio_path'] += '/'
LOGGER.info('Configuring "minio_path": %s', params['minio_path'])
if params['path'] is None and params['minio'] is None:
LOGGER.error('Configuration error: No output path or MinIO URL are provided')
return None
if 'server_url' in config:
params['url'] = config['server_url']
LOGGER.info('Configuring "url": %s', params['url'])
else:
LOGGER.error('Configuration error: Local Security Server URL is not provided')
return None
if 'client' in config and len(config['client']) in (3, 4):
params['client'] = config['client']
LOGGER.info('Configuring "client": %s', params['client'])
else:
LOGGER.error(
'Configuration error: Client identifier is incorrect. Expecting list of identifiers. '
'Example: ["INST", "CLASS", "MEMBER_CODE", "MEMBER_CLASS"])')
return None
if 'instance' in config and config['instance']:
params['instance'] = config['instance']
LOGGER.info('Configuring "instance": %s', params['instance'])
if 'timeout' in config and config['timeout'] > 0.0:
params['timeout'] = config['timeout']
LOGGER.info('Configuring "timeout": %s', params['timeout'])
if 'server_cert' in config and config['server_cert']:
params['verify'] = config['server_cert']
LOGGER.info('Configuring "verify": %s', params['verify'])
if 'client_cert' in config and 'client_key' in config \
and config['client_cert'] and config['client_key']:
params['cert'] = (config['client_cert'], config['client_key'])
LOGGER.info('Configuring "cert": %s', params['cert'])
if 'thread_count' in config and config['thread_count'] > 0:
params['thread_cnt'] = config['thread_count']
LOGGER.info('Configuring "thread_cnt": %s', params['thread_cnt'])
if 'wsdl_replaces' in config:
params['wsdl_replaces'] = config['wsdl_replaces']
LOGGER.info('Configuring "wsdl_replaces": %s', params['wsdl_replaces'])
if 'excluded_member_codes' in config:
params['excluded_member_codes'] = config['excluded_member_codes']
LOGGER.info('Configuring "excluded_member_codes": %s', params['excluded_member_codes'])
if 'excluded_subsystem_codes' in config:
params['excluded_subsystem_codes'] = config['excluded_subsystem_codes']
LOGGER.info(
'Configuring "excluded_subsystem_codes": %s', params['excluded_subsystem_codes'])
if 'filtered_hours' in config and config['filtered_hours'] > 0:
params['filtered_hours'] = config['filtered_hours']
LOGGER.info('Configuring "filtered_hours": %s', params['filtered_hours'])
if 'filtered_days' in config and config['filtered_days'] > 0:
params['filtered_days'] = config['filtered_days']
LOGGER.info('Configuring "filtered_days": %s', params['filtered_days'])
if 'filtered_months' in config and config['filtered_months'] > 0:
params['filtered_months'] = config['filtered_months']
LOGGER.info('Configuring "filtered_months": %s', params['filtered_months'])
if 'cleanup_interval' in config and config['cleanup_interval'] > 0:
params['cleanup_interval'] = config['cleanup_interval']
LOGGER.info('Configuring "cleanup_interval": %s', params['cleanup_interval'])
if 'days_to_keep' in config and config['days_to_keep'] > 0:
params['days_to_keep'] = config['days_to_keep']
LOGGER.info('Configuring "days_to_keep": %s', params['days_to_keep'])
if params['path'] is not None and params['minio'] is not None:
LOGGER.warning('Saving to both local and MinIO storage is not supported')
if params['minio']:
LOGGER.info('Using MinIO storage')
else:
LOGGER.info('Using local storage')
LOGGER.info('Configuration done')
return params
|
b1cc87d88b656cb6d57dcb0579276de8f0d744e8
| 3,646,732
|
def post_stop_watch():
"""
    This method stops the watcher (turns its status off) and returns the response -> "watching": false
"""
url = common.combine_url(
config.INGESTION_AGENT_URL,
config.INGESTION_WATCHER_STATUS,
config.INGESTION_STOP_WATCHER,
)
resp = base_requests.send_post_request(url)
return resp
|
2b7634c78bf46c6365aac89f4be6908d8baf1bcf
| 3,646,733
|
def combine_grad_fields(field1, field2):
"""
    Combines two gradient fields by summing the gradients in every point.
The absolute values of each pixel are not interesting.
Inputs:
- field1: np.array(N, M) of Pixels.
- field2: np.array(N, M) of Pixels.
Output:
- out_field: np.array(N, M) of Pixels.
"""
assert field1.shape[0] == field2.shape[0], "field1.shape[0] != field2.shape[0]"
assert field1.shape[1] == field2.shape[1], "field1.shape[1] != field2.shape[1]"
    out_field = np.ndarray(field1.shape, dtype=object)  # np.object is removed in NumPy >= 1.24
N, M = field1.shape
for i in range(N):
for j in range(M):
grad = field1[i, j].grad + field2[i, j].grad
out_field[i, j] = Pixel(i, j, 0, grad)
out_field[i, j].normalize_grad()
return out_field
|
7cbe02280c33d9ed077a5b39f3df347c08c11417
| 3,646,734
|
import time
from multiprocessing import Process
def start_queue_manager(hostname, portnr, auth_code, logger):
"""
Starts the queue manager process.
"""
p = Process(target=_queue_manager_target, args=(hostname, portnr, auth_code, logger))
p.start()
for i in range(10):
time.sleep(2)
if get_event_queue(hostname, portnr, auth_code) is not None:
break
else:
logger.debug(f"Queue ready {i}")
return p
|
428201417b76b896a3cdd6efe40b69a3cf297966
| 3,646,735
|
def edit_module_form(request, module_id):
"""
Only the instructor who is the creator of the course to which this module belongs can access this.
"""
course = Module.objects.get(moduleID=module_id).getCourse()
if request.user.role != 1 or (course.instructorID.userID != request.user.userID):
context={
'message': "You do not have access to this page."
}
return render(request, 'ICE/message.html', context)
instructor_id = request.user.userID
if request.method == 'POST':
module=Module.objects.get(moduleID=module_id)
ordNum = 0
for key, value in request.POST.items():
if key=='orderNumber':
ordNum = value
course = module.getCourse()
modules = Module.objects.filter(courseID=course.courseID)
maxOrd = 0
sameOrd = 0
for m in modules:
if m.orderNumber > maxOrd:
maxOrd = m.orderNumber
if int(maxOrd) < int(ordNum):
for m in modules:
if m.orderNumber > module.orderNumber:
mod = Module.objects.get(moduleID = m.moduleID)
mod.orderNumber -= 1
mod.save()
module.orderNumber=course.numOfModules
module.save()
elif int(ordNum) == 0:
for m in modules:
if m.orderNumber < module.orderNumber:
mod = Module.objects.get(moduleID = m.moduleID)
mod.orderNumber += 1
mod.save()
module.orderNumber = 1
module.save()
else:
for m in modules:
if int(m.orderNumber) == int(ordNum):
sameOrd = m.orderNumber
if int(sameOrd) != 0 and int(sameOrd) > int(module.orderNumber):
for m in modules:
if int(m.orderNumber) <= int(sameOrd) and int(m.orderNumber) > int(module.orderNumber):
mod = Module.objects.get(moduleID=m.moduleID)
mod.orderNumber = mod.orderNumber - 1
mod.save()
module.orderNumber = ordNum
module.save()
elif int(sameOrd) != 0 and int(sameOrd) < int(module.orderNumber):
for m in modules:
if int(m.orderNumber) >= int(sameOrd) and int(m.orderNumber) < int(module.orderNumber):
mod = Module.objects.get(moduleID=m.moduleID)
mod.orderNumber = mod.orderNumber + 1
mod.save()
module.orderNumber = ordNum
module.save()
return redirect('../../instructorCourse/courseID='+str(course.courseID)+'&moduleID=1/')
form = EditModuleForm()
return render(request, 'edit_module.html', {'moduleform': form})
|
ba1dc405c9249fb6227d0ed0d1cd0fc5b80caa78
| 3,646,736
|
from sklearn.metrics import r2_score
def r2(y_true, y_pred):
"""
:math:`R^2` (coefficient of determination) regression score function.
Best possible score is 1.0, lower values are worse.
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: R2
"""
return r2_score(y_true, y_pred)
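# Added example (assumes numpy): perfect predictions score 1.0.
import numpy as np
assert r2(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 3.0])) == 1.0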
|
3962c83a022cbab416a2914c0be749cf5b66d51e
| 3,646,737
|
def passstore(config, name):
"""Get password file"""
return config.passroot / name
|
d0ca8c71650bd98dacd7d6ff9ed061aba3f2c43a
| 3,646,738
|
import json
import sys
def _get(url, **fields):
"""Get a GroupMe API url using urllib3.
Can have arbitrary string parameters
which will be part of the GET query string."""
fields["token"] = login.get_login()
response = HTTP.request("GET", GROUPME_API + url, fields=fields)
# 2XX Success
if 200 <= response.status < 300:
if response.status != 200:
warn(
"Unexpected status code %d when querying %s. "
"Please open an issue at %s/issues/new"
% (response.status, response.geturl(), HOMEPAGE)
)
data = response.data.decode("utf-8")
return json.loads(data)["response"]
# 304 Not Modified: we reached the end of the data
if response.status == 304:
return None
# 401 Not Authorized
if response.status == 401:
sys.exit(
"Permission denied. Maybe you typed your password wrong? "
"Try changing it with -D."
)
# Unknown status code
raise RuntimeError(
response,
"Got bad status code %d when querying %s: %s"
% (response.status, response.geturl(), response.data.decode("utf-8")),
)
|
fcaa2153baf457a6370daa8eb3b35444c4677ca3
| 3,646,739
|
def coord_shell_array(nvt_run, func, li_atoms, species_dict, select_dict,
run_start, run_end):
"""
Args:
nvt_run: MDAnalysis Universe
func: One of the neighbor statistical method (num_of_neighbor_one_li,
num_of_neighbor_one_li_simple)
li_atoms: Atom group of the Li atoms.
species_dict (dict): A dict of coordination cutoff distance
of the interested species.
select_dict: A dictionary of species selection.
        run_start (int): Start time step.
        run_end (int): End time step.
    Returns:
        dict: Neighbor-count arrays for all Li atoms, concatenated per species key.
    """
num_array = func(
nvt_run, li_atoms[0], species_dict, select_dict, run_start, run_end
)
for li in tqdm_notebook(li_atoms[1::]):
this_li = func(
nvt_run, li, species_dict, select_dict, run_start, run_end
)
for kw in num_array.keys():
num_array[kw] = np.concatenate((num_array.get(kw),
this_li.get(kw)), axis=0)
return num_array
|
4a0b5f1417cb2184c866cf5ca5c138ed051f44a6
| 3,646,740
|
import os
from datetime import datetime
def get_events(number):
"""Shows basic usage of the Google Calendar API.
Prints the start and name of the next 10 events on the user's calendar.
"""
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
store = file.Storage(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'token.json'))
creds = store.get()
if not creds or creds.invalid:
        flow = client.flow_from_clientsecrets(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'credentials.json'), SCOPES)
creds = tools.run_flow(flow, store)
service = build('calendar', 'v3', http=creds.authorize(Http()))
# Call the Calendar API
    now = datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
events_result = service.events().list(calendarId='primary', timeMin=now,
maxResults=number, singleEvents=True,
orderBy='startTime').execute()
events = events_result.get('items', [])
return events
|
a4a0ca917be06655a1aa77fddbdd8ccaf58424ba
| 3,646,741
|
def plot_transactions_ts(transactional_df, frequency="M", aggregation="n_purchases", reg=False, black_friday_dates=None, plot_black_friday=False, plot_normal_only=False, **kwargs):
"""
    Plots the evolution of purchases over time.
black_friday_dates:: list of datetime.date
"""
    # preventing unwanted modifications to original df
transactional_df = transactional_df.copy().rename(columns={"data": "date", "receita": "revenue", "id_cliente": "customer_id"})
transactional_df = transactional_df[["date", "revenue", "customer_id"] if not 'black_friday' in transactional_df.columns else ["date", "revenue", "customer_id", "black_friday"]]
transactional_df.index = transactional_df['date']
# if black friday dates are explicity given, a new column is added to the dataframe flagging the relevant purchases
if black_friday_dates:
transactional_df["black_friday"] = transactional_df["date"].dt.date.isin(black_friday_dates).astype(np.int8)
# level of aggregation
    assert frequency != 'Y', "invalid frequency - use plot_transactions_y"
grouper = transactional_df.resample(frequency)
# aggregating data
if aggregation == "n_purchases":
df = grouper.size().rename(aggregation).to_frame()
elif aggregation == "revenue":
df = grouper["revenue"].sum().rename(aggregation).to_frame()
elif aggregation == "mean_ticket":
df = grouper["revenue"].mean().rename(aggregation).to_frame()
elif aggregation == "n_customers":
df = grouper["customer_id"].nunique().rename(aggregation).to_frame()
else:
raise ValueError(f"unknown aggregation {aggregation} - available agregations: n_purchases, revenue, mean_ticket, n_customers")
    # for frequency grouping troubleshooting
# if kwargs.get("troubleshoot_frequency", False):
df = df.join(grouper["date"].max().rename("date_max"))
df = df.join(grouper["date"].min().rename("date_min"))
df["n_days"] = (df["date_max"] - df["date_min"]).dt.days + 1
if kwargs.get("full_intervals_only", False):
if frequency == "M":
df = df[df["n_days"] >= kwargs.get("full_interval_m", 28)].copy()
elif frequency == "W":
df = df[df["n_days"] >= kwargs.get("full_interval_m", 7)].copy()
if "black_friday" in transactional_df.columns:
if frequency != 'Y':
df = df.join(grouper["black_friday"].max())
if plot_black_friday or plot_normal_only:
assert "black_friday" in df.columns, "No Black Friday Information Available"
# n_purchases on normal days
df[f"{aggregation}_normal"] = df[aggregation]
df.loc[df["black_friday"] == 1, f"{aggregation}_normal"] = np.nan
df[f"{aggregation}_normal"] = df[f"{aggregation}_normal"].interpolate(method="linear")
        # for plotting reasons, considering "neighbor" rows as black_friday == 1
try:
bf_idx = [(i-1, i, i+1) for i in df.reset_index()[df.reset_index()["black_friday"] == 1].index]
bf_idx = list(set(list(sum(bf_idx, ()))))
df.iloc[bf_idx, (df.columns == "black_friday").argmax()] = 1
except IndexError:
pass
# n_purchases on black friday days
df[f"{aggregation}_bf"] = df[aggregation]
df.loc[df["black_friday"] != 1, f"{aggregation}_bf"] = np.nan
# plot!
ax = kwargs.get("ax")
if not ax:
fig, ax = plt.subplots(figsize=kwargs.get("figsize", (18,4)))
if plot_black_friday:
(df[f'{aggregation}_normal']).rolling(kwargs.get("rolling_window", 1)).mean().plot(ax=ax, label=kwargs.get("label_normal", "Normal"))
(df[f'{aggregation}_bf']).rolling(kwargs.get("rolling_window", 1)).mean().plot(ax=ax, label=kwargs.get("label_bf", "Black Friday"))
# simple linear regression - WARNING: simplistic treatment of timeseries data
if reg:
f = np.poly1d(np.polyfit(range(df.shape[0]), (df[f'{aggregation}_normal']).values, 1))
df["fitted_line"] = f(np.arange(df.shape[0]))
df["fitted_line"].plot(ax=ax, lw=2, ls='--', alpha=.5, label="Eq_normal: " + f"{f}".strip())
elif plot_normal_only:
(df[f'{aggregation}_normal']).rolling(kwargs.get("rolling_window", 1)).mean().plot(ax=ax, label=kwargs.get("label_normal", "Normal"))
# simple linear regression - WARNING: simplistic treatment of timeseries data
if reg:
f = np.poly1d(np.polyfit(range(df.shape[0]), (df[f'{aggregation}_normal']).values, 1))
df["fitted_line"] = f(np.arange(df.shape[0]))
df["fitted_line"].plot(ax=ax, lw=2, ls='--', alpha=.5, label="Eq_normal: " + f"{f}".strip())
else:
(df[aggregation]).rolling(kwargs.get("rolling_window", 1)).mean().plot(ax=ax, label=kwargs.get("label"))
# simple linear regression - WARNING: simplistic treatment of timeseries data
if reg:
f = np.poly1d(np.polyfit(range(df.shape[0]), (df[aggregation]).values, 1))
df["fitted_line"] = f(np.arange(df.shape[0]))
df["fitted_line"].plot(ax=ax, lw=2, ls='--', alpha=.5, label="Eq_normal: " + f"{f}".strip())
if kwargs.get("legend", False):
ax.legend()
ax.set_title(kwargs.get("title", f"{aggregation.upper()} - {frequency}"), size=kwargs.get("title_size", 14))
ax.set_xlabel(kwargs.get("xlabel",""))
return ax
|
ba4764f91a37f4b88f63e52d7aa02660d8296d11
| 3,646,742
|
import time
import jwt  # PyJWT, assumed provider of jwt.encode used below
def generate_token(public_id):
"""
Simple token generator returning encoded JWT
:param public_id: unique string user identification
:return JWT: authorization token for given public_id
"""
# if User.query.filter_by(public_id=public_id).one_or_none() is None:
# return jsonify(404, "ID unverified")
# else:
timestamp = int(time.time())
payload = {
"iss": JWT_ISSUER,
"iat": int(timestamp),
"exp": int(timestamp + JWT_LIFETIME_SECONDS),
"sub": str(public_id),
}
return jwt.encode(payload, JWT_SECRET, algorithm=JWT_ALGORITHM)
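# Hypothetical round trip (commented out: JWT_SECRET / JWT_ALGORITHM are defined elsewhere).
# With PyJWT >= 2, decoding the token recovers the subject:
# token = generate_token("user-123")
# assert jwt.decode(token, JWT_SECRET, algorithms=[JWT_ALGORITHM])["sub"] == "user-123"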
|
88bbaabfeb8ba666daf532cae22f7486349a9a9d
| 3,646,743
|
import logging
def get_user_permission_all_url_list(user_id):
"""
    Get the full list of permission URLs for a user
    :param user_id: user id
:return:
"""
logging.info('get_user_permission_all_url_list')
try:
user_permission_list = db_get_user_permission_all_list(user_id)
permission_url_list = list()
for user_permission in user_permission_list:
permission_url_list.append(user_permission.permission_url)
return permission_url_list
except Exception as e:
logging.debug(e)
        raise  # re-raise without resetting the traceback
|
7ad115dea2d791a46fa10b5ff8553a8485c8167f
| 3,646,744
|
def plot_predicted_data(training_actual_df, predicted_df, date_col, actual_col,
pred_col=PredictionKeys.PREDICTION.value, prediction_percentiles=None,
title="", test_actual_df=None, is_visible=True,
figsize=None, path=None, fontsize=None,
line_plot=False, markersize=70, lw=2, linestyle='-'):
"""
    Plot the training actual response together with the predicted data; if the actual response
    of the prediction period is available, plot it too.
Parameters
----------
training_actual_df : pd.DataFrame
training actual response data frame. two columns required: actual_col and date_col
predicted_df : pd.DataFrame
predicted data response data frame. two columns required: actual_col and pred_col. If
user provide prediction_percentiles, it needs to include them as well in such
`prediction_{x}` where x is the correspondent percentiles
prediction_percentiles : list
list of two elements indicates the lower and upper percentiles
date_col : str
the date column name
    actual_col : str
        the actual response column name
    pred_col : str
        the predicted response column name
title : str
title of the plot
test_actual_df : pd.DataFrame
test actual response dataframe. two columns required: actual_col and date_col
is_visible : boolean
whether we want to show the plot. If called from unittest, is_visible might = False.
figsize : tuple
figsize pass through to `matplotlib.pyplot.figure()`
path : str
path to save the figure
fontsize : int; optional
fontsize of the title
line_plot : bool; default False
if True, make line plot for observations; otherwise, make scatter plot for observations
markersize : int; optional
point marker size
lw : int; optional
out-of-sample prediction line width
linestyle : str
linestyle of prediction plot
Returns
-------
matplotlib axes object
"""
if is_empty_dataframe(training_actual_df) or is_empty_dataframe(predicted_df):
raise ValueError("No prediction data or training response to plot.")
if not is_ordered_datetime(predicted_df[date_col]):
raise ValueError("Prediction df dates is not ordered.")
plot_confid = False
if prediction_percentiles is None:
_pred_percentiles = [5, 95]
else:
_pred_percentiles = prediction_percentiles
if len(_pred_percentiles) != 2:
raise ValueError("prediction_percentiles has to be None or a list with length=2.")
confid_cols = ['prediction_{}'.format(_pred_percentiles[0]),
'prediction_{}'.format(_pred_percentiles[1])]
if set(confid_cols).issubset(predicted_df.columns):
plot_confid = True
if not figsize:
figsize = (16, 8)
if not fontsize:
fontsize = 16
_training_actual_df = training_actual_df.copy()
_predicted_df = predicted_df.copy()
_training_actual_df[date_col] = pd.to_datetime(_training_actual_df[date_col])
_predicted_df[date_col] = pd.to_datetime(_predicted_df[date_col])
fig, ax = plt.subplots(facecolor='w', figsize=figsize)
if line_plot:
ax.plot(_training_actual_df[date_col].values,
_training_actual_df[actual_col].values,
marker=None, color='black', lw=lw, label='train response', linestyle=linestyle)
else:
ax.scatter(_training_actual_df[date_col].values,
_training_actual_df[actual_col].values,
marker='.', color='black', alpha=0.8, s=markersize,
label='train response')
ax.plot(_predicted_df[date_col].values,
_predicted_df[pred_col].values,
marker=None, color='#12939A', lw=lw, label=PredictionKeys.PREDICTION.value, linestyle=linestyle)
# vertical line separate training and prediction
if _training_actual_df[date_col].values[-1] < _predicted_df[date_col].values[-1]:
ax.axvline(x=_training_actual_df[date_col].values[-1], color='#1f77b4', linestyle='--')
if test_actual_df is not None:
test_actual_df = test_actual_df.copy()
test_actual_df[date_col] = pd.to_datetime(test_actual_df[date_col])
if line_plot:
ax.plot(test_actual_df[date_col].values,
test_actual_df[actual_col].values,
                    marker=None, color='#FF8C00', lw=lw, label='test response', linestyle=linestyle)
else:
ax.scatter(test_actual_df[date_col].values,
test_actual_df[actual_col].values,
marker='.', color='#FF8C00', alpha=0.8, s=markersize,
label='test response')
# prediction intervals
if plot_confid:
ax.fill_between(_predicted_df[date_col].values,
_predicted_df[confid_cols[0]],
_predicted_df[confid_cols[1]],
facecolor='#42999E', alpha=0.5)
ax.set_title(title, fontsize=fontsize)
ax.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.5)
ax.legend()
if path:
fig.savefig(path)
if is_visible:
plt.show()
else:
plt.close()
return ax
|
36e0fe88c664df1b93a7d96f217d4d2c94b96ad2
| 3,646,745
|
def CheckTreeIsOpen(input_api, output_api, url, closed, url_text):
"""Similar to the one in presubmit_canned_checks except it shows an helpful
status text instead.
"""
assert(input_api.is_committing)
try:
connection = input_api.urllib2.urlopen(url)
status = connection.read()
connection.close()
if input_api.re.match(closed, status):
long_text = status + '\n' + url
try:
connection = input_api.urllib2.urlopen(url_text)
text = connection.read()
connection.close()
match = input_api.re.search(r"\<div class\=\"Notice\"\>(.*)\<\/div\>",
text)
if match:
long_text = match.group(1).strip()
except IOError:
pass
return [output_api.PresubmitPromptWarning("The tree is closed.",
long_text=long_text)]
except IOError:
pass
return []
|
540dd0ceb9c305907b0439b678a6444ca24c3f76
| 3,646,746
|
def tnr_ecma_st(signal, fs, prominence=True):
"""Computation of tone-to-noise ration according to ECMA-74, annex D.9
for a stationary signal.
The T-TNR value is calculated according to ECMA-TR/108
Parameters
----------
signal :numpy.array
A stationary signal in [Pa].
fs : integer
Sampling frequency.
prominence : boolean
If True, the algorithm only returns the prominent tones, if False it returns all tones detected.
Default is True.
Output
------
t_tnr : array of float
global TNR value, along time if is_stationary = False
tnr : array of float
TNR values for each detected tone
promi : array of bool
prominence criterion for each detected tone
tones_freqs : array of float
frequency of the detected tones
"""
# Compute db spectrum
spectrum_db, freq_axis = spectrum(signal, fs, db=True)
# Compute tnr values
tones_freqs, tnr, prom, t_tnr = _tnr_main_calc(spectrum_db, freq_axis)
prom = prom.astype(bool)
    if not prominence:
return t_tnr, tnr, prom, tones_freqs
else:
return t_tnr, tnr[prom], prom[prom], tones_freqs[prom]
|
4dcb740899de5a9411fddb8ed41b0b1628a438f4
| 3,646,747
|
def pop_legacy_palette(kwds, *color_defaults):
"""
Older animations in BPA and other areas use all sorts of different names for
what we are now representing with palettes.
This function mutates a kwds dictionary to remove these legacy fields and
extract a palette from it, which it returns.
"""
palette = kwds.pop('palette', None)
if palette:
legacy = [k for k, _ in color_defaults if k in kwds]
if legacy:
raise ValueError('Cannot set palette and ' + ', '.join(legacy))
return palette
values = [kwds.pop(k, v) for k, v in color_defaults]
if values and color_defaults[0][0] in ('colors', 'palette'):
values = values[0]
return make.colors(values or None)
|
438ff6bb0f1300c614c724535d2215b2419fbb84
| 3,646,748
|
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T).
Parameters
----------
X : array-like
First matrix
Y : array-like
Second matrix
"""
return np.dot(X.ravel(), Y.ravel())
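# Added sanity check (assumes numpy): same value without forming the full product.
import numpy as np
X = np.arange(6.0).reshape(2, 3)
Y = np.ones((2, 3))
assert trace_dot(X, Y) == np.trace(np.dot(X, Y.T))  # both give 15.0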
|
9c8d601144507cdbfb2d738af61a0016ea808d4a
| 3,646,749
|
import gspread  # needed for gspread.exceptions below
from datetime import datetime
# NOTE: `gc` below is assumed to be an authorized gspread client created elsewhere
# (e.g. gc = gspread.service_account()), not the stdlib gc module.
async def handle_waste_view(ack, body, client, view):
"""Process input from waste form"""
logger.info("Processing waste input...")
logger.info(body)
raw_leaders = view['state']['values']['input_a']['leader_names']['selected_options']
leader_list = [" - " + n['value'] for n in raw_leaders]
regulars = float(view['state']['values']['input_b']['regulars']['value'])
spicy = float(view['state']['values']['input_c']['spicy']['value'])
nuggets = float(view['state']['values']['input_d']['nuggets']['value'])
strips = float(view['state']['values']['input_e']['strips']['value'])
g_filets = float(view['state']['values']['input_f']['grilled1']['value'])
g_nuggets = float(view['state']['values']['input_g']['grilled2']['value'])
# Check that input is numeric when it needs to be
chicken_list = [regulars, spicy, nuggets, strips, g_filets, g_nuggets]
# for item in chicken_list:
# if not isinstance(item, float):
# payload = {
# "response_action": "errors",
# "errors": {
# "block_id": "error_message"
# }
# }
# Store data
total_weight = sum(chicken_list)
sh = gc.open_by_key(creds.waste_id)
goal_sheet = sh.worksheet("Goals")
goal_values = goal_sheet.get_all_values()
goals = {}
for row in goal_values:
if row[0] == "Type":
continue
goals[row[0]] = float(row[1])
user = await client.users_info(user=body['user']['id'])
user_name = user['user']['real_name']
new_line = "\n"
block1 = {
"type": "section",
"text": {"type": "mrkdwn", "text": f"*Submitted by:* {user_name}"}
}
block2 = {
"type": "section",
"text": {"type": "mrkdwn",
"text": (f"*Leaders on:*\n"
f"{new_line.join(leader_list)}\n")
}
}
block3_text = "*Weights:*\n"
if total_weight > 0:
if regulars:
if regulars >= goals['Filets']:
block3_text += f"_Regulars: {regulars} lbs._\n"
else:
block3_text += f"Regulars: {regulars} lbs.\n"
if spicy:
if spicy >= goals['Spicy']:
block3_text += f"_Spicy: {spicy} lbs._\n"
else:
block3_text += f"Spicy: {spicy} lbs.\n"
if nuggets:
if nuggets >= goals['Nuggets']:
block3_text += f"_Nuggets: {nuggets} lbs._\n"
else:
block3_text += f"Nuggets: {nuggets} lbs.\n"
if strips:
if strips >= goals['Strips']:
block3_text += f"_Strips: {strips} lbs._\n"
else:
block3_text += f"Strips: {strips} lbs.\n"
if g_filets:
if g_filets >= goals['Grilled Filets']:
block3_text += f"_Grilled Filets: {g_filets} lbs._\n"
else:
block3_text += f"Grilled Filets: {g_filets} lbs.\n"
if g_nuggets:
if g_nuggets >= goals['Grilled Nuggets']:
block3_text += f"_Grilled Nuggets: {g_nuggets} lbs._\n"
else:
block3_text += f"Grilled Nuggets: {g_nuggets} lbs.\n"
to_post = [str(datetime.now()), regulars, spicy, nuggets, strips, g_filets, g_nuggets]
# Handle breakfast items
if datetime.now().hour < 13:
breakfast = float(view['state']['values']['input_h']['breakfast']['value'])
to_post.append(breakfast)
g_breakfast = float(view['state']['values']['input_i']['grilled3']['value'])
to_post.append(g_breakfast)
if sum([breakfast, g_breakfast]) > 0:
total_weight += sum([breakfast, g_breakfast])
if breakfast:
if breakfast >= goals['Breakfast Filets']:
block3_text += f"_Breakfast Filets: {breakfast} lbs._\n"
else:
block3_text += f"Breakfast Filets: {breakfast} lbs.\n"
if g_breakfast:
if g_breakfast >= goals['Grilled Breakfast']:
block3_text += f"_Grilled Breakfast: {g_breakfast} lbs._\n"
else:
block3_text += f"Grilled Breakfast: {g_breakfast} lbs.\n"
block3 = {
"type": "section",
"text": {"type": "mrkdwn", "text": block3_text}
}
blocks = [block1, block2, block3]
other = view['state']['values']['input_j']['other']['value']
if other:
block4 = {
"type": "section",
"text": {"type": "mrkdwn", "text": f"*Notes:*\n{other}"}
}
blocks.append(block4)
block5 = {
"type": "section",
"text": {"type": "mrkdwn", "text": "Please remember to replace stickers on all waste containers."}
}
blocks.append(block5)
await ack()
# Send data to Google Sheet
try:
sheet = sh.worksheet("Data")
sheet.append_row(to_post, value_input_option='USER_ENTERED')
except gspread.exceptions.GSpreadException as e:
return await client.chat_postMessage(channel=body['user']['id'],
text=e)
except Exception as e:
await client.chat_postMessage(channel=body['user']['id'],
text=f"There was an error while storing the message to the Google Sheet.\n{e}")
await client.chat_postMessage(channel=creds.pj_user_id,
text=f"There was an error while storing the message to the Google Sheet.\n{e}")
return
await client.chat_postMessage(channel=creds.boh_channel,
blocks=blocks,
text="New waste report posted.")
|
101b85b947cc148176f2f4d067cb73c0386b56cd
| 3,646,750
|
import numpy as np
def random_superposition(dim: int) -> np.ndarray:
    """
    Args:
        dim: Dimension of the state vector (e.g. 2**n for an n-qubit state).
    Returns:
        Normalized random complex array of length ``dim``.
    """
    state_vector = np.random.standard_normal(dim).astype(complex)
    # standard_normal here too: np.random.normal(dim) would draw a single sample with mean dim
    state_vector += 1j * np.random.standard_normal(dim)
    state_vector /= np.linalg.norm(state_vector)
    return state_vector
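# Added check: the returned state vector is normalised.
psi = random_superposition(8)
assert np.isclose(np.linalg.norm(psi), 1.0)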
|
f33867247a64c09571fcfc29c10e45e8e9921271
| 3,646,751
|
def predict(dag_model: Dag, test_data: Tensor) -> MultitaskMultivariateNormal:
"""
Can use this little helper function to predict from a Dag without
wrapping it in a DagGPyTorchModel.
"""
dag_model.eval()
with no_grad(), fast_pred_var():
return dag_model(test_data)
|
d4c0e2d48e2edf3f4e2b07b26024d92390efe4dc
| 3,646,752
|
def edit_skill():
"""Edit a skill entry in the skills table for a certain user. """
id = request.form['id']
skill_level = request.form['skill_level']
skills.update({'skill_level': skill_level}, id=id)
return good_json_response('success')
|
9b314aa51f990e1bbbd6bf75874e410e937fa595
| 3,646,753
|
def is_catalogue_link(link):
"""check whether the specified link points to a catalogue"""
return link['type'] == 'application/atom+xml' and 'rel' not in link
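# Added examples: a bare Atom link is a catalogue; a link carrying a 'rel' is not.
assert is_catalogue_link({'type': 'application/atom+xml'})
assert not is_catalogue_link({'type': 'application/atom+xml', 'rel': 'self'})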
|
bc6e2e7f5c34f6ea198036cf1404fef8f7e7b214
| 3,646,754
|
import numpy as np
def morlet_window(width: int, sigma: float) -> np.ndarray:
"""
Unadjusted Morlet window function.
Parameters
----------
width : integer (positive power of 2)
Window width to use - power of two as window of two corresponds to Nyquist rate.
sigma : float
        Corresponds to the frequency of the wavelet.
Returns
-------
output : real ndarray
Normalised Morlet wavelet vector.
Notes
-----
https://en.wikipedia.org/wiki/Morlet_wavelet
"""
# fixed width wavelet translates to a fixed width Fourier transformed wavelet in frequency spectrum
# Definition - https://en.wikipedia.org/wiki/Morlet_wavelet
c_pi = (1 + np.exp(- sigma ** 2) - 2 * np.exp(- 0.75 * sigma ** 2)) ** (-1 / 2)
t = (np.arange(width + 1) - (width / 2)) * (10 / width)
wavelet = c_pi * (np.pi ** (-1 / 4)) * (np.exp(1j * sigma * t) - np.exp(- (1 / 2) * sigma ** 2))
output = np.exp(- (1 / 2) * t ** 2) * wavelet.real
return output
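# Usage sketch: a 256-point Morlet window with sigma = 5; the function
# returns width + 1 samples.
import numpy as np
win = morlet_window(256, 5.0)
assert win.shape == (257,)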
|
2f0d6ff644a078b50bb510835a9bb2d2028ea143
| 3,646,755
|
def tfidfvec():
"""
中文特征值化
:return: None
"""
c1, c2, c3 = cutword()
print(c1, c2, c3)
tf = TfidfVectorizer()
data = tf.fit_transform([c1, c2, c3])
print(tf.get_feature_names())
print(data.toarray())
return None
|
1a0fe0e4a28e6d963f49156476ed720df492718b
| 3,646,756
|
import http
def resolve_guid(guid, suffix=None):
"""Resolve GUID to corresponding URL and return result of appropriate
view function. This effectively yields a redirect without changing the
displayed URL of the page.
:param guid: GUID value (not the object)
:param suffix: String to append to GUID route
:return: Werkzeug response
"""
# Get prefix; handles API routes
prefix = request.path.split(guid)[0].rstrip('/')
# Look up GUID
guid_object = Guid.load(guid)
if guid_object:
# verify that the object is a GuidStoredObject descendant. If a model
# was once a descendant but that relationship has changed, it's
# possible to have referents that are instances of classes that don't
# have a redirect_mode attribute or otherwise don't behave as
# expected.
if not isinstance(guid_object.referent, GuidStoredObject):
sentry.log_message(
'Guid `{}` resolved to non-guid object'.format(guid)
)
raise HTTPError(http.NOT_FOUND)
referent = guid_object.referent
if referent is None:
logger.error('Referent of GUID {0} not found'.format(guid))
raise HTTPError(http.NOT_FOUND)
mode = referent.redirect_mode
if mode is None:
raise HTTPError(http.NOT_FOUND)
url = referent.deep_url if mode == 'proxy' else referent.url
url = _build_guid_url(url, prefix, suffix)
# Always redirect API URLs; URL should identify endpoint being called
if prefix or mode == 'redirect':
if request.query_string:
url += '?' + request.query_string
return redirect(url)
return proxy_url(url)
# GUID not found; try lower-cased and redirect if exists
guid_object_lower = Guid.load(guid.lower())
if guid_object_lower:
return redirect(
_build_guid_url(
guid.lower(), prefix, suffix
)
)
# GUID not found
raise HTTPError(http.NOT_FOUND)
|
85a422f1c5709a44050595be4bf91ce1b937c0ef
| 3,646,757
|
from typing import Any
def _is_array(obj: Any) -> bool:
"""Whether the object is a numpy array."""
return isinstance(obj, np.ndarray)
|
4852e045fcef142c23a9370efbe3b5ffe7a9f8a3
| 3,646,758
|
def has_ao_1e_int_overlap(trexio_file) -> bool:
"""Check that ao_1e_int_overlap variable exists in the TREXIO file.
Parameter is a ~TREXIO File~ object that has been created by a call to ~open~ function.
Returns:
True if the variable exists, False otherwise
Raises:
- Exception from trexio.Error class if TREXIO return code ~rc~ is TREXIO_FAILURE and prints the error message using string_of_error.
- Exception from some other error (e.g. RuntimeError).
"""
    rc = pytr.trexio_has_ao_1e_int_overlap(trexio_file.pytrexio_s)
    if rc == TREXIO_FAILURE:
        raise Error(rc)
    return rc == TREXIO_SUCCESS
|
53b5ec13ff7c32af5692972d586921a4d5bc07ef
| 3,646,759
|
from typing import Sequence
from typing import Set
async def get_non_existent_ids(collection, id_list: Sequence[str]) -> Set[str]:
"""
Return the IDs that are in `id_list`, but don't exist in the specified `collection`.
:param collection: the database collection to check
:param id_list: a list of document IDs to check for existence
    :return: a set of non-existent IDs
"""
existing_group_ids = await collection.distinct("_id", {"_id": {"$in": id_list}})
return set(id_list) - set(existing_group_ids)
|
b13c61f4528c36a9d78a3687ce84c39158399142
| 3,646,760
|
def create_source_fc(header):
"""
Creates :class:`parser.file_configuration_t` instance, configured to
contain path to C++ source file
:param header: path to C++ source file
:type header: str
:rtype: :class:`parser.file_configuration_t`
"""
return file_configuration_t(
data=header,
content_type=file_configuration_t.CONTENT_TYPE.STANDARD_SOURCE_FILE)
|
29cd1112b9f59091b286f5222e62e9bec309bd36
| 3,646,761
|
def StorageFlatten(cache_line_size, create_bound_attribute=False):
"""Flatten the multi-dimensional read/write to 1D.
Parameters
----------
cache_line_size: int
The size of CPU cache line.
create_bound_attribute:
Whether to create bound attributes.
Returns
-------
fpass : tvm.transform.Pass
The result pass
"""
return _ffi_api.StorageFlatten(cache_line_size, create_bound_attribute)
|
cb535450d3a503632f66e015555148d211f3e6f9
| 3,646,762
|
def wrap(node):
"""Stringify the parse tree node and wrap it in parentheses if it might be
ambiguous.
"""
if isinstance(node, (IntNode, CallNode, SymbolNode)):
return str(node)
else:
return "(" + str(node) + ")"
|
9ac5d9a7d5e6d6539231ba6897a44e2787d92809
| 3,646,763
|
def _ParseProjectNameMatch(project_name):
"""Process the passed project name and determine the best representation.
Args:
project_name: a string with the project name matched in a regex
Returns:
A minimal representation of the project name, None if no valid content.
"""
if not project_name:
return None
return project_name.lstrip().rstrip('#: \t\n')
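# Usage sketch (hypothetical inputs):
assert _ParseProjectNameMatch('  chromium: ') == 'chromium'
assert _ParseProjectNameMatch('') is None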
|
cb9f92a26c7157a5125fbdb5dd8badd7ffd23055
| 3,646,764
|
from typing import OrderedDict
def assign_variables(assignment_expressions, df, locals_dict, df_alias=None, trace_rows=None):
"""
Evaluate a set of variable expressions from a spec in the context
of a given data table.
Expressions are evaluated using Python's eval function.
Python expressions have access to variables in locals_d (and df being
accessible as variable df.) They also have access to previously assigned
targets as the assigned target name.
lowercase variables starting with underscore are temp variables (e.g. _local_var)
and not returned except in trace_results
uppercase variables starting with underscore are temp scalar variables (e.g. _LOCAL_SCALAR)
and not returned except in trace_assigned_locals
This is useful for defining general purpose local constants in expression file
Users should take care that expressions (other than temp scalar variables) should result in
a Pandas Series (scalars will be automatically promoted to series.)
Parameters
----------
assignment_expressions : pandas.DataFrame of target assignment expressions
target: target column names
expression: pandas or python expression to evaluate
df : pandas.DataFrame
    locals_dict : Dict
This is a dictionary of local variables that will be the environment
for an evaluation of "python" expression.
trace_rows: series or array of bools to use as mask to select target rows to trace
Returns
-------
variables : pandas.DataFrame
Will have the index of `df` and columns named by target and containing
the result of evaluating expression
    trace_results : pandas.DataFrame or None
        a dataframe containing the eval result values for each assignment expression
    trace_assigned_locals : OrderedDict or None
        values assigned to temp scalar variables, keyed by (uniquified) target name
    """
np_logger = NumpyLogger(logger)
def is_throwaway(target):
return target == '_'
def is_temp_scalar(target):
return target.startswith('_') and target.isupper()
def is_temp(target):
return target.startswith('_')
def to_series(x):
if x is None or np.isscalar(x):
return pd.Series([x] * len(df.index), index=df.index)
return x
assert assignment_expressions.shape[0] > 0
trace_assigned_locals = trace_results = None
if trace_rows is not None:
# convert to numpy array so we can slice ndarrays as well as series
trace_rows = np.asanyarray(trace_rows)
if trace_rows.any():
trace_results = OrderedDict()
trace_assigned_locals = OrderedDict()
# avoid touching caller's passed-in locals_d parameter (they may be looping)
_locals_dict = local_utilities()
if locals_dict is not None:
_locals_dict.update(locals_dict)
if df_alias:
_locals_dict[df_alias] = df
else:
_locals_dict['df'] = df
local_keys = list(_locals_dict.keys())
# build a dataframe of eval results for non-temp targets
# since we allow targets to be recycled, we want to only keep the last usage
variables = OrderedDict()
# need to be able to identify which variables causes an error, which keeps
# this from being expressed more parsimoniously
for e in zip(assignment_expressions.target, assignment_expressions.expression):
target, expression = e
assert isinstance(target, str), \
"expected target '%s' for expression '%s' to be string not %s" % \
(target, expression, type(target))
if target in local_keys:
logger.warning("assign_variables target obscures local_d name '%s'", str(target))
if is_temp_scalar(target) or is_throwaway(target):
try:
x = eval(expression, globals(), _locals_dict)
except Exception as err:
logger.error("assign_variables error: %s: %s", type(err).__name__, str(err))
logger.error("assign_variables expression: %s = %s", str(target), str(expression))
raise err
if not is_throwaway(target):
_locals_dict[target] = x
if trace_assigned_locals is not None:
trace_assigned_locals[uniquify_key(trace_assigned_locals, target)] = x
continue
try:
# FIXME - log any numpy warnings/errors but don't raise
np_logger.target = str(target)
np_logger.expression = str(expression)
saved_handler = np.seterrcall(np_logger)
save_err = np.seterr(all='log')
# FIXME should whitelist globals for security?
globals_dict = {}
expr_values = to_series(eval(expression, globals_dict, _locals_dict))
np.seterr(**save_err)
np.seterrcall(saved_handler)
except Exception as err:
logger.exception(f"assign_variables - {type(err).__name__} ({str(err)}) evaluating: {str(expression)}")
raise err
if not is_temp(target):
variables[target] = expr_values
if trace_results is not None:
trace_results[uniquify_key(trace_results, target)] = expr_values[trace_rows]
# update locals to allows us to ref previously assigned targets
_locals_dict[target] = expr_values
if trace_results is not None:
trace_results = pd.DataFrame.from_dict(trace_results)
trace_results.index = df[trace_rows].index
# add df columns to trace_results
trace_results = pd.concat([df[trace_rows], trace_results], axis=1)
assert variables, "No non-temp variables were assigned."
# we stored result in dict - convert to df
variables = util.df_from_dict(variables, index=df.index)
return variables, trace_results, trace_assigned_locals
|
b8e28bae70eaa56b49537011d3fd3079f640ab76
| 3,646,765
|
import io
import zipfile
import xml.etree.ElementTree as ET
# Override, ChartType and ChartFile are assumed to be module-level names
def getCharts(dmldata: bytearray) -> list:
"""Get DrawingML object from clipboard"""
stream = io.BytesIO(dmldata)
with zipfile.ZipFile(stream, "r") as z:
with z.open("[Content_Types].xml") as f:
tree = ET.fromstring(f.read())
part_names = []
for link in tree.findall(Override):
content_type = link.attrib["ContentType"]
if content_type == ChartType:
part_name = link.attrib["PartName"]
part_names.append(part_name)
charts = []
for part_name in part_names:
with io.TextIOWrapper(z.open(part_name.strip("/"), "r"), encoding='utf-8') as f:
xmltext = f.read()
chartfile = ChartFile(xmltext)
charts.append(chartfile.chart)
return charts
|
402bf7e45be03ccda31563c2fc10afe2d4d09077
| 3,646,766
|
def explore_validation_time_gap_threshold_segments(participant_list, time_gap_list = [100, 200, 300, 400, 500, 1000, 2000], prune_length = None,
auto_partition_low_quality_segments = False):
"""Explores different threshiold values for the invalid time gaps in the Segments for all Participants in the list
"""
seglen = 0
segs = 0
participants = []
for p in participant_list:
print("pid:", p.pid)
if p.require_valid_segments == True:
raise Exception("explore_validation_threshold_segments should be called with a list of Participants with require_valid_segments = False")
tvalidity = []
        for seg in p.segments:
            seglen += seg.completion_time
        # count each participant's segments once, not once per segment
        segs += len(p.segments)
for tresh in time_gap_list: ##time-gap
invc = 0
invsegs=[]
            for seg in p.segments:
                if seg.calc_validity2(tresh) == False:
                    invc += 1
                    invsegs.append(seg)
if len(invsegs)>0:
print("seg:",invsegs)
tvalidity.append((tresh, invc))
participants.append( (p.pid,tvalidity, len(p.segments) ) )
print ( (tvalidity, len(p.segments)) )
print("average seg len",seglen/float(segs))
return participants
|
bd88f292a00986212ae36e383c4bb4e3cd94067c
| 3,646,767
|
def convolve_design(X, hrf, opt=None):
"""convolve each column of a 2d design matrix with hrf
Args:
X ([2D design matrix]): time by cond, or list of onsets
hrf ([1D hrf function]): hrf
opt: if onset case, provides n_times and tr for
interpolation
Returns:
[convdes]: 2D: Samples by cond
"""
# if onset-time case
if type(X) is list:
errmsg = 'n_times needs to be in opt'
np.testing.assert_equal(
'n_times' in opt,
True,
err_msg=errmsg)
n_times = opt['n_times']
tr = opt['tr']
# calc
n_conditions = len(X)
convdes = np.zeros((n_times, n_conditions))
all_times = np.linspace(0, tr*(n_times-1), n_times)
hrf_times = np.linspace(0, tr*(len(hrf)-1), len(hrf))
for q in range(n_conditions):
# onset times for qth condition in run p
otimes = X[q]
            # initialize
yvals = np.zeros((n_times))
# loop over onset times
for r in otimes:
# interpolate to find values at the
# data sampling time points
f = pchip(
r + hrf_times,
hrf,
extrapolate=False)(all_times)
yvals = yvals + np.nan_to_num(f)
# record
convdes[:, q] = yvals
# normal vector or matrix cases
else:
ndims = X.ndim
if ndims == 1:
ntime = X.shape[0]
convdes = np.convolve(X, hrf)
convdes = convdes[range(ntime)]
else:
ntime, ncond = X.shape
convdes = np.asarray(
[np.convolve(X[:, x], hrf, ) for x in range(ncond)]).T
convdes = convdes[range(ntime), :]
return convdes
|
02f2473ff18a78759c87884cd0f7fc94db6e0e2d
| 3,646,768
|
from typing import List
def relax_incr_dimensions(iet, **kwargs):
"""
Recast Iterations over IncrDimensions as ElementalFunctions; insert
ElementalCalls to iterate over the "main" and "remainder" regions induced
by the IncrDimensions.
"""
sregistry = kwargs['sregistry']
efuncs = []
mapper = {}
for tree in retrieve_iteration_tree(iet):
iterations = [i for i in tree if i.dim.is_Incr]
if not iterations:
continue
root = iterations[0]
if root in mapper:
continue
outer, inner = split(iterations, lambda i: not i.dim.parent.is_Incr)
# Compute the iteration ranges
ranges = []
for i in outer:
maxb = i.symbolic_max - (i.symbolic_size % i.dim.step)
ranges.append(((i.symbolic_min, maxb, i.dim.step),
(maxb + 1, i.symbolic_max, i.symbolic_max - maxb)))
# Remove any offsets
# E.g., `x = x_m + 2 to x_M - 2` --> `x = x_m to x_M`
outer = [i._rebuild(limits=(i.dim.root.symbolic_min, i.dim.root.symbolic_max,
i.step))
for i in outer]
# Create the ElementalFunction
name = sregistry.make_name(prefix="bf")
body = compose_nodes(outer)
dynamic_parameters = flatten((i.symbolic_bounds, i.step) for i in outer)
dynamic_parameters.extend([i.step for i in inner if not is_integer(i.step)])
efunc = make_efunc(name, body, dynamic_parameters)
efuncs.append(efunc)
# Create the ElementalCalls
calls = []
for p in product(*ranges):
dynamic_args_mapper = {}
for i, (m, M, b) in zip(outer, p):
dynamic_args_mapper[i.symbolic_min] = m
dynamic_args_mapper[i.symbolic_max] = M
dynamic_args_mapper[i.step] = b
for j in inner:
if j.dim.root is i.dim.root and not is_integer(j.step):
value = j.step if b is i.step else b
dynamic_args_mapper[j.step] = (value,)
calls.append(efunc.make_call(dynamic_args_mapper))
mapper[root] = List(body=calls)
iet = Transformer(mapper).visit(iet)
return iet, {'efuncs': efuncs}
|
0d7af62309d427c6477329ed6b7a5dccab390a51
| 3,646,769
|
def _get_lspci_name(line):
"""Reads and returns a 'name' from a line of `lspci` output."""
hush = line.split('[')
return '['.join(hush[0:-1]).strip()
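# Usage sketch with one hypothetical line of `lspci -nn` output; everything
# before the final '[...]' id tag is returned.
line = '00:02.0 VGA compatible controller [0300]: Intel UHD Graphics [8086:5917]'
print(_get_lspci_name(line))  # '00:02.0 VGA compatible controller [0300]: Intel UHD Graphics'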
|
92910d0f4d9dce1689ed22a963932fb85d8e2677
| 3,646,770
|
def dumps_bytes(obj):
"""
Serialize ``obj`` to JSON formatted ``bytes``.
"""
b = dumps(obj)
    if isinstance(b, str):  # Python 3: encode text to bytes
        b = b.encode("ascii")
return b
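# Usage sketch, assuming `dumps` above is the module's JSON serializer
# (returning either str or bytes):
payload = dumps_bytes({"ok": True})
assert isinstance(payload, bytes)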
|
5ee94b2bd5a8bcd2f8586578e6d86084a030d93a
| 3,646,771
|
def get_child_right_position(position: int) -> int:
"""
heap helper function get the position of the right child of the current node
>>> get_child_right_position(0)
2
"""
return (2 * position) + 2
|
2a5128a89ac35fe846d296d6b92c608e50b80a45
| 3,646,772
|
from typing import List
from typing import Dict
def convert_paragraphs_to_s2orc(paragraphs: List, old_to_new: Dict) -> List[Dict]:
"""
Convert paragraphs into S2ORC format
"""
# TODO: temp code to process body text into S2ORC format. this includes getting rid of sub/superscript spans.
# also combining fig & table spans into ref spans.
# also remapping the reference / bib labels to the new ones defined earlier in this function.
# temporarily, we cant support PMC xml parse bibs, so remove all links to the bibliography (cuz they'll be wrong)
for paragraph_blob in paragraphs:
del paragraph_blob['sup_spans']
del paragraph_blob['sub_spans']
paragraph_blob['ref_spans'] = []
for fig_tab_span in paragraph_blob['fig_spans'] + paragraph_blob['table_spans']:
# replace old ref_id with new ref_id. default to None if null
# optional, just wanted to check if this ever happens
assert fig_tab_span['ref_id']
fig_tab_span['ref_id'] = old_to_new.get(fig_tab_span['ref_id'])
paragraph_blob['ref_spans'].append(fig_tab_span)
del paragraph_blob['fig_spans']
del paragraph_blob['table_spans']
for cite_span in paragraph_blob['cite_spans']:
# replace old cite ids with new cite ids. again default to None if null
# optional, just wanted to check if this ever happens
assert cite_span['ref_id']
cite_span['ref_id'] = old_to_new.get(cite_span['ref_id'])
return paragraphs
|
922f6a238f63039e68b1a14342974d6b3d47ba8f
| 3,646,773
|
def get_feature_set_details(shape_file_path):
""" This function gets the shape type of the shapefile and make a list
of fields to be added to output summary table based on that shape type """
try:
# Checking for geometry type
feat_desc = arcpy.Describe(shape_file_path)
arcpy.AddMessage(("Shapefile is of '{0}' type.")
.format(str(feat_desc.shapeType)))
# According to shape type kame a list of fields to be added to
# summary table
list_of_fields = ["summaryfield", "summaryvalue"]
if feat_desc.shapeType.upper() == "POLYGON":
list_of_fields += ["area_acres", "area_sqkm"]
elif feat_desc.shapeType.upper() == "POLYLINE":
list_of_fields += ["length_Miles", "length_Km"]
elif feat_desc.shapeType.upper() == "POINT":
list_of_fields += ["Count"]
return [feat_desc.shapeType, list_of_fields]
except Exception as error:
arcpy.AddError("Error occurred during execution:" + str(error))
|
9c4eddd9963751d195f4165ecc862d2753bb2067
| 3,646,774
|
def get_label_parts(label):
"""returns the parts of an absolute label as a list"""
return label[2:].replace(":", "/").split("/")
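# Usage sketch with a Bazel-style absolute label:
assert get_label_parts('//foo/bar:baz') == ['foo', 'bar', 'baz']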
|
44998aad262f04fdb4da9e7d96d2a2b3afb27502
| 3,646,775
|
import argparse
def get_args():
"""get command-line arguments"""
parser = argparse.ArgumentParser(
description='Argparse Python script',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-c',
'--cdhit',
help='Output file from CD-HIT (clustered proteins)',
metavar='str',
type=str,
required=True)
parser.add_argument(
'-p',
'--proteins',
help='Proteins FASTA',
metavar='str',
        type=str,
        required=True)
parser.add_argument(
'-o',
'--outfile',
help='Output file',
metavar='str',
type=str,
default='unclustered.fa')
return parser.parse_args()
|
d961b0a81bd0e6db9687927c8b23f8cba5a3cfce
| 3,646,776
|
import pandas
import numpy
def combined_spID(*species_identifiers):
"""Return a single column unique species identifier
Creates a unique species identifier based on one or more columns of a
data frame that represent the unique species ID.
Args:
        species_identifiers: A tuple containing one or more pieces of a unique
            species identifier, or lists of these pieces.
Returns:
A single unique species identifier or a list of single identifiers
"""
# Make standard input data types capable of element wise summation
input_type = type(species_identifiers[0])
assert input_type in [list, tuple, str, pandas.core.series.Series, numpy.ndarray]
if input_type is not str:
species_identifiers = [pandas.Series(identifier) for identifier in species_identifiers]
single_identifier = species_identifiers[0]
if len(species_identifiers) > 1:
for identifier in species_identifiers[1:]:
single_identifier += identifier
if input_type == numpy.ndarray:
single_identifier = numpy.array(single_identifier)
else:
single_identifier = input_type(single_identifier)
return single_identifier
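# Usage sketch: combine genus and species columns into one identifier;
# plain strings are concatenated, Series/arrays are summed element-wise.
assert combined_spID('Quercus_', 'alba') == 'Quercus_alba'
ids = combined_spID(pandas.Series(['Quercus_', 'Acer_']),
                    pandas.Series(['alba', 'rubrum']))  # -> Series of combined IDs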
|
d50abeccc6fb0235fd8a58dfadbd7c6bdb72825d
| 3,646,777
|
import copy
from math import sqrt
def qr(A, prec=1e-10):
"""
computes a faster and economic qr decomposition similar to:
http://www.iaa.ncku.edu.tw/~dychiang/lab/program/mohr3d/source/Jama%5CQRDecomposition.html
"""
m = len(A)
if m <= 0:
return [], A
n = len(A[0])
Rdiag = [0] * n;
QR = copy.deepcopy(A)
for k in range(n):
# Compute 2-norm of k-th column without under/overflow.
nrm = 0.0
for i in range(k, m):
nrm = sqrt(nrm ** 2 + QR[i][k] ** 2)
if abs(nrm) > prec:
# Form k-th Householder vector.
if k < m and QR[k][k] < 0:
nrm = -nrm
for i in range(k, m):
QR[i][k] /= nrm
if k < m:
QR[k][k] += 1.0
# Apply transformation to remaining columns.
for j in range(k + 1, n):
s = 0.0
for i in range(k, m):
s += QR[i][k] * QR[i][j]
if k < m:
s = -s / QR[k][k]
for i in range(k, m):
QR[i][j] += s * QR[i][k]
Rdiag[k] = -nrm;
# compute R
R = [[0] * n for z in range(min(m, n))]
for i in range(m):
for j in range(i, n):
if i < j:
R[i][j] = QR[i][j]
if i == j:
R[i][i] = Rdiag[i]
# compute Q
w = min(m, n)
Q = [[0] * w for i in range(m)]
for k in range(w - 1, -1, -1):
if k < w:
Q[k][k] = 1.0;
for j in range(k, w):
if k < m and abs(QR[k][k]) > prec:
s = 0.0
for i in range(k, m):
s += QR[i][k] * Q[i][j]
s = -s / QR[k][k]
for i in range(k, m):
Q[i][j] += s * QR[i][k]
return Q, R
|
fe1ffa20b5ad44a76837bd816ca07c3388ebcb4d
| 3,646,778
|
def split_range(r, n):
"""
Computes the indices of segments after splitting a range of r values
into n segments.
Parameters
----------
r : int
Size of the range vector.
n : int
The number of splits.
Returns
-------
segments : list
The list of lists of first and last indices of segments.
Example
-------
>>> split_range(8, 2)
[[0, 4], [4, 8]]
"""
step = int(r / n)
segments = []
for i in range(n):
new_segment = [step * i, step * (i + 1)]
segments.append(new_segment)
# correct the gap in the missing index due to the truncated step
segments[-1][-1] = r
return segments
|
34f570933a5eb8772dc4b2e80936887280ff47a4
| 3,646,779
|
import pymongo
def is_connected_to_mongo():
"""
Make sure user is connected to mongo; returns True if connected, False otherwise.
Check below url to make sure you are looking for the right port.
"""
maxSevSelDelay = 1 # how long to spend looking for mongo
try: # make sure this address is running
url = "mongodb://127.0.0.1:27017" # standard mongo port
client = pymongo.MongoClient(url, serverSelectionTimeoutMS=maxSevSelDelay) # check the url for specified amt of time
client.admin.command("serverStatus") # connect via serverStatus (will not cause error if connected)
except pymongo.errors.ServerSelectionTimeoutError as err: # error if serverStatus does not go through
return False # not connected
return True
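# Usage sketch: guard a script on a local mongod being reachable.
if not is_connected_to_mongo():
    print('Start mongod on 127.0.0.1:27017 before running this script.')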
|
2bf28835ae192d41836f2335ce5c1e152b0e0838
| 3,646,780
|
def _fill_three_digit_hex_color_code(*, hex_color_code: str) -> str:
"""
    Fill a 3-digit hexadecimal color code out to 6 digits.
Parameters
----------
hex_color_code : str
One digit hexadecimal color code (not including '#').
e.g., 'aaa', 'fff'
Returns
-------
filled_color_code : str
Result color code. e.g., 'aaaaaa', 'ffffff'
"""
filled_color_code: str = ''
for char in hex_color_code:
filled_color_code += char * 2
return filled_color_code
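# Usage sketch:
assert _fill_three_digit_hex_color_code(hex_color_code='0af') == '00aaff'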
|
d91df947fcc5f0718bbd9b3b4f69f1ad68ebeff4
| 3,646,781
|
import re
def normalize(text: str, convert_digits=True) -> str:
"""
Summary:
Arguments:
text [type:string]
Returns:
normalized text [type:string]
"""
# replacing all spaces,hyphens,... with white space
space_pattern = (
r"[\xad\ufeff\u200e\u200d\u200b\x7f\u202a\u2003\xa0\u206e\u200c\x9d]"
)
space_pattern = re.compile(space_pattern)
text = space_pattern.sub(" ", text)
# remove keshide,
text = re.sub(r"[ـ\r]", "", text)
# remove Aarab
text = re.sub(r"[\u064B\u064C\u064D\u064E\u064F\u0650\u0651\u0652]", "", text)
# replace arabic alphabets with equivalent persian alphabet
regex_list = [
(r"ء", r"ئ"),
(r"ﺁ|آ", r"آ"),
(r"ٲ|ٱ|إ|ﺍ|أ", r"ا"),
(r"ﺐ|ﺏ|ﺑ", r"ب"),
(r"ﭖ|ﭗ|ﭙ|ﺒ|ﭘ", r"پ"),
(r"ﭡ|ٺ|ٹ|ﭞ|ٿ|ټ|ﺕ|ﺗ|ﺖ|ﺘ", r"ت"),
(r"ﺙ|ﺛ", r"ث"),
(r"ﺝ|ڃ|ﺠ|ﺟ", r"ج"),
(r"ڃ|ﭽ|ﭼ", r"چ"),
(r"ﺢ|ﺤ|څ|ځ|ﺣ", r"ح"),
(r"ﺥ|ﺦ|ﺨ|ﺧ", r"خ"),
(r"ڏ|ډ|ﺪ|ﺩ", r"د"),
(r"ﺫ|ﺬ|ﻧ", r"ذ"),
(r"ڙ|ڗ|ڒ|ڑ|ڕ|ﺭ|ﺮ", r"ر"),
(r"ﺰ|ﺯ", r"ز"),
(r"ﮊ", r"ژ"),
(r"ݭ|ݜ|ﺱ|ﺲ|ښ|ﺴ|ﺳ", r"س"),
(r"ﺵ|ﺶ|ﺸ|ﺷ", r"ش"),
(r"ﺺ|ﺼ|ﺻ", r"ص"),
(r"ﺽ|ﺾ|ﺿ|ﻀ", r"ض"),
(r"ﻁ|ﻂ|ﻃ|ﻄ", r"ط"),
(r"ﻆ|ﻇ|ﻈ", r"ظ"),
(r"ڠ|ﻉ|ﻊ|ﻋ", r"ع"),
(r"ﻎ|ۼ|ﻍ|ﻐ|ﻏ", r"غ"),
(r"ﻒ|ﻑ|ﻔ|ﻓ", r"ف"),
(r"ﻕ|ڤ|ﻖ|ﻗ", r"ق"),
(r"ڭ|ﻚ|ﮎ|ﻜ|ﮏ|ګ|ﻛ|ﮑ|ﮐ|ڪ|ك", r"ک"),
(r"ﮚ|ﮒ|ﮓ|ﮕ|ﮔ", r"گ"),
(r"ﻝ|ﻞ|ﻠ|ڵ", r"ل"),
(r"ﻡ|ﻤ|ﻢ|ﻣ", r"م"),
(r"ڼ|ﻦ|ﻥ|ﻨ", r"ن"),
(r"ވ|ﯙ|ۈ|ۋ|ﺆ|ۊ|ۇ|ۏ|ۅ|ۉ|ﻭ|ﻮ|ؤ", r"و"),
(r"ﺔ|ﻬ|ھ|ﻩ|ﻫ|ﻪ|ۀ|ە|ة|ہ", r"ه"),
(r"ﭛ|ﻯ|ۍ|ﻰ|ﻱ|ﻲ|ں|ﻳ|ﻴ|ﯼ|ې|ﯽ|ﯾ|ﯿ|ێ|ے|ى|ي", r"ی"),
(r"¬", r""),
(r"•|·|●|·|・|∙|。|ⴰ", r"."),
(r",|٬|٫|‚|,", r"،"),
(r"ʕ|\?", r"؟"),
(r"|ِ||ُ||َ||ٍ||ٌ||ً", r""),
]
for pattern, replac in regex_list:
text = re.sub(pattern, replac, text)
# replace arabic and english digits with equivalent persian digits
num_dict = dict()
if convert_digits:
num_dict[u"0"] = u"۰"
num_dict[u"1"] = u"۱"
num_dict[u"2"] = u"۲"
num_dict[u"3"] = u"۳"
num_dict[u"4"] = u"۴"
num_dict[u"5"] = u"۵"
num_dict[u"6"] = u"۶"
num_dict[u"7"] = u"۷"
num_dict[u"8"] = u"۸"
num_dict[u"9"] = u"۹"
num_dict[u"%"] = u"٪"
num_dict[u"٠"] = u"۰"
num_dict[u"١"] = u"۱"
num_dict[u"٢"] = u"۲"
num_dict[u"٣"] = u"۳"
num_dict[u"٤"] = u"۴"
num_dict[u"٥"] = u"۵"
num_dict[u"٦"] = u"۶"
num_dict[u"٧"] = u"۷"
num_dict[u"٨"] = u"۸"
num_dict[u"٩"] = u"۹"
num_pattern = re.compile(r"(" + "|".join(num_dict.keys()) + r")")
text = num_pattern.sub(lambda x: num_dict[x.group()], text)
punctuation_after, punctuation_before = r"\.:!،؛؟»\]\)\}", r"«\[\(\{"
regex_list = [
# replace quotation with «»
('"([^\n"]+)"', r"«\1»"),
# replace single quotation with «»
("'([^\n\"]+)'", r"«\1»"),
# replace ٬ with «»
('٬([^\n"]+)٬', r"«\1»"),
# replace Double Angle Bracket with «»
('《([^\n"]+)》', r"«\1»"),
# replace dot with momayez
("([\d+])\.([\d+])", r"\1٫\2"),
# replace 3 dots
(r" ?\.\.\.", " … "),
# fix ی space
(r"([^ ]ه) ی ", r"\1ی "),
# put zwnj after می, نمی
(r"(^| )(ن?می) ", r"\1\2"),
# put zwnj before تر, تری, ترین, گر, گری, ها, های
(
r"(?<=[^\n\d "
+ punctuation_after
+ punctuation_before
+ "]{2}) (تر(ین?)?|گری?|های?)(?=[ \n"
+ punctuation_after
+ punctuation_before
+ "]|$)",
r"\1",
),
# join ام, ایم, اش, اند, ای, اید, ات
(
r"([^ ]ه) (ا(م|یم|ش|ند|ی|ید|ت))(?=[ \n" + punctuation_after + "]|$)",
r"\1\2",
),
# remove space before and after quotation
('" ([^\n"]+) "', r'"\1"'),
# remove space before punctuations
(" ([" + punctuation_after + "])", r"\1"),
# remove space after punctuations
("([" + punctuation_before + "]) ", r"\1"),
# put space after . and :
(
"(["
+ punctuation_after[:3]
+ "])([^ "
+ punctuation_after
+ "\w\d\\/۰۱۲۳۴۵۶۷۸۹])",
r"\1 \2",
),
# put space after punctuation
(
"([" + punctuation_after[3:] + "])([^ " + punctuation_after + "])",
r"\1 \2",
),
# put space before punctuations
(
"([^ " + punctuation_before + "])([" + punctuation_before + "])",
r"\1 \2",
),
# Remove repeating characters (keep 2 repeats)
(r"(ئآابپتثجچحخدذرزژسشصضطظعغفقکگلمنوهیچ)\1+", r"\1\1"),
]
for pattern, replac in regex_list:
text = re.sub(pattern, replac, text)
# fix "؟ " in links
text = re.sub(r"([a-zA-z]+)(؟ )", r"\1?", text)
# fix "، " in English numbers
text = re.sub(r"([0-9+])، ([0-9+])", r"\1,\2", text)
# fix "٫" in English numbers
text = re.sub(r"([0-9+])٫([0-9+])", r"\1.\2", text)
# fix "، " in farsi digits
text = re.sub(r"([۰-۹+])، ([۰-۹+])", r"\1٫\2", text)
return text
|
43bddc9c78615ab4cc58a732c164ad81b6848dc1
| 3,646,782
|
def register_name_for(entity):
"""
gets the admin page register name for given entity class.
it raises an error if the given entity does not have an admin page.
:param type[pyrin.database.model.base.BaseEntity] entity: the entity class of
admin page to get its
register name.
:raises AdminPageNotFoundError: admin page not found error.
:rtype: str
"""
return get_component(AdminPackage.COMPONENT_NAME).register_name_for(entity)
|
c43ab1a1e08058435c6680886d0e7c1dd81b1bef
| 3,646,783
|
def index():
"""Show all the posts, most recent first."""
db = get_db()
posts = db.execute(
# "SELECT p.id, title, body, created, author_id, username"
# " FROM post p"
# " JOIN user u ON p.author_id = u.id"
# " ORDER BY created DESC"
"SELECT *, l.author_id as love_author, count(distinct l.id) as likes"
" FROM post p"
" LEFT JOIN user u ON p.author_id = u.id"
" LEFT JOIN love l ON p.id = l.post_id"
" GROUP BY p.id"
" ORDER BY created DESC"
# "SELECT p.id, title, body, created, author_id, username, count(distinct love.id)"
# " FROM post p"
# " LEFT JOIN love on p.id=love.post_id"
# " JOIN user u ON p.author_id = u.id"
# " GROUP BY p.id"
).fetchall()
return render_template("blog/index.html", posts=posts)
|
b40a302ef1278f3fb0227c40db4764c22f5f0cb8
| 3,646,784
|
import requests
def get_user_information(fbid, extra_fields=[]):
""" Gets user basic information: first_name, last_name, gender,
profile_pic, locale, timezone
:usage:
>>> # Set the user fbid you want the information
>>> fbid = "<user fbid>"
>>> # Call the function passing the fbid of user.
>>> user_information = fbbotw.get_user_information(fbid=fbid)
:param str fbid: User id to get the information.
:param list extra_fields: Extra fields that your app is allowed to \
request. eg. 'locale', 'timezone', 'gender'
:return dict:
>>> user_information = {
"id": "user_id",
"name": "User Full Name",
"first_name": "User First Name",
"last_name": "User Last Name",
"profile_pic": "https://cdn_to_pic.com/123",
}
:facebook docs: `/user-profile <https://developers.facebook.com/docs/\
messenger-platform/user-profile>`_
"""
user_info_url = GRAPH_URL.format(fbid=fbid)
payload = dict()
fields = [
'name', 'first_name', 'last_name', 'profile_pic'
] + extra_fields
payload['fields'] = (
",".join(fields)
)
payload['access_token'] = PAGE_ACCESS_TOKEN
user_info = requests.get(user_info_url, payload).json()
return user_info
|
7765fec33fc70a67c3249e6417d0f75acba0ba2e
| 3,646,785
|
import re
def parseTeam(teamString):
"""Parse strings for data from official Pokemon Showdown format.
Keyword arguemnts:\n
teamString -- a team string, copied from pokepaste or pokemon showdown
"""
pokemonList = teamString.split('\n\n')
teamList = []
#print(pokemonList)
for pokemon in pokemonList:
currentPokemonDict = {}
moveCounter = 1
currentPokemon = pokemon.split('\n')
if 'Ability' not in pokemon:
continue
for attribute in currentPokemon:
            if any(key in attribute for key in ('Happiness:', 'IVs:', 'Shiny:')):
                # the original condition was always true due to `or` on non-empty
                # strings; these lines carry no tracked team data, so skip them
                continue
if '@' in attribute:
attribute = attribute.split('@')
currentPokemonDict['Species'] = attribute[0].strip().replace(' ','')
if '(' in currentPokemonDict['Species']:
currentPokemonDict['Species'] = re.search(r'\(([^)]+)', currentPokemonDict['Species']).group(1)
if len(currentPokemonDict['Species']) == 1:
temp = attribute[0].split('(')[0]
currentPokemonDict['Species'] = temp.strip()
currentPokemonDict['Item'] = attribute[1].strip().replace(' ','')
if 'Nature' in attribute:
attribute = attribute.strip()
attribute = attribute.split(' ')
currentPokemonDict['Nature'] = attribute[0].strip()
if '- ' in attribute:
currentPokemonDict['Move'+str(moveCounter)] = attribute.split('- ')[1].strip().replace(' ','')
moveCounter += 1
if 'EVs' in attribute:
currentPokemonDict['HPEVs'] = 0
currentPokemonDict['AtkEVs'] = 0
currentPokemonDict['DefEVs'] = 0
currentPokemonDict['SpAEVs'] = 0
currentPokemonDict['SpDEVs'] = 0
currentPokemonDict['SpeEVs'] = 0
attribute = attribute.split(':')
attribute = attribute[1].split('/')
for item in attribute:
item = item.strip()
item = item.split(' ')
currentPokemonDict[item[1]+'EVs'] = int(item[0])
teamList.append(currentPokemonDict)
return teamList
|
70680cf1eca50a4738b8afc7ab12fcd86b48d01d
| 3,646,786
|
import os
def url_scheme(url, path):
"""Treat local URLs as 'file://'."""
if not urlparse(url).scheme:
url = "file://" + os.path.join(path, url)
return url
|
dba51b0c0a3fbbbf32186a8820cfbd762485a3af
| 3,646,787
|
def mix_style(style_codes,
content_codes,
num_layers=1,
mix_layers=None,
is_style_layerwise=True,
is_content_layerwise=True):
"""Mixes styles from style codes to those of content codes.
Each style code or content code consists of `num_layers` codes, each of which
is typically fed into a particular layer of the generator. This function mixes
styles by partially replacing the codes of `content_codes` from some certain
layers with those of `style_codes`.
For example, if both style code and content code are with shape [10, 512],
meaning to have 10 layers and each employs a 512-dimensional latent code. And
the 1st, 2nd, and 3rd layers are the target layers to perform style mixing.
Then the top half of the content code (with shape [3, 512]) will be replaced
by the top half of the style code (also with shape [3, 512]).
NOTE: This function also supports taking single-layer latent codes as inputs,
i.e., setting `is_style_layerwise` or `is_content_layerwise` as False. In this
case, the corresponding code will be first repeated for `num_layers` before
performing style mixing.
Args:
style_codes: Style codes, with shape [num_styles, *code_shape] or
[num_styles, num_layers, *code_shape].
content_codes: Content codes, with shape [num_contents, *code_shape] or
[num_contents, num_layers, *code_shape].
num_layers: Total number of layers in the generative model. (default: 1)
mix_layers: Indices of the layers to perform style mixing. `None` means to
replace all layers, in which case the content code will be completely
replaced by style code. (default: None)
is_style_layerwise: Indicating whether the input `style_codes` are
layer-wise codes. (default: True)
is_content_layerwise: Indicating whether the input `content_codes` are
layer-wise codes. (default: True)
Returns:
Codes after style mixing, with shape [num_styles, num_contents, num_layers,
*code_shape].
Raises:
ValueError: If input `content_codes` or `style_codes` is with invalid shape.
"""
if not is_style_layerwise:
style_codes = style_codes[:, np.newaxis]
style_codes = np.tile(
style_codes,
[num_layers if axis == 1 else 1 for axis in range(style_codes.ndim)])
if not is_content_layerwise:
content_codes = content_codes[:, np.newaxis]
content_codes = np.tile(
content_codes,
[num_layers if axis == 1 else 1 for axis in range(content_codes.ndim)])
if not (style_codes.ndim >= 3 and style_codes.shape[1] == num_layers and
style_codes.shape[1:] == content_codes.shape[1:]):
raise ValueError(f'Shapes of style codes and content codes should be '
f'[num_styles, num_layers, *code_shape] and '
f'[num_contents, num_layers, *code_shape] respectively, '
f'but {style_codes.shape} and {content_codes.shape} are '
f'received!')
layer_indices = parse_indices(mix_layers, min_val=0, max_val=num_layers - 1)
if not layer_indices:
layer_indices = list(range(num_layers))
num_styles = style_codes.shape[0]
num_contents = content_codes.shape[0]
code_shape = content_codes.shape[2:]
s = style_codes[:, np.newaxis]
s = np.tile(s, [num_contents if axis == 1 else 1 for axis in range(s.ndim)])
c = content_codes[np.newaxis]
c = np.tile(c, [num_styles if axis == 0 else 1 for axis in range(c.ndim)])
from_style = np.zeros(s.shape, dtype=bool)
from_style[:, :, layer_indices] = True
results = np.where(from_style, s, c)
assert results.shape == (num_styles, num_contents, num_layers, *code_shape)
return results
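# Usage sketch (hypothetical shapes; relies on the module-level parse_indices
# helper): mix the first 4 of 14 layers from 2 styles into 3 contents.
import numpy as np
styles = np.random.randn(2, 512)
contents = np.random.randn(3, 512)
mixed = mix_style(styles, contents, num_layers=14, mix_layers=[0, 1, 2, 3],
                  is_style_layerwise=False, is_content_layerwise=False)
assert mixed.shape == (2, 3, 14, 512)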
|
377a0638d84ed084a91eb6dfb20302e56ab85647
| 3,646,788
|
import logging
def train_many_models(extractor, param_grid, data_dir, output_dir=None,
**kwargs):
"""
Train many extractor models, then for the best-scoring model, write
train/test block-level classification performance as well as the model itself
to disk in ``output_dir``.
Args:
extractor (:class:`Extractor`): Instance of the ``Extractor`` class to
be trained.
param_grid (dict or List[dict]): Dictionary with parameters names (str)
as keys and lists of parameter settings to try as values, or a list
of such dictionaries, in which case the grids spanned by each are
explored. See documentation for :class:`GridSearchCV` for details.
data_dir (str): Directory on disk containing subdirectories for all
training data, including raw html and gold standard blocks files
output_dir (str): Directory on disk to which the trained model files,
errors, etc. are to be written. If None, outputs are not saved.
**kwargs:
scoring (str or Callable): default 'f1'
cv (int): default 5
n_jobs (int): default 1
verbose (int): default 1
Returns:
:class:`Extractor`: The trained extractor model with the best-scoring
set of params.
See Also:
Documentation for grid search :class:`GridSearchCV` in ``scikit-learn``:
http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
"""
# set up directories and file naming
output_dir, fname_prefix = _set_up_output_dir_and_fname_prefix(output_dir, extractor)
# prepare and split the data
logging.info('preparing and splitting the data...')
data = prepare_all_data(data_dir)
training_data, test_data = train_test_split(
data, test_size=0.2, random_state=42)
train_html, train_labels, train_weights = extractor.get_html_labels_weights(training_data)
test_html, test_labels, test_weights = extractor.get_html_labels_weights(test_data)
# filter docs we can't get features from
train_blocks = np.array([extractor.blockifier.blockify(doc)
for doc in train_html])
train_mask = [extractor._has_enough_blocks(blocks) for blocks in train_blocks]
train_blocks = train_blocks[train_mask]
train_labels = np.concatenate(train_labels[train_mask])
train_weights = np.concatenate(train_weights[train_mask])
test_labels = np.concatenate(test_labels)
test_weights = np.concatenate(test_weights)
# get features
# TODO: This only 'fit's one doc at a time. No feature fitting actually
# happens for now, but this might be important if the features change
train_features = np.concatenate([extractor.features.fit_transform(blocks)
for blocks in train_blocks])
# fit many models
gscv = GridSearchCV(
extractor.model, param_grid, fit_params={'sample_weight': train_weights},
scoring=kwargs.get('scoring', 'f1'), cv=kwargs.get('cv', 5),
n_jobs=kwargs.get('n_jobs', 1), verbose=kwargs.get('verbose', 1))
gscv = gscv.fit(train_features, train_labels)
logging.info('Score of the best model, on left-out data: %s', gscv.best_score_)
logging.info('Params of the best model: %s', gscv.best_params_)
# evaluate best model on train and test data
extractor.model = gscv.best_estimator_
train_eval = evaluate_model_predictions(
train_labels, extractor.predict(train_html[train_mask]), weights=train_weights)
test_eval = evaluate_model_predictions(
test_labels, extractor.predict(test_html), weights=test_weights)
# pickle the final model
_write_model_to_disk(output_dir, fname_prefix, extractor)
return extractor
|
783cb5a524a58023d79e88eff4b46a0e713ca8cc
| 3,646,789
|
from datetime import datetime
import sys
import os
def analyze_iw(aoi, doi, dictionary, size, aoiId):
"""
Function that pre-processes sentinel-2 imagery and runs the LCC change detection algorithm
Parameters:
aoi(ee.Feature): area of interest with property 'landcover'
doi(ee.Date): date of interest
dictionary (ee.Dictionary): appropriate dictionary of lda coefficients
size (float): minimum size (ac) of changes to output
aoiId (str): unique identifier for the area of interest
    Returns:
        tuple: status string, prior image date, recent image date,
            ee.FeatureCollection with properties 'id' and 'landcover',
            and an ee.Image of change z-score bands
    """
# cast dictionary to ee.Dictionary for use in subsequent GEE ops
dictionary = ee.Dictionary(dictionary)
# grab the landcover property from aoi and then cast to geometry
lc = ee.Feature(aoi).get('mode')
aoi = aoi.geometry()
# TODO: This isn't working to add a unique ID
# function to add unique id and landcover type to output feature properties
def add_props(ft):
ftId = aoiId + '_' + '1'
print(ftId)
return ft.set({'id': ftId, 'landcover': lc})
try:
sq_meters = ee.Number(size).multiply(4047)
projdate = ee.Date(doi)
today = projdate.advance(6, 'month')
today_dt = str(datetime.fromtimestamp(int(today.getInfo()['value'])/1e3))[:10]
print('today', today_dt)
proj_dt = str(datetime.fromtimestamp(int(projdate.getInfo()['value']) / 1e3))[:10]
print('proj_dt:', proj_dt)
prior = ee.Date.fromYMD(projdate.get('year').subtract(1), projdate.get('month'), projdate.get('day'))
prior_dt = str(datetime.fromtimestamp(int(prior.getInfo()['value']) / 1e3))[:10]
print('prior_dt:', prior_dt)
rgbn = ['B2', 'B3', 'B4', 'B8', 'B11', 'B12']
print(today.get('year').getInfo())
if(prior.get('year').getInfo() >= 2019):
masked = SR.filterDate(prior, today).filterBounds(aoi).map(clouds.maskSR)
elif(today.get('year').getInfo() >= 2019):
s2 = S2.filterDate(prior, '2018-12-25').filterBounds(aoi).map(clouds.maskTOA)
sr = SR.filterDate('2018-12-26', today).filterBounds(aoi).map(clouds.maskSR)
masked = s2.select(rgbn).merge(sr.select(rgbn))
else:
masked = S2.filterDate(prior, today).filterBounds(aoi).map(clouds.maskTOA)
# if(projdate.get('year').getInfo() >= 2019):
# filtered = SR.filterDate(prior, today).filterBounds(aoi)
# masked = filtered.map(clouds.maskSR)
# else:
# filtered = S2.filterDate(prior, today).filterBounds(aoi)
# masked = filtered.map(clouds.maskTOA)
#masked = S2.filterDate(prior, today).filterBounds(aoi).map(mask)
corrected = terrain.c_correct(masked, rgbn, aoi, DEM)
after = corrected.filterDate(projdate, today)
count = after.size()
print('after size:', count.getInfo())
reference = after.sort('system:time_start', False)
time0 = ee.Image(reference.first()).get('system:time_start')
recent_date = str(datetime.fromtimestamp(int(time0.getInfo()) / 1e3))[:10]
before = corrected.filterDate(prior, projdate)
count = before.size()
print('before size:', count.getInfo())
reference = before.sort('system:time_start', False)
time0 = reference.first().get('system:time_start')
past_date = str(datetime.fromtimestamp(int(time0.getInfo()) / 1e3))[:10]
# run the IW algorithm between the before and after collections within the user defined AOI.
# by default, ag fields are masked by 'yes'
print('running the iw algorithm')
iwout = iw.runIW(before,
after,
aoi,
scl = 30,
tScl = 6,
ag = 'yes').clip(aoi)
print('performing LDA analysis')
# calculate LDA score to discriminate change/no-change pixels in iwout. Requires thresholds from habitat dictionary
scored = stats.ldaScore(
iwout,
['cv_z', 'rcvmax_z', 'ndvi_z', 'ndsi_z', 'ndwi_z', 'nbr_z'],
dictionary)
# scored = stats.ldaScore(iwout, 0 ['cv_z', 'rcvmax_z', 'ndvi_z', 'ndsi_z', 'ndwi_z', 'nbr_z'],
# [cvz, rcvz, ndviz, ndsiz, ndwiz, nbrz]).clip(aoi)
# create a binary [0, 1] image representing change and no-change pixels. Erode and dilate changed areas
selected = scored.gte(dictionary.toImage(['lda']))\
.focal_min(1, 'square', 'pixels')\
.focal_max(1, 'square', 'pixels')
# mask image to retain only pixels equal to '1'
selected = selected.updateMask(selected)
#maskSelected = selected.updateMask(selected.eq(0))
# mask out no-change areas (i.e. selected = 0) here. Creates fewer polygons which should save memory
# selected = selected.updateMask(selected.eq(1))
#print('selected is a ', type(selected))
scale = 10
tileScale = 6
# convert binary image to polygons. Note: this creates polygons for both contiguous change and contiguous no-change areas
polys = selected.reduceToVectors(
geometry=aoi,
scale=scale,
tileScale=tileScale,
eightConnected=True,
bestEffort=True,
maxPixels=1e13)
#print('polys is a ', type(polys))
count = polys.size().getInfo()
print(count)
#print('polys size:', count.getInfo(displaySize))
# return only polygons corresponding to change pixels
polys = polys.map(sz)
polys = polys.map(add_props)
# filter out change polygons smaller than the user defined minimum area
polys = polys.filter(ee.Filter.gte('area', sq_meters))
# indicator = True
return "OK", past_date, recent_date, polys, iwout.select([
'cv_z', 'nbr_z', 'ndsi_z', 'ndwi_z', 'ndvi_z', 'rcvmax_z'])
except Exception as error:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print ("")
print ("*******************************")
print ("Unexpected error in analyze.py")
print (exc_type, fname, exc_tb.tb_lineno)
#print("sys.exc_info:", sys.exc_info()[0])
print ("Error:", error)
print ("*******************************")
print ("")
return "error"
|
4883daea5b3e05d4af5dafcb257c8555ae361a9a
| 3,646,790
|
import collections
def _build_client_update(model: model_lib.Model,
use_experimental_simulation_loop: bool = False):
"""Creates client update logic for FedSGD.
Args:
model: A `tff.learning.Model` used to compute gradients.
use_experimental_simulation_loop: Controls the reduce loop function for
input dataset. An experimental reduce loop is used for simulation.
Returns:
A `tf.function`.
"""
dataset_reduce_fn = dataset_reduce.build_dataset_reduce_fn(
use_experimental_simulation_loop)
@tf.function
def client_update(initial_weights, dataset):
model_weights = model_utils.ModelWeights.from_model(model)
tf.nest.map_structure(lambda a, b: a.assign(b), model_weights,
initial_weights)
def reduce_fn(state, batch):
"""Runs forward_pass on batch and sums the weighted gradients."""
accumulated_gradients, num_examples_sum = state
with tf.GradientTape() as tape:
output = model.forward_pass(batch)
gradients = tape.gradient(output.loss, model_weights.trainable)
num_examples = tf.cast(output.num_examples, tf.float32)
accumulated_gradients = tuple(
accumulator + num_examples * gradient
for accumulator, gradient in zip(accumulated_gradients, gradients))
# We may be able to optimize the reduce function to avoid doubling the
# number of required variables here (e.g. keeping two copies of all
# gradients). If you're looking to optimize memory usage this might be a
# place to look.
return (accumulated_gradients, num_examples_sum + num_examples)
def _zero_initial_state():
"""Create a tuple of (gradient accumulators, num examples)."""
return tuple(
tf.nest.map_structure(tf.zeros_like,
model_weights.trainable)), tf.constant(
0, dtype=tf.float32)
gradient_sums, num_examples_sum = dataset_reduce_fn(
reduce_fn=reduce_fn,
dataset=dataset,
initial_state_fn=_zero_initial_state)
# We now normalize to compute the average gradient over all examples.
average_gradient = tf.nest.map_structure(
lambda gradient: gradient / num_examples_sum, gradient_sums)
model_output = model.report_local_unfinalized_metrics()
stat_output = collections.OrderedDict(num_examples=num_examples_sum)
average_gradient, has_non_finite_delta = (
tensor_utils.zero_all_if_any_non_finite(average_gradient))
if has_non_finite_delta > 0:
client_weight = tf.constant(0.0)
else:
client_weight = num_examples_sum
return client_works.ClientResult(
update=average_gradient,
update_weight=client_weight), model_output, stat_output
return client_update
|
8ba68ebba2c6520e9c7a89975d16dc20debf52eb
| 3,646,791
|
import ipaddress
def ipv4_addr_check():
"""Prompt user for IPv4 address, then validate. Re-prompt if invalid."""
while True:
try:
return ipaddress.IPv4Address(input('Enter valid IPv4 address: '))
        except ValueError:
            # loop back and re-prompt instead of re-raising, as documented
            print('Bad value, try again.')
|
e85681cdcedb605f47240b27e8e2bce077a39273
| 3,646,792
|
import os
import imp
def getPlugins():
"""
List the plugins located in the plugins folder.
"""
plugins = []
pluginList = os.listdir(PluginFolder)
for pluginName in pluginList:
location = os.path.join(PluginFolder, pluginName)
if not os.path.isdir(location) or not MainModule + ".py" in os.listdir(location):
continue
info = imp.find_module(MainModule, [location])
plugins.append({"name": pluginName, "info": info})
return plugins
|
d88ca8c19a35ddf49fda37e41e1f8f3b01cf9974
| 3,646,793
|
def energybalance_erg(ratio,crew,erg,w0=4.3801,dt=0.03,doplot=1,doprint=0,theconst=1.0):
"""
calculates one stroke with ratio as input, using force profile in time domain
"""
# w0 = initial flywheel angular velo
# initialising output values
dv = 100.
vavg = 0.0
vend = 0.0
power = 0.0
# stroke parameters
tempo = crew.tempo
mc = crew.mc
recprofile = crew.recprofile
d = crew.strokelength
Nrowers = 1
drag = erg.drag
inertia = erg.inertia
cord = erg.cord
cordlength = erg.cordlength
r = erg.r # sprocket radius
# nr of time steps
aantal = 1+int(round(60./(tempo*dt)))
time = linspace(0,60./tempo,aantal)
# flywheel angular velo
wf = zeros(len(time))+w0
wfdot = zeros(len(time))
# crew velo
vc = zeros(len(time))
vpull = zeros(len(time))
Fhandle = zeros(len(time))
Fres = zeros(len(time))
Fleg = zeros(len(time))
ydotdot = zeros(len(time))
ydot = zeros(len(time)) # +wf[0]*r
Pf = zeros(len(time))
Phandle = zeros(len(time))
Ebungee = zeros(len(time))
Pbungee = zeros(len(time))
handlepos = 0
vhand = ydot[0]
# initial handle and boat velocities
vc[0] = ydot[0]
# calculate average drive speed
tdrive = ratio*max(time)
vdriveavg = crew.strokelength/tdrive
idrivemax = int(round(tdrive/dt))
## powerconst = 2.58153699 # bij sin^(1/3)
## powerconst = 2 # bij sin
# powerconst = 1.5708 # bij sin^2
# macht = 2.
# vhandmax = np.pi*d/(powerconst*tdrive)
# vhand = vhandmax*(np.sin(np.pi*(time)/tdrive))**(macht)
# powerconst = 3.1733259127
# vhandmax = np.pi*d/(powerconst*tdrive)
# vhand = vhandmax*(1-np.cos(2*np.pi*(time)/tdrive))
macht = 0.5
x = np.linspace(0,1,100)
y = (x-x**2)**(macht)
s = np.cumsum(np.diff(x)*y[1:])[-1]
powerconst = 1/s
vhandmax = powerconst*d/tdrive
vhand = vhandmax*((time/tdrive)-(time/tdrive)**2)**macht
# stroke
for i in range(1,idrivemax):
now = dt*i
timerel = now/tdrive
time2 = (dt*(i+1))/tdrive
vi = vhand[i-1]
vj = vhand[i]
vpull[i] = vhand[i]
Tdrag = drag*wf[i-1]**2
handlepos += dt*vi
ydot[i] = crew.vcm(vi, handlepos)
# ydot[i] = vi*(1-timerel)
# ydot[i] = vi
ydotdot[i] = (ydot[i]-ydot[i-1])/dt
wnext = vj/r
wnext2 = wf[i-1]-dt*Tdrag/inertia
# if wnext > 0.99*wf[i-1]:
if wnext > wnext2:
wf[i] = wnext
Tacceler = inertia*(wnext-wf[i-1])/dt
else:
wf[i] = wf[i-1]-dt*Tdrag/inertia
Tacceler = 0
Tdrag = 0
wfdot[i] = (wf[i]-wf[i-1])/dt
Fhandle[i] = ((Tdrag+Tacceler)/r)+cord*(cordlength+handlepos)
Fres[i] = Nrowers*mc*ydotdot[i]
Fleg[i] = Fres[i]+Fhandle[i]
Ebungee[i] = 0.5*(cord*(cordlength+handlepos)**2 - cord*cordlength**2)
Pbungee[i] = (Ebungee[i]-Ebungee[i-1])/dt
vc[i] = ydot[i]
# recovery
trecovery = max(time)-time[idrivemax]
ratio = time[idrivemax]/max(time)
aantalstroke = idrivemax
if (recprofile == 1): # oude methode (sinus)
vhandmax = -np.pi*d/(2*trecovery)
vhand = vhandmax*np.sin(np.pi*(time-time[i])/trecovery)
for k in range(idrivemax,aantal):
Tdrag = drag*wf[k-1]**2 # drag torque
wf[k] = wf[k-1]-dt*Tdrag/inertia
ydot[k] = crew.vcm(vhand, handlepos)
# ydot[k] = vhand
vc[k] = ydot[k]
ydotdot[k] = (ydot[k]-ydot[k-1])/dt
handlepos = handlepos+vhand[k]*dt
Ebungee[k] = 0.5*(cord*(cordlength+handlepos)**2 - cord*cordlength**2)
Pbungee[k] = (Ebungee[k]-Ebungee[k-1])/dt
else:
vavgrec = d/trecovery
vcrecovery = zeros(aantal)
for k in range(idrivemax,aantal):
vhand = crew.vhandle(vavgrec,trecovery,time[k]-time[idrivemax])
vpull[k] = vhand
vcrecovery[k] = crew.vcm(vhand, handlepos)
# vcrecovery[k] = vhand
Tdrag = drag*wf[k-1]**2 # drag torque
wf[k] = wf[k-1]-dt*Tdrag/inertia
wfdot[k] = (wf[k]-wf[k-1])/dt
ydot[k] = vcrecovery[k]
vc[k] = ydot[k]
ydotdot[k] = (ydot[k]-ydot[k-1])/dt
handlepos = d+d*crew.dxhandle(vavgrec,trecovery,time[k]-time[idrivemax])
Fhandle[k] = cord*(cordlength+handlepos)
Fres[k] = Nrowers*mc*ydotdot[k]
Fleg[k] = Fres[k]+Fhandle[k]
Ebungee[k] = 0.5*(cord*(cordlength+handlepos)**2 - cord*cordlength**2)
Pbungee[k] = (Ebungee[k]-Ebungee[k-1])/dt
ydot[0] = ydot[0]/2.
ydotdot[1]=(ydot[1]-ydot[0])/dt
Pq = (Nrowers*mc)*ydotdot*ydot
Pleg = Fleg*ydot
Phandle = Fhandle*vpull
Parm = Phandle-Fhandle*ydot
Plegdiss = 0.5*theconst*(abs(Pleg)-Pleg)
Plegsource = abs(Pleg)
Parmdiss = 0.5*theconst*(abs(Parm)-Parm)
Parmsource = abs(Parm)
# sources
Elegsource = cumsum(Plegsource)*dt
Earmsource = cumsum(Parmsource)*dt
Eleg = cumsum(Pleg)*dt
Earm = cumsum(Parm)*dt
Ehandle = cumsum(Phandle)*dt
# sinks
# drag power
Pw = drag*wf**3.
Ew = cumsum(Pw)*dt
Elegdiss = cumsum(Plegdiss)*dt
Earmdiss = cumsum(Parmdiss)*dt
# storage
Pwheel = inertia*wf*wfdot
Ewheel = cumsum(Pwheel)*dt
Ewheel = Ewheel - Ewheel[0]
Ebungee = cumsum(Pbungee)*dt
Pqrower = abs(Pq)
Pdiss = 0.5*theconst*(Pqrower-Pq)
Eq = cumsum(Pq)*dt
Eqrower = cumsum(Pqrower)*dt
Ediss = cumsum(Pdiss)*dt
# printing
if (doprint==1):
print(("Ediss rower ",Ediss[aantal-1]))
print(("E drag ",Ew[aantal-1]))
print(("Eleg ",Eqrower[aantal-1]))
print(("Ehandle ",Ehandle[aantal-1]))
print(("Ebungee ",Ebungee[aantal-1]))
print("")
print(("P handle ",Ehandle[aantal-1]/time[aantal-1]))
print(("P drag ",Ew[aantal-1]/time[aantal-1]))
print("")
# plotting
if (doplot==1):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, ydot,'r-',label = 'Crew velocity')
pyplot.plot(time, vpull,'k-',label = 'Handle velocity')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel('v (m/s)')
pyplot.show()
if (doplot==2):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Fhandle,'r-',label = 'Handle force')
pyplot.plot(time, Fleg,'b-',label = 'Leg force')
pyplot.plot(time, Fres,'g-',label = 'Accelerating force')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel('force (N)')
pyplot.show()
if (doplot==3):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Phandle, 'r-', label = 'Handle Power')
pyplot.plot(time, Pleg,'b-',label = 'Leg power')
pyplot.plot(time, Pq,'k-',label = 'Kinetic power')
pyplot.plot(time, Parm,'y-',label = 'Arm power')
pyplot.plot(time, Pq+Phandle-Parm-Pleg,'b+', label = 'should be zero')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel('power (W)')
pyplot.show()
if (doplot==4):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Ewheel,'g-',label = 'Flywheel energy stored')
pyplot.plot(time, Eq+Ebungee,'k-',label = 'Kinetic energy')
pyplot.plot(time, Ew,'r-',label = 'Drag dissipation')
pyplot.plot(time, Ediss,'b-',label = 'Rower body dissipation')
pyplot.plot(time, Ewheel+Eq+Ew+Ediss+Ebungee, 'b+', label = 'Sinks+Kinetic')
pyplot.plot(time, Ew+Ediss, 'r+', label = 'Sinks')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel('Energy (J)')
pyplot.show()
if (doplot==5):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Pleg, 'y-', label = 'Leg power')
pyplot.plot(time, Plegdiss,'g-',label = 'Leg dissipation')
pyplot.plot(time, Plegsource,'g+',label = 'Leg source')
pyplot.plot(time, Parm, 'r-', label = 'Arm power')
pyplot.plot(time, Parmdiss,'k-',label = 'Arm dissipation')
pyplot.plot(time, Parmsource,'k+',label = 'Arm source')
pylab.legend(loc='upper left')
pyplot.xlabel("time (s)")
pyplot.ylabel('power (W)')
pyplot.show()
if (doplot==6):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Elegsource+Ehandle, 'bo', label = 'Leg power')
pyplot.plot(time, Elegdiss,'g-',label = 'Leg dissipation')
pyplot.plot(time, Earm, 'r-', label = 'Arm power')
pyplot.plot(time, Ehandle, 'k+', label = 'Handle power')
pyplot.plot(time, Earmdiss,'k-',label = 'Arm dissipation')
pyplot.plot(time, Eqrower+Ewheel+Ebungee, 'y+', label = 'Eqrower+Ewheel+Ecord')
pyplot.plot(time, Elegsource+Earmsource,'b+', label = 'Sources')
pylab.legend(loc='upper left')
pyplot.xlabel("time (s)")
pyplot.ylabel('energy (J)')
pyplot.show()
if (doplot==7):
pyplot.clf()
pyplot.plot(time, Ew+Ediss, 'r-', label = 'Total Sinks')
# pyplot.plot(time, Elegsource+Earmsource,'go',label = 'Total Sources')
pyplot.plot(time, Eqrower+Ehandle,'y-',label = 'Total Sources 2')
pyplot.plot(time, Ewheel+Eq+Ew+Ediss+Ebungee, 'b+', label = 'Sinks+Kinetic')
pylab.legend(loc='lower right')
pyplot.xlabel("time (s)")
pyplot.ylabel('energy (J)')
pyplot.show()
if (doplot==8):
pyplot.clf()
pyplot.plot(time, ydot, 'r-', label = 'Crew velocity')
pylab.legend(loc='lower right')
pyplot.xlabel("time (s)")
pyplot.ylabel("v (m/s)")
pyplot.show()
if (doplot==9):
pyplot.clf()
wref = wf
pyplot.plot(time,wref,'r-',label='flywheel speed')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel("Flywheel speed (rad/sec)")
pyplot.show()
dw = wf[len(time)-1]-wf[0]
wavg = mean(wf)
wend = wf[len(time)-1]
energy = max(Ew+Ediss)
energyd = max(Ew)
energy = energy/Nrowers
energyd = energyd/Nrowers
power = energy*tempo/60.
powerd = energyd*tempo/60.
return [dw,wend,wavg,ratio,energy,power,powerd]
|
a742b88394f194afee3ef89bc4e878027fcac8b5
| 3,646,794
|
import os
import subprocess
def get_gpu_count():
"""get avaliable gpu count
Returns:
gpu_count: int
"""
gpu_count = 0
env_cuda_devices = os.environ.get('CUDA_VISIBLE_DEVICES', None)
if env_cuda_devices is not None:
assert isinstance(env_cuda_devices, str)
try:
if not env_cuda_devices:
return 0
gpu_count = len(
[x for x in env_cuda_devices.split(',') if int(x) >= 0])
logger.info(
'CUDA_VISIBLE_DEVICES found gpu count: {}'.format(gpu_count))
except:
logger.info(
'Cannot find available GPU devices, using CPU or other devices now.'
)
gpu_count = 0
else:
try:
gpu_count = str(subprocess.check_output(["nvidia-smi",
"-L"])).count('UUID')
logger.info('nvidia-smi -L found gpu count: {}'.format(gpu_count))
except:
logger.info(
'Cannot find available GPU devices, using CPU or other devices now.'
)
gpu_count = 0
return gpu_count
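
# Minimal usage sketch (hypothetical context, not from the original source):
# choose a device string based on the detected GPU count.
n_gpus = get_gpu_count()
device = 'gpu' if n_gpus > 0 else 'cpu'
print('Detected {} GPU(s); using {}.'.format(n_gpus, device))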
|
fad511d32419daa1f2d9ac569ebd9201a9aa1de9
| 3,646,795
|
import numpy as np


def get_EXP3_policy(Q, eta, G_previous):
"""
Obtain EXP-3 policy based on a given Q-function. Also, return updated
values of G, to be used in future calls to this function.
Inputs:
1) Q: a num_states x num_actions matrix, in which Q[s][a] specifies
the Q-function in state s and action a.
2) eta: a scalar; this is the eta parameter defined in the EPMC algorithm.
3) G_previous: num_states x num_actions matrix; this is a matrix of the
G-values defined in the EPMC algorithm. These values are from the
previous iteration.
Outputs:
1) policy: a policy, specified by a num_states x num_actions matrix, in
which policy[s][a] is the probability of taking action a in state s.
2) G: num_states x num_actions updated G matrix, as defined in the EPMC
algorithm.
"""
num_actions = Q.shape[1]
# Update the policy:
policy = np.exp((eta / num_actions) * G_previous)
policy = (policy.T / policy.sum(axis=1)).T
policy = eta / num_actions + (1 - eta) * policy
# Update G:
G = G_previous + Q / policy
return policy, G
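
# Minimal usage sketch (not from the original source): 3 states, 2 actions,
# zero-initialized G-values. Each row of the returned policy sums to one,
# since eta/num_actions * num_actions + (1 - eta) * 1 == 1.
rng = np.random.default_rng(0)
Q_demo = rng.random((3, 2))
policy_demo, G_demo = get_EXP3_policy(Q_demo, eta=0.1, G_previous=np.zeros((3, 2)))
assert np.allclose(policy_demo.sum(axis=1), 1.0)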
|
80e886142fff398801e4f767ed09392d7f8b398c
| 3,646,796
|
def table_content(db, table):
"""
return a 2 dimentioanl array cont-aining all table values
========================================================
>>> table_content("sys", "host_ip")
[[1, 2, 3],
[2, 3, 4],
[3, 4, 5]]
========================================================
"""
#XXX: uses : `select * from table`
return execute_and_fetch(_SELECT_TABLE.format(db, table))
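
# Hypothetical context (not in the original source): table_content depends on
# a module-level query template and an execute_and_fetch helper defined
# elsewhere; they might look roughly like this.
_SELECT_TABLE = "SELECT * FROM {}.{}"

def execute_and_fetch(query):
    # e.g. execute `query` on a DB-API cursor and return cursor.fetchall()
    raise NotImplementedError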
|
51b973f442131a157aa13ae0c0ed9f1d07a4115d
| 3,646,797
|
def process_sort_params(sort_keys, sort_dirs, default_keys=None,
default_dir='asc'):
"""Process the sort parameters to include default keys.
Creates a list of sort keys and a list of sort directions. Adds the default
keys to the end of the list if they are not already included.
When adding the default keys to the sort keys list, the associated
direction is:
1) The first element in the 'sort_dirs' list (if specified), else
2) 'default_dir' value (Note that 'asc' is the default value since this is
the default in sqlalchemy.utils.paginate_query)
:param sort_keys: List of sort keys to include in the processed list
:param sort_dirs: List of sort directions to include in the processed list
:param default_keys: List of sort keys that need to be included in the
processed list, they are added at the end of the list
if not already specified.
:param default_dir: Sort direction associated with each of the default
keys that are not supplied, used when they are added
to the processed list
:returns: list of sort keys, list of sort directions
:raise exception.InvalidInput: If more sort directions than sort keys
are specified or if an invalid sort
direction is specified
"""
if default_keys is None:
default_keys = ['created_at', 'id']
# Determine direction to use for when adding default keys
    if sort_dirs:
default_dir_value = sort_dirs[0]
else:
default_dir_value = default_dir
# Create list of keys (do not modify the input list)
if sort_keys:
result_keys = list(sort_keys)
else:
result_keys = []
# If a list of directions is not provided, use the default sort direction
# for all provided keys.
if sort_dirs:
result_dirs = []
# Verify sort direction
for sort_dir in sort_dirs:
if sort_dir not in ('asc', 'desc'):
msg = _("Unknown sort direction, must be 'desc' or 'asc'.")
raise exception.InvalidInput(reason=msg)
result_dirs.append(sort_dir)
else:
result_dirs = [default_dir_value for _sort_key in result_keys]
# Ensure that the key and direction length match
while len(result_dirs) < len(result_keys):
result_dirs.append(default_dir_value)
    # Unless more directions are specified, which is an error
if len(result_dirs) > len(result_keys):
msg = _("Sort direction array size exceeds sort key array size.")
raise exception.InvalidInput(reason=msg)
# Ensure defaults are included
for key in default_keys:
if key not in result_keys:
result_keys.append(key)
result_dirs.append(default_dir_value)
return result_keys, result_dirs
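
# Minimal usage sketch (assumed context; the exception module is only needed
# on invalid input): one explicit key and direction; the default keys are
# appended with the first supplied direction.
keys, dirs = process_sort_params(['name'], ['desc'])
# keys -> ['name', 'created_at', 'id']
# dirs -> ['desc', 'desc', 'desc']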
|
1f11f5d7d4fbe5c1864ace120c1dbde7b6023acb
| 3,646,798
|
def namify(idx):
"""
Helper function that pads a given file number and return it as per the dataset image name format.
"""
len_data = 6 #Ilsvr images are in the form of 000000.JPEG
len_ = len(str(idx))
need = len_data - len_
assert len_data >= len_, "Error! Image idx being fetched is incorrect. Invalid value."
pad = '0'*need
return pad+str(idx)
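
# Minimal usage sketch: pad index 42 to the six-digit ILSVRC file-name width.
# Equivalent to str(42).zfill(6).
print(namify(42) + '.JPEG')   # -> '000042.JPEG'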
|
069ff7a297f944e9e0e51e5e100276a54fa51618
| 3,646,799
|