def _get_tests(tier):
"""Return a generator of test functions."""
return TEST_TIERS[tier]
| 15,200
|
def get_first_of_iterable(iterable):
"""
Return the first element of the given sequence.
Most useful on generator types.
:param iterable iterable: input iterable
:returns: tuple(iterable, first_element). If a generator is passed,
a new generator will be returned preserving the original values.
:raises: IndexError
Example
    -------
>>> a = [1,2,3]
>>> b = (str(i) for i in range(3))
>>> a, first_element = get_first_of_iterable(a)
>>> a, first_element
([1, 2, 3], 1)
    When the generator ``b`` is given, a new generator is returned by ``get_first_of_iterable``
    to preserve the original values of ``b``:
>>> b, first_element = get_first_of_iterable(b)
>>> next(b), first_element
('0', '0')
"""
if hasattr(iterable, '__getitem__'):
return iterable, iterable[0]
iterable = iter(iterable)
try:
first = next(iterable)
except StopIteration:
raise IndexError('`iterable` is empty')
return chain([first], iterable), first
| 15,201
|
def getTiers(connection=None):
"""
"""
# Open the master database if it is not supplied.
flag = False
if connection is None:
connection = sqlite3.connect(MASTER)
flag = True
# Create a cursor from the connection.
cursor = connection.cursor()
    # Execute the statement to select the distinct tiers from the hierarchy table.
cursor.execute("""SELECT DISTINCT tier FROM hierarchy""")
# Fetch the returned data.
tiers = [tier[0] for tier in cursor.fetchall()]
# Close the cursor.
cursor.close()
    # Close the connection if it was opened here.
if flag:
connection.close()
return tiers
| 15,202
|
def _on_execute_sync(ctx: ExecutionContext, step: WorkflowStep):
"""Performs synchronous step work.
"""
# If unit of work is complete then signal step end.
if step.result is None:
do_step_verification.send(ctx)
# Enqueue message batch (with completion callback).
elif isinstance(step.result, tuple) and len(step.result) == 3:
_enqueue_message_batch(ctx, step)
else:
raise TypeError("Sync steps must return None or a batch of messages")
| 15,203
|
def get_repr_type(type_: Any) -> Any:
"""Parse a type and return an representative type.
Example:
All of the following expressions will be ``True``::
get_repr_type(A) == A
get_repr_type(Annotated[A, ...]) == A
get_repr_type(Union[A, B, ...]) == A
get_repr_type(Optional[A]) == A
"""
class Temporary:
__annotations__ = dict(type=type_)
unannotated = get_type_hints(Temporary)["type"]
if get_origin(unannotated) is Union:
return get_args(unannotated)[0]
return unannotated
| 15,204
|
def get_month_range_from_dict(source):
"""
    :param source: dictionary with keys 'start' and 'end'
    :return: a tuple of datetime objects in the form (start, end)
"""
now = timezone.now()
start = source.get('start')
end = source.get('end', datetime.datetime(now.year, now.month, calendar.monthrange(now.year, now.month)[1]))
if not start:
start = datetime.datetime(end.year-1, end.month+1, 1) if end.month != 12 else datetime.datetime(end.year, 1, 1)
return start, end
| 15,205
|
def softmax_ad_set_dim_func(head, data, axis):
"""Look up the softmax_ad_set_dim_map, and return hash_value, hash_key."""
key = []
key.append(tuple(data.shape))
key.append(data.dtype)
key.append(axis)
hash_key = str(tuple(key))
if hash_key in softmax_ad_set_dim_map.keys():
return ct_util.set_dims(softmax_ad_set_dim_map[hash_key]), hash_key
return "", hash_key
| 15,206
|
def parse_latency_stats(fp):
"""
Parse latency statistics.
:param fp: the file path that stores the statistics
    :returns: the average latency in milliseconds to connect a pair of initiator and responder clients
"""
latency = []
with open(fp) as csvfile:
csvreader = csv.DictReader(csvfile, delimiter=' ', fieldnames=['title', 'time'])
for row in csvreader:
latency.append(float(row['time']) * 1000)
return sum(latency) / len(latency)
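# Illustrative check (not from the source): assuming the stats file holds
# space-separated "title time" rows with times in seconds, two rows of 0.012 s
# and 0.018 s should average to roughly 15.0 ms.
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as _tmp:
    _tmp.write("handshake 0.012\n")
    _tmp.write("handshake 0.018\n")
print(parse_latency_stats(_tmp.name))  # expected: ~15.0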
| 15,207
|
def get_tweets(input, out_dir, ext):
"""
    Run twint for each (name, period) pair in ``input``. The result for each period is stored
    in a CSV file (c.Output) under ``out_dir``, and the function returns the dictionary of
    uncollected names/periods together with the total number of uncollected periods.
"""
counter = 0
uncollected = {}
total_uncollected = 0
l = len(list(input.keys()))
c = twint.Config()
c.Store_csv = True
for name in input:
c.Search = name
for p in input[name]:
start = p[0].strftime("%Y-%m-%d")
end = p[1].strftime("%Y-%m-%d")
c.Output = f"{out_dir}{name}_{start}_{end}{ext}"
c.Since = str(p[0])
c.Until = str(p[1])
try:
twint.run.Search(c)
counter += 1
if counter < (l - 1):
time.sleep(7)
except Exception as e:
print(e)
if name not in uncollected:
uncollected[name] = [p]
total_uncollected += 1
else:
uncollected[name].append(p)
total_uncollected += 1
try:
os.remove(c.Output)
except OSError as e:
print(f"Error: {c.Output} --> {e.strerror}")
continue
return uncollected, total_uncollected
| 15,208
|
def bootstrap_dev_server_acls():
"""Adds default pools.cfg."""
assert utils.is_local_dev_server()
global _LOCAL_FAKE_CONFIG
_LOCAL_FAKE_CONFIG = _PoolsCfg(
{
'default': PoolConfig(
name='default',
rev='pools_cfg_rev',
scheduling_users=frozenset([
auth.Identity(auth.IDENTITY_USER, 'smoke-test@example.com'),
auth.Identity(auth.IDENTITY_BOT, 'whitelisted-ip'),
]),
scheduling_groups=frozenset(),
trusted_delegatees={},
service_accounts=frozenset(),
service_accounts_groups=tuple(),
task_template_deployment=None,
bot_monitoring=None,
default_isolate=None,
default_cipd=None,
external_schedulers=None,
),
},
(None, None),
)
| 15,209
|
def test_plugin_telegram_general(mock_post, mock_get):
"""
NotifyTelegram() General Tests
"""
# Disable Throttling to speed testing
plugins.NotifyBase.request_rate_per_sec = 0
# Bot Token
bot_token = '123456789:abcdefg_hijklmnop'
invalid_bot_token = 'abcd:123'
# Chat ID
chat_ids = 'l2g, lead2gold'
# Prepare Mock
mock_get.return_value = requests.Request()
mock_post.return_value = requests.Request()
mock_post.return_value.status_code = requests.codes.ok
mock_get.return_value.status_code = requests.codes.ok
mock_get.return_value.content = '{}'
mock_post.return_value.content = '{}'
# Exception should be thrown about the fact no bot token was specified
with pytest.raises(TypeError):
plugins.NotifyTelegram(bot_token=None, targets=chat_ids)
# Invalid JSON while trying to detect bot owner
mock_get.return_value.content = '{'
mock_post.return_value.content = '}'
obj = plugins.NotifyTelegram(bot_token=bot_token, targets=None)
obj.notify(title='hello', body='world')
# Invalid JSON while trying to detect bot owner + 400 error
mock_get.return_value.status_code = requests.codes.internal_server_error
mock_post.return_value.status_code = requests.codes.internal_server_error
obj = plugins.NotifyTelegram(bot_token=bot_token, targets=None)
obj.notify(title='hello', body='world')
# Return status back to how they were
mock_post.return_value.status_code = requests.codes.ok
mock_get.return_value.status_code = requests.codes.ok
# Exception should be thrown about the fact an invalid bot token was
    # specified
with pytest.raises(TypeError):
plugins.NotifyTelegram(bot_token=invalid_bot_token, targets=chat_ids)
obj = plugins.NotifyTelegram(
bot_token=bot_token, targets=chat_ids, include_image=True)
assert isinstance(obj, plugins.NotifyTelegram) is True
assert len(obj.targets) == 2
# Test Image Sending Exceptions
mock_post.side_effect = IOError()
assert not obj.send_media(obj.targets[0], NotifyType.INFO)
# Test our other objects
mock_post.side_effect = requests.HTTPError
assert not obj.send_media(obj.targets[0], NotifyType.INFO)
# Restore their entries
mock_get.side_effect = None
mock_post.side_effect = None
mock_get.return_value.content = '{}'
mock_post.return_value.content = '{}'
# test url call
assert isinstance(obj.url(), six.string_types) is True
# test privacy version of url
assert isinstance(obj.url(privacy=True), six.string_types) is True
assert obj.url(privacy=True).startswith('tgram://1...p/') is True
# Test that we can load the string we generate back:
obj = plugins.NotifyTelegram(**plugins.NotifyTelegram.parse_url(obj.url()))
assert isinstance(obj, plugins.NotifyTelegram) is True
# Prepare Mock to fail
response = mock.Mock()
response.status_code = requests.codes.internal_server_error
    # an error response
response.content = dumps({
'description': 'test',
})
mock_get.return_value = response
mock_post.return_value = response
# No image asset
nimg_obj = plugins.NotifyTelegram(bot_token=bot_token, targets=chat_ids)
nimg_obj.asset = AppriseAsset(image_path_mask=False, image_url_mask=False)
# Test that our default settings over-ride base settings since they are
# not the same as the one specified in the base; this check merely
# ensures our plugin inheritance is working properly
assert obj.body_maxlen == plugins.NotifyTelegram.body_maxlen
# We don't override the title maxlen so we should be set to the same
# as our parent class in this case
assert obj.title_maxlen == plugins.NotifyBase.title_maxlen
# This tests erroneous messages involving multiple chat ids
assert obj.notify(
body='body', title='title', notify_type=NotifyType.INFO) is False
assert obj.notify(
body='body', title='title', notify_type=NotifyType.INFO) is False
assert nimg_obj.notify(
body='body', title='title', notify_type=NotifyType.INFO) is False
# This tests erroneous messages involving a single chat id
obj = plugins.NotifyTelegram(bot_token=bot_token, targets='l2g')
nimg_obj = plugins.NotifyTelegram(bot_token=bot_token, targets='l2g')
nimg_obj.asset = AppriseAsset(image_path_mask=False, image_url_mask=False)
assert obj.notify(
body='body', title='title', notify_type=NotifyType.INFO) is False
assert nimg_obj.notify(
body='body', title='title', notify_type=NotifyType.INFO) is False
# Bot Token Detection
# Just to make it clear to people reading this code and trying to learn
# what is going on. Apprise tries to detect the bot owner if you don't
# specify a user to message. The idea is to just default to messaging
# the bot owner himself (it makes it easier for people). So we're testing
    # the creation of a Telegram Notification without providing a chat ID.
# We're testing the error handling of this bot detection section of the
# code
mock_post.return_value.content = dumps({
"ok": True,
"result": [{
"update_id": 645421321,
"message": {
"message_id": 1,
"from": {
"id": 532389719,
"is_bot": False,
"first_name": "Chris",
"language_code": "en-US"
},
"chat": {
"id": 532389719,
"first_name": "Chris",
"type": "private"
},
"date": 1519694394,
"text": "/start",
"entities": [{
"offset": 0,
"length": 6,
"type": "bot_command",
}],
}},
],
})
mock_post.return_value.status_code = requests.codes.ok
obj = plugins.NotifyTelegram(bot_token=bot_token, targets='12345')
assert len(obj.targets) == 1
assert obj.targets[0] == '12345'
    # Test the escaping of characters since Telegram escapes stuff for us too,
    # which we need to consider
mock_post.reset_mock()
body = "<p>\'\"This can't\t\r\nfail us\"\'</p>"
assert obj.notify(
body=body, title='special characters',
notify_type=NotifyType.INFO) is True
assert mock_post.call_count == 1
payload = loads(mock_post.call_args_list[0][1]['data'])
# Our special characters are escaped properly
assert payload['text'] == \
'<b>special characters</b>\r\n<p>'\
'\'"This can\'t\t\r\nfail us"\'</p>'
# Test sending attachments
attach = AppriseAttachment(os.path.join(TEST_VAR_DIR, 'apprise-test.gif'))
assert obj.notify(
body='body', title='title', notify_type=NotifyType.INFO,
attach=attach) is True
# An invalid attachment will cause a failure
path = os.path.join(TEST_VAR_DIR, '/invalid/path/to/an/invalid/file.jpg')
attach = AppriseAttachment(path)
assert obj.notify(
body='body', title='title', notify_type=NotifyType.INFO,
attach=path) is False
obj = plugins.NotifyTelegram(bot_token=bot_token, targets=None)
    # No user detected; this happens after our first notification
assert len(obj.targets) == 0
assert obj.notify(title='hello', body='world') is True
assert len(obj.targets) == 1
assert obj.targets[0] == '532389719'
# Do the test again, but without the expected (parsed response)
mock_post.return_value.content = dumps({
"ok": True,
"result": [],
})
# No user will be detected now
obj = plugins.NotifyTelegram(bot_token=bot_token, targets=None)
    # No user detected; this happens after our first notification
assert len(obj.targets) == 0
assert obj.notify(title='hello', body='world') is False
assert len(obj.targets) == 0
# Detect the bot with a bad response
mock_post.return_value.content = dumps({})
obj.detect_bot_owner()
    # Test our bot detection with an internal server error
mock_post.return_value.status_code = requests.codes.internal_server_error
# internal server error prevents notification from being sent
obj = plugins.NotifyTelegram(bot_token=bot_token, targets=None)
assert len(obj.targets) == 0
assert obj.notify(title='hello', body='world') is False
assert len(obj.targets) == 0
    # Test our bot detection with an unmappable HTTP error
mock_post.return_value.status_code = 999
plugins.NotifyTelegram(bot_token=bot_token, targets=None)
assert len(obj.targets) == 0
assert obj.notify(title='hello', body='world') is False
assert len(obj.targets) == 0
# Do it again but this time provide a failure message
mock_post.return_value.content = dumps({'description': 'Failure Message'})
plugins.NotifyTelegram(bot_token=bot_token, targets=None)
assert len(obj.targets) == 0
assert obj.notify(title='hello', body='world') is False
assert len(obj.targets) == 0
# Do it again but this time provide a failure message and perform a
# notification without a bot detection by providing at least 1 chat id
obj = plugins.NotifyTelegram(bot_token=bot_token, targets=['@abcd'])
assert nimg_obj.notify(
body='body', title='title', notify_type=NotifyType.INFO) is False
# iterate over our exceptions and test them
mock_post.side_effect = requests.HTTPError
# No chat_ids specified
obj = plugins.NotifyTelegram(bot_token=bot_token, targets=None)
assert len(obj.targets) == 0
assert obj.notify(title='hello', body='world') is False
assert len(obj.targets) == 0
# Test Telegram Group
obj = Apprise.instantiate(
'tgram://123456789:ABCdefghijkl123456789opqyz/-123456789525')
assert isinstance(obj, plugins.NotifyTelegram)
assert len(obj.targets) == 1
assert '-123456789525' in obj.targets
| 15,210
|
def micro_jaccard(y_true, y_pred):
"""
Calculate the micro Jaccard-score, i.e. TP / (TP + FP + FN).
:param y_true: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. True labels or class assignments.
:param y_pred: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. Predicted labels or
class assignments.
:return: The micro Jaccard-score.
"""
return jaccard_score(y_true, y_pred, average='micro')
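# Illustrative call (not from the source). Pooled over both classes the counts
# are TP=3, FP=1, FN=1, so the micro Jaccard-score is 3 / (3 + 1 + 1) = 0.6.
import numpy as np

y_true = np.array([0, 1, 1, 0])
y_pred = np.array([0, 1, 0, 0])
print(micro_jaccard(y_true, y_pred))  # expected: 0.6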
| 15,211
|
def round_to_nreads(number_set, n_reads, digit_after_decimal=0):
"""
    Take a list of numbers and distribute ``n_reads`` across them in proportion to each
    number's share of the total, so that the rounded portions add up exactly to n_reads.
    Notice: the algorithm we are using here is 'Largest Remainder'.
    The down-side is that the results won't be perfectly accurate, but they are never accurate anyway :)
"""
unround_numbers = [
x / float(sum(number_set)) * n_reads * 10 ** digit_after_decimal
for x in number_set
]
decimal_part_with_index = sorted(
[(index, unround_numbers[index] % 1) for index in range(len(unround_numbers))],
key=lambda y: y[1],
reverse=True,
)
remainder = n_reads * 10 ** digit_after_decimal - sum(
[int(x) for x in unround_numbers]
)
index = 0
while remainder > 0:
unround_numbers[decimal_part_with_index[index][0]] += 1
remainder -= 1
index = (index + 1) % len(number_set)
return [int(x) / float(10 ** digit_after_decimal) for x in unround_numbers]
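# Quick sanity check (illustrative): plain truncation of [16.67, 33.33, 50.0]
# would only sum to 99, but the largest-remainder step tops up the entry with
# the biggest fractional part so the total is exactly n_reads.
portions = round_to_nreads([1, 2, 3], 100)
print(portions)       # expected: [17.0, 33.0, 50.0]
print(sum(portions))  # expected: 100.0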
| 15,212
|
def _get_raster_extent(src):
"""
extract projected extent from a raster dataset
(min_x, max_x, min_y, max_y)
Parameters
----------
src : gdal raster
Returns
-------
(min_x, max_x, min_y, max_y)
"""
ulx, xres, xskew, uly, yskew, yres = src.GetGeoTransform()
lrx = ulx + (src.RasterXSize * xres)
lry = uly + (src.RasterYSize * yres)
return ulx, lrx, lry, uly
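# Worked illustration with an invented stand-in for a GDAL dataset (north-up
# raster, so yres is negative); not taken from the source.
class _FakeRaster:
    RasterXSize = 20   # columns
    RasterYSize = 30   # rows
    def GetGeoTransform(self):
        # (ulx, xres, xskew, uly, yskew, yres)
        return (100.0, 10.0, 0.0, 500.0, 0.0, -10.0)

print(_get_raster_extent(_FakeRaster()))
# expected: (100.0, 300.0, 200.0, 500.0), i.e. (min_x, max_x, min_y, max_y)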
| 15,213
|
def joined_table_table_join_args(joined_table: SQLParser.JoinedTableContext) -> dict:
"""
Resolve a joinedTable ParseTree node into relevant keyword arguments for TableJoin.
These will be pushed down and applied to the child TableRef.
"""
assert isinstance(joined_table, SQLParser.JoinedTableContext)
on_clauses = None
if joined_table.expr() is not None:
on_clauses = sql_ast_clauses_from_expr(joined_table.expr())
using_columns = None
if joined_table.identifierListWithParentheses() is not None:
using_columns = sql_ast_identifiers_from_list(
joined_table.identifierListWithParentheses().identifierList()
)
return {
"on_clauses": on_clauses,
"using_columns": using_columns,
**join_type_table_join_args(joined_table),
}
| 15,214
|
def find_by_name(name):
"""
Find and return a format by name.
:param name: A string describing the name of the format.
"""
for format in FORMATS:
if name == format.name:
return format
raise UnknownFormat('No format found with name "%s"' % name)
| 15,215
|
def test_restart(throttle_obj, profiler):
"""
Tests the behavior of a single Throttle instance iterating during two
    periods of time separated by a short sleep. After this break,
the Throttle is restarted to check that the behavior is the same in
the two periods.
"""
ticks = -1
with profiler:
for i in throttle_obj.loop(max_ticks=profiler.iter_count):
ticks += 1
assert ticks == i
assert_profiler_results(profiler, throttle_obj)
assert i == profiler.iter_count - 1
time.sleep(1)
throttle_obj.restart()
ticks = -1
with profiler:
for i in throttle_obj.loop(max_ticks=profiler.iter_count):
ticks += 1
assert ticks == i
assert_profiler_results(profiler, throttle_obj)
assert i == profiler.iter_count - 1
| 15,216
|
def test_endpoints(host, port, use_ssl, endpoints):
"""
Test each endpoint with its associated method and compile lists of endpoints that
can and cannot be accessed without prior authentication
"""
conn = get_conn(host, port, use_ssl)
if not conn:
sys.exit("Failed to connect to host {}, port {}".format(host, port))
headers = {"Content-type": "application/json"}
results = []
for entry in endpoints:
method, endpoint = entry
try_endpoint = endpoint
if ":" in endpoint:
try_endpoint = re.sub(r":[a-zA-Z]+", "1", endpoint)
try_endpoints = []
if "(s)?" in try_endpoint:
try_endpoints.append(try_endpoint.replace("(s)?","s"))
try_endpoints.append(try_endpoint.replace("(s)?",""))
else:
try_endpoints = [try_endpoint]
for try_endpoint in try_endpoints:
status, reason, body = test_endpoint(conn, headers, method, try_endpoint)
results.append({
"status":status,
"reason":reason,
"body":body,
"method":method,
"endpoint":endpoint,
"actual_endpoint":try_endpoint
})
conn.close()
return results
| 15,217
|
def create_neighborhood_polygons(gdf):
""" an attempt to muild neighborhoods polygons from asset points"""
import numpy as np
gdf = gdf.reset_index()
neis = gdf['Neighborhood'].unique()
gdf['neighborhood_shape'] = gdf.geometry
# Must be a geodataframe:
for nei in neis:
gdf1 = gdf[gdf['Neighborhood'] == nei]
inds = gdf1.index
polygon = gdf1.geometry.unary_union.convex_hull
# gdf.loc[inds, 'neighborhood_shape'] = [polygon for x in range(len(inds))]
gdf.loc[inds, 'neighborhood_shape'] = polygon
return gdf
| 15,218
|
def custom_gradient(f=None):
"""Decorator to define a function with a custom gradient.
    This decorator allows fine grained control over the gradients of a sequence
    of operations. This may be useful for multiple reasons, including providing
a more efficient or numerically stable gradient for a sequence of operations.
For example, consider the following function that commonly occurs in the
computation of cross entropy and log likelihoods:
```python
def log1pexp(x):
return tf.math.log(1 + tf.exp(x))
```
Due to numerical instability, the gradient of this function evaluated at x=100
is NaN. For example:
```python
x = tf.constant(100.)
y = log1pexp(x)
dy = tf.gradients(y, x) # Will be NaN when evaluated.
```
The gradient expression can be analytically simplified to provide numerical
stability:
```python
@tf.custom_gradient
def log1pexp(x):
e = tf.exp(x)
def grad(dy):
return dy * (1 - 1 / (1 + e))
return tf.math.log(1 + e), grad
```
With this definition, the gradient at x=100 will be correctly evaluated as
1.0.
Nesting custom gradients can lead to unintuitive results. The default
behavior does not correspond to n-th order derivatives. For example
```python
@tf.custom_gradient
def op(x):
y = op1(x)
@tf.custom_gradient
def grad_fn(dy):
gdy = op2(x, y, dy)
def grad_grad_fn(ddy): # Not the 2nd order gradient of op w.r.t. x.
return op3(x, y, dy, ddy)
return gdy, grad_grad_fn
return y, grad_fn
```
The function `grad_grad_fn` will be calculating the first order gradient
of `grad_fn` with respect to `dy`, which is used to generate forward-mode
gradient graphs from backward-mode gradient graphs, but is not the same as
the second order gradient of `op` with respect to `x`.
Instead, wrap nested `@tf.custom_gradients` in another function:
```python
@tf.custom_gradient
def op_with_fused_backprop(x):
y, x_grad = fused_op(x)
def first_order_gradient(dy):
@tf.custom_gradient
def first_order_custom(unused_x):
def second_order_and_transpose(ddy):
return second_order_for_x(...), gradient_wrt_dy(...)
return x_grad, second_order_and_transpose
return dy * first_order_custom(x)
return y, first_order_gradient
```
Additional arguments to the inner `@tf.custom_gradient`-decorated function
control the expected return values of the innermost function.
See also `tf.RegisterGradient` which registers a gradient function for a
primitive TensorFlow operation. `tf.custom_gradient` on the other hand allows
for fine grained control over the gradient computation of a sequence of
operations.
Note that if the decorated function uses `Variable`s, the enclosing variable
scope must be using `ResourceVariable`s.
Args:
f: function `f(*x)` that returns a tuple `(y, grad_fn)` where:
- `x` is a sequence of (nested structures of) `Tensor` inputs to the
function.
- `y` is a (nested structure of) `Tensor` outputs of applying TensorFlow
operations in `f` to `x`.
- `grad_fn` is a function with the signature `g(*grad_ys)` which returns
a list of `Tensor`s the same size as (flattened) `x` - the derivatives
of `Tensor`s in `y` with respect to the `Tensor`s in `x`. `grad_ys` is
a sequence of `Tensor`s the same size as (flattened) `y` holding the
initial value gradients for each `Tensor` in `y`.
In a pure mathematical sense, a vector-argument vector-valued function
`f`'s derivatives should be its Jacobian matrix `J`. Here we are
expressing the Jacobian `J` as a function `grad_fn` which defines how
`J` will transform a vector `grad_ys` when left-multiplied with it
(`grad_ys * J`, the vector-Jacobian product, or VJP). This functional
representation of a matrix is convenient to use for chain-rule
calculation (in e.g. the back-propagation algorithm).
If `f` uses `Variable`s (that are not part of the
inputs), i.e. through `get_variable`, then `grad_fn` should have
signature `g(*grad_ys, variables=None)`, where `variables` is a list of
the `Variable`s, and return a 2-tuple `(grad_xs, grad_vars)`, where
`grad_xs` is the same as above, and `grad_vars` is a `list<Tensor>`
with the derivatives of `Tensor`s in `y` with respect to the variables
(that is, grad_vars has one Tensor per variable in variables).
Returns:
A function `h(x)` which returns the same value as `f(x)[0]` and whose
gradient (as calculated by `tf.gradients`) is determined by `f(x)[1]`.
"""
if f is None:
return lambda f: custom_gradient(f=f)
@Bind.decorator
def decorated(wrapped, args, kwargs):
"""Decorated function with custom gradient."""
# raise ValueError("PW: trap")
if context.executing_eagerly():
return _eager_mode_decorator(wrapped, args, kwargs)
else:
return _graph_mode_decorator(wrapped, args, kwargs)
return tf_decorator.make_decorator(f, decorated(f)) # pylint: disable=no-value-for-parameter
| 15,219
|
def Field(name,
ctype,
field_loader=FieldLoaderMethod.OPTIONAL,
comment=None,
gen_setters_and_getters=True):
"""Make a field to put in a node class.
Args:
name: field name
ctype: c++ type for this field
Should be a ScalarType like an int, string or enum type,
or the name of a node class type (e.g. ASTExpression).
Cannot be a pointer type, and should not include modifiers like
const.
field_loader: FieldLoaderMethod enum specifies which FieldLoader method
to use for this field.
comment: Comment text for this field. Text will be stripped and
de-indented.
gen_setters_and_getters: When False, suppress generation of default
template-based get and set methods. Non-standard alternatives
may be supplied via extra_defs.
Returns:
The newly created field.
Raises:
RuntimeError: If an error is detected in one or more arguments.
"""
if field_loader == FieldLoaderMethod.REST_AS_REPEATED:
is_vector = True
else:
is_vector = False
member_name = name + '_'
if isinstance(ctype, ScalarType):
member_type = ctype.ctype
cpp_default = ctype.cpp_default
is_node_ptr = False
enum_name = None
element_storage_type = None
else:
element_storage_type = 'const %s*' % ctype
if is_vector:
member_type = 'absl::Span<%s const>' % element_storage_type
cpp_default = ''
is_node_ptr = False
enum_name = None
else:
member_type = 'const %s*' % ctype
cpp_default = 'nullptr'
is_node_ptr = True
enum_name = NameToEnumName(ctype)
return {
'ctype': ctype,
'cpp_default': cpp_default,
'member_name': member_name, # member variable name
'name': name, # name without trailing underscore
'comment': CleanComment(comment, prefix=' // '),
'member_type': member_type,
'is_node_ptr': is_node_ptr,
'field_loader': field_loader.name,
'enum_name': enum_name,
'is_vector': is_vector,
'element_storage_type': element_storage_type,
'gen_setters_and_getters': gen_setters_and_getters,
}
| 15,220
|
def sql(dataframe: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame, List[str], str]:
"""Infer best fit data types using dataframe values. May be an object converted to a better type,
    or numeric values downcasted to a smaller data type.
Parameters
----------
dataframe (pandas.DataFrame) : contains unconverted and non-downcasted columns
Returns
-------
dataframe (pandas.DataFrame) : contains columns converted to best fit pandas data type
schema (pandas.DataFrame) : derived SQL schema
not_nullable (list[str]) : columns that should not be null
pk (str) : name of column that best fits as the primary key
"""
# numeric like: bit, tinyint, smallint, int, bigint, float
dataframe = convert_numeric(dataframe)
# datetime like: time, date, datetime2
dataframe = convert_date(dataframe)
# string like: varchar, nvarchar
dataframe = convert_string(dataframe)
# determine SQL properties
schema = sql_schema(dataframe)
not_nullable, pk = sql_unique(dataframe, schema)
return dataframe, schema, not_nullable, pk
| 15,221
|
def sync_cmdb(api):
"""
    Write EBS information into the database.
:return:
"""
ebs_list = api.main()
with DBContext('w') as session:
        # Clear the existing records from the database
        try:
            session.query(DB).delete()
            session.commit()
        except Exception:
            session.rollback()
        # Write the new records
        for rds in ebs_list:
            ins_log.read_log('info', 'EBS record: {}'.format(rds))
new_db = DB(Attachments=rds.get('Attachments'),
AvailabilityZone=rds.get('AvailabilityZone', ),
CreateTime=rds.get('CreateTime'),
Encrypted=rds.get('Encrypted'),
Size=rds.get('Size'),
SnapshotId=rds.get('SnapshotId'),
State=rds.get('State'),
VolumeId=rds.get('VolumeId'),
Iops=rds.get('Iops'),
VolumeType=rds.get('VolumeType'),
Snapshot_overtime=rds.get('Snapshot_overtime'),
update_time=rds.get('update_time'),
)
session.add(new_db)
session.commit()
    ins_log.read_log('info', 'Wrote {} EBS records to the database'.format(len(ebs_list)))
| 15,222
|
def calculateDeviation(img, lineLeft, lineRight):
    """Calculate the deviation of the vehicle from the center of the image."""
frameCenter = np.mean([lineLeft.bestx,lineRight.bestx] , dtype=np.int32)
imgCenter = img.shape[1]//2
dev = frameCenter - imgCenter
xm_per_pix = 3.7/450 # meters per pixel in x dimension
result = dev*xm_per_pix
# Moving average deviation (Not needed as applied to bestx)
#x = np.append(lineLeft.center_deviation, [dev])
#result = moving_average(x, movingAvg)[-1]
#lineLeft.center_deviation = np.append(lineLeft.center_deviation, result)
if dev > 0.01:
text = "Vehicle is {:.2f} m -->".format(abs(result))
elif dev < -0.01:
text = "Vehicle is {:.2f} m <--".format(abs(result))
else:
text = "Vehicle is spot on center!"
return result , text
| 15,223
|
def post_new_tracker_story(message, project_id, user):
"""Posts message contents as a story to the bound project."""
if ";" in message:
name, description = message.split(";", maxsplit=1)
else:
name, description = (message, "")
story_name = "{name} (from {user})".format(
name=name.strip(), user=user)
response = requests.post(
story_post_url.format(project_id=project_id),
headers=pivotal_headers,
json={"name": story_name,
"description": description.strip()})
story_url = response.json()["url"]
return name, story_url
| 15,224
|
def nz2epsmu(N, Z):#{{{
""" Accepts index of refraction and impedance, returns effective permittivity and permeability"""
return N/Z, N*Z
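# Context (standard relations, not stated in the source): N = sqrt(eps * mu)
# and Z = sqrt(mu / eps), which the expressions above invert. Round-trip check
# with arbitrary real values:
import numpy as np

eps0, mu0 = 2.25, 1.0
N, Z = np.sqrt(eps0 * mu0), np.sqrt(mu0 / eps0)
print(nz2epsmu(N, Z))  # expected: approximately (2.25, 1.0)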
| 15,225
|
def logdet_symm(m, check_symm=False):
"""
Return log(det(m)) asserting positive definiteness of m.
Parameters
----------
m : array-like
2d array that is positive-definite (and symmetric)
Returns
-------
logdet : float
The log-determinant of m.
"""
if check_symm:
if not np.all(m == m.T): # would be nice to short-circuit check
raise ValueError("m is not symmetric.")
c, _ = scipy.linalg.cho_factor(m, lower=True)
return 2 * np.sum(np.log(c.diagonal()))
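# Illustrative cross-check against numpy's slogdet on a random symmetric
# positive-definite matrix (not part of the source).
import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal((5, 5))
m = a @ a.T + 5 * np.eye(5)            # symmetric positive definite
_, expected = np.linalg.slogdet(m)
print(np.isclose(logdet_symm(m), expected))  # expected: True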
| 15,226
|
def merge(left, right):
""" Merge helper
Complexity: O(n)
"""
arr = []
left_cursor, right_cursor = 0, 0
while left_cursor < len(left) and right_cursor < len(right):
# Sort each one and place into the result
if left[left_cursor] <= right[right_cursor]:
arr.append(left[left_cursor])
left_cursor += 1
else:
arr.append(right[right_cursor])
right_cursor += 1
# Add the left overs if there's any left to the result
for i in range(left_cursor, len(left)):
arr.append(left[i])
for i in range(right_cursor, len(right)):
arr.append(right[i])
# Return result
return arr
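# For context, a minimal top-down merge sort built on the merge() helper above
# (illustrative only, not part of the source).
def merge_sort(arr):
    """Sort a list by recursively splitting and merging halves."""
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    return merge(merge_sort(arr[:mid]), merge_sort(arr[mid:]))

print(merge_sort([5, 2, 9, 1, 5, 6]))  # expected: [1, 2, 5, 5, 6, 9]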
| 15,227
|
def validate_form_data(FORM_Class):
"""
Validates the passed form/json data to a request and passes the
form to the called function.
If form data is not valid, return a 406 response.
"""
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
form = FORM_Class(csrf_enabled=False)
if not form.validate():
return json_error(code=406, data=form.errors)
kwargs['form'] = form
return f(*args, **kwargs)
return decorated_function
return decorator
| 15,228
|
def set_axis_tick_format(
ax, xtickformat=None, ytickformat=None, xrotation=0, yrotation=0
):
"""Sets the formats for the ticks of a single axis
:param ax: axis object
:param xtickformat: optional string for the format of the x ticks
:param ytickformat: optional string for the format of the y ticks
:param xrotation: rotation angle of the x ticks. Defaults to 0
:param yrotation: rotation angle of the y ticks. Defaults to 0
:returns: ax
"""
if xtickformat is not None:
ax.xaxis.set_major_formatter(FormatStrFormatter(xtickformat))
if ytickformat is not None:
ax.yaxis.set_major_formatter(FormatStrFormatter(ytickformat))
plt.setp(ax.get_xticklabels(), ha="right", rotation=xrotation)
plt.setp(ax.get_yticklabels(), ha="right", rotation=yrotation)
return ax
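# Short usage sketch (assumes matplotlib is available and FormatStrFormatter is
# imported in the same module as the function above).
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [3.14159, 2.71828, 1.61803])
set_axis_tick_format(ax, xtickformat='%.1f', ytickformat='%.2f', yrotation=45)
fig.savefig('ticks.png')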
| 15,229
|
def two_body(y, t):
"""
Solves the two body problem
:param y: state vector
y = [rx,ry,rz,vx,vy,vz]
:param t: time
:return: dy
"""
rx, ry, rz = y[0], y[1], y[2]
vx, vy, vz = y[3], y[4], y[5]
r = np.array([rx, ry, rz])
v = np.array([vx, vy, vz])
r_mag = np.linalg.norm(r)
c = -mu / (r_mag ** 3)
dy = np.zeros(6)
dy[0] = y[3]
dy[1] = y[4]
dy[2] = y[5]
dy[3] = c*y[0]
dy[4] = c*y[1]
dy[5] = c*y[2]
return dy
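# Hypothetical usage: integrate the two-body right-hand side with SciPy. `mu`
# is a module-level constant in the source; the Earth value below is an
# assumption made for this sketch only.
import numpy as np
from scipy.integrate import odeint

mu = 398600.4418            # km^3/s^2 (Earth, assumed)
r0 = 6778.0                 # km, roughly a 400 km altitude orbit
v0 = np.sqrt(mu / r0)       # circular orbital speed
y0 = [r0, 0.0, 0.0, 0.0, v0, 0.0]
t = np.linspace(0.0, 3600.0, 361)
trajectory = odeint(two_body, y0, t)
print(trajectory.shape)     # expected: (361, 6)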
| 15,230
|
def args_parse_params(params):
""" create simple arg parser with default values (input, output)
:param dict dict_params:
:return obj: object argparse<...>
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-i',
'--path_in',
type=str,
required=True,
default=params['path_in'],
help='path to the folder with input image dataset'
)
parser.add_argument(
'-o',
'--path_out',
type=str,
required=True,
default=params['path_out'],
help='path to the output with experiment results'
)
parser.add_argument(
'-t', '--threshold', type=float, required=False, default=0.001, help='threshold for image information'
)
parser.add_argument(
'-m', '--thr_method', type=str, required=False, default='', choices=METHODS, help='used methods'
)
parser.add_argument(
'--nb_workers', type=int, required=False, default=NB_WORKERS, help='number of parallel processes'
)
args = vars(parser.parse_args())
for k in (k for k in args if k.startswith('path_')):
p = update_path(os.path.dirname(args[k]))
assert os.path.exists(p), 'missing (%s): %s' % (k, p)
args[k] = os.path.join(p, os.path.basename(args[k]))
return args
| 15,231
|
def DD_carrier_sync(z,M,BnTs,zeta=0.707,type=0):
"""
z_prime,a_hat,e_phi = DD_carrier_sync(z,M,BnTs,zeta=0.707,type=0)
Decision directed carrier phase tracking
z = complex baseband PSK signal at one sample per symbol
    M = The PSK modulation order, i.e., 2, 4, or 8.
BnTs = time bandwidth product of loop bandwidth and the symbol period,
thus the loop bandwidth as a fraction of the symbol rate.
zeta = loop damping factor
type = Phase error detector type: 0 <> ML, 1 <> heuristic
z_prime = phase rotation output (like soft symbol values)
a_hat = the hard decision symbol values landing at the constellation
values
e_phi = the phase error e(k) into the loop filter
Ns = Nominal number of samples per symbol (Ts/T) in the carrier
phase tracking loop, almost always 1
Kp = The phase detector gain in the carrier phase tracking loop;
This value depends upon the algorithm type. For the ML scheme
    described at the end of notes Chapter 9, A = 1, K = 1/sqrt(2),
so Kp = sqrt(2).
Mark Wickert July 2014
Motivated by code found in M. Rice, Digital Communications A Discrete-Time
Approach, Prentice Hall, New Jersey, 2009. (ISBN 978-0-13-030497-1).
"""
Ns = 1
Kp = np.sqrt(2.) # for type 0
z_prime = np.zeros_like(z)
a_hat = np.zeros_like(z)
e_phi = np.zeros(len(z))
theta_h = np.zeros(len(z))
theta_hat = 0
# Tracking loop constants
K0 = 1;
K1 = 4*zeta/(zeta + 1/(4*zeta))*BnTs/Ns/Kp/K0;
K2 = 4/(zeta + 1/(4*zeta))**2*(BnTs/Ns)**2/Kp/K0;
# Initial condition
vi = 0
for nn in range(len(z)):
# Multiply by the phase estimate exp(-j*theta_hat[n])
z_prime[nn] = z[nn]*np.exp(-1j*theta_hat)
if M == 2:
a_hat[nn] = np.sign(z_prime[nn].real) + 1j*0
elif M == 4:
a_hat[nn] = np.sign(z_prime[nn].real) + 1j*np.sign(z_prime[nn].imag)
elif M == 8:
a_hat[nn] = np.angle(z_prime[nn])/(2*np.pi/8.)
# round to the nearest integer and fold to nonnegative
# integers; detection into M-levels with thresholds at mid points.
a_hat[nn] = np.mod(round(a_hat[nn]),8)
a_hat[nn] = np.exp(1j*2*np.pi*a_hat[nn]/8)
else:
raise ValueError('M must be 2, 4, or 8')
if type == 0:
# Maximum likelihood (ML)
e_phi[nn] = z_prime[nn].imag * a_hat[nn].real - \
z_prime[nn].real * a_hat[nn].imag
elif type == 1:
# Heuristic
e_phi[nn] = np.angle(z_prime[nn]) - np.angle(a_hat[nn])
else:
raise ValueError('Type must be 0 or 1')
vp = K1*e_phi[nn] # proportional component of loop filter
vi = vi + K2*e_phi[nn] # integrator component of loop filter
v = vp + vi # loop filter output
theta_hat = np.mod(theta_hat + v,2*np.pi)
theta_h[nn] = theta_hat # phase track output array
#theta_hat = 0 # for open-loop testing
# Normalize outputs to have QPSK points at (+/-)1 + j(+/-)1
#if M == 4:
# z_prime = z_prime*np.sqrt(2)
return z_prime, a_hat, e_phi, theta_h
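# Illustrative end-to-end sketch (not from the source): QPSK symbols with a
# fixed 0.3 rad carrier phase offset; theta_h should settle near 0.3 once the
# decision-directed loop has locked.
import numpy as np

rng = np.random.default_rng(1)
bits = rng.integers(0, 4, 2000)
symbols = np.exp(1j * (np.pi / 4 + np.pi / 2 * bits))   # unit-energy QPSK
rx = symbols * np.exp(1j * 0.3)                         # static phase offset
z_prime, a_hat, e_phi, theta_h = DD_carrier_sync(rx, 4, BnTs=0.02)
print(theta_h[-1])  # expected: close to 0.3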
| 15,232
|
def rubrik_gps_vm_snapshot_create(client: PolarisClient, args: Dict[str, Any]) -> CommandResults:
"""
Trigger an on-demand vm snapshot.
:type client: ``PolarisClient``
:param client: Rubrik Polaris client to use
:type args: ``dict``
:param args: arguments obtained from demisto.args()
:return: CommandResult object
"""
object_id = validate_required_arg("object_id", args.get("object_id", ""))
sla_domain_id = args.get("sla_domain_id", "")
raw_response = client.create_vm_snapshot(object_id, sla_domain_id)
outputs = raw_response.get("data", {}).get("vsphereOnDemandSnapshot", {})
outputs = remove_empty_elements(outputs)
if not outputs or not outputs.get("id"):
return CommandResults(readable_output=MESSAGES['NO_RESPONSE'])
hr_content = {
"On-Demand Snapshot Request ID": outputs.get("id"),
"Status": outputs.get("status")
}
hr = tableToMarkdown("GPS VM Snapshot", hr_content, headers=["On-Demand Snapshot Request ID", "Status"],
removeNull=True)
return CommandResults(outputs_prefix=OUTPUT_PREFIX["GPS_SNAPSHOT_CREATE"],
outputs_key_field="id",
outputs=outputs,
raw_response=raw_response,
readable_output=hr)
| 15,233
|
def getFullCorpus(emotion, speakerID = None):
"""
    Return the MFCC data for the given emotion from every speaker except ``speakerID`` (or from all speakers if None), stacked with vstack.
:param emotion:
:param speakerID:
:return:
"""
if emotion not in emotions or (speakerID is not None and speakerID not in speakers):
raise Exception("No Such speaker: {} or emotion: {}".format(speakerID, emotion))
#error check
if speakerID is None:
#return whole corpus
speakerID = "Derpington"
# should not be in file
MFCCFiles = os.listdir(ExtractedMFCCs)
try:
MFCCFiles.remove('.DS_Store')
except ValueError:
pass
# It didn't need to be removed
MFCCVals = []
for file in MFCCFiles:
if emotion in file and speakerID not in file:
# print "Currently reading", file
with open(os.path.join(ExtractedMFCCs, file)) as f:
speakerEmotion = cPickle.load(f)
speakerEmotion = np.vstack(speakerEmotion)
MFCCVals.append(speakerEmotion)
return np.vstack(MFCCVals)
| 15,234
|
def list_public(config):
"""List public datasets."""
data = [
["ID", "STATUS", "NAME", "SIZE"],
["-" * 80, "-" * 80, "-" * 80, "-" * 80],
]
datasets = config.trainml.run(config.trainml.client.datasets.list_public())
for dset in datasets:
data.append(
[
dset.id,
dset.status,
dset.name,
pretty_size(dset.size),
]
)
for row in data:
click.echo(
"{: >38.36} {: >13.11} {: >10.8} {: >40.38} {: >14.12}"
"".format(*row),
file=config.stdout,
)
| 15,235
|
def CT_freezing_first_derivatives(SA, p, saturation_fraction):
"""
Calculates the first derivatives of the Conservative Temperature at
which seawater freezes, with respect to Absolute Salinity SA and
pressure P (in Pa).
Parameters
----------
SA : array-like
Absolute Salinity, g/kg
p : array-like
Sea pressure (absolute pressure minus 10.1325 dbar), dbar
saturation_fraction : array-like
Saturation fraction of dissolved air in seawater. (0..1)
Returns
-------
CTfreezing_SA : array-like, K kg/g
the derivative of the Conservative Temperature at
freezing (ITS-90) with respect to Absolute Salinity at
        fixed pressure [ K/(g/kg) ]
CTfreezing_P : array-like, K/Pa
the derivative of the Conservative Temperature at
freezing (ITS-90) with respect to pressure (in Pa) at
fixed Absolute Salinity
"""
return _gsw_ufuncs.ct_freezing_first_derivatives(SA, p, saturation_fraction)
| 15,236
|
def run(gParameters):
"""
Runs the model using the specified set of parameters
Args:
gParameters: a python dictionary containing the parameters (e.g. epoch)
to run the model with.
"""
#
if 'dense' in gParameters:
dval = gParameters['dense']
if type(dval) != list:
res = list(dval)
# try:
# is_str = isinstance(dval, basestring)
# except NameError:
# is_str = isinstance(dval, str)
# if is_str:
# res = str2lst(dval)
gParameters['dense'] = res
print(gParameters['dense'])
if 'conv' in gParameters:
flat = gParameters['conv']
gParameters['conv'] = [flat[i:i + 3] for i in range(0, len(flat), 3)]
print('Conv input', gParameters['conv'])
# print('Params:', gParameters)
# Construct extension to save model
ext = benchmark.extension_from_parameters(gParameters, '.keras')
logfile = gParameters['logfile'] if gParameters['logfile'] else gParameters['output_dir'] + ext + '.log'
fh = logging.FileHandler(logfile)
fh.setFormatter(logging.Formatter("[%(asctime)s %(process)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S"))
fh.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter(''))
sh.setLevel(logging.DEBUG if gParameters['verbose'] else logging.INFO)
benchmark.logger.setLevel(logging.DEBUG)
benchmark.logger.addHandler(fh)
benchmark.logger.addHandler(sh)
benchmark.logger.info('Params: {}'.format(gParameters))
# Get default parameters for initialization and optimizer functions
kerasDefaults = candle.keras_default_config()
seed = gParameters['rng_seed']
# Build dataset loader object
loader = benchmark.DataLoader(seed=seed, dtype=gParameters['data_type'],
val_split=gParameters['val_split'],
test_cell_split=gParameters['test_cell_split'],
cell_features=gParameters['cell_features'],
drug_features=gParameters['drug_features'],
feature_subsample=gParameters['feature_subsample'],
scaling=gParameters['scaling'],
scramble=gParameters['scramble'],
min_logconc=gParameters['min_logconc'],
max_logconc=gParameters['max_logconc'],
subsample=gParameters['subsample'],
category_cutoffs=gParameters['category_cutoffs'])
# Initialize weights and learning rule
initializer_weights = candle.build_initializer(gParameters['initialization'], kerasDefaults, seed)
initializer_bias = candle.build_initializer('constant', kerasDefaults, 0.)
# Define model architecture
gen_shape = None
out_dim = 1
model = Sequential()
if 'dense' in gParameters: # Build dense layers
for layer in gParameters['dense']:
if layer:
model.add(Dense(layer, input_dim=loader.input_dim,
kernel_initializer=initializer_weights,
bias_initializer=initializer_bias))
if gParameters['batch_normalization']:
model.add(BatchNormalization())
model.add(Activation(gParameters['activation']))
if gParameters['dropout']:
model.add(Dropout(gParameters['dropout']))
else: # Build convolutional layers
gen_shape = 'add_1d'
layer_list = list(range(0, len(gParameters['conv'])))
lc_flag = False
if 'locally_connected' in gParameters:
lc_flag = True
for _, i in enumerate(layer_list):
if i == 0:
add_conv_layer(model, gParameters['conv'][i], input_dim=loader.input_dim, locally_connected=lc_flag)
else:
add_conv_layer(model, gParameters['conv'][i], locally_connected=lc_flag)
if gParameters['batch_normalization']:
model.add(BatchNormalization())
model.add(Activation(gParameters['activation']))
if gParameters['pool']:
model.add(MaxPooling1D(pool_size=gParameters['pool']))
model.add(Flatten())
model.add(Dense(out_dim))
# Define optimizer
optimizer = candle.build_optimizer(gParameters['optimizer'],
gParameters['learning_rate'],
kerasDefaults)
# Compile and display model
model.compile(loss=gParameters['loss'], optimizer=optimizer)
model.summary()
benchmark.logger.debug('Model: {}'.format(model.to_json()))
train_gen = benchmark.DataGenerator(loader, batch_size=gParameters['batch_size'], shape=gen_shape, name='train_gen', cell_noise_sigma=gParameters['cell_noise_sigma']).flow()
val_gen = benchmark.DataGenerator(loader, partition='val', batch_size=gParameters['batch_size'], shape=gen_shape, name='val_gen').flow()
val_gen2 = benchmark.DataGenerator(loader, partition='val', batch_size=gParameters['batch_size'], shape=gen_shape, name='val_gen2').flow()
test_gen = benchmark.DataGenerator(loader, partition='test', batch_size=gParameters['batch_size'], shape=gen_shape, name='test_gen').flow()
train_steps = int(loader.n_train / gParameters['batch_size'])
val_steps = int(loader.n_val / gParameters['batch_size'])
test_steps = int(loader.n_test / gParameters['batch_size'])
if 'train_steps' in gParameters:
train_steps = gParameters['train_steps']
if 'val_steps' in gParameters:
val_steps = gParameters['val_steps']
if 'test_steps' in gParameters:
test_steps = gParameters['test_steps']
checkpointer = ModelCheckpoint(filepath=gParameters['output_dir'] + '.model' + ext + '.h5', save_best_only=True)
progbar = MyProgbarLogger(train_steps * gParameters['batch_size'])
loss_history = MyLossHistory(progbar=progbar, val_gen=val_gen2, test_gen=test_gen,
val_steps=val_steps, test_steps=test_steps,
metric=gParameters['loss'], category_cutoffs=gParameters['category_cutoffs'],
ext=ext, pre=gParameters['output_dir'])
# Seed random generator for training
np.random.seed(seed)
candleRemoteMonitor = candle.CandleRemoteMonitor(params=gParameters)
# history = model.fit(train_gen, steps_per_epoch=train_steps, # this should be the deprecation fix
history = model.fit(train_gen, steps_per_epoch=train_steps,
epochs=gParameters['epochs'],
validation_data=val_gen,
validation_steps=val_steps,
verbose=0,
callbacks=[checkpointer, loss_history, progbar, candleRemoteMonitor],
)
# callbacks=[checkpointer, loss_history, candleRemoteMonitor], # this just caused the job to hang on Biowulf
benchmark.logger.removeHandler(fh)
benchmark.logger.removeHandler(sh)
return history
| 15,237
|
def get_territory_center(territory: inkex.Group) -> inkex.Vector2d:
"""
Get the name of the territory from its child title element. If no title, returns
Warzone.UNNAMED_TERRITORY_NAME
:param territory:
:return:
territory name
"""
center_rectangle: inkex.Rectangle = territory.find(f"./{Svg.GROUP}/{Svg.RECTANGLE}", NSS)
return inkex.Vector2d(
center_rectangle.left + center_rectangle.rx / 2,
center_rectangle.top + center_rectangle.ry / 2
)
| 15,238
|
def batch_norm_relu(inputs, is_training, data_format):
"""Performs a batch normalization followed by a ReLU."""
# We set fused=True for a significant performance boost. See
# https://www.tensorflow.org/performance/performance_guide#common_fused_ops
inputs = tf.layers.batch_normalization(
inputs=inputs, axis=1 if data_format == 'channels_first' else -1,
momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,
scale=True, training=is_training, fused=True)
# unary = {"1":lambda x:x ,"2":lambda x: -x, "3":lambda x:tf.abs, "4":lambda x : tf.pow(x,2),"5":lambda x : tf.pow(x,3),
# "6":lambda x:tf.sqrt,"7":lambda x: tf.Variable(tf.truncated_normal([1], stddev=0.08))*x,
# "8":lambda x : x + tf.Variable(tf.truncated_normal([1], stddev=0.08)),"9":lambda x: tf.log(tf.abs(x)+10e-8),
# "10":lambda x:tf.exp,"11":lambda x:tf.sin,"12":lambda x:tf.sinh,"13":lambda x:tf.cosh,"14":lambda x:tf.tanh,"15":lambda x:tf.asinh,"16":lambda x:tf.atan,"17":lambda x: tf.sin(x)/x,
# "18":lambda x : tf.maximum(x,0),"19":lambda x : tf.minimum(x,0),"20":tf.sigmoid,"21":lambda x:tf.log(1+tf.exp(x)),
# "22":lambda x:tf.exp(-tf.pow(x,2)),"23":lambda x:tf.erf,"24":lambda x: tf.Variable(tf.truncated_normal([1], stddev=0.08))}
# binary = {"1":lambda x,y: tf.add(x,y),"2":lambda x,y:tf.multiply(x,y),"3":lambda x,y:tf.add(x,-y),"4":lambda x,y:x/(y+10e-8),
# "5":lambda x,y:tf.maximum(x,y),"6":lambda x,y: tf.sigmoid(x)*y,"7":lambda x,y:tf.exp(-tf.Variable(tf.truncated_normal([1], stddev=0.08))*tf.pow(x-y,2)),
# "8":lambda x,y:tf.exp(-tf.Variable(tf.truncated_normal([1], stddev=0.08))*tf.abs(x-y)),
# "9":lambda x,y: tf.Variable(tf.truncated_normal([1], stddev=0.08))*x + (1-tf.Variable(tf.truncated_normal([1], stddev=0.08)))*y}
unary = {"1":lambda x:x ,"2":lambda x: -x, "3": lambda x: tf.maximum(x,0), "4":lambda x : tf.pow(x,2),"5":lambda x : tf.tanh(tf.cast(x,tf.float32))}
binary = {"1":lambda x,y: tf.add(x,y),"2":lambda x,y:tf.multiply(x,y),"3":lambda x,y:tf.add(x,-y),"4":lambda x,y:tf.maximum(x,y),"5":lambda x,y: tf.sigmoid(x)*y}
input_fun = {"1":lambda x:tf.cast(x,tf.float32) , "2":lambda x:tf.zeros(tf.shape(x)), "3": lambda x:2*tf.ones(tf.shape(x)),"4": lambda x : tf.ones(tf.shape(x)), "5": lambda x: -tf.ones(tf.shape(x))}
with open("tmp","r") as f:
activation = f.readline()
activation = activation.split(" ")
#inputs = binary[activation[8]](unary[activation[5]](binary[activation[4]](unary[activation[2]](input_fun[activation[0]](inputs)),unary[activation[3]](input_fun[activation[1]](inputs)))),unary[activation[7]](input_fun[activation[6]](inputs)))
inputs = binary[activation[5]](unary[activation[3]](binary[activation[2]](unary[activation[0]](inputs),unary[activation[1]]((inputs)))),unary[activation[4]]((inputs)))
#inputs = binary[activation[4]]((unary[activation[2]](input_fun[activation[0]](inputs))),(unary[activation[3]](input_fun[activation[1]](inputs)))) #b[4](u1[2](x1[0]),u2[3](x2[1])) #core unit
#inputs = binary[activation[2]]((unary[activation[0]](inputs)),(unary[activation[1]](inputs))) #b[2](u1[0](x),u2[1](x)) #core unit
#inputs = tf.nn.relu(inputs)
functions = open("./functions.txt", "a")
functions.write(str(inputs) + "\n")
return inputs
| 15,239
|
def file_opener(
fname: str,
cache: Optional[CacheFSSpecTarget] = None,
copy_to_local: bool = False,
bypass_open: bool = False,
secrets: Optional[dict] = None,
**open_kwargs,
) -> Iterator[Union[OpenFileType, str]]:
"""
Context manager for opening files.
:param fname: The filename / url to open. Fsspec will inspect the protocol
(e.g. http, ftp) and determine the appropriate filesystem type to use.
:param cache: A target where the file may have been cached. If none, the file
will be opened directly.
:param copy_to_local: If True, always copy the file to a local temporary file
before opening. In this case, function yields a path name rather than an open file.
:param bypass_open: If True, skip trying to open the file at all and just
return the filename back directly. (A fancy way of doing nothing!)
"""
if bypass_open:
if cache or copy_to_local:
raise ValueError("Can't bypass open with cache or copy_to_local.")
logger.debug(f"Bypassing open for '{fname}'")
yield fname
return
if cache is not None:
logger.info(f"Opening '{fname}' from cache")
opener = cache.open(fname, mode="rb")
else:
logger.info(f"Opening '{fname}' directly.")
opener = _get_opener(fname, secrets, **open_kwargs)
if copy_to_local:
_, suffix = os.path.splitext(fname)
ntf = tempfile.NamedTemporaryFile(suffix=suffix)
tmp_name = ntf.name
logger.info(f"Copying '{fname}' to local file '{tmp_name}'")
target_opener = open(tmp_name, mode="wb")
_copy_btw_filesystems(opener, target_opener)
yield tmp_name
ntf.close() # cleans up the temporary file
else:
logger.debug(f"file_opener entering first context for {opener}")
with opener as fp:
logger.debug(f"file_opener entering second context for {fp}")
yield fp
logger.debug("file_opener yielded")
logger.debug("opener done")
| 15,240
|
def function_handler(event, context):
"""
Shows how to access local resources in an AWS Lambda function.
Gets volume information for the local file system and publishes it.
Writes a file named 'test' and then reads the file and publishes its contents.
"""
iot_client.publish(topic='LRA/test', payload='Sent from AWS IoT Greengrass Core.')
try:
volume_info = os.stat(volume_path)
iot_client.publish(topic='LRA/test', payload=str(volume_info))
with open(volume_path + '/test', 'a') as output:
output.write('Successfully write to a file.\n')
with open(volume_path + '/test', 'r') as file:
data = file.read()
iot_client.publish(topic='LRA/test', payload=data)
except Exception as err:
logging.exception("Got error : %s", err)
| 15,241
|
def create_success_status(found_issue):
"""Create a success status for when an issue number was found in the title."""
issue_number = found_issue.group("issue")
url = f"https://bugs.python.org/issue{issue_number}"
return util.create_status(STATUS_CONTEXT, util.StatusState.SUCCESS,
description=f"Issue number {issue_number} found",
target_url=url)
| 15,242
|
def check(args: 'Namespace'):
"""Check jina config, settings, imports, network etc"""
from jina.checker import ImportChecker
ImportChecker(args)
| 15,243
|
def stop_compose():
"""
    Stop the docker-compose services.
:return:
"""
home_path = os.path.abspath(os.path.join(os.getcwd(), "..")) + '/loonflow_shutongflow'
cmd_str = 'cd {}&&docker-compose stop'.format(home_path)
flag, result = run_cmd(cmd_str)
    if flag:
        print('-' * 30)
        print('Compose services stopped successfully')
    else:
        print('-' * 30)
        print('Failed to stop: {}'.format(result))
| 15,244
|
def score_to_rating_string(score):
"""
Convert score to rating
"""
if score < 1:
rating = "Terrible"
elif score < 2:
rating = "Bad"
elif score < 3:
rating = "OK"
elif score < 4:
rating = "Good"
else:
rating = "Excellent"
return rating
| 15,245
|
def deconv1d_df(t, observed_counts, one_sided_prf, background_count_rate, column_name='deconv', same_time=True,
deconv_func=emcee_deconvolve, **kwargs):
"""
deconvolve and then return results in a pandas.DataFrame
"""
#print("working on chunk with length {}".format(len(observed_counts)))
with util.timewith("deconvolve chunk with {} elements".format(len(observed_counts))) as timer:
results = deconv_func(t, observed_counts, one_sided_prf,
background_count_rate, **kwargs)
sampler, A, t_ret = results[:3]
mean_est = A.mean(axis=0)
percentiles = np.percentile(A, [10, 16, 50, 84, 90], axis=0)
d = {column_name + '_mean': mean_est,
column_name + '_p10': percentiles[0],
column_name + '_p16': percentiles[1],
column_name + '_p50': percentiles[2],
column_name + '_p84': percentiles[3],
column_name + '_p90': percentiles[4]}
df = pd.DataFrame(data=d, index=t_ret)
if same_time:
        df = df.loc[t]  # .ix was removed from pandas; .loc selects by label here
return df
| 15,246
|
def create_parser() -> ArgumentParser:
"""Create a parser instance able to parse args of script.
return:
Returns the parser instance
"""
parser = ArgumentParser()
version = get_distribution('hexlet-code').version
parser.add_argument('first_file', help='path to JSON or YAML file')
parser.add_argument('second_file', help='path to JSON or YAML file')
parser.add_argument(
'-f',
'--format',
choices=FORMATS.keys(),
default=DEFAULT_FORMAT,
help='set format of output',
)
parser.add_argument(
'-v',
'--version',
action='version',
version='{prog} {version}'.format(prog=parser.prog, version=version),
help='print version info',
)
return parser
| 15,247
|
def _create_fake_users(usernames):
"""Create fake Users with the listed usernames"""
for user in usernames:
User.objects.create(
username=user[:30],
password='fakepassword',
email=user,
is_active=True,
)
| 15,248
|
def show_mirror(args):
"""
    Show port mirror session
"""
session(args.session)
| 15,249
|
def parse_alignment_file(file_path):
"""Parse the buildAlignment.tsv output file from CreateHdpTrainingData
:param file_path: path to alignment file
:return: panda DataFrame with column names "kmer", "strand", "level_mean", "prob"
"""
assert os.path.exists(file_path), "File path does not exist: {}".format(file_path)
data = pd.read_csv(file_path, delimiter="\t",
usecols=(4, 12, 13, 15),
names=["strand", "prob", "level_mean", "kmer"],
dtype={"kmer": np.str, "strand": np.str, "level_mean": np.float64, "prob": np.float64},
header=None)[["kmer", "strand", "level_mean", "prob"]]
return data
| 15,250
|
def remap_key(ctx, origin_key, destination_key, *, mode=None, level=None):
"""Remap *origin_key* to *destination_key*.
Returns an instance of :class:`RemappedKey`.
For valid keys refer to `List of Keys
<https://www.autohotkey.com/docs/KeyList.htm>`_.
The optional keyword-only *mode* and *level* arguments are passed to the
:func:`send` function that will send the *destination_key* when the user
presses the *origin_key*.
For more information refer to `Remapping Keys
<https://www.autohotkey.com/docs/misc/Remap.htm>`_.
"""
mouse = destination_key.lower() in {"lbutton", "rbutton", "mbutton", "xbutton1", "xbutton2"}
if mouse:
def origin_hotkey():
if not is_key_pressed(destination_key):
send("{Blind}{%s DownR}" % destination_key, mode=mode, level=level, mouse_delay=-1)
def origin_up_hotkey():
send("{Blind}{%s Up}" % destination_key, mode=mode, level=level, mouse_delay=-1)
else:
ctrl_to_alt = (
origin_key.lower() in {"ctrl", "lctrl", "rctrl"} and
destination_key.lower() in {"alt", "lalt", "ralt"}
)
if ctrl_to_alt:
def origin_hotkey():
send(
"{Blind}{%s Up}{%s DownR}" % (origin_key, destination_key),
mode=mode,
level=level,
key_delay=-1,
)
else:
def origin_hotkey():
send("{Blind}{%s DownR}" % destination_key, mode=mode, level=level, key_delay=-1)
def origin_up_hotkey():
send("{Blind}{%s Up}" % destination_key, mode=mode, level=level, key_delay=-1)
origin_hotkey = ctx.hotkey(f"*{origin_key}", origin_hotkey)
origin_up_hotkey = ctx.hotkey(f"*{origin_key} Up", origin_up_hotkey)
return RemappedKey(origin_hotkey, origin_up_hotkey)
| 15,251
|
def get_anime_list(wf):
"""Get an Animelist instance.
:param Workflow3 wf: the Workflow3 object
:returns: Animelist object
:rtype: Animelist
"""
try:
animelist = Animelist(
wf.settings['UID'], wf.get_password('bangumi-auth-token')
)
except Exception as e:
raise LogoutException("Please login first")
else:
return animelist
| 15,252
|
def __saveMapping():
"""Save in memory dictinary cache to disc"""
for dbName, d in lemma_mappings.items():
        if d is not None:
os.makedirs(CACHE, exist_ok=True)
pickle.dump(d, open(os.path.join(CACHE, dbName), 'wb'))
| 15,253
|
def use_linear_strategy():
"""
Uses a linear function to generate target velocities.
"""
max_velocity = kmph2mps(rospy.get_param("~velocity", 40))
stop_line_buffer = 2.0
def linear_strategy(distances_to_waypoints, current_velocity):
# Target velocity function should be a line
# going from (0, current_velocity)
# to (last_waypoint - buffer, 0)
# (after x-intercept, y = 0)
d = max(distances_to_waypoints[-1] - stop_line_buffer, 0) # stopping distance
v = current_velocity # amount by which to slow down within given distance
# Protect against divide by 0 case
if d < 0.01:
return [0 for x in distances_to_waypoints]
f = lambda x: min(
max(
# [0, d]: downward line:
# y = (-v / d)x + v = (1 - (x/d)) * v
(1. - (x / d)) * v,
# (-inf, 0) && (d, +inf): flat
# y = 0
0
),
# Never faster than maximum
max_velocity
)
return map(f, distances_to_waypoints)
return linear_strategy
| 15,254
|
def recomputation_checkpoint(module: nn.Module):
"""Annotates the output of a module to be checkpointed instead of
recomputed"""
def recompute_outputs(module, inputs, outputs):
return tuple(poptorch.recomputationCheckpoint(y) for y in outputs)
return module.register_forward_hook(recompute_outputs)
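
# Illustrative usage sketch: `model.encoder` is a hypothetical submodule and
# poptorch (Graphcore IPU support) must be installed, so the call is shown
# without executing. The returned torch hook handle can be removed later.
#
# handle = recomputation_checkpoint(model.encoder)
# ...train...
# handle.remove()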
| 15,255
|
def get_random_quote() -> str:
"""Retrieve a random quote from the Forismatic API.
Returns:
str: The retrieved quote
"""
quote = ""
while quote == "":
response = requests.get(
"http://api.forismatic.com/api/1.0/?method=getQuote&lang=en&format=json"
)
if response.status_code != 200:
print(f"Error while getting image: {response}")
continue
try:
response_json = json.loads(response.text.replace("\\'", "'"))
except json.decoder.JSONDecodeError as error:
print(f"Error while decoding JSON: {response.text}\n{error}")
continue
quote_text: str = response_json["quoteText"]
if contains_no_blacklisted_regexes(quote_text):
quote = quote_text
return quote
| 15,256
|
def main():
"""
Implements the first step of the experiment pipeline. Creates a series of \
X_train, X_test pairs (based on different features parameters \
combinations) for each one of the folds.
Returns:
None
"""
# Construct argument parser and parse argument
ap = argparse.ArgumentParser()
ap.add_argument('-poi_fpath', required=True)
args = vars(ap.parse_args())
# Create folder to store experiment
date_time = datetime.datetime.now().strftime("%d-%m-%Y_%H-%M-%S")
exp_path = os.path.join(config.experiments_path, 'exp_' + date_time)
os.makedirs(exp_path)
# Create folder to store feature extraction results
results_path = os.path.join(exp_path, 'features_extraction_results')
os.makedirs(results_path)
wrtrs.write_feature_space(os.path.join(results_path, 'feature_space.csv'))
# Load pois
poi_gdf = feat_ut.load_poi_gdf(args['poi_fpath'])
# Shuffle
poi_gdf = poi_gdf.sample(frac=1).reset_index(drop=True)
# Remove barely populated labels
# poi_gdf = poi_gdf.groupby(config.label_col).filter(lambda x: len(x) >= config.n_folds).reset_index(drop=True)
poi_gdf, encoder = feat_ut.encode_labels(poi_gdf)
poi_gdf.to_csv(os.path.join(results_path, 'train_poi_gdf.csv'), index=False)
pickle.dump(encoder, open(os.path.join(results_path, 'encoder.pkl'), 'wb'))
poi_ids = list(poi_gdf[config.id_col])
poi_labels = list(poi_gdf['label'])
feat_ut.get_required_external_files(poi_gdf, results_path)
fold = 1
skf = StratifiedKFold(n_splits=config.n_folds)
t1 = time.time()
for train_idxs, test_idxs in skf.split(poi_ids, poi_labels):
print('Fold:', fold)
fold_path = os.path.join(results_path, 'fold_' + str(fold))
os.makedirs(fold_path)
feat_ut.create_single_features(poi_gdf, train_idxs, fold_path)
feat_ut.create_concatenated_features(poi_gdf, train_idxs, test_idxs, fold_path)
fold += 1
print(f'Feature extraction done in {time.time() - t1:.3f} sec.')
return
| 15,257
|
def has_datapoint(fake_services, metric_name=None, dimensions=None, value=None, metric_type=None, count=1):
"""
Returns True if there is a datapoint seen in the fake_services backend that
has the given attributes. If a property is not specified it will not be
considered. Dimensions, if provided, will be tested as a subset of total
set of dimensions on the datapoint and not the complete set.
"""
found = 0
# Try and cull the number of datapoints that have to be searched since we
# have to check each datapoint.
if dimensions is not None:
datapoints = []
for k, v in dimensions.items():
datapoints += fake_services.datapoints_by_dim[f"{k}:{v}"]
elif metric_name is not None:
datapoints = fake_services.datapoints_by_metric[metric_name]
else:
datapoints = fake_services.datapoints
    for dp in datapoints:
if metric_name and dp.metric != metric_name:
continue
if dimensions and not has_all_dims(dp, dimensions):
continue
if metric_type and dp.metricType != metric_type:
continue
if value is not None:
if dp.value.HasField("intValue"):
if dp.value.intValue != value:
continue
elif dp.value.HasField("doubleValue"):
if dp.value.doubleValue != value:
continue
else:
# Non-numeric values aren't supported, so they always fail to
# match
continue
found += 1
if found >= count:
return True
return False
| 15,258
|
def optimizeMemoryUsage(foregroundTasks, backgroundTasks, K):
"""
:type foregroundTasks: List[int]
:type backgroundTasks: List[int]
:type K: int
:rtype: List[List[int]]
"""
res = []
curr_max = 0
if len(foregroundTasks) == 0:
for j in range(len(backgroundTasks)):
add_result(backgroundTasks[j], K, curr_max, res, j, 1)
if len(backgroundTasks) == 0:
for i in range(len(foregroundTasks)):
add_result(foregroundTasks[i], K, curr_max, res, i, 0)
for i in range(len(foregroundTasks)):
for j in range(len(backgroundTasks)):
curr_usage = foregroundTasks[i] + backgroundTasks[j]
if curr_usage > K:
add_result(foregroundTasks[i], K, curr_max, res, i, 0)
add_result(backgroundTasks[j], K, curr_max, res, j, 1)
if curr_usage > curr_max and curr_usage <= K:
res = [[i, j]]
curr_max = curr_usage
elif curr_usage == curr_max:
res.append([i, j])
return res if len(res) > 0 else [[-1, -1]]
| 15,259
|
def test_empty_package_method_name():
"""fully_qualified_service works when there's no package."""
mn = MethodName("", "SearchService", "Search")
assert mn.fully_qualified_service == "SearchService"
| 15,260
|
def fetch_commits():
"""Yields batches of commits from the DB."""
count_cursor = DB.cursor()
count_cursor.execute(COUNT_SQL)
count = count_cursor.fetchone()['MAX(order_id)']
read_cursor = DB.cursor()
for start in range(0, count + BATCH_SIZE, BATCH_SIZE):
        print('Starting at', start)
read_cursor.execute(BATCH_SQL, (start, start + BATCH_SIZE))
yield read_cursor.fetchall()
| 15,261
|
def cv_indices(num_folds,num_samples):
"""
Given number of samples and num_folds automatically create a subjectwise cross validator
Assumption: per subject we have 340 samples of data
>>> cv_set = cv_indices(2,680)
>>> cv_set
>>> (([0:340],[340:680]),([340:680,0:340]))
Algo:
    1. Compute all the permutations.
    2. Iterate through all the permutations and first calculate the train indices by taking the first five, then
    six, seven and so on of each combination of arrangement. The rest will be the test indices.
3. Finally zip it to form the indices.
:param num_folds: folds for cv
:param num_samples: number of samples of input of data (should be a multiple of 340)
:return: return a zipped list of tuples
of ranges of training and testing data
"""
n_epoch = 340
n_subjects = num_samples/n_epoch
rem=num_samples%n_epoch
assert (rem == 0),"samples passed in not a multiple of 340"
    assert (num_folds<=n_subjects),"number of subjects is less than number of folds"
n_set = np.round(n_subjects/num_folds)
n_set = int(n_set)
n_subjects=int(n_subjects)
flag=[]
for i in range(num_folds):
if i<num_folds-1:
flag=flag+[list(range(i*n_set,(i+1)*n_set))]
else:
flag=flag+[list(range(i*n_set,n_subjects))]
train_indices=[]
test_indices=[]
#permutations=perm1(range(num_folds))
permutations=list(itertools.combinations(list(range(num_folds)),num_folds-1))
permutations=list(map(list,permutations))
sets = len(permutations)
permutations_test=list(itertools.combinations(list(range(num_folds)),1))
permutations_test=list(map(list,permutations_test))
permutations_test.reverse()
for i in range(num_folds-1):
for j in range(sets):
for k in range(len(flag[permutations[j][i]])):
if i<1:
train_indices=train_indices+[list(range(flag[permutations[j][i]][k]*n_epoch,(flag[permutations[j][i]][k]+1)*n_epoch))]
test_indices=test_indices+[list(range(flag[permutations_test[j][i]][k]*n_epoch,(flag[permutations_test[j][i]][k]+1)*n_epoch))]
else:
train_indices=train_indices+[list(range(flag[permutations[j][i]][k]*n_epoch,(flag[permutations[j][i]][k]+1)*n_epoch))]
custom_cv=list(zip(train_indices,test_indices))
return custom_cv
| 15,262
|
def load_schema(schema_name: str) -> dict:
"""Load a JSON schema.
This function searches within apollon's own schema repository.
    If a schema is found it is additionally validated against Draft 7.
Args:
schema_name: Name of schema. Must be file name without extension.
Returns:
Schema instance.
Raises:
IOError
"""
schema_path = 'schema/' + schema_name + SCHEMA_EXT
if pkg_resources.resource_exists('apollon', schema_path):
schema = pkg_resources.resource_string('apollon', schema_path)
schema = json.loads(schema)
jsonschema.Draft7Validator.check_schema(schema)
return schema
    raise IOError(f'Schema ``{schema_path}`` not found.')
| 15,263
|
def dataset_ls(prj, dataset):
""" List the contents of a dataset.
"""
validate_name(dataset, "dataset")
client = boto3.client("s3")
prefix = prj.s3.path("/datasets/{}/".format(dataset))
len_prefix = len(prefix)
response = client.list_objects_v2(Bucket=prj.s3.bucket(), Prefix=prefix)
for item in response["Contents"]:
filename = item["Key"][len_prefix:]
filesize = item["Size"]
click_echo_json({"filename": filename, "size": filesize})
| 15,264
|
def shorten_sequence(base_sequence, seq_to_fitness, program):
"""Tries to shorten this sequence by omitting flag by flag and checking if
the smaller sequence has at least the same fitness value as the
original one.
"""
key_base_sequence = str(base_sequence)
sequences = set()
current_sequences = set()
# Create all sequences that contain one flag less than the base sequence.
for i in range(len(base_sequence)):
seq = list(base_sequence)
seq.pop(i)
current_sequences.add(tuple(seq))
# Shorten the sequences until there are no more changes.
while current_sequences:
# Calculate the fitness of the sequences.
pool = multiprocessing.Pool()
for seq in current_sequences:
sequence = list(seq)
pool.apply_async(calculate_fitness, args=(
sequence, seq_to_fitness, str(sequence), program))
pool.close()
pool.join()
# Check if the smaller sequences are better or equal as the original
# sequence. If this is true, mark the smaller sequence for shortening.
to_shorten = set()
for seq in current_sequences:
if seq_to_fitness[str(list(seq))] \
<= seq_to_fitness[key_base_sequence]:
to_shorten.add(seq)
sequences = sequences.union(current_sequences)
# Create smaller sequences for next round
current_sequences = set()
while to_shorten:
seq = to_shorten.pop()
for i in range(len(seq)):
new_seq = list(seq)
new_seq.pop(i)
current_sequences.add(tuple(new_seq))
| 15,265
|
def create_flatmap_from_dm_command(dm_command_path, output_path, file_name=None, dm_num=1):
"""
Converts a dm_command_2d.fits to the format used for the flatmap, and outputs a new flatmap fits file.
:param dm_command_path: Full path to the dm_command_2d.fits file.
:param output_path: Path to output the new flatmap fits file. Default is hardware/boston/
    :param file_name: Filename for new flatmap fits file. Default is flatmap_<timestamp>.fits
    :param dm_num: DM number (1 or 2) embedded in the output file name. Default is 1.
    :return: None
"""
dm_command_data = fits.getdata(dm_command_path)
dm_string = "dm1" if dm_num == 1 else "dm2"
if file_name is None:
# Create a string representation of the current timestamp.
time_stamp = time.time()
date_time_string = datetime.datetime.fromtimestamp(time_stamp).strftime("%Y-%m-%dT%H-%M-%S")
file_name = "flat_map_volts_" + str(dm_string) + "_" + date_time_string + ".fits"
if output_path is None:
raise ValueError
# Convert the dm command units to volts.
max_volts = CONFIG_INI.getint("boston_kilo952", "max_volts")
dm_command_data *= max_volts
catkit.util.write_fits(dm_command_data, output_path)
| 15,266
|
def append_lightcone_id(block_num, step_num, tbl):
"""Assigns a unique ID to each row in the astropy table, with block
and step embedded into the id. The id will read as
XXXYYYZZZZZZZZZZ (decimal), were xxx is the block number, yyy is
the step number and zzzzzzzzzz is unique id for this block/step
combination. The exact size of xxx and yyy are specified by
lc_block_num_offset and lc_step_num_offset. The left over space
in np.int64 is for zzz.
"""
keys = tbl.keys()
table_size = tbl[keys[0]].quantity.shape[0]
lightcone_id = np.arange(table_size,dtype=np.int64)
max_id= np.max(lightcone_id)
validate_lightcone_ids(block_num, step_num, max_id)
lightcone_id_b = add_lightcone_id_block_num(block_num, lightcone_id)
lightcone_id_bs = add_lightcone_id_step_num(step_num, lightcone_id_b)
tbl['lightcone_id'] = lightcone_id_bs
| 15,267
|
def barGraph(data, ylabel='', title='', xticklabels=None):
"""
Displays all of the data points in data as a series of bars.
Optionally a user can provide a label for the y-axis, a title, and
tick labels for the bars.
"""
N = len(data) # Number of data points
width = 0.50 # the width of the bars
offset = width/2.
ind = np.arange(N)+offset # the x locations for the groups
matplotlib.rcParams.update({'font.size': 18})
fig, ax = plot.subplots(figsize=(20,10))
rects1 = ax.bar(ind, data, width, color='r')
# add some text for labels, title and axes ticks
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.set_xticks(ind + offset)
if xticklabels is not None:
ax.set_xticklabels(xticklabels)
# puts graph labels above bars
def autolabel(rects):
# attach some text labels
for index, rect in enumerate(rects):
height = rect.get_height()
# special case for data that is not found
if data[index] == -1.0:
ax.text(rect.get_x() + offset, 1.01*height,
'Not given\n by algorithm',
ha='center', va='bottom')
# labels all of the data
else:
ax.text(rect.get_x() + offset, 1.01*height,
'%d' % int(height),
ha='center', va='bottom')
autolabel(rects1)
plot.ylim(0,max(data)*1.5) # enforces limits on axis range
plot.show()
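
# Illustrative usage sketch: opens a matplotlib window, so shown without
# executing. A value of -1.0 is labelled "Not given by algorithm" above its bar.
#
# barGraph([12.0, 7.5, -1.0], ylabel="runtime (s)", title="Solver comparison",
#          xticklabels=["greedy", "exact", "heuristic"])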
| 15,268
|
def delete_group_from_ldap(group, node=None, exitcode=0):
"""Delete group entry from LDAP.
"""
if node is None:
node = current().context.ldap_node
with By(f"deleting group {group['dn']}"):
r = node.command(
f"ldapdelete -x -H ldap://localhost -D \"cn=admin,dc=company,dc=com\" -w admin \"{group['dn']}\"")
if exitcode is not None:
assert r.exitcode == exitcode, error()
| 15,269
|
def question_12(data):
"""
    Question 12: linearly transform the data, plot it, and show the newly created covariance matrix.
:param data: data
:return: data after linear transformation
"""
s_mat = np.array([[0.1, 0, 0], [0, 0.5, 0], [0, 0, 2]])
new_data = np.matmul(s_mat, data)
plot_3d(new_data, "Q12: Linear Transformed the prev data")
print("------ Covariance Matrix (QUESTION 12) ------")
print_cov_mat(new_data)
return new_data
| 15,270
|
def update_storage(user_choice):
"""It updates the Coffee Machine resources after a beverage is ordered."""
resources["water"] = resources["water"] - MENU[user_choice]["ingredients"]["water"]
resources["milk"] -= MENU[user_choice]["ingredients"]["milk"]
resources["coffee"] -= MENU[user_choice]["ingredients"]["coffee"]
return resources
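
# Illustrative usage sketch: the function relies on module-level MENU and
# resources dicts shaped roughly like the ones below (the exact contents here
# are assumptions, not taken from the original module).
#
# MENU = {"latte": {"ingredients": {"water": 200, "milk": 150, "coffee": 24}}}
# resources = {"water": 300, "milk": 200, "coffee": 100}
# update_storage("latte")   # -> {"water": 100, "milk": 50, "coffee": 76}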
| 15,271
|
def process_callback(callback):
""" Process a callback """
global total
# Read variables
total += 1
# args = callback['args']
body_data = callback['body']['data']
# body_size = callback['body']['size']
date = callback['date']
headers = callback['headers']
id_ = callback['id']
method = callback['method']
# referrer = callback['referrer']
# remote_addr = callback['remote_addr']
print('**** Payload ID %s ****' % (f'{id_:,}'))
print('Payload number: %s' % (f'{total:,}'))
print('Method %s on %s' % (method, date))
print('%s headers' % (len(headers)))
print('Body -> %s' % (body_data))
print()
| 15,272
|
def second_test_function(dataset_and_processing_pks):
"""
Pass a result of JSON processing to a function that saves result on a model.
:param dataset_and_processing_pks: tuple of two (Dataset PK, Processing PK)
:return: tuple of two (Dataset PK; JSON (Python's list of dicts))
"""
# unpack tuple; needed for Celery chain compatibility
dataset_pk, processing_pk = dataset_and_processing_pks
# re-fetch Dataset and Processing
dataset = Dataset.objects.get(pk=dataset_pk)
processing = Processing.objects.get(pk=processing_pk)
result = []
# calculate result; handle exceptions
try:
result = [{'result': pair['a'] + pair['b']} for pair in dataset.data]
except Exception as err:
# exception string = exception type + exception args
exception_message = "{type}: {message}". \
format(type=type(err).__name__, message=err)
# save exception to db
dataset.exception = exception_message
processing.exceptions = True
dataset.save()
processing.save()
return dataset_pk, result
| 15,273
|
def prepend_python_path(path: str) -> Iterator[None]:
"""Simple context manager to help import module within the repo"""
try:
# Entering the with statement
sys.path.insert(0, path)
yield
finally:
# Exiting the with statement
sys.path.remove(path)
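
# Illustrative usage sketch: assumes the generator above is wrapped with
# contextlib.contextmanager (the decorator is not shown in this excerpt), so it
# can be used in a with statement to import a module living inside the repo.
#
# with prepend_python_path("/path/to/repo"):
#     import some_repo_module   # hypothetical module name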
| 15,274
|
def matplotlib_axes_from_gridspec_array(arr, figsize=None):
"""Returned axes layed out as indicated in the array
Example:
--------
    >>> # Returns 3 axes laid out as indicated by the array
>>> fig, axes = matplotlib_axes_from_gridspec_array([
>>> [1, 1, 3],
>>> [2, 2, 3],
>>> [2, 2, 3],
>>> ])
"""
fig = plt.figure(figsize=figsize)
gridspecs = matplotlib_gridspecs_from_array(arr)
axes = []
for gridspec in gridspecs:
axes.append(fig.add_subplot(gridspec))
return fig, axes
| 15,275
|
def test_return_stmt():
"""Test that the return statement functions correctly."""
decl = emptyfn("return 1;")
compile(decl)
| 15,276
|
def booleanGenerator():
"""
Creates a generator which returns only True and False.
"""
gen = valueFromSetGenerator([True, False])
while True:
        yield next(gen)
| 15,277
|
def _fetch_gene_annotation(gene, gtf):
"""
Fetch gene annotation (feature boundaries) and the corresponding sequences.
Parameters:
-----------
gene
gene name that should be found in the "gene_name" column of the GTF DataFrame.
type: str
gtf
GTF annotation DataFrame loaded by the gtfparse library.
pandas.DataFrame
Returns:
--------
gene_df
subset of the input gtf DataFrame corresponding to rows that match the input gene
type: pandas.DataFrame
gene_id
        name of the gene. ideally matches the passed "gene" argument.
type: str
"""
gene_df = gtf.loc[gtf["gene_name"].str.contains(gene)]
gene_id = _check_gene_name(gene, gene_df["gene_name"])
return gene_df, gene_id
| 15,278
|
def deploy_static():
"""
Deploy static (application) versioned media
"""
if not env.STATIC_URL or 'http://' in env.STATIC_URL: return
from django.core.servers.basehttp import AdminMediaHandler
remote_dir = '/'.join([deployment_root(),'env',env.project_fullname,'static'])
m_prefix = len(env.MEDIA_URL)
#if app media is not handled by django-staticfiles we can install admin media by default
if 'django.contrib.admin' in env.INSTALLED_APPS and not 'django.contrib.staticfiles' in env.INSTALLED_APPS:
if env.MEDIA_URL and env.MEDIA_URL == env.ADMIN_MEDIA_PREFIX[:m_prefix]:
print "ERROR: Your ADMIN_MEDIA_PREFIX (Application media) must not be on the same path as your MEDIA_URL (User media)"
sys.exit(1)
admin = AdminMediaHandler('DummyApp')
local_dir = admin.base_dir
remote_dir = ''.join([remote_dir,env.ADMIN_MEDIA_PREFIX])
else:
if env.MEDIA_URL and env.MEDIA_URL == env.STATIC_URL[:m_prefix]:
print "ERROR: Your STATIC_URL (Application media) must not be on the same path as your MEDIA_URL (User media)"
sys.exit(1)
elif env.STATIC_ROOT:
local_dir = env.STATIC_ROOT
static_url = env.STATIC_URL[1:]
if static_url:
remote_dir = '/'.join([remote_dir,static_url])
else: return
if env.verbosity:
print env.host,"DEPLOYING static",remote_dir
return deploy_files(local_dir,remote_dir)
| 15,279
|
def T_SFLU_DRFPMI_show_full(dprint, tpath_join, fpath_join):
"""
Show a graph reduction using networkx+tikz
"""
sflu = SFLU.SFLU(
DRFPMI_edges,
graph=True,
)
# match=False allows a reduced input/output set
sflu.graph_nodes_pos(DRFPMI_locs, match=True)
#sflu.graph_nodes_pos(DRFPMI_locs, match=True)
print('inputs: ', sflu.inputs)
print('outputs: ', sflu.outputs)
print('nodes: ', sflu.nodes)
#print('nodes')
#print(sflu.graph_nodes_repr())
G1 = sflu.G.copy()
sflu.graph_reduce_auto_pos(lX=-10, rX=+10, Y=0, dY=-2)
sflu.reduce(*reduce_list)
sflu.graph_reduce_auto_pos_io(lX=-30, rX=+30, Y=-5, dY=-5)
G2 = sflu.G.copy()
nx2tikz.dump_pdf(
[G1, G2],
fname = tpath_join('testG.pdf'),
texname = tpath_join('testG.tex'),
# preamble = preamble,
scale='10pt',
)
| 15,280
|
def randomPolicy(Ts):
""" Each action is equally likely. """
numA = len(Ts)
dim = len(Ts[0])
return ones((dim, numA)) / float(numA), mean(array(Ts), axis=0)
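
# Illustrative usage sketch, assuming numpy's ones, mean and array are imported
# at module level (the implementation above relies on them): two 3x3 transition
# matrices yield a uniform policy over 2 actions plus the averaged transitions.
import numpy as np
T0 = np.eye(3)
T1 = np.ones((3, 3)) / 3.0
policy, T_avg = randomPolicy([T0, T1])
assert policy.shape == (3, 2) and np.allclose(policy, 0.5)
assert np.allclose(T_avg, (T0 + T1) / 2.0)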
| 15,281
|
def calc_TiTiO2(P, T):
"""
Titanium-Titanium Oxide (Ti-TiO2)
================================
Define TiTiO2 buffer value at 1 bar
Parameters
----------
    P: float
        Pressure in GPa (accepted for interface consistency; not used in this 1 bar expression)
T: float or numpy array
Temperature in degrees K
Returns
-------
float or numpy array
log_fO2
References
----------
Barin (1993) Thermo database
"""
if isinstance(T, float) or isinstance(T, int):
log_fO2 = log10(exp((-945822 + 219.6816*T -
5.25733*T*log(T)) /
(8.314*T)))
if isinstance(T, np.ndarray):
log_fO2_list = []
for temp in T:
log_fO2_list.append(log10(exp((-945822 + 219.6816*temp -
5.25733*temp*log(temp)) /
(8.314*temp))))
log_fO2 = np.array(log_fO2_list)
return log_fO2
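
# Illustrative usage sketch, assuming log10, exp and log are imported at module
# level (e.g. from math or numpy) as the implementation above relies on. The
# buffer accepts a scalar temperature or a numpy array of temperatures in K.
import numpy as np
single = calc_TiTiO2(P=1.0, T=1473.15)
array_result = calc_TiTiO2(P=1.0, T=np.array([1273.15, 1473.15, 1673.15]))
assert isinstance(single, float) and array_result.shape == (3,)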
| 15,282
|
def nlevenshtein_scoredistance(first_data, memento_data):
"""Calculates the Normalized Levenshtein Distance given the content in
`first_data` and `memento_data`.
"""
score = compute_scores_on_distance_measure(
first_data, memento_data, distance.nlevenshtein)
return score
| 15,283
|
def parameter_from_numpy(model, name, array):
""" Create parameter with its value initialized according to a numpy tensor
Parameters
----------
    model : mxnet.gluon.Block
        the model whose parameter dictionary the new parameter is registered in
    name : str
parameter name
array : np.ndarray
initiation value
Returns
-------
mxnet.gluon.parameter
a parameter object
"""
p = model.params.get(name, shape=array.shape, init=mx.init.Constant(array))
return p
| 15,284
|
def test_get_filetype_with_unsupported_filetype_raises_exception():
"""Test all the unsupported file types, where file type is inferred via file extension."""
unsupported_filetype = "sample.inexistent"
with pytest.raises(ValueError) as exc_info:
get_filetype(unsupported_filetype)
expected_msg = "Unsupported filetype 'inexistent' from file 'sample.inexistent'."
assert exc_info.value.args[0] == expected_msg
| 15,285
|
def execute_workflow_command():
"""Command that executes a workflow."""
return (
Command().command(_execute_workflow).require_migration().require_clean().with_database(write=True).with_commit()
)
| 15,286
|
def fast_parse(python_class, parse_function, data_to_parse, number_of_workers=4, **kwargs):
"""
    Util function to split any data set across the given number of workers,
    then return results using any given parsing function.
    Note that when a dict is supplied, only its values are submitted to the
    parse function; the keys are only used to identify failed items in the logs.
:param python_class: Instantiated class object which contains the parse function
:param parse_function: Function to parse data, can either be list or dict
:param data_to_parse: Data to be parsed
:param number_of_workers: Number of workers to split the parsing to
:param kwargs: Optional, extra params which parse function may need
:return:
"""
try:
function_object = getattr(python_class, parse_function)
except AttributeError as e:
logger.error(f"{python_class} doesn't have {parse_function}")
return
else:
results = []
data_len = len(data_to_parse)
with tqdm(total=data_len) as pbar:
with futures.ThreadPoolExecutor(max_workers=number_of_workers) as executor:
if type(data_to_parse) == list:
future_to_result = {executor.submit(function_object, data, **kwargs): data for data in data_to_parse}
                elif type(data_to_parse) == dict:
                    # Map each future to its key (the original set literal was rebuilt
                    # every iteration and could not be indexed when reporting errors).
                    future_to_result = {executor.submit(function_object, data, **kwargs): index
                                        for index, data in data_to_parse.items()}
else:
logger.error("Unsupported data type")
return
for future in futures.as_completed(future_to_result):
try:
data = future.result()
except Exception as exc:
logger.error(f"{future_to_result[future]} generated an exception: {exc}")
else:
results.append(data)
pbar.update(1)
return results
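
# Illustrative usage sketch: `Parser` and `parse_item` are hypothetical
# stand-ins; any instantiated class exposing the named parse function works.
# Results are returned in completion order, hence the sort before comparing.
class Parser:
    def parse_item(self, item, multiplier=1):
        return item * multiplier

parsed = fast_parse(Parser(), "parse_item", [1, 2, 3], number_of_workers=2, multiplier=10)
assert sorted(parsed) == [10, 20, 30]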
| 15,287
|
def create_plate(dim=DIMENSION, initial_position=-1):
"""
    Returns a newly created plate, which is a matrix of dictionaries (a matrix of cells), and places the first crystal cell in it at initial_position
    The keys in a dictionary represent the properties of the cell
    :Keys of the dictionary:
        - "is_in_crystal" : (bool) True if the cell belongs to the crystal, False otherwise
        - "b" : (float) the proportion of quasi-liquid water
        - "c" : (float) the proportion of ice
        - "d" : (float) the proportion of steam
    :param dim: (tuple) [DEFAULT: DIMENSION] couple of positive integers (row, column), the dimension of the plate
    :param initial_position: (tuple) [DEFAULT: the middle of the plate] the coordinates of the first crystal
    :return: (list of list of dictionaries) the plate
    Examples:
>>> DEFAULT_CELL["d"] = 1 # Used in order to not have any problems with doctest
>>> plate = create_plate(dim=(3,3))
>>> for line in plate:
... print("[", end="")
... for d in line:
... print("{", end="")
... for k in sorted(d.keys()):
... print(k, ":", d[k], ", ", end="")
... print("}, ", end="")
... print("]")
[{b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, {b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, {b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, ]
[{b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, {b : 0 , c : 1 , d : 0 , i : 0 , is_in_crystal : True , }, {b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, ]
[{b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, {b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, {b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, ]
>>> DEFAULT_CELL["d"] = RHO # Reverts to original state
"""
plate = [[copy(DEFAULT_CELL) for j in range(dim[1])] for i in range(dim[0])]
if initial_position == -1:
initial_position = (dim[0]//2, dim[1]//2)
plate[initial_position[0]][initial_position[1]] = {"is_in_crystal":True, "b":0, "c":1, "d":0, "i":0}
return plate
| 15,288
|
def CanonicalizeName(raw_name: Text):
"""Strips away all non-alphanumeric characters and converts to lowercase."""
unicode_norm = unicodedata.normalize('NFKC', raw_name).lower()
# We only match Ll (lowercase letters) since alphanumeric filtering is done
# after converting to lowercase. Nl and Nd are numeric-like letters and
# numeric digits.
return ''.join(
x for x in unicode_norm if unicodedata.category(x) in ('Ll', 'Nl', 'Nd'))
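
# Illustrative usage sketch: punctuation and whitespace are dropped, letters are
# lowercased, and accented lowercase letters (category Ll) are kept.
assert CanonicalizeName("Anne-Marie O'Brien 2") == "annemarieobrien2"
assert CanonicalizeName("José García") == "joségarcía"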
| 15,289
|
def data_v1( request ):
""" Handles all /v1/ urls. """
( service_response, rq_now, rq_url ) = ( {}, datetime.datetime.now(), common.make_request_url(request) ) # initialization
dump_param_handler = views_helper.DumpParamHandler( rq_now, rq_url )
if request.GET.get( 'data', '' ) == 'dump':
return_values = dump_param_handler.grab_all_v1()
service_response = {'data': 'dump'}
elif 'callnumber' in request.GET:
call_param_handler = views_helper.CallParamHandler( request.GET['callnumber'].split(','), rq_now, rq_url )
return_values = call_param_handler.grab_callnumbers()
service_response['query'] = { 'request_type': 'call number', 'request_numbers': call_param_handler.callnumbers }
service_response['result'] = { 'items': return_values, 'service_documentation': settings_app.README_URL }
output = json.dumps( service_response, sort_keys=True, indent=2 )
return HttpResponse( output, content_type='application/json')
| 15,290
|
def ssh(server, cmd, checked=True):
""" Runs command on a remote machine over ssh."""
if checked:
return subprocess.check_call('ssh %s "%s"' % (server, cmd),
shell=True, stdout=sys.stdout)
else:
return subprocess.call('ssh %s "%s"' % (server, cmd),
shell=True, stdout=sys.stdout)
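
# Illustrative usage sketch: "user@host" is a placeholder and a reachable SSH
# host is required, so the calls are shown without executing. With checked=True
# a non-zero exit status raises CalledProcessError; otherwise it is returned.
#
# ssh("user@host", "uptime")
# status = ssh("user@host", "test -d /opt/app", checked=False)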
| 15,291
|
def permutations(n, r=None):
"""Returns the number of ways of arranging r elements of a set of size n in
a given order - the number of permuatations.
:param int n: The size of the set containing the elements.
    :param int r: The number of elements to arrange. If not given, it will be\
assumed to be equal to n.
:raises TypeError: if non-integers are given.
:raises ValueError: if r is greater than n.
:rtype: ``int``"""
if not isinstance(n, int): raise TypeError("n {} must be integer".format(n))
if r is None: return factorial(n)
if not isinstance(r, int): raise TypeError("r {} must be integer".format(r))
if r > n:
raise ValueError("r {} is larger than n {}".format(r, n))
    return factorial(n) // factorial(n - r)
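
# Illustrative usage sketch, assuming factorial is imported from math at module
# level as the implementation relies on: arranging 2 of 5 elements gives
# 5!/3! = 20 ordered arrangements.
assert permutations(5) == 120
assert permutations(5, 2) == 20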
| 15,292
|
def backtrace_warn(msg, back_trace_len=0):
"""
warning msg with backtrace support
"""
try:
msg = _log_file_func_info(msg, back_trace_len)
logging_instance = _LoggerInstance()
        logging_instance.get_logger().warning(msg)
except u_exception.LoggerException:
return
except Exception as e:
_fail_handle(msg, e)
| 15,293
|
def create_out_dir(dir_path):
"""creates a directory 'dir_path' if it does not exist"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
| 15,294
|
def cyk(word: str, cfg: CFG) -> bool:
"""
Checks whether grammar derive the word.
This function is applicable to any CFG.
Parameters
----------
word: str
A word to derive in cfg
cfg: CFG
A CFG to derive a word
Returns
-------
bool:
Whether grammar derive the word
"""
word_len = len(word)
if not word_len:
return cfg.generate_epsilon()
cnf = cfg.to_normal_form()
terminal_productions = [
production for production in cnf.productions if len(production.body) == 1
]
variable_productions = [
production for production in cnf.productions if len(production.body) == 2
]
matrix = [[set() for _ in range(word_len)] for _ in range(word_len)]
for i in range(word_len):
matrix[i][i].update(
production.head.value
for production in terminal_productions
if production.body[0].value == word[i]
)
for length in range(1, word_len):
for start in range(word_len - length):
end = start + length
for current in range(start, end):
matrix[start][end].update(
production.head.value
for production in variable_productions
if production.body[0].value in matrix[start][current]
and production.body[1].value in matrix[current + 1][end]
)
return cnf.start_symbol.value in matrix[0][word_len - 1]
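
# Illustrative usage sketch, assuming pyformlang's CFG (already imported for the
# type hint above), whose from_text treats uppercase symbols as variables and
# lowercase ones as terminals. The grammar below generates {a^n b^n : n >= 1}.
example_cfg = CFG.from_text("S -> a S b | a b")
assert cyk("aabb", example_cfg)
assert not cyk("abab", example_cfg)
assert not cyk("", example_cfg)   # this grammar does not derive the empty word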
| 15,295
|
def all_dynamic_dt_needed_paths(f, paths):
""" Return a dictionary of all the DT_NEEDED => Library Paths for
a given ELF file obtained by recursively following linkage.
"""
with open(f, 'rb') as file:
try:
readelf = ReadElf(file)
eclass = readelf.elf_class()
# This needs to be iterated until we traverse the entire linkage tree
dt_needed = readelf.dynamic_dt_needed()
dt_needed_paths = dynamic_dt_needed_paths(dt_needed, eclass, paths)
for n, lib in dt_needed_paths.items():
dt_needed_paths = dict(all_dynamic_dt_needed_paths(lib, paths), **dt_needed_paths)
except ELFError as ex:
sys.stderr.write('ELF error: %s\n' % ex)
sys.exit(1)
return dt_needed_paths
| 15,296
|
def macd(df, ewa_short, ewa_long, ewa_signal, price_col="adj_close"):
"""Moving Average Convergence Divergence
Parameters:
-----------
df : DataFrame
Input dataframe.
ewa_short : int
Exponentially weighted average time-window for a short time-span.
A common choice for the short time-window is 12 intervals.
ewa_long : int
Exponentially weighted average time-window for a longer time-span.
A common choice for the long time-window is 26 intervals.
ewa_signal : int
Time-window for the EWA of the difference between long and short
averages.
price_col : str
Column name in `df` used for defining the current indicator (e.g. "open",
"close", etc.)
Returns:
--------
macd_ts : Series
Moving average convergence-divergence indicator for the time series.
"""
ewa_short = int(ewa_short)
ewa_long = int(ewa_long)
ewa_signal = int(ewa_signal)
ewa12 = df[price_col].ewm(span=ewa_short).mean()
ewa26 = df[price_col].ewm(span=ewa_long).mean()
macd_ts = ewa12 - ewa26
signal_line = macd_ts.ewm(span=ewa_signal).mean()
return macd_ts - signal_line, 'stationary'
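
# Illustrative usage sketch: a tiny price series with the conventional 12/26/9
# parameters; the second return value is the indicator's stationarity tag.
import pandas as pd
example_df = pd.DataFrame({"adj_close": [10.0, 10.5, 10.2, 10.8, 11.0, 10.9, 11.3, 11.5]})
macd_indicator, tag = macd(example_df, ewa_short=12, ewa_long=26, ewa_signal=9)
assert tag == 'stationary'
assert len(macd_indicator) == len(example_df)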
| 15,297
|
def make_annotation(field: ModelField):
"""
Convert a field annotation type to form data accepted type.
The method convert structural field such as `BaseModel` and `Dict` to a str. Such as the model's value is
supplied as a serialized JSON string format. Such string will be converted back to a dictionary, and used
for initialize previous field.
"""
field_outer_type = field.outer_type_
is_literal = False
# check outer type
if isgeneric(field_outer_type):
# outer type is a generic class
if field_outer_type.__origin__ is Union:
# only Union is valid generic class
inner_types = field_outer_type.__args__
else:
return str, True
else:
inner_types = (field_outer_type,)
field_outer_type = None
# check inner types
inner_types_new = list()
for inner_type in inner_types:
if inner_type in (str, int, float, ..., Any):
# inner type of `str`, `int` and `float` will be natively used as form data value
inner_types_new.append(inner_type)
elif issubclass(inner_type, Enum):
inner_types_new.append(_make_form_enum(inner_type))
else:
# other types will be converted to string literal
is_literal = True
inner_types_new.append(str)
if field_outer_type is None:
field_outer_type = inner_types_new[0]
else:
# set new generic type args
field_outer_type = field_outer_type.__origin__[tuple(inner_types_new)]
return field_outer_type, is_literal
| 15,298
|
def rawfile_to_h5_external_dataset(bin_file, output_url, shape, dtype,
overwrite=False):
"""
Create a HDF5 dataset at `output_url` pointing to the given vol_file.
    The `shape` of the volume must be provided.
    :param str bin_file: Path to the raw (binary) data file
:param DataUrl output_url: HDF5 URL where to save the external dataset
:param tuple shape: Shape of the volume
:param numpy.dtype dtype: Data type of the volume elements (default: float32)
:param bool overwrite: True to allow overwriting (default: False).
"""
assert isinstance(output_url, silx.io.url.DataUrl)
assert isinstance(shape, (tuple, list))
    v_majeur, v_mineur, v_micro = h5py.version.version.split('.')[:3]
    if (int(v_majeur), int(v_mineur)) < (2, 9):
raise Exception('h5py >= 2.9 should be installed to access the '
'external feature.')
with h5py.File(output_url.file_path(), mode="a") as _h5_file:
if output_url.data_path() in _h5_file:
if overwrite is False:
raise ValueError('data_path already exists')
else:
logger.warning('will overwrite path %s' % output_url.data_path())
del _h5_file[output_url.data_path()]
external = [(bin_file, 0, h5py.h5f.UNLIMITED)]
_h5_file.create_dataset(output_url.data_path(),
shape,
dtype=dtype,
external=external)
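
# Illustrative usage sketch: file names are placeholders and the raw file must
# exist on disk, so the call is shown without executing. DataUrl comes from
# silx.io.url, as required by the assertion above; the raw data is referenced
# externally rather than copied into the HDF5 file.
#
# import numpy
# from silx.io.url import DataUrl
# url = DataUrl(file_path="volume.h5", data_path="/entry/data")
# rawfile_to_h5_external_dataset("volume.raw", url, shape=(100, 512, 512),
#                                dtype=numpy.float32, overwrite=True)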
| 15,299
|