content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def index_objects(
    *, ids, indexer_class, index=None, transforms=None, manager_name=None
):
    """
    Index specified `ids` in ES using `indexer_class`. This is done in a single
    bulk action.

    Pass `index` to index on the specific index instead of the default index
    alias from the `indexed_class`.

    Pass `transforms` or `manager_name` to change the queryset used to fetch
    the objects to index.

    Unless an `index` is specified, if a reindexing is taking place for the
    default index then this function will index on both the old and new indices
    to allow indexing to still work while reindexing isn't complete yet.
    """
    if index is None:
        # No explicit index: honour any in-progress reindex by writing to
        # both the old and the new index behind the default alias.
        index = indexer_class.get_index_alias()
        indices = Reindexing.objects.get_indices(index)
    else:
        # The caller named a specific index, so only consider that one.
        indices = [index]

    manager = getattr(
        indexer_class.get_model(),
        'objects' if manager_name is None else manager_name,
    )
    queryset = manager.filter(id__in=ids)
    for transform in (transforms or []):
        queryset = queryset.transform(transform)

    es_client = amo_search.get_es()
    es_major = get_major_version(es_client)
    actions = []
    for obj in queryset.order_by('pk'):
        document = indexer_class.extract_document(obj)
        for target_index in indices:
            action = {
                '_source': document,
                '_id': obj.id,
                '_index': target_index,
            }
            if es_major < 7:
                # While on 6.x, we use the `addons` type when creating indices
                # and when bulk-indexing. We completely ignore it on searches.
                # When on 7.x, we don't pass type at all at creation or
                # indexing, and continue to ignore it on searches.
                # That should ensure we're compatible with both transparently.
                action['_type'] = 'addons'
            actions.append(action)
    return helpers.bulk(es_client, actions)
def get_attrs_titles_with_transl() -> dict:
    """Return a mapping of visible attribute titles to their translations."""
    # Only attributes flagged for list display, in weight order; duplicate
    # names collapse into a single key, exactly as in a plain dict build.
    visible_attrs = Attribute.objects.filter(show_in_list=True).order_by('weight')
    return {attr.name: _(attr.name) for attr in visible_attrs}
def update(op, table, model):
    """Upgrade database schema and/or data, creating a new revision."""
    # object_folders data is migrated by two separate modules (ggrc and
    # ggrc_workflows); once the table is dropped in the future, whichever
    # module's migration runs second would fail without this existence check.
    table_exists = op.get_bind().execute(HAVE_TABLE).fetchone()[0]
    if table_exists:
        op.execute(UPDATE_SQL.format(table=table, model=model))
def kde_KL_divergence_2d(x, y, h_x, h_y, nb_bins=100, fft=True):
    """Uses Kernel Density Estimator with Gaussian kernel on two
    dimensional samples x and y and returns estimated Kullback-
    Leibler divergence.
    @param x, y: samples, given as a (n, 2) shaped numpy array,
    @param h_x, h_y: widths of the Gaussian kernel in each dimension,
    @param nb_bins: number of grid points to use,
    @param fft: whether to use FFT to compute convolution.
    """
    # Shared evaluation bounds: the joint bounding box of both samples, so
    # both KDEs are evaluated on the exact same grid.
    min_ = np.min(np.vstack([np.min(x, axis=0), np.min(y, axis=0)]), axis=0)
    max_ = np.max(np.vstack([np.max(x, axis=0), np.max(y, axis=0)]), axis=0)
    bounds_ = np.vstack((min_, max_))
    (x_grid, y_grid, kde_x) = gaussian_kde_2d(x, h_x, h_y,
                                              nb_bins=nb_bins,
                                              fft=fft,
                                              bounds=bounds_
                                              )
    (x_grid2, y_grid2, kde_y) = gaussian_kde_2d(y, h_x, h_y,
                                                nb_bins=nb_bins,
                                                fft=fft,
                                                bounds=bounds_
                                                )
    # Grid spacings for the trapezoidal integration below.
    delta_x = x_grid[1] - x_grid[0]
    delta_y = y_grid[1] - y_grid[0]
    # EPSILON regularizes the ratio where either density is ~0.
    # NOTE(review): KL(p||q) = integral of p*log(p/q); the leading minus sign
    # makes the integrand -p*log(p/q), i.e. the returned value is the
    # *negative* of the usual KL divergence — confirm the intended sign
    # convention with callers.
    plogp = - kde_x * np.log((kde_x + EPSILON) / (kde_y + EPSILON))
    # Integrate
    div = trapz(trapz(plogp, dx=delta_x, axis=1), dx=delta_y, axis=0)
    return div
def gml_init(code):
    """
    Initializes a Group Membership List (GML) for schemes of the given type.

    Parameters:
        code: The code of the scheme.
    Returns:
        A native object representing the GML. Throws an Exception on error.
    """
    # The C library signals failure by returning a NULL pointer.
    handle = lib.gml_init(code)
    if handle == ffi.NULL:
        raise Exception('Error initializing GML.')
    return handle
def before_run(func, force=False):
    """
    Adds a function *func* to the list of callbacks that are invoked right before luigi starts
    running scheduled tasks. Unless *force* is *True*, a function that is already registered is not
    added again and *False* is returned. Otherwise, *True* is returned.
    """
    already_registered = func in _before_run_funcs
    if already_registered and not force:
        return False
    # Note: with force=True a callback may be appended more than once.
    _before_run_funcs.append(func)
    return True
def calc_z_scores(baseline, seizure):
    """Scale a seizure power spectrum to z-scores using baseline statistics.

    Mirrors the Brainstorm demo used to select the 120-200 Hz band (cf.
    panel 2 of figure 1 in David et al 2011): for each frequency, the
    baseline mean and standard deviation (taken along axis 1) standardize
    the corresponding seizure value.

    Parameters
    ----------
    baseline : ndarray
        power spectrum of baseline EEG
    seizure : ndarray
        power spectrum of seizure EEG

    Returns
    -------
    ndarray
        seizure power spectrum scaled to a z-score by baseline power
        spectrum mean and SD
    """
    baseline_mean = np.mean(baseline, 1)
    baseline_sd = np.std(baseline, 1)
    return (seizure - baseline_mean) / baseline_sd
def get_column(data, column_index):
    """
    Gets a column of data from the given data.

    :param data: The data from the CSV file.
    :param column_index: The column to copy.
    :return: The column of data (as a list).
    """
    column = []
    for row in data:
        column.append(row[column_index])
    return column
def analytic_solution(num_dims,
                      t_val,
                      x_val=None,
                      domain_bounds=(0.0, 1.0),
                      x_0=(0.5, 0.5),
                      d=1.0,
                      k_decay=0.0,
                      k_influx=0.0,
                      trunc_order=100,
                      num_points=None):
    """This function returns the analytic solution to the heat equation with decay i.e. du/dt = nabla^2 u + k_1 - k_2 u
    k_1 is the production rate, k_2 is the decay rate
    Returns x-axis values, followed by an array of the solutions at different time points"""
    # --- Normalise scalar arguments into the array/list forms used below ---
    if isinstance(t_val, (int, float)):
        t_val = np.array([t_val])
    if isinstance(num_points, (int, float)):
        num_points = [num_points, num_points]
    if isinstance(x_0, (int, float)):
        x_0 = np.array([x_0, x_0])
    if len(domain_bounds) < 4:
        # A 1D (min, max) pair is duplicated for the second dimension.
        domain_bounds = (domain_bounds[0], domain_bounds[1], domain_bounds[0], domain_bounds[1])
    assert isinstance(t_val, (list, tuple, np.ndarray))
    assert isinstance(x_val, (tuple, list, np.ndarray)) or x_val is None
    assert isinstance(domain_bounds, (list, tuple, np.ndarray))
    assert isinstance(x_0, (tuple, list, np.ndarray))
    assert isinstance(d, (int, float))
    assert isinstance(k_decay, (int, float))
    assert isinstance(k_influx, (int, float))
    assert isinstance(trunc_order, int)
    # NOTE(review): only bounds[0:2] feed `length`, so the domain is assumed
    # square (same extent in x and y) — confirm for non-square domains.
    length = float(domain_bounds[1] - domain_bounds[0])
    t = np.array(t_val)
    if x_val is None:
        assert num_points is not None
        x_val = [np.linspace(domain_bounds[0], domain_bounds[1], num_points[0]),
                 np.linspace(domain_bounds[0], domain_bounds[1], num_points[1])]
    if num_dims == 1:
        if isinstance(x_val[0], (tuple, list, np.ndarray)):
            x = np.array(x_val[0])
            y = np.array(x_val[0])
        else:
            x = np.array(x_val)
            y = np.array(x_val)
        assert t.ndim == 1
        # Shape (n_times, 1) so each term broadcasts over the spatial axis.
        t = t.reshape([t.shape[0], 1])
        # Truncated cosine eigenfunction series for a point source at x_0
        # (cosine modes are consistent with no-flux boundaries — TODO confirm).
        u = 1.0 / length
        for n in range(1, trunc_order):
            u += (2/length)*np.cos((n*np.pi/length)*x_0[0])*np.cos((n*np.pi/length)*x)*np.exp(-d*(n*np.pi/length)**2*t)
    else:
        assert isinstance(x_val[0], (tuple, list, np.ndarray))
        assert isinstance(x_val[1], (tuple, list, np.ndarray))
        x = np.array(x_val[0])
        y = np.array(x_val[1])
        xx, yy = np.meshgrid(x, y)
        assert t.ndim == 1
        # Shape (n_times, 1, 1) to broadcast over the 2D grid.
        t = t.reshape([t.shape[0], 1, 1])
        # 2D series: pure-y modes, pure-x modes, then the mixed modes.
        u = 1.0 / length ** 2
        for k in range(1, trunc_order):
            u += (2.0 / length ** 2) * np.cos(k * np.pi * x_0[1] / length) * np.cos(k * np.pi * yy / length) * np.exp(
                -d * t * (k * np.pi / length) ** 2)
        for j in range(1, trunc_order):
            u += (2.0 / length ** 2) * np.cos(j * np.pi * x_0[0] / length) * np.cos(j * np.pi * xx / length) * np.exp(
                -d * t * (j * np.pi / length) ** 2)
        for j in range(1, trunc_order):
            for k in range(1, trunc_order):
                u += (4.0 / length ** 2) * np.cos(j * np.pi * x_0[0] / length) * np.cos(k * np.pi * x_0[1] / length) * \
                     np.cos(j * np.pi * xx / length) * np.cos(k * np.pi * yy / length) * \
                     np.exp(-d * t * ((j * np.pi / length) ** 2 + (k * np.pi / length) ** 2))
    # --- Decay / influx corrections applied to the diffusion-only solution ---
    if k_decay > 0.0 and k_influx == 0.0:
        u *= np.exp(- k_decay * t)
    elif k_decay == 0.0 and k_influx > 0.0:
        u += k_influx * t
    elif k_decay > 0.0 and k_influx > 0.0:
        # NOTE(review): in this combined branch the diffusive part `u` is not
        # multiplied by exp(-k_decay*t) as in the decay-only branch — confirm
        # this asymmetry is intentional.
        u += k_influx * (1.0 - np.exp(-k_decay * t)) / k_decay
    if num_dims == 1:
        # `y` is computed above but not returned in the 1D case.
        return u, x
    else:
        return u, x, y
def measurement_output_parser(raw_bytes, sender_addr):
    """
    Prints the measurement on screen

    :param raw_bytes: byteArray from the WlanMeter
    :param sender_addr: address of the sender (currently unused)
    """
    response = JSONResponse.from_encoded(raw_bytes)
    measurement = EMeterData.from_json(response.json)
    fields = (measurement.date, measurement.current, measurement.total,
              measurement.power, measurement.voltage, measurement.err_code)
    print(", ".join(str(field) for field in fields))
def test_id_g023_id_g023_v(mode, save_output, output_format):
    """
    TEST :Identity-constraint Definition Schema Component : key category,
    field points to element from imported schema
    """
    # Delegates entirely to assert_bindings (defined elsewhere), which is
    # expected to raise on any mismatch between the generated bindings for
    # the idG023 schema and its sample instance document.
    assert_bindings(
        schema="msData/identityConstraint/idG023.xsd",
        instance="msData/identityConstraint/idG023.xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def safe_gas_limit(*estimates: int) -> int:
    """Calculates a safe gas limit for a number of gas estimates
    including a security margin
    """
    assert None not in estimates, "if estimateGas returned None it should not reach here"
    # Take the worst-case estimate and scale it by the configured margin.
    return int(max(estimates) * constants.GAS_FACTOR)
def test_GET_request_not_chunked(httpserver, transfer_encoding_header):
    """
    Test that setting the chunked attribute of httpserver to NO causes
    the response not to be sent using chunking even if the Transfer-encoding
    header is set.
    """
    # Advertise chunked transfer in the header while forcing the server NOT
    # to actually chunk the body — requests should then fail to decode it.
    httpserver.serve_content(
        ('TEST!', 'test'),
        headers={'Content-type': 'text/plain', transfer_encoding_header: 'chunked'},
        chunked=http.Chunked.NO
    )
    with pytest.raises(requests.exceptions.ChunkedEncodingError):
        # `resp` is never reached/used: the exception fires inside get().
        resp = requests.get(httpserver.url, headers={'User-Agent': 'Test method'})
def main():
    """Main function, it implements the application loop"""
    # Initialize pygame, with the default parameters
    pygame.init()
    # Define the size/resolution of our window
    res_x = 640
    res_y = 480
    # Create a window and a display surface
    screen = pygame.display.set_mode((res_x, res_y))
    # Create a scene
    scene = Scene("TestScene")
    scene.camera = Camera(False, res_x, res_y)
    # Moves the camera back 2 units
    scene.camera.position -= Vector3(0, 0, 2)
    # Create a sphere and place it in a scene, at position (0,0,0)
    obj1 = Object3d("TestObject")
    obj1.scale = Vector3(1, 1, 1)
    obj1.position = Vector3(0, 0, 0)
    obj1.mesh = Mesh.create_sphere((1, 1, 1), 12, 12)
    obj1.material = Material(Color(1, 0, 0, 1), "TestMaterial1")
    scene.add_object(obj1)
    # Specify the rotation of the object. It will rotate 15 degrees around the axis given,
    # every second
    angle = 15
    axis = Vector3(1, 0.7, 0.2)
    axis.normalize()
    # Timer: delta_time holds the previous frame's duration in seconds.
    delta_time = 0
    prev_time = time.time()
    pygame.mouse.set_visible(True)
    pygame.event.set_grab(False)
    # Game loop, runs forever
    while True:
        # Process OS events
        for event in pygame.event.get():
            # Checks if the user closed the window
            if event.type == pygame.QUIT:
                # Exits the application immediately
                return
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    return
        # Clears the screen with black (0, 0, 0)
        screen.fill((0, 0, 0))
        # Rotates the object, considering the time passed (not linked to frame rate):
        # build a quaternion for this frame's increment and compose it with the
        # current orientation.
        q = from_rotation_vector((axis * math.radians(angle) * delta_time).to_np3())
        obj1.rotation = q * obj1.rotation
        scene.render(screen)
        # Swaps the back and front buffer, effectively displaying what we rendered
        pygame.display.flip()
        # Updates the timer, so we we know how long has it been since the last frame
        delta_time = time.time() - prev_time
        prev_time = time.time()
def ingressacltemplate_update(ctx, ingressacltemplate_id, key_value):
    """Update key/value for a given ingressacltemplate"""
    # Each item is "key:value"; split only on the first colon so values may
    # themselves contain colons.
    params = dict(kv.split(':', 1) for kv in key_value)
    nc = ctx.obj['nc']
    nc.put("ingressacltemplates/%s?responseChoice=1" %
           ingressacltemplate_id, params)
    # Re-fetch the object so the printed state reflects the update.
    result = nc.get("ingressacltemplates/%s" %
                    ingressacltemplate_id)[0]
    print_object(result, only=ctx.obj['show_only'])
def integer_years(dates: typing.Any) -> typing.List[int]:
    """Maps a list of 'normalized_date' strings to a sorted list of integer years.

    Args:
        dates: A list of strings containing dates in the 'normalized_date' format.
    Returns:
        A list of years extracted from "dates".
    """
    if not isinstance(dates, typing.Iterable):
        return []
    found: typing.Set[int] = set()
    for entry in dates:
        if not isinstance(entry, str):
            continue
        range_match = RANGE.search(entry)
        if not range_match:
            # Plain (non-range) date: add its single year if parseable.
            single = get_year(entry)
            if single:
                found.add(single)
            continue
        # Date range: expand to every year in [start, end] inclusive.
        start_str, end_str = range_match.groups()
        start = get_year(start_str)
        end = get_year(end_str)
        if start and end:
            found.update(range(start, end + 1))
    return sorted(found)
def test_user_cannot_delete_record_in_unowned_zone(shared_zone_test_context):
    """
    Test user cannot delete a record that in an unowned zone
    """
    # `client` owns the dummy zone; `unauthorized_client` does not and must
    # therefore be rejected (403) when attempting the delete.
    client = shared_zone_test_context.dummy_vinyldns_client
    unauthorized_client = shared_zone_test_context.ok_vinyldns_client
    rs = None
    try:
        rs = client.create_recordset(
            {
                'zoneId': shared_zone_test_context.dummy_zone['id'],
                'name': 'test-user-cannot-delete-record-in-unowned-zone',
                'type': 'A',
                'ttl': 100,
                'records': [
                    {
                        'address': '10.10.10.10'
                    }
                ]
            }, status=202)['recordSet']
        client.wait_until_recordset_exists(rs['zoneId'], rs['id'])
        unauthorized_client.delete_recordset(rs['zoneId'], rs['id'], status=403)
    finally:
        # Cleanup: delete the record we created (404 accepted in case it is
        # already gone).
        if rs:
            try:
                client.delete_recordset(rs['zoneId'], rs['id'], status=(202, 404))
                client.wait_until_recordset_deleted(rs['zoneId'], rs['id'])
            finally:
                # `finally: pass` is a no-op: any cleanup exception still
                # propagates to the caller.
                pass
def export_all(node_label, project_id, file_format, db, without_id):
    """
    Export all nodes of type with name ``node_label`` to a TSV file and yield
    rows of the resulting TSV.
    Args:
        node_label (str): type of nodes to look up, for example ``'case'``
        project_id (str): project to look under
        file_format (str): json or tsv
        db (psqlgraph.PsqlGraphDriver): database driver to use for queries
        without_id (bool): whether to omit id columns from the titles
    Return:
        Generator[str]: generator of rows of the TSV file
    Example:
        Example of streaming a TSV in a Flask response using this function:
        .. code-block:: python
            return flask.Response(export_all(
                'case', 'acct-test', flask.current_app.db
            ))
    """
    # Examples in comments throughout function will start from ``'case'`` as an
    # example ``node_label`` (so ``gdcdatamodel.models.Case`` is the example
    # class).
    titles_non_linked, titles_linked = get_all_titles(node_label, without_id)
    with db.session_scope() as session:
        # ``linked_props`` is a list of attributes belonging to linked classes
        # (for example, ``Experiment.node_id``).
        # Example ``cls._pg_links`` for reference:
        #
        #     Case._pg_links == {
        #         'experiments': {
        #             'dst_type': gdcdatamodel.models.Experiment,
        #             'edge_out': '_CaseMemberOfExperiment_out',
        #         }
        #     }
        #
        # This is used to look up the classes for the linked nodes.
        # Now, fill out the properties lists from the titles.
        cls = psqlgraph.Node.get_subclass(node_label)
        linked_props = make_linked_props(cls, titles_linked)
        # Build up the query. The query will contain, firstly, the node class,
        # and secondly, all the relevant properties in linked nodes.
        query_args = [cls] + linked_props
        query = session.query(*query_args).prop("project_id", project_id)
        # Add filter by id the user is authorized to access.
        auth_ids = auth.get_authorized_ids(project_id.split('-')[0], project_id.split('-')[1])
        if auth_ids:
            query = query.prop_in('submitter_id', auth_ids)
        # Join the related node tables using the links.
        for link in cls._pg_links.values():
            query = (
                query.outerjoin(link["edge_out"])
                .outerjoin(link["dst_type"])
                .order_by("src_id")
            )
        # The result from the query should look like this (header just for
        # example):
        #
        #     Case instance  experiments.id  experiments.submitter_id
        #     (<Case(...[uuid]...)>, u'...[uuid]...', u'exp-01')
        # ``props`` is just a list of strings of the properties of the node
        # class that should go in the result.
        props = [format_prop(t) for t in titles_non_linked]
        if file_format == "json":
            yield '{ "data": ['
        else:  # tsv: emit the header row first
            # Yield the lines of the file.
            yield "{}\n".format("\t".join(titles_non_linked + titles_linked))
        js_list_separator = ""
        # Rows arrive ordered by src_id; consecutive rows for the same node
        # (one per link) are merged into ``current_obj`` and flushed whenever
        # the node id changes.
        last_id = None
        current_obj = None
        for result in query.yield_per(1000):
            node = result[0]
            node_id = node["node_id"]
            if node_id != last_id:
                new_obj = {
                    prop: list_to_comma_string(node[prop], file_format)
                    for prop in props
                }
                if current_obj != None:
                    yield from yield_result(
                        current_obj,
                        js_list_separator,
                        props,
                        titles_linked,
                        file_format,
                    )
                    js_list_separator = ","
                last_id = node_id
                current_obj = new_obj
            current_obj = append_links_to_obj(result, current_obj, titles_linked)
        # Flush the final accumulated object (if any rows were returned).
        if current_obj is not None:
            yield from yield_result(
                current_obj,
                js_list_separator,
                props,
                titles_linked,
                file_format,
            )
        if file_format == "json":
            yield "]}"
def add_wmts_gibs_basemap(ax, date='2016-02-05'):
    """Add a NASA GIBS WMTS layer to *ax* (http://gibs.earthdata.nasa.gov/).

    Returns the WebMapTileService instance so callers can inspect layer
    metadata (e.g. ``wmts[layer].title``).
    """
    endpoint = 'http://gibs.earthdata.nasa.gov/wmts/epsg4326/best/wmts.cgi'
    wmts = WebMapTileService(endpoint)
    # Many other layers are available, e.g.:
    # https://wiki.earthdata.nasa.gov/display/GIBS/GIBS+Available+Imagery+Products#expand-ReferenceLayers9Layers
    #   'MODIS_Terra_SurfaceReflectance_Bands143'
    #   'MODIS_Terra_CorrectedReflectance_Bands367'
    #   'ASTER_GDEM_Greyscale_Shaded_Relief'  (better zoomed in)
    #   'BlueMarble_ShadedRelief' / 'BlueMarble_NextGeneration'
    #   'BlueMarble_ShadedRelief_Bathymetry'
    #   'Reference_Labels' / 'Reference_Features'
    layer = 'SRTM_Color_Index'
    ax.add_wmts(wmts, layer, wmts_kwargs={'time': date})
    return wmts
def encode_position(
    batch_size: int,
    axis: list,
    max_frequency: float,
    num_frequency_bands: int,
    sine_only: bool = False,
) -> torch.Tensor:
    """
    Encode the Fourier Features and return them

    Args:
        batch_size: Batch size
        axis: List containing the size of each axis
        max_frequency: Max frequency
        num_frequency_bands: Number of frequency bands to use
        sine_only: (bool) Whether to only use Sine features or both Sine and Cosine, defaults to both

    Returns:
        Torch tensor containing the Fourier Features of shape [Batch, *axis]
    """
    # One normalized coordinate axis in [-1, 1] per spatial dimension.
    grid_axes = [torch.linspace(-1.0, 1.0, steps=size) for size in axis]
    # Stack the meshgrid into a (*axis, ndim) position tensor.
    positions = torch.stack(torch.meshgrid(*grid_axes), dim=-1)
    encoded = fourier_encode(
        positions,
        max_frequency,
        num_frequency_bands,
        sine_only=sine_only,
    )
    # Flatten the (ndim, bands) trailing axes, then tile across the batch.
    encoded = einops.rearrange(encoded, "... n d -> ... (n d)")
    return einops.repeat(encoded, "... -> b ...", b=batch_size)
def expr_erode(src, size = 5):
    """
    Same result as core.morpho.Erode(), faster and workable in 32 bit.
    """
    # mm='min' selects the local-minimum (erosion) variant of the generated
    # neighbourhood expression, evaluated by the Akarin Expr plugin.
    expr = _morpho_matrix(size, mm = 'min')
    return core.akarin.Expr(src, expr)
def print_grid(zone_names, options):
    """
    Print the tzgrid.

    @param zone_names list of zones to print
    @param options command line options to control printing behavior
    """
    tzs = []
    for name in zone_names:
        tz = gettz(name)
        # Fix: the original called gettz(name) a second time here; reuse the
        # already-resolved tz object instead.
        tzs.append((name, tz))
    size = label_size(tzs)
    tzs = get_sorted_zones(tzs, options)
    if not options.twelve:
        # NOTE: `tz` here is the leftover loop variable (the last zone in the
        # *original* input order); the header row is colored with that zone's
        # format. Preserved as-is to keep output identical. An empty
        # zone_names list raises NameError here, as before.
        fmt = get_color_label_format(size, tz) + " %s"
        times = format_range_hours_days(
            tzs[0][1], options.width - size - 5, options)
        print(fmt % ("", times))
    for name, tz in tzs:
        fmt = get_color_label_format(size, tz) + " | %s"
        if options.twelve:
            times = format_range_am_pm(tz, options.width - size - 5, options)
        else:
            times = format_range_hours(tz, options.width - size - 5, options)
        print(fmt % (name, times))
def response_modification(response):
    """
    Modify API response format.

    Wraps successful (and 400) responses in a {code, status, data} envelope;
    all other 4xx/5xx responses pass through untouched.
    """
    is_error = (status.is_client_error(response.status_code)
                or status.is_server_error(response.status_code))
    if is_error and response.status_code != status.HTTP_400_BAD_REQUEST:
        return response
    # Wrap payload in the standard envelope.
    response.data = {
        "code": response.status_code,
        "status": get_status(response.status_code),
        "data": response.data,
    }
    return response
def test_lookahead_final_acceptance_fractions():
    """Test `pyabc.visualization.plot_lookahead_final_acceptance_fractions`"""
    # Smoke-test every combination of the relative/fill flags; `sampler_df`
    # and `history` are module-level fixtures.
    for relative, fill in itertools.product([True, False], [True, False]):
        pyabc.visualization.plot_lookahead_final_acceptance_fractions(
            sampler_df, history, relative=relative, fill=fill, size=(5, 5))
        plt.close()
def http_request(
        url,
        json_string,
        username = None,
        password = None,
        timeout = None,
        additional_headers = None,
        content_type = None,
        cookies = None,
        gzipped = None,
        ssl_context = None,
        debug = None
    ):
    """
    Fetch data from webserver (POST request)

    :param json_string: JSON-String
    :param username: If *username* is given, BASE authentication will be used.
    :param timeout: Specifies a timeout in seconds for blocking operations
        like the connection attempt (if not specified, the global default
        timeout setting will be used).
        See: https://github.com/gerold-penz/python-jsonrpc/pull/6
    :param additional_headers: Dictionary with additional headers
        See: https://github.com/gerold-penz/python-jsonrpc/issues/5
    :param content_type: Possibility to change the content-type header.
    :param cookies: Possibility to add simple cookie-items as key-value pairs.
        The key and the value of each cookie-item must be a bytestring.
        Unicode is not allowed here.
    :param gzipped: If `True`, the JSON-String will be gzip-compressed.
    :param ssl_context: Specifies custom TLS/SSL settings for connection.
        Python > 2.7.9
        See: https://docs.python.org/2/library/ssl.html#client-side-operation
    :param debug: If `True` --> *logging.debug*
    """
    # Debug
    if debug:
        logging.debug("Client-->Server: {json_string}".format(json_string=repr(json_string)))
    # Create request and add data
    request = urllib2.Request(url)
    if gzipped:
        # Compress content (SpooledTemporaryFile to reduce memory usage)
        spooled_file = tools.SpooledFile()
        tools.gzip_str_to_file(json_string, spooled_file)
        del json_string
        request.add_header("Content-Encoding", "gzip")
        request.add_header("Accept-Encoding", "gzip")
        spooled_file.seek(0)
        if six.PY2:
            request.add_data(spooled_file)
        else:
            request.data = spooled_file
    else:
        if six.PY2:
            request.add_data(json_string)
        else:
            request.data = json_string
    # Content Type
    request.add_header("Content-Type", content_type or "application/json")
    # Authorization
    if username:
        base64string = base64.b64encode("%s:%s" % (username, password)).strip()
        request.add_unredirected_header("Authorization", "Basic %s" % base64string)
    # Cookies
    if cookies:
        cookie = Cookie.SimpleCookie(cookies)
        request.add_header("Cookie", cookie.output(header = "", sep = ";"))
    # Additional headers (overrides other headers)
    if additional_headers:
        for key, val in six.iteritems(additional_headers):
            request.add_header(key, val)
    # Send request to server
    http_error_exception = urllib2.HTTPError if six.PY2 else urllib.error.HTTPError
    try:
        if ssl_context:
            try:
                response = urllib2.urlopen(
                    request, timeout=timeout, context=ssl_context
                )
            except TypeError as err:
                # Fix: was `unicode(err)`, which raises NameError on Python 3
                # (`unicode` does not exist there); str() works on both.
                if "context" in str(err):
                    raise NotImplementedError("SSL-Context needs Python >= 2.7.9")
                else:
                    raise
        else:
            response = urllib2.urlopen(request, timeout=timeout)
    except http_error_exception as err:
        if debug:
            retval = err.read()
            logging.debug("Client<--Server: {retval}".format(retval=repr(retval)))
        raise
    # Analyze response and return result
    try:
        if "gzip" in response.headers.get("Content-Encoding", ""):
            # Decompress a gzip-encoded body via a spooled temp file.
            response_file = tools.SpooledFile(source_file = response)
            if debug:
                retval = tools.gunzip_file(response_file)
                logging.debug("Client<--Server: {retval}".format(retval = repr(retval)))
                return retval
            return tools.gunzip_file(response_file)
        else:
            if debug:
                retval = response.read()
                logging.debug("Client<--Server: {retval}".format(retval=repr(retval)))
                return retval
            return response.read()
    finally:
        response.close()
def perform_svn():
    """Create svn repo."""
    # Branch selection flags are module-level globals; trunk is the default.
    if flag_google:
        targ = "gcc-google-4.9"
        url = "svn://gcc.gnu.org/svn/gcc/branches/google/gcc-4_9"
    elif flag_49_branch:
        targ = "gcc-4.9"
        url = "svn://gcc.gnu.org/svn/gcc/branches/gcc-4_9-branch"
    elif flag_5_branch:
        targ = "gcc-5"
        url = "svn://gcc.gnu.org/svn/gcc/branches/gcc-5-branch"
    else:
        targ = "gcc-trunk"
        url = "svn://gcc.gnu.org/svn/gcc/trunk"
    docmd("svn co %s %s" % (url, targ))
def process_source_lineage(grid_sdf, data_sdf, value_field=None):
    """
    For every grid polygon, summarise which ``value_field`` themes of the
    intersecting source features dominate.

    Returns a structured numpy array with, per grid cell: the cell OBJECTID,
    the full list of themes present, and the primary/secondary theme with
    their counts and percentages.

    Raises FunctionError (with traceback details) on any failure.
    """
    try:
        # Map subtype codes to their display names, if the field has subtypes.
        subtypes = arcpy.da.ListSubtypes(data_sdf)
        st_dict = {}
        for stcode, stdict in list(subtypes.items()):
            st_dict[stcode] = subtypes[stcode]['Name']
        fields = arcpy.ListFields(data_sdf)
        use_subtypes = False
        for field in fields:
            if field.name == value_field and field.type == 'Integer':
                arcpy.AddMessage("Field has subtypes")
                use_subtypes = True
        poly_desc = arcpy.Describe(grid_sdf)
        fc_desc = arcpy.Describe(data_sdf)
        if poly_desc.extent.within(fc_desc.extent):
            # Grid lies inside the data extent: clip first to shrink the data.
            temp_fc = 'in_memory/clip'
            arcpy.AddMessage('Clipping features to polygon')
            arcpy.Clip_analysis(data_sdf, grid_sdf, temp_fc)
            arcpy.AddMessage('Created in_memory fc')
            data_sdf = geomotion.SpatialDataFrame.from_featureclass(temp_fc,
                                                                    fields=[value_field])
            arcpy.AddMessage('features read into spatial dataframe after clipping')
        else:
            data_sdf = geomotion.SpatialDataFrame.from_featureclass(data_sdf, fields=[value_field])
            arcpy.AddMessage('features read into spatial dataframe without clipping')
        grid_sdf = geomotion.SpatialDataFrame.from_featureclass(grid_sdf)
        index = data_sdf.sindex
        results = []
        for idx, row in enumerate(grid_sdf.iterrows()):
            geom = row[1].SHAPE
            # Coarse filter via the spatial index on the cell's bounding box.
            ext = [geom.extent.lowerLeft.X, geom.extent.lowerLeft.Y,
                   geom.extent.upperRight.X, geom.extent.upperRight.Y]
            row_oids = list(index.intersect(ext))
            df_current = data_sdf.loc[data_sdf.index.isin(row_oids)]
            # disjoint == False means intersection with Grid polygon
            df_sub = df_current.loc[df_current.disjoint(geom) == False].copy()
            df_sub = df_sub.replace({np.nan: "NULL"})
            grp = df_sub.groupby(by=value_field).size()  # Get the counts.
            # sort the values to get the biggest on the top
            grp.sort_values(axis=0, ascending=False,
                            inplace=True, kind='quicksort',
                            na_position='last')
            if use_subtypes:
                # Subtype codes are translated to names via st_dict.
                if len(grp) > 1:
                    grp = grp.head(2)
                    results.append(
                        (
                            int(row[1].OBJECTID),
                            ",".join([st_dict[i] for i in df_sub[value_field].unique().tolist()]),
                            st_dict[grp.index[0]],
                            int(grp[grp.index[0]]),
                            round(float(grp[grp.index[0]]) * 100.0 / float(len(df_sub)),1),
                            st_dict[grp.index[1]],
                            int(grp[grp.index[1]]),
                            round(float(grp[grp.index[1]]) * 100.0 / float(len(df_sub)),1),
                        )
                    )
                elif len(grp) == 0:
                    results.append(
                        (int(row[1].OBJECTID),
                         'None',
                         'None',
                         0,
                         float(0),
                         'None',
                         0,
                         float(0))
                    )
                elif len(grp) == 1:
                    results.append(
                        (
                            int(row[1].OBJECTID),
                            ",".join([st_dict[i] for i in df_sub[value_field].unique().tolist()]),
                            st_dict[grp.index[0]],
                            int(grp[grp.index[0]]),
                            round(float(grp[grp.index[0]]) * 100.0 / float(len(df_sub)),1),
                            'None',
                            0,
                            float(0)
                        )
                    )
            else:
                if len(grp) > 1:
                    grp = grp.head(2)
                    results.append(
                        (
                            int(row[1].OBJECTID),
                            ",".join(df_sub[value_field].unique().tolist()),
                            grp.index[0],
                            int(grp[0]),
                            round(float(grp[0]) * 100.0 / float(len(df_sub)),1),
                            grp.index[1],
                            int(grp[1]),
                            round(float(grp[1]) * 100.0 / float(len(df_sub)),1),
                        )
                    )
                elif len(grp) == 0:
                    results.append(
                        (int(row[1].OBJECTID),
                         'None',
                         'None',
                         0,
                         float(0),
                         'None',
                         0,
                         float(0))
                    )
                elif len(grp) == 1:
                    results.append(
                        (
                            int(row[1].OBJECTID),
                            ",".join(df_sub[value_field].unique().tolist()),
                            grp.index[0],
                            int(grp[0]),
                            round(float(grp[0]) * 100.0 / float(len(df_sub)),1),
                            'None',
                            0,
                            float(0)
                        )
                    )
            del grp
            del df_sub
            del row_oids
            del df_current
        del grid_sdf
        del data_sdf
        dtypes = np.dtype(
            [
                # Fix: np.int was removed in NumPy 1.24; np.int_ is the
                # equivalent platform-int alias.
                ('_ID', np.int_),
                ('THEME_LIST', '|S1024'),
                ('PRI_THEME', '|S256'),
                ('PRI_THEME_CNT', np.int32),
                ('PRI_THEME_PER', np.float64),
                ('SEC_THEME', '|S256'),
                ('SEC_THEME_CNT', np.int32),
                ('SEC_THEME_PER', np.float64)
            ]
        )
        array = np.array(results, dtypes)
        del results
        return array
    except:
        line, filename, synerror = trace()
        raise FunctionError(
            {
                "function": "process_source_lineage",
                "line": line,
                "filename": filename,
                "synerror": synerror,
                "arc" : str(arcpy.GetMessages(2))
            }
        )
def episode(src, dest):
    """Clone an episode's properties from SRC to DEST,
    assuming they both are from the same season
    """
    # The fixed set of claims that are safe to copy between sibling episodes.
    _clone(src, dest, [
        wp.INSTANCE_OF,
        wp.PART_OF_THE_SERIES,
        wp.ORIGINAL_NETWORK,
        wp.ORIGNAL_LANGUAGE_OF_FILM_OR_TV_SHOW,
        wp.COUNTRY_OF_ORIGIN,
        wp.PRODUCTION_COMPANY,
        wp.PUBLICATION_DATE,
        wp.DIRECTOR,
        wp.SEASON,
    ])
def class_to_mask(classes: np.ndarray, class_colors: np.ndarray) -> np.ndarray:
    """Convert an array of class IDs into an RGB mask image.

    Args:
        classes: Array of class IDs, shape (H, W).
        class_colors: Array of colors, shape (num_classes, 3).

    Returns:
        ndarray of shape (H, W, 3).
    """
    # Fancy indexing maps every class id to its color row in one step.
    palette = np.asarray(class_colors)
    return palette[classes]
def make(T):
    """Build and pickle the user-product "order ratio by chance" features.

    T selects the train/test split: T == -1 writes to 'test', otherwise to
    'trainT-<T>'. Example: T = 0 -> folder = 'trainT-0'.
    Uses the module-level `log` DataFrame.
    """
    if T==-1:
        folder = 'test'
    else:
        folder = 'trainT-'+str(T)
    log_ = log[log.order_number_rev>T]
    # Times each (user, product) pair was ordered within the window.
    cnt = log_.groupby(['user_id', 'product_id']).size()
    cnt.name = 'cnt'
    cnt = cnt.reset_index()
    # chance: number of orders between the first time the user bought the
    # product and the user's last order (inclusive).
    user_onb_max = log_.groupby('user_id').order_number.max().reset_index()
    user_onb_max.columns = ['user_id', 'onb_max']
    user_item_min = log_.groupby(['user_id', 'product_id']).order_number.min().reset_index()
    user_item_min.columns = ['user_id', 'product_id', 'onb_min']
    chance = pd.merge(user_item_min, user_onb_max, on='user_id', how='left')
    chance['chance'] = chance.onb_max - chance.onb_min +1
    df = pd.merge(cnt, chance, on=['user_id', 'product_id'], how='left')
    df['order_ratio_bychance'] = df.cnt / df.chance
    col = ['user_id', 'product_id', 'chance', 'order_ratio_bychance']
    df[col].to_pickle('../feature/{}/f309_user-product.p'.format(folder))
    # === near5 === same features restricted to the 5 most recent orders.
    # NOTE(review): chained boolean indexing — the second mask is built from
    # the full `log` but applied to the subset; pandas aligns it by index,
    # which works here but emits warnings and is fragile. Verify against
    # log[(log.order_number_rev>T) & (log.order_number_rev<=(T+5))].
    log_ = log[log.order_number_rev>T][log.order_number_rev<=(T+5)]
    cnt = log_.groupby(['user_id', 'product_id']).size()
    cnt.name = 'cnt'
    cnt = cnt.reset_index()
    # chance (same construction as above, on the near-5 window)
    user_onb_max = log_.groupby('user_id').order_number.max().reset_index()
    user_onb_max.columns = ['user_id', 'onb_max']
    user_item_min = log_.groupby(['user_id', 'product_id']).order_number.min().reset_index()
    user_item_min.columns = ['user_id', 'product_id', 'onb_min']
    chance = pd.merge(user_item_min, user_onb_max, on='user_id', how='left')
    chance['chance_n5'] = chance.onb_max - chance.onb_min +1
    df = pd.merge(cnt, chance, on=['user_id', 'product_id'], how='left')
    df['order_ratio_bychance_n5'] = df.cnt / df.chance_n5
    col = ['user_id', 'product_id', 'chance_n5', 'order_ratio_bychance_n5']
    df[col].to_pickle('../feature/{}/f309_user-product_n5.p'.format(folder))
def get_vocab(iob2_files: List[str]) -> List[str]:
    """Retrieve the vocabulary of the iob2 annotated files

    Arguments:
        iob2_files {List[str]} -- List of paths to the iob2 annotated files
    Returns:
        List[str] -- Returns the unique list of vocabulary found in the files
    """
    vocab = set()
    for iob2_file in iob2_files:
        logging.info("Loading file %s for creating corpus embeddings", iob2_file)
        # Fix: use a context manager so the file handle is closed
        # deterministically (the original left it to the garbage collector).
        with open(iob2_file) as handle:
            for line in handle:
                # Token is the first tab-separated column.
                # NOTE(review): a line with no tab (e.g. a blank sentence
                # separator) yields the raw line including its newline —
                # confirm such lines never occur or are filtered upstream.
                token = line.split("\t")[0]
                vocab.add(token)
    return list(vocab)
def reset_monotonic_time(value=0.0):
    """
    Make the monotonic clock return the real time on its next
    call.

    .. versionadded:: 2.0
    """
    # Rebind the module-level clock state to `value` (0.0 by default);
    # presumably consumed by this module's monotonic-time function on its
    # next call — verify against the rest of the module.
    global _current_time # pylint:disable=global-statement
    _current_time = value
def taylor_green_vortex(x, y, t, nu):
    """Evaluate the analytical Taylor-Green vortex solution at time ``t``.

    Parameters
    ----------
    x : numpy.ndarray
        1D array of gridline locations along x.
    y : numpy.ndarray
        1D array of gridline locations along y.
    t : float
        Time at which to evaluate the solution.
    nu : float
        Kinematic viscosity coefficient.

    Returns
    -------
    tuple of numpy.ndarray
        ``(u, v, p)``: the x-velocity, y-velocity, and pressure fields,
        each as a 2D array.
    """
    X, Y = numpy.meshgrid(x, y)
    k = 2 * numpy.pi
    # Velocity amplitude decays as exp(-2 k^2 nu t); pressure as its square.
    decay = numpy.exp(-2 * k**2 * nu * t)
    u = -numpy.cos(k * X) * numpy.sin(k * Y) * decay
    v = numpy.sin(k * X) * numpy.cos(k * Y) * decay
    p = -0.25 * (numpy.cos(2 * k * X) + numpy.cos(2 * k * Y)) * decay**2
    return u, v, p
def static_initial_state(batch_size, h_size):
    """Build the initial (all-zero, complex64) hidden state for a single GRU.

    When ``batch_size`` is ``None`` the unbatched state of shape
    ``[h_size]`` is returned; otherwise a batch dimension is prepended.
    """
    initial = jnp.zeros([h_size], dtype=jnp.complex64)
    if batch_size is None:
        return initial
    return add_batch(initial, batch_size)
def get_desklamp(request, index):
    """
    Pytest fixture helper: construct and open the DeskLamp with the given
    index, registering a finalizer that unsubscribes, powers it off, and
    closes the connection on teardown.  Skips the test if the lamp cannot
    be opened.
    """
    lamp = DeskLamp(index)
    try:
        lamp.open()
    except RuntimeError:
        pytest.skip("Could not open desklamp connection")

    def _teardown():
        lamp.unsubscribe()
        lamp.off()
        lamp.close()

    request.addfinalizer(_teardown)
    return lamp
def conj(x):
    """Return the complex conjugate of ``x``.

    ``x`` is a two-channel complex torch tensor whose last dimension holds
    (real, imaginary) pairs; the result negates the imaginary channel.
    """
    assert x.shape[-1] == 2
    real, imag = x[..., 0], x[..., 1]
    return torch.stack((real, -imag), dim=-1)
def label_clusters(img, min_cluster_size=50, min_thresh=1e-6, max_thresh=1, fully_connected=False):
    """
    Threshold an image and uniquely label its connected clusters in place,
    discarding clusters smaller than ``min_cluster_size`` voxels.
    Returns the labeled image.
    """
    dimension = img.dimension
    labeled = threshold_image(img, min_thresh, max_thresh)
    connectivity_flag = int(fully_connected)
    raw_args = [dimension, labeled, labeled, min_cluster_size, connectivity_flag]
    # The ANTs backend labels `labeled` in place via the processed argument list.
    lib.LabelClustersUniquely(_int_antsProcessArguments(raw_args))
    return labeled
def update_schema(schema_old, schema_new):
    """
    Merge a new BigQuery schema into an old one.

    Fields in the new schema replace same-named fields of the old schema
    (in place, keeping their original position); fields that only appear
    in the new schema are appended at the end.

    Arguments:
        schema_old: the old schema to update
        schema_new: the new schema which will overwrite/extend the old
    """
    merged = list(schema_old["fields"])
    position_of = {field["name"]: i for i, field in enumerate(merged)}
    for new_field in schema_new["fields"]:
        pos = position_of.get(new_field["name"])
        if pos is None:
            merged.append(new_field)
        else:
            merged[pos] = new_field
    return {"fields": merged}
def get_estimators(positions_all, positions_relevant):
    """
    Extract density estimators from a judged sample of paragraph positions.

    Parameters
    ----------
    positions_all : dict of (Path, float)
        A sample of paragraph positions from various datasets in the
        NTCIR-11 Math-2, and NTCIR-12 MathIR format.
    positions_relevant : dict of (Path, float)
        A subsample of relevant paragraph positions from the same datasets.

    Returns
    -------
    (float, KernelDensity, KernelDensity)
        An estimate of P(relevant), and estimators of p(position), and
        p(position | relevant).
    """
    samples_all = [(position,)
                   for positions in positions_all.values()
                   for position in positions]
    samples_relevant = [(position,)
                        for positions in positions_relevant.values()
                        for position in positions]
    prior_relevant = len(samples_relevant) / len(samples_all)
    LOGGER.info("Fitting prior p(position) density estimator")
    density_all = KernelDensity(**KERNEL).fit(samples_all)
    LOGGER.info("Fitting conditional p(position | relevant) density estimator")
    density_relevant = KernelDensity(**KERNEL).fit(samples_relevant)
    return (prior_relevant, density_all, density_relevant)
def write_relation_row(mcf_file, row, drug_is_first, genes_pharm_to_dcid,
                       drugs_pharm_to_dcid):
    """Write the mcf string of a row from drug_gene_df or gene_drug_df to file.

    Resolves the drug dcid and all gene dcids from the PharmGKB id mappings,
    then writes one ChemicalCompoundGeneAssociation mcf per gene dcid.
    Rows referencing an unknown drug or gene id are reported and skipped.

    Args:
        mcf_file: output mcf file
        row: a row from either drug_gene_df or gene_drug_df
        drug_is_first: boolean indicating if the pharmGKB id of the drug is
          'Entity1_id' or 'Entity2_id'
        genes_pharm_to_dcid: pharmgkb id to dcids of gene dictionary mapping
        drugs_pharm_to_dcid: pharmgkb id to dcids of drug dictionary mapping
    """
    first, second = row['Entity1_id'], row['Entity2_id']
    drug_pharm, gene_pharm = (first, second) if drug_is_first else (second, first)
    if drug_pharm not in drugs_pharm_to_dcid:
        print('unrecognized drug pharm id: ' + drug_pharm)
        return
    if gene_pharm not in genes_pharm_to_dcid:
        print('unrecognized gene pharm id: ' + gene_pharm)
        return
    drug_dcid = drugs_pharm_to_dcid[drug_pharm]
    for gene_dcid in genes_pharm_to_dcid[gene_pharm]:
        mcf_file.write(get_relation_mcf(row, drug_dcid, gene_dcid))
def drift_correction(ds, ds_lo):
    ## testing a drift time correction code
    # t1df = ds.get_t1df()
    # t1df.reset_index(inplace=True)
    # t2df = ds.get_t2df()
    """
    Take a single DataSet and window it so that the output file only contains
    events near an expected peak location.

    NOTE(review): this is exploratory analysis code.  It opens interactive
    matplotlib windows, writes ./test_dt_file.h5, and calls exit() midway
    through, so everything after that exit() call is currently unreachable.

    :param ds: pygama DataSet providing tier-1/tier-2 dataframes and paths.
    :param ds_lo: dataset number used to select the pass-1 calibration row.
    """
    # a user has to figure out the uncalibrated energy range of the K40 peak
    # xlo, xhi, xpb = 0, 2e6, 2000 # show phys. spectrum (top feature is 2615 pk)
    # xlo, xhi, xpb = 990000, 1030000, 250 # k40 peak, ds 3
    # --- Load tier-2 data and the pass-1 calibration constant from the cal DB ---
    t2df = ds.get_t2df()
    calDB = ds.calDB
    query = db.Query()
    table = calDB.table("cal_pass1")
    vals = table.all()
    df_cal = pd.DataFrame(vals) # <<---- omg awesome
    df_cal = df_cal.loc[df_cal.ds==ds_lo]
    p1cal = df_cal.iloc[0]["p1cal"]
    cal = p1cal * np.asarray(t2df["e_ftp"])  # calibrated energies (unused below)
    # Hard-coded uncalibrated energy window to keep.
    # NOTE(review): presumably brackets a known peak for this detector — confirm.
    xlo = 2.46e6
    xhi = 2.5e6
    # Plot the windowed (uncalibrated) spectrum for visual inspection.
    hE, xE = ph.get_hist(t2df["energy"], bins=100, range=(xlo, xhi))
    plt.semilogy(xE, hE, ls='steps', lw=1, c='r')
    import matplotlib.ticker as ticker
    plt.gca().xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.4e'))
    plt.locator_params(axis='x', nbins=5)
    plt.xlabel("Energy (uncal.)", ha='right', x=1)
    plt.ylabel("Counts", ha='right', y=1)
    plt.show()
    # plt.savefig(f"./plots/cage_ds{ds.ds_lo}_winK40.pdf")
    # --- Scan every tier-1 file in chunks, keeping only events in the window ---
    t1df = pd.DataFrame()
    for run in ds.paths:
        ft1 = ds.paths[run]["t1_path"]
        print(f"Scanning ds {ds.ds_lo}, run {run}\n file: {ft1}")
        for chunk in pd.read_hdf(ft1, 'ORSIS3302DecoderForEnergy', chunksize=5e4):
            t1df_win = chunk.loc[(chunk.energy > xlo) & (chunk.energy < xhi)]
            print(t1df_win.shape)
            t1df = pd.concat([t1df, t1df_win], ignore_index=True)
    print('It worked? maybe?')
    h5_opts = {
        "mode":"w", # overwrite existing
        "append":False,
        "format":"table",
        # "complib":"blosc:zlib", # no compression, increases I/O speed
        # "complevel":1,
        # "data_columns":["ievt"]
    }
    t1df.reset_index(inplace=True)
    t1df.to_hdf('./test_dt_file.h5', key="df_windowed", **h5_opts)
    print("wrote file")
    # NOTE(review): exit() here terminates the process — the drift-time
    # correction code below never runs in the current state.
    exit()
    # key = "/ORSIS3302DecoderForEnergy"
    # wf_chunk = pd.read_hdf(t1df, key, where="ievt < {}".format(75000))
    # wf_chunk.reset_index(inplace=True) # required step -- fix pygama "append" bug
    t2df = t2df.reset_index(drop=True)
    # create waveform block. mask wfs of unequal lengths
    number = 20000
    # Integer-named columns of the tier-1 dataframe are the waveform samples.
    icols = []
    for idx, col in enumerate(t1df.columns):
        if isinstance(col, int):
            icols.append(col)
    wfs = t1df[icols].values
    wfs = np.asarray(wfs)
    # wfs = wfs[:number]
    # t2df_chunk = t2df[:number]
    # print(wf_block.shape, type(wf_block))
    # print(t2df_chunk)
    t0 = np.asarray(t2df['t0'])
    energy = np.asarray(t2df['e_ftp'])
    # energy = 0.4066852222964447 * energy
    # Subtract each waveform's mean of its first 500 samples (baseline).
    baseline = wfs[:, 0:500]
    avg_bl = []
    for i in range(len(wfs)):
        avg_bl.append(np.mean(baseline[i], keepdims=True))
    avg_bl = np.asarray(avg_bl)
    wfs = np.asarray(wfs)
    wfs = wfs - avg_bl
    # Pole-zero correction with a 78 us decay constant at a 100 MHz clock.
    clk = 100e6
    decay = 78
    wfs = pz(wfs, decay, clk)
    # For each waveform, find t100 = first sample exceeding its e_ftp energy;
    # keep only events where such a crossing exists.
    t100 = []
    t0_raw = []
    wf_raw = []
    e_raw = []
    for i in range(len(wfs)):
        t100_t = np.where(wfs[i] > energy[i])
        t100_t = t100_t[0]
        if len(t100_t) > 0:
            t100_t = t100_t[0]
            t100.append(t100_t)
            t0_raw.append(t0[i])
            wf_raw.append(wfs[i])
            e_raw.append(energy[i])
    e_raw = np.asarray(e_raw)
    # Drop saturated/over-range events, then apply the hard-coded pass-1
    # calibration constant.  NOTE(review): 0.4066852222964447 duplicates what
    # p1cal should provide — confirm why it is hard-coded here.
    index = np.where(e_raw < 7300)[0]
    t100 = np.asarray(t100)
    t0_raw = np.asarray(t0_raw)
    wf_raw = np.asarray(wf_raw)
    e_raw = e_raw[index]
    t100 = t100[index]
    t0_raw = t0_raw[index]
    wf_raw = wf_raw[index]
    e_raw = 0.4066852222964447 * e_raw
    wf_raw = 0.4066852222964447 * wf_raw
    hist, bins = np.histogram(e_raw, bins=2700, range=[0,2700])
    b = (bins[:-1] + bins[1:]) / 2
    plt.plot(b, hist, ls="steps", color='black')
    plt.tight_layout()
    plt.show()
    plt.clf()
    # xvals = np.arange(0,3000)
    # start = time.time()
    # for i in range(len(t100)):
    #
    #     plt.plot(xvals, wf_raw[i], lw=1)
    #     plt.vlines(t0_raw[i], np.amin(wf_raw[i]), e_raw[i], color='r', linewidth=1.5, label='t0')
    #     plt.vlines(t100[i], np.amin(wf_raw[i]), e_raw[i], color='g', linewidth=1.5, label='t100')
    #     plt.hlines(e_raw[i], t0_raw[i], 3000, color='k', linewidth=1.5, zorder=10, label='e_ftp')
    #     plt.xlabel('Sample Number', ha='right', x=1.0)
    #     plt.ylabel('ADC Value', ha='right', y=1.0)
    #     plt.legend()
    #     plt.tight_layout()
    #     plt.show()
    # exit()
    """
    a1 = (t100 - t0_raw) * e_raw
    a_wf = []
    for i in range(len(wf_raw)):
        a2 = sum(wf_raw[i,t0[i]:t100[i]])
        a_wf.append(a2)
    a_drift = a1 - a_wf
    # a_drift = a_drift.tolist()
    # print(a_drift)
    # exit()
    a_test = a_drift[np.where((e_raw > 2600) & (e_raw < 2630))]
    e_test = e_raw[np.where((e_raw > 2600) & (e_raw < 2630))]
    plt.hist2d(e_test, a_test, bins=[30,100], range=[[2600, 2630], [0, np.amax(a_test)]], norm=LogNorm(), cmap='jet')
    cbar = plt.colorbar()
    cbar.ax.set_ylabel('Counts')
    plt.tight_layout()
    plt.show()
    exit()
    """
    # Interactive per-waveform display of t0/t100/e_ftp markers.
    xvals = np.arange(0,3000)
    start = time.time()
    for i in range(0,number):
    # for i in range(0,5):
        plt.plot(xvals, wfs[i], lw=1)
        plt.vlines(t0[i], np.amin(wfs[i]), energy[i], color='r', linewidth=1.5, label='t0')
        plt.vlines(t100[i], np.amin(wfs[i]), energy[i], color='g', linewidth=1.5, label='t100')
        plt.hlines(energy[i], t0[i], 3000, color='k', linewidth=1.5, zorder=10, label='e_ftp')
        plt.xlabel('Sample Number', ha='right', x=1.0)
        plt.ylabel('ADC Value', ha='right', y=1.0)
        plt.legend()
        plt.tight_layout()
        plt.show()
    # input:
    #   fsignal: PZ-corrected and INL-corrected signal of length len, from channel chan
    #   Dets: MJ detector info data structure
    #   PSA: contains filter params to use for trapezoids
    #   CTC_factor: the value used in the correction, usually CTC.e_dt_slope[chan]
    # outputs:
    #   returned value: energy in keV, or -1.0f in case of error
    #   t0: start time of drift/signal
    #   e_adc: energy in ADC units
    #   e_raw: uncorrected energy in 0.001 ADC units
    #   drift: charge trapping value (drift time * charge)
    #          to be used for optimizing correction, in ADC units
    #   CTC correction = drift*ctc_factor[chan]
def openbabel_force_field(label, mol, num_confs=None, xyz=None, force_field='GAFF', return_xyz_strings=True,
                          method='diverse'):
    """
    Optimize conformers using a force field (GAFF, MMFF94s, MMFF94, UFF, Ghemical)

    Args:
        label (str): The species' label.
        mol (Molecule, optional): The RMG molecule object with connectivity and bond order information.
        num_confs (int, optional): The number of random 3D conformations to generate.
        xyz (list, optional): The 3D coordinates in an array format.
        force_field (str, optional): The type of force field to use.
        return_xyz_strings (bool, optional): Whether to return xyz in string or array format. True for string.
        method (str, optional): The conformer searching method to use in open babel.
                                For method description, see http://openbabel.org/dev-api/group__conformer.shtml

    Returns:
        list: Entries are optimized xyz's in a list format.
    Returns:
        list: Entries are float numbers representing the energies in kJ/mol.

    Raises:
        ConformerError: If the force field explodes, the method is unknown,
            or neither ``num_confs`` nor ``xyz`` is given.
    """
    xyzs, energies = list(), list()
    ff = ob.OBForceField.FindForceField(force_field)
    # Branch 1: a starting geometry was given — build an OBMol from it and
    # minimize that single structure with steepest descent.
    if xyz is not None:
        # NOTE(review): `unicode` exists only in Python 2; on Python 3 this
        # line raises NameError — confirm the target interpreter version.
        if isinstance(xyz, (str, unicode)):
            xyz = converter.get_xyz_matrix(xyz)[0]
        # generate an open babel molecule
        obmol = ob.OBMol()
        atoms = mol.vertices
        ob_atom_ids = dict() # dictionary of OB atom IDs
        for i, atom in enumerate(atoms):
            a = obmol.NewAtom()
            a.SetAtomicNum(atom.number)
            a.SetVector(xyz[i][0], xyz[i][1], xyz[i][2]) # assume xyz is ordered like mol; line not in in toOBMol
            if atom.element.isotope != -1:
                a.SetIsotope(atom.element.isotope)
            a.SetFormalCharge(atom.charge)
            ob_atom_ids[atom] = a.GetId()
        # Map RMG bond orders to OpenBabel bond order codes (5 = aromatic).
        orders = {1: 1, 2: 2, 3: 3, 4: 4, 1.5: 5}
        for atom1 in mol.vertices:
            for atom2, bond in atom1.edges.items():
                if bond.isHydrogenBond():
                    continue
                index1 = atoms.index(atom1)
                index2 = atoms.index(atom2)
                # Only add each bond once (edges appear in both directions).
                if index1 < index2:
                    obmol.AddBond(index1 + 1, index2 + 1, orders[bond.order])
        # optimize
        ff.Setup(obmol)
        ff.SetLogLevel(0)
        ff.SetVDWCutOff(6.0) # The VDW cut-off distance (default=6.0)
        ff.SetElectrostaticCutOff(10.0) # The Electrostatic cut-off distance (default=10.0)
        ff.SetUpdateFrequency(10) # The frequency to update the non-bonded pairs (default=10)
        ff.EnableCutOff(False) # Use cut-off (default=don't use cut-off)
        # ff.SetLineSearchType('Newton2Num')
        ff.SteepestDescentInitialize() # ConjugateGradientsInitialize
        v = 1
        # Take single minimization steps until convergence (returns 0).
        while v:
            v = ff.SteepestDescentTakeNSteps(1) # ConjugateGradientsTakeNSteps
            if ff.DetectExplosion():
                raise ConformerError('Force field {0} exploded with method {1} for {2}'.format(
                    force_field, 'SteepestDescent', label))
        ff.GetCoordinates(obmol)
    # Branch 2: no geometry given — generate num_confs conformers via the
    # requested rotor-search method.
    elif num_confs is not None:
        obmol, ob_atom_ids = toOBMol(mol, returnMapping=True)
        pybmol = pyb.Molecule(obmol)
        pybmol.make3D()
        ff.Setup(obmol)
        if method.lower() == 'weighted':
            ff.WeightedRotorSearch(num_confs, 2000)
        elif method.lower() == 'random':
            ff.RandomRotorSearch(num_confs, 2000)
        elif method.lower() == 'diverse':
            rmsd_cutoff = 0.5
            energy_cutoff = 50.
            confab_verbose = False
            ff.DiverseConfGen(rmsd_cutoff, num_confs, energy_cutoff, confab_verbose)
        elif method.lower() == 'systematic':
            ff.SystematicRotorSearch(num_confs)
        else:
            raise ConformerError('Could not identify method {0} for {1}'.format(method, label))
    else:
        raise ConformerError('Either num_confs or xyz should be given for {0}'.format(label))
    # Collect every conformer: re-Setup the force field per conformer so that
    # ff.Energy() reflects the active geometry.
    ff.GetConformers(obmol)
    obconversion = ob.OBConversion()
    obconversion.SetOutFormat('xyz')
    for i in range(obmol.NumConformers()):
        obmol.SetConformer(i)
        ff.Setup(obmol)
        # Drop the two-line xyz header (atom count + comment).
        xyz = '\n'.join(obconversion.WriteString(obmol).splitlines()[2:])
        if not return_xyz_strings:
            xyz = converter.get_xyz_matrix(xyz)[0]
            xyz = [xyz[ob_atom_ids[mol.atoms[j]]] for j, _ in enumerate(xyz)] # reorder
        xyzs.append(xyz)
        energies.append(ff.Energy())
    return xyzs, energies
def _wait_for_exit(please_stop):
    """
    /dev/null PIPED TO sys.stdin SPEWS INFINITE LINES, DO NOT POLL AS OFTEN

    Block until either the user types "exit" on stdin or `please_stop` is
    signalled.  On Windows (msvcrt importable) delegates to
    _wait_for_exit_on_windows and returns immediately.
    """
    try:
        import msvcrt
        _wait_for_exit_on_windows(please_stop)
        return
    except:
        pass
    cr_count = 0 # COUNT NUMBER OF BLANK LINES
    try:
        # NO LONGER NEEDED, THE HAPPY PATH WILL EXIT
        _signal.signal(_signal.SIGTERM, _signal.default_int_handler)
        _signal.signal(_signal.SIGINT, _signal.default_int_handler)
        while not please_stop:
            # DEBUG and Log.note("inside wait-for-shutdown loop")
            # After 30+ consecutive blank lines, assume stdin is /dev/null and
            # throttle the loop to one read every 3 seconds.
            if cr_count > 30:
                (Till(seconds=3) | please_stop).wait()
            try:
                # line = ""
                line = STDIN.readline()
            except Exception as e:
                # NOTE(review): the wrapped exception is discarded, and the
                # `"Bad file descriptor" in e` test relies on the mo-logs
                # exception type supporting `in` — confirm both are intended.
                Except.wrap(e)
                if "Bad file descriptor" in e:
                    Log.note("can not read from stdin")
                    _wait_for_interrupt(please_stop)
                    break
            # DEBUG and Log.note("read line {{line|quote}}, count={{count}}", line=line, count=cr_count)
            if not line:
                cr_count += 1
            else:
                cr_count = -1000000  # NOT /dev/null
            if line.strip() == b"exit":
                Log.alert("'exit' Detected!  Stopping...")
                return
    except Exception as e:
        Log.warning("programming error", cause=e)
    finally:
        if please_stop:
            Log.note("please_stop has been requested")
        Log.note("done waiting for exit")
def parse_flarelabels(label_file):
    """
    Parse a flare-label file into a residue-identifier lookup table.

    Each non-empty line of ``label_file`` has 2-3 tab-separated columns:
      - CHAIN:RESN:RESI (e.g. A:ARG:123)
      - [[TOPLEVEL.]MIDLEVEL.]LABEL (e.g. Receptor.Helix2.2x44); optional,
        defaults to the residue identifier itself
      - COLOR (e.g. #FF0000 or white); optional, defaults to "white"

    Parameters
    ----------
    label_file : file
        Open file (or None) with one label definition per line.

    Returns
    -------
    dict of str : (dict of str : str)
        Maps each residue identifier to a dict with keys "label" (the leaf
        label), "treepath" (the full dotted path) and "color" (CSS color).
        Returns None when ``label_file`` is None.

    Raises
    ------
    AssertionError
        If a residue identifier or a flare label appears more than once.
    """
    if label_file is None:
        return None
    labels_by_residue = {}
    seen_labels = set()  # duplicate-detection only
    for raw_line in label_file:
        stripped = raw_line.strip()
        if not stripped:
            # Skip blank lines.
            continue
        fields = stripped.split("\t")
        residue_id = fields[0]
        treepath = fields[1] if len(fields) > 1 else fields[0]
        leaf_label = treepath.split(".")[-1]
        color = fields[2] if len(fields) > 2 else "white"
        if residue_id in labels_by_residue:
            raise AssertionError("Residue identifier '"+residue_id+"' appears twice in "+label_file.name)
        if leaf_label in seen_labels:
            raise AssertionError("Flare label '"+leaf_label+"' used twice in "+label_file.name)
        labels_by_residue[residue_id] = {"label": leaf_label, "treepath": treepath, "color": color}
        seen_labels.add(leaf_label)
    return labels_by_residue
def remove_background(data, dim="t2", deg=0, regions=None):
    """Remove a polynomial background from data.

    Args:
        data (DNPData): Data object
        dim (str): Dimension along which to fit the background
        deg (int): Polynomial degree
        regions (None, list): Background regions; by default the entire
            range is used. Regions are given as a list of tuples
            [(min, max), ...]

    Returns:
        DNPData: Background corrected data
    """
    fit = background(data, dim=dim, deg=deg, regions=regions)
    corrected = data - fit
    # Record the processing step on the result.
    # NOTE: the attribute name keeps the historical typo "remove_backround"
    # so downstream consumers keep matching it.
    corrected.add_proc_attrs(
        "remove_backround",
        {"dim": dim, "deg": deg, "regions": regions},
    )
    return corrected
def run_as3_deffered():
    """Run the AS3 deferred deployment script.

    Ensures the delayed-deployment script is registered in /config/startup
    (so it also runs after reboot), then launches it in the background.
    """
    deploy_script_name = "%s/%s" % (
        os.path.dirname(os.path.realpath(__file__)), AS3_DELAYED_DEPLOYMENT_SCRIPT)
    if not is_startup_injected(AS3_DELAYED_DEPLOYMENT_SCRIPT):
        LOG.info(
            'injecting f5-appsvcs-extension delayed deployment script %s into /config/startup',
            deploy_script_name
        )
        with open('/config/startup', 'a+') as css:
            css.write("/usr/bin/env python %s %s &\n" %
                      (deploy_script_name, MODULE_NAME))
    LOG.info(
        'running f5-appsvcs-extension delayed deployment script %s', deploy_script_name)
    # Bug fix: the previous subprocess.call passed a literal '&' argument —
    # shell job-control syntax has no meaning in an argv list — and then
    # blocked until the script exited. Popen launches it without waiting,
    # matching the backgrounded '&' form written to /config/startup above.
    subprocess.Popen(['/usr/bin/env', 'python', deploy_script_name, MODULE_NAME])
def deferral_video(message, video_url):
    """Defer ("skip") a video: clear its rating in the database and show the
    user a keyboard button leading to the next video."""
    connection = get_connection()
    cursor = connection.cursor()
    cursor.execute(
        "UPDATE channel_list SET rating = Null WHERE video_url IN(?);",
        (video_url,),
    )
    keyboard = types.ReplyKeyboardMarkup(
        one_time_keyboard=True, resize_keyboard=True
    )
    next_button = types.InlineKeyboardButton(text="👉 Следующее видео")
    keyboard.add(next_button)
    BOT.send_message(message.chat.id, "Видео отложено.", reply_markup=keyboard)
    connection.commit()
def get_hmm_datatype(query_file):
    """Takes an HMM file (HMMer3 software package) and determines what data
    type it has (i.e., generated from an amino acid or nucleic acid alignment).
    Returns either "prot" or "nucl".

    Reads the header's ALPH line: "amino" maps to "prot", "DNA" to "nucl".
    """
    datatype = None
    with open(query_file) as infh:
        for i in infh:
            if i.startswith('ALPH'):
                # Split on arbitrary whitespace: HMMer3 pads header fields
                # with multiple spaces, so split(' ')[1] would return an
                # empty string for a line like "ALPH  amino".
                dname = i.split()[1]
                if dname == 'amino':
                    datatype = 'prot'
                elif dname == 'DNA':
                    datatype = 'nucl'
                break
    # Check that it worked.
    assert datatype is not None, """Error: Data type could not be
    determined for input file: %s""" % query_file
    # Return the data type.
    return datatype
def detect_min_threshold_outliers(series, threshold):
    """Detect values lower than the given threshold.

    series : series, mandatory
        The series in which to detect the outliers.
    threshold : integer, float, mandatory
        Minimum value; anything strictly below it is flagged as an outlier.

    Returns an elementwise boolean mask (True where series < threshold).
    """
    return series < threshold
def select_sort(L: List[float]) -> None:
    """Sort ``L`` in ascending order, in place, via selection sort.

    Assumes the elements of ``L`` are mutually comparable with ``<``.

    Bug fix: the previous annotation ``List[int or float or long]``
    referenced ``long``, which does not exist in Python 3 and raised
    NameError as soon as the module was imported.
    """
    suffix_start = 0
    while suffix_start != len(L):
        # Each pass moves the minimum of L[suffix_start:] into position
        # suffix_start via successive swaps.
        for i in range(suffix_start, len(L)):
            if L[i] < L[suffix_start]:
                L[suffix_start], L[i] = L[i], L[suffix_start]
        suffix_start += 1
def new_datetime(d):
    """
    Build a safe datetime from a datetime.date or datetime.datetime object.

    Plain dates yield midnight with no tz; datetimes keep their time fields
    and tzinfo.
    """
    args = [d.year, d.month, d.day]
    if isinstance(d, real_datetime):
        args += [d.hour, d.minute, d.second, d.microsecond, d.tzinfo]
    return datetime(*args)
def _to_str(x):
    """Map a bool tensor to 'True'/'False' strings; pass other dtypes through."""
    tensor = tf.convert_to_tensor(x)
    if tensor.dtype != tf.bool:
        return tensor
    return tf.where(tensor, 'True', 'False')
def inpaintn(x,m=100, x0=None, alpha=2):
    """ This function interpolates the input (2-dimensional) image 'x' with missing values (can be NaN of Inf). It is based on a recursive process
    where at each step the discrete cosine transform (dct) is performed of the residue, multiplied by some weights, and then the inverse dct is taken.
    The initial guess 'x0' for the interpolation can be provided by the user, otherwise it starts with a nearest neighbor filling.
    Args
        INPUTS:
        x (numpy array) - is the image with missing elements (eiher np.nan or np.inf) from which you want to perform interpolation
        m (int) - is the number of iteration; default=100
        x0 (numpy array) - can be your initial guess; defaut=None
        alpha (float) - some input number used as a power scaling; default=2
        OUT:
        y (numpy array) - is the interpolated image wrt proposed method
    """
    sh = x.shape
    # Boolean mask of known (finite) pixels; NaN/Inf positions get inpainted.
    ids0 = np.isfinite(x)
    if ids0.all(): #Nothing to interpolate...
        return x
    # Smoothness parameters: regularization strength decreases geometrically
    # from 1e3 to 1e-6 over the m iterations (coarse-to-fine relaxation).
    s0 = 3
    s1 = -6
    s = np.logspace(s0,s1,num=m)
    # Relaxation factor:
    # NOTE(review): rf=2 over-relaxes each update — presumably intentional to
    # speed up convergence; confirm against the reference inpainting scheme.
    rf = 2
    # Weight matrix, here we add some basis vectors to Lambda depending on original size of 'x':
    # Lambda holds the DCT eigenvalues of the 2D Laplacian, raised to `alpha`.
    Lambda = np.zeros(sh, float)
    u0 = np.cos(np.pi*np.arange(0,sh[0]).reshape((sh[0],1))/sh[0])
    u1 = np.cos(np.pi*np.arange(0,sh[1]).reshape((1,sh[1]))/sh[1])
    Lambda = np.add(np.add(Lambda,u0),u1)
    Lambda = 2*(2-Lambda)
    Lambda = Lambda**alpha
    # Starting interpolation:
    if x0 is None:
        y = initial_nn(x)
    else:
        y = np.copy(x0)
    for mu in range(m):
        # Low-pass gain per DCT coefficient for the current smoothness s[mu].
        Gamma = 1/(1+s[mu]*Lambda)
        a = np.copy(y)
        # At known pixels this reduces to a[ids0] = x[ids0]; unknown pixels
        # keep the current estimate y.
        a[ids0] = (x-y)[ids0]+y[ids0]
        y = rf*idct(Gamma*dct(a, norm='ortho'), norm='ortho')+(1-rf)*y
        # Re-impose the known data after every smoothing step.
        y[ids0] = x[ids0]
    return y
def amend_pinmux_io(top: Dict, name_to_block: Dict[str, IpBlock]):
    """ Process pinmux/pinout configuration and assign available IOs.

    Mutates ``top`` in place: resolves each entry of top['pinmux']['signals']
    against its IP block's signal list, flattens the result into
    pinmux['ios'] with global indices, counts muxed/dedicated IOs and pads,
    and resolves the per-target special-signal pad indices.
    """
    pinmux = top['pinmux']
    pinout = top['pinout']
    targets = top['targets']
    # Buckets for the flattened IO signals, later stacked in this order.
    temp = {}
    temp['inouts'] = []
    temp['inputs'] = []
    temp['outputs'] = []
    for sig in pinmux['signals']:
        # Get the signal information from the IP block type of this instance/
        mod_name = sig['instance']
        m = lib.get_module_by_name(top, mod_name)
        if m is None:
            raise SystemExit("Module {} is not searchable.".format(mod_name))
        block = name_to_block[m['type']]
        # If the signal is explicitly named.
        if sig['port'] != '':
            # If this is a bus signal with explicit indexes.
            if '[' in sig['port']:
                name_split = sig['port'].split('[')
                sig_name = name_split[0]
                # Strip the trailing ']' to get the numeric bit index.
                idx = int(name_split[1][:-1])
            else:
                idx = -1
                sig_name = sig['port']
            sig_inst = deepcopy(block.get_signal_by_name_as_dict(sig_name))
            if idx >= 0 and idx >= sig_inst['width']:
                raise SystemExit("Index {} is out of bounds for signal {}"
                                 " with width {}.".format(idx, sig_name, sig_inst['width']))
            if idx == -1 and sig_inst['width'] != 1:
                raise SystemExit("Bus signal {} requires an index.".format(sig_name))
            # If we got this far we know that the signal is valid and exists.
            # Augment this signal instance with additional information.
            sig_inst.update({'idx': idx,
                             'pad': sig['pad'],
                             'attr': sig['attr'],
                             'connection': sig['connection'],
                             'desc': sig['desc']})
            sig_inst['name'] = mod_name + '_' + sig_inst['name']
            append_io_signal(temp, sig_inst)
        # Otherwise the name is a wildcard for selecting all available IO signals
        # of this block and we need to extract them here one by one signals here.
        else:
            sig_list = deepcopy(block.get_signals_as_list_of_dicts())
            for sig_inst in sig_list:
                # If this is a multibit signal, unroll the bus and
                # generate a single bit IO signal entry for each one.
                if sig_inst['width'] > 1:
                    for idx in range(sig_inst['width']):
                        sig_inst_copy = deepcopy(sig_inst)
                        sig_inst_copy.update({'idx': idx,
                                              'pad': sig['pad'],
                                              'attr': sig['attr'],
                                              'connection': sig['connection'],
                                              'desc': sig['desc']})
                        sig_inst_copy['name'] = sig['instance'] + '_' + sig_inst_copy['name']
                        append_io_signal(temp, sig_inst_copy)
                else:
                    sig_inst.update({'idx': -1,
                                     'pad': sig['pad'],
                                     'attr': sig['attr'],
                                     'connection': sig['connection'],
                                     'desc': sig['desc']})
                    sig_inst['name'] = sig['instance'] + '_' + sig_inst['name']
                    append_io_signal(temp, sig_inst)
    # Now that we've collected all input and output signals,
    # we can go through once again and stack them into one unified
    # list, and calculate MIO/DIO global indices.
    pinmux['ios'] = (temp['inouts'] +
                     temp['inputs'] +
                     temp['outputs'])
    # Remember these counts to facilitate the RTL generation
    pinmux['io_counts'] = {'dedicated': {'inouts': 0, 'inputs': 0, 'outputs': 0, 'pads': 0},
                           'muxed': {'inouts': 0, 'inputs': 0, 'outputs': 0, 'pads': 0}}
    for sig in pinmux['ios']:
        glob_idx = get_index_and_incr(pinmux['io_counts'], sig['connection'], sig['type'])
        sig['glob_idx'] = glob_idx
    # Calculate global indices for pads.
    # Muxed and dedicated pads are numbered independently (j and k).
    j = k = 0
    for pad in pinout['pads']:
        if pad['connection'] == 'muxed':
            pad['idx'] = j
            j += 1
        else:
            pad['idx'] = k
            k += 1
    pinmux['io_counts']['muxed']['pads'] = j
    pinmux['io_counts']['dedicated']['pads'] = k
    # For each target configuration, calculate the special signal indices.
    known_muxed_pads = {}
    for pad in pinout['pads']:
        if pad['connection'] == 'muxed':
            known_muxed_pads[pad['name']] = pad
    known_mapped_dio_pads = {}
    for sig in pinmux['ios']:
        if sig['connection'] in ['muxed', 'manual']:
            continue
        if sig['pad'] in known_mapped_dio_pads:
            raise SystemExit('Cannot have multiple IOs mapped to the same DIO pad {}'
                             .format(sig['pad']))
        known_mapped_dio_pads[sig['pad']] = sig
    for target in targets:
        for entry in target['pinmux']['special_signals']:
            # If this is a muxed pad, the resolution is
            # straightforward. I.e., we just assign the MIO index.
            if entry['pad'] in known_muxed_pads:
                entry['idx'] = known_muxed_pads[entry['pad']]['idx']
            # Otherwise we need to find out which DIO this pad is mapped to.
            # Note that we can't have special_signals that are manual, since
            # there needs to exist a DIO connection.
            elif entry['pad'] in known_mapped_dio_pads:
                # This index refers to the stacked {dio, mio} array
                # on the chip-level, hence we have to add the amount of MIO pads.
                idx = (known_mapped_dio_pads[entry['pad']]['glob_idx'] +
                       pinmux['io_counts']['muxed']['pads'])
                entry['idx'] = idx
            else:
                # Special-signal pad matched neither a muxed pad nor a mapped
                # DIO.  NOTE(review): assert(0) is stripped under -O; a
                # SystemExit with a message would be more robust here.
                assert(0)
def obtain_bboxs(path) -> list:
    """
    Obtain bbox annotations from the file.

    Each non-empty, non-'%'-comment line has the form
    ``<label> <x1> <y1> <x2> <y2>`` (space separated); returns a list of
    ``[label, float, float, float, float]`` entries.
    """
    # Fix: read via a context manager so the file handle is always closed
    # (the old code opened the file and never closed it).
    with open(path, "r") as file:
        lines = file.read().split("\n")
    lines = [x for x in lines if x and not x.startswith("%")]
    lines = [x.rstrip().lstrip() for x in lines]  # get rid of fringe whitespaces
    bboxs = []
    for line in lines:
        items = line.split(" ")
        bboxs.append([items[0], float(items[1]), float(items[2]), float(items[3]), float(items[4])])
    return bboxs
def test_supremacy_coalitions(supremacy):
    """Test an API call to get coalitions"""
    coalitions = supremacy.coalitions()
    if coalitions:
        assert isinstance(coalitions, dict), "Result should be a dict"
def Timeline_Integral_with_cross_before(Tm,):
    """
    Cumulative run-length of a golden-cross/death-cross signal in the time
    domain: a death cross (1 -> 0) keeps accumulating, while a golden cross
    (0 -> 1) resets the counter to zero.

    The first output element is Tm[0] itself; afterwards each signal value
    of 1 resets the count to 0 and any other value increments it.
    """
    counts = [Tm[0]]
    for i in range(1, len(Tm)):
        if Tm[i] == 1:
            counts.append(0)
        else:
            counts.append(counts[i - 1] + 1)
    return np.array(counts)
def list_image_paths() -> Generator[str, None, None]:
    """List each image path in the input directory."""
    # Delegates to list_input_directory with the configured image directory.
    # NOTE(review): the Generator annotation assumes that helper yields
    # lazily — confirm it does not return a materialized list.
    return list_input_directory(INPUT_DIRECTORIES["image_dir"])
def _clear_server_blackout(zkclient, server):
    """Clear server blackout.

    :param zkclient: ZooKeeper client used for the delete.
    :param server: Name of the blacked-out server whose node is removed.
    """
    # Deleting the blackedout_server znode is what lifts the blackout;
    # ensure_deleted is a no-op if the node is already gone.
    path = z.path.blackedout_server(server)
    zkutils.ensure_deleted(zkclient, path)
def a_star(graph: Graph, start: Node, goal: Node, heuristic):
    """
    Standard A* search algorithm.
    :param graph: Graph A graph with all nodes and connections
    :param start: Node Start node, where the search starts
    :param goal: Node End node, the goal for the search
    :param heuristic: Callable taking two positions and returning the
        estimated cost between them (must be admissible for optimality).
    :return: shortest_path: list|False Either a list of node ids or false
    """
    # Indexed priority queue
    queue = pqdict()
    # All visited connections
    visited_stack = {}
    # Add start node
    visited_stack[start] = True
    # The costs from start to a node
    cost_to_node = {}
    # Full costs from a node to goal
    full_costs = {}
    # All paths that have been taken
    shortest_path = []
    # Create a dummy for the start node
    dummy_connection = Connection(start, start)
    # Assign it to the queue so we can start
    queue[dummy_connection] = 0
    while queue:
        # Get next connection from top queue
        # and remove it (its a get + pop)
        connection = queue.pop()
        # Add the node to the shortest path
        # cause otherwise we would not be here
        shortest_path.append(connection)
        cost_to_node[connection.to_node] = connection.cost
        # We have found the target
        if connection.to_node.id == goal.id:
            # Remove all unneded paths and return
            # a sorted list
            return clean_route_list(shortest_path, goal.id)
        # Get all connected nodes
        next_connections = graph.get_connections(connection.to_node)
        # Iterate through all connected nodes
        # and calculate the costs and stuff
        for c in next_connections:
            # Calculate total costs from start to the goal node
            to_goal_cost = heuristic(goal.position, c.to_node.position)
            # Calculate costs from start to this node
            current_cost = cost_to_node[connection.to_node] + c.cost
            # Update lists and costs
            # NOTE(review): the priority is overwritten unconditionally —
            # there is no check that `current_cost` improves on an existing
            # queue entry or that c.to_node was already expanded; also
            # `to_goal_cost` is stored in full_costs but never used for the
            # queue priority.  Confirm against pqdict's update semantics.
            queue[c] = current_cost
            cost_to_node[c.to_node] = current_cost
            full_costs[c.to_node] = current_cost + to_goal_cost
            visited_stack[c.to_node] = True
    # Never found the target, so sad ...
    return False
def buildAndTrainModel(model, learningRate, batchSize, epochs, trainingData, validationData, testingData, trainingLabels, validationLabels, testingLabels, MODEL_NAME, isPrintModel=True):
    """Take the model and model parameters, build and train the model.

    Trains one epoch at a time so that validation loss and test metrics can
    be recorded after every epoch.  Appends per-epoch metrics to the
    module-level lists (MeanSquaredError, RootMeanSquaredError,
    MeanAbsoluteError, MeanAbsolutePercentageError, PearsonR, ValMSE, Epoch)
    and returns them along with the test predictions from the epoch with the
    lowest validation loss.
    """
    # Build and compile model
    # To use other optimizers, refer to: https://keras.io/optimizers/
    # Please do not change the loss function
    optimizer = tf.keras.optimizers.Adam(lr=learningRate)
    model.compile(optimizer=optimizer,
                  loss=tf.keras.losses.MeanSquaredError())
    if isPrintModel:
        print(model.summary())
    for epoch in range(0, epochs):
        model.fit(trainingData, trainingLabels,
                  epochs=1,
                  verbose=0,
                  batch_size=batchSize,
                  shuffle=False)
        # Evaluate model
        valLoss = model.evaluate(validationData, validationLabels, verbose=False)
        ## get metrics
        predictions = model.predict(testingData)
        MSE, MAE, MAPE, RMSE, PR = getMetrics(testingLabels,predictions)
        # NOTE(review): these are module-level lists mutated in place, so
        # repeated calls accumulate metrics across runs — confirm intended.
        MeanSquaredError.append(MSE)
        RootMeanSquaredError.append(RMSE)
        MeanAbsoluteError.append(MAE)
        MeanAbsolutePercentageError.append(MAPE)
        PearsonR.append(PR)
        ValMSE.append(valLoss)
        Epoch.append(epoch)
        # Keep the predictions from the best-validation epoch.
        # NOTE(review): if ValMSE is non-empty from a previous call and this
        # condition never holds, max_predictions is unbound at the return.
        if valLoss <= min(ValMSE):
            max_predictions = predictions
    return MeanSquaredError, RootMeanSquaredError, MeanAbsoluteError, MeanAbsolutePercentageError, ValMSE, PearsonR, Epoch, max_predictions
def setup_namespace(doctest_namespace):
    """Configure the global doctest_namespace fixture."""
    # Expose the coxeter package to every doctest without an explicit import.
    doctest_namespace["coxeter"] = coxeter
def gaussian_temporal_filter(tsincr: np.ndarray, cutoff: float, span: np.ndarray,
                             thr: int) -> np.ndarray:
    """
    Apply a Gaussian temporal low-pass filter to a 1D time-series vector
    for one pixel with irregular temporal sampling.

    :param tsincr: 1D time-series vector to be filtered.
    :param cutoff: filter cutoff in years.
    :param span: 1D vector of cumulative time spans, in years.
    :param thr: minimum number of non-NaN values required to filter.

    :return: low-pass filtered time-series vector (NaN where unfiltered).
    """
    # indices of observations that are not NaN
    valid = np.nonzero(~isnan(tsincr))[0]
    filtered = np.full(tsincr.shape, np.nan, dtype=np.float32)
    if len(valid) >= thr:
        observations = tsincr[valid]
        valid_spans = span[valid]
        for idx in valid:
            # Gaussian smoothing kernel centred on this epoch, normalised
            # to unit weight.
            weights = _kernel(valid_spans - span[idx], cutoff)
            weights = weights / np.sum(weights)
            filtered[idx] = np.sum(observations * weights)
    return filtered
def get_mactable(auth):
    """
    Function to get list of mac-addresses from Aruba OS switch
    :param auth: AOSSAuth class object returned by pyarubaoss.auth
    :return list of mac-addresses
    :rtype list
    """
    # REST endpoint, e.g. http://<switch>/rest/<version>/mac-table
    url_mactable = "http://" + auth.ipaddr + "/rest/" + auth.version + "/mac-table"
    try:
        # auth.cookie carries the session header expected by the switch REST API
        r = requests.get(url_mactable, headers=auth.cookie)
        mactable = json.loads(r.text)['mac_table_entry_element']
        return mactable
    except requests.exceptions.RequestException as error:
        # NOTE(review): on failure this returns an error *string* rather than
        # raising, so callers must type-check the result -- confirm intended.
        return "Error:\n" + str(error) + " get_mactable: An Error has occurred"
def presentations():
    """Render the generic table template listing the selected presentations."""
    context = {
        'title': 'Presentations',
        'data': PRESENTATIONS,
        'target': '_blank',
    }
    return render_template('table.html', **context)
def get_consensus_mask(patient, region, aft, ref=HIVreference(subtype="any")):
    """
    Returns a 1D vector of size aft.shape[-1] where True are the position that correspond to consensus sequences.
    Position that are not mapped to reference or seen too often gapped are always False.

    NOTE(review): the ``ref`` default is evaluated once at import time, so all
    callers share a single HIVreference instance -- confirm this is intended.
    """
    # positions mappable to the reference (False where unmapped or gapped)
    ref_filter = trajectory.get_reference_filter(patient, region, aft, ref)
    consensus_mask = trajectory.get_reversion_map(patient, region, aft, ref)
    initial_idx = patient.get_initial_indices(region)
    # gives reversion mask at initial majority nucleotide
    consensus_mask = consensus_mask[initial_idx, np.arange(aft.shape[-1])]
    return np.logical_and(ref_filter, consensus_mask)
def validate_close(close):
    """
    Validates the given closer.

    Parameters
    ----------
    close : `callable`
        The closer to validate.

    Raises
    ------
    TypeError
        If `close` is not async callable or accepts not 1 parameter.
    """
    if close is None:
        raise TypeError(f'`close` function cannot be `None`.')
    # Analyze the callable as a method: the implicit `self` slot is reserved
    # and excluded from the parameter counts below.
    analyzer = CallableAnalyzer(close, as_method=True)
    if not analyzer.is_async():
        raise TypeError('`close` should have be `async` function.')
    # min_: required positional parameters; max_: maximum accepted ones.
    min_, max_ = analyzer.get_non_reserved_positional_parameter_range()
    if min_ > 1:
        # More than one *required* parameter -> cannot be called with 1 argument.
        raise TypeError(f'`close` should accept `1` parameters, meanwhile the given callable expects at '
            f'least `{min_!r}`, got `{close!r}`.')
    if min_ != 1:
        # Fewer than 1 required: acceptable only when an optional positional
        # slot or a *args catch-all can absorb the single argument.
        if max_ < 1:
            if not analyzer.accepts_args():
                raise TypeError(f'`close` should accept `1` parameters, meanwhile the given callable expects '
                    f'up to `{max_!r}`, got `{close!r}`.')
def parse_arguments(argv=None):
    """Parse command-line arguments for the gif builder.

    Args:
        argv: Optional list of argument strings. When ``None`` (the default,
            backward compatible with the original zero-argument call),
            ``sys.argv[1:]`` is parsed instead.

    Returns:
        argparse.Namespace with ``input_pics``, ``output_gif``, ``fps`` and
        ``duration`` attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-i",
        type=str,
        dest="input_pics",
        help="A file consists of pics path with each pic on a single line.",
    )
    parser.add_argument("-o", type=str, dest="output_gif", help="Output gif path.")
    parser.add_argument("-fps", type=float, dest="fps", help="FPS.")
    parser.add_argument(
        "-duration", type=float, dest="duration", help="Duration of each frame."
    )
    # Passing argv through makes the function testable and embeddable.
    return parser.parse_args(argv)
def return_next_entry_list_uri(links):
    """Return the href of the first link whose rel attribute is "next".

    Returns None when no such link exists.
    """
    matches = (link for link in links if link.attrib["rel"] == "next")
    for match in matches:
        return match.attrib["href"]
    return None
def none(**_):
    """Dummy temperature-correction hook.

    Accepts and ignores any keyword arguments and always yields a zero
    correction as a float.
    """
    return float(0)
def test_parse_exception_i():
    """Ensure parse_exception renders a raised ValueError's traceback."""
    def exception_raise():
        """A function that raises an exception."""
        raise ValueError("expected")
    try:
        exception_raise()
    except Exception as e:
        out = parse_exception(e)
    # Each fragment must appear somewhere in the rendered traceback text.
    fragments = (
        "Traceback (most recent call last):\n\n",
        'test_exceptions.py", line ',
        "in exception_raise\n",
        'raise ValueError("expected")\n\nValueError: expected\n',
    )
    for fragment in fragments:
        assert fragment in out
def fill_one_side(fname, idx0, idx1):
    """fill idx0:idx1 of variable CO2_TRACER1 in boundary file with
    name fname with 450 pptv (0.45 ppbv)

    Opens the netCDF boundary file in append mode, overwrites the slab along
    the last dimension, and closes the file.
    """
    print("replacing {}:{} in {}".format(idx0, idx1, fname))
    # 'a' = append mode: update the variable in place without truncating
    nc = netCDF4.Dataset(fname, 'a')
    nc.variables['CO2_TRACER1'][:, :, idx0:idx1] = 0.45 # 450 pptv = 0.45 ppbv
    nc.close()
def replicate_manifest_list(
    image: ImageName,
    endpoint: str,
    *,
    auth_header_dest: Dict[str, str] = None,
    auth_header_src: Dict[str, str] = None,
    ssl_context_dest: SSLContext = None,
    ssl_context_src: SSLContext = None,
):
    """
    Helper function as docker-py cannot operate on manifest lists.

    Retrieves the manifest list for ``image`` from its source registry and
    pushes it, unmodified, to ``endpoint``.

    Args:
        image: The name of the docker image to be replicated.
        endpoint: Endpoint of the docker registry into which to replicate the image.
        auth_header_dest: HTTP basic authentication header to using when connecting to the service.
        auth_header_src: HTTP basic authentication header to using when connecting to the service.
        ssl_context_dest:
            SSL context referencing the trusted root CA certificated to used when negotiating the TLS connection.
        ssl_context_src:
            SSL context referencing the trusted root CA certificated to used when negotiating the TLS connection.
    """
    # Fix: the headers are unpacked with ** below; the previous code raised
    # TypeError when the defaults of None were left in place.
    if auth_header_dest is None:
        auth_header_dest = {}
    if auth_header_src is None:
        auth_header_src = {}
    media_type = "application/vnd.docker.distribution.manifest.list.v2+json"
    # Note: This cannot be imported above, as it causes a circular import!
    from . import __version__  # pylint: disable=import-outside-toplevel
    user_agent = f"pytest-docker-registry-fixtures/{__version__}"
    # --- pull the manifest list from the source registry ---
    https_connection = HTTPSConnection(context=ssl_context_src, host=image.endpoint)
    identifier = image.digest if image.digest else image.tag  # Prefer digest
    https_connection.request(
        "GET",
        url=f"/v2/{image.image}/manifests/{identifier}",
        headers={"Accept": media_type, "User-Agent": user_agent, **auth_header_src},
    )
    response = https_connection.getresponse()
    assert response.status == 200
    assert response.headers["Content-Type"] == media_type
    if image.digest:
        # Verify content addressability when pulling by digest.
        assert response.headers["Docker-Content-Digest"] == image.digest
    manifest = response.read()
    # --- push the manifest list to the destination registry ---
    https_connection = HTTPSConnection(context=ssl_context_dest, host=endpoint)
    identifier = image.tag if image.tag else image.digest  # Prefer tag
    https_connection.request(
        "PUT",
        url=f"/v2/{image.image}/manifests/{identifier}",
        headers={
            "Content-Type": media_type,
            "User-Agent": user_agent,
            **auth_header_dest,
        },
        body=manifest,
    )
    # 201 Created signals the registry accepted the manifest.
    assert https_connection.getresponse().status == 201
def write_attribute_map_template_to_json(attribute_set, output):
    """Create an attribute map template file.

    Write a JSON file in the given file_path, storing an object mapping
    attributes given in the attribute_set to empty strings. This file was meant
    to serve as a template for matching attributes in the sample metadata and
    prep files to attributes of a Sample, Subject or Preparation object.

    Parameters
    ----------
    attribute_set : Iterable
        Iterable of attributes to include in the attribute map template.
    output : str
        Path to the output (attribute map template) file.
    """
    template = {attribute: '' for attribute in attribute_set}
    with open(output, 'w') as handle:
        handle.write(json.dumps(template, indent=4))
def paths_and_labels_to_rgb_dataset(image_paths, labels, num_classes, label_mode):
    """Constructs a dataset of images and labels.

    Args:
        image_paths: list/tensor of file paths to RGB image files.
        labels: per-path labels encoded according to ``label_mode``.
        num_classes: number of label classes.
        label_mode: label encoding understood by
            ``dataset_utils.labels_to_dataset`` (e.g. 'int', 'categorical').

    Returns:
        A tf.data Dataset yielding (image, label) pairs.
    """
    path_ds = dataset_ops.Dataset.from_tensor_slices(image_paths)
    # decode each file path into an RGB image tensor
    img_ds = path_ds.map(lambda path: load_rgb_img_from_path(path))
    label_ds = dataset_utils.labels_to_dataset(labels, label_mode, num_classes)
    # pair images with their labels element-wise
    img_ds = dataset_ops.Dataset.zip((img_ds, label_ds))
    return img_ds
def main(template_initial_path, template_grown_path, step, total_steps, hydrogen_to_replace, core_atom_linker,
         tmpl_out_path):
    """
    Module to modify templates, currently working in OPLS2005. This main function basically compares two templates;
    an initial and a grown one, extracting the atoms of the fragment (that have been grown). Then, it uses this data
    to modify Linearly different attributes of the template, particularly, sigmas, charges, bond equilibrium distance,
    and the radius non polar SGB from atoms and bonds of the fragment. This modification is performed according to a
    lambda parameter that is computed dividing the current step by the total number of steps. Finally, the template is
    modified and written again to an output file.
    :param template_initial_path: Path to an OPLS2005 template of the core ligand.
    :type template_initial_path: str
    :param template_grown_path: Path to an OPLS2005 template of the ligand with the fragment added to the core.
    :type template_grown_path: str
    :param step: Current step of the total steps.
    :type step: int
    :param total_steps: Total number of steps.
    :type total_steps: int
    :param hydrogen_to_replace: PDB atom name of the hydrogen that will be replaced for the linking atom of the fragment.
    :type hydrogen_to_replace: str
    :param core_atom_linker: PDB atom name of the core that is linking the fragment.
    :type core_atom_linker: str
    :param tmpl_out_path: Output path for the template modified.
    :type tmpl_out_path: str
    :return: None
    """
    # Growth fraction in [0, 1): step/(total_steps+1) drives the linear scaling.
    lambda_to_reduce = float(step/(total_steps+1))
    # Parse both templates.
    templ_ini = TemplateOPLS2005(template_initial_path)
    templ_grw = TemplateOPLS2005(template_grown_path)
    # Atoms present in the grown template but not in the core are the fragment.
    fragment_atoms = detect_fragment_atoms(template_initial=templ_ini, template_grown=templ_grw,
                                           hydrogen_to_replace=hydrogen_to_replace)
    set_fragment_atoms(list_of_fragment_atoms=fragment_atoms)
    # Mark the two atoms that bridge core and fragment.
    set_connecting_atom(template_grown=templ_grw, pdb_atom_name=hydrogen_to_replace)
    set_connecting_atom(template_grown=templ_grw, pdb_atom_name=core_atom_linker)
    fragment_bonds = detect_fragment_bonds(list_of_fragment_atoms=fragment_atoms, template_grown=templ_grw)
    set_fragment_bonds(list_of_fragment_bonds=fragment_bonds)
    # Linearly scale the fragment's force-field parameters by lambda.
    reductor = ReduceLinearly(templ_grw, lambda_to_reduce)
    reductor.reduce_sigmas(reductor.reduce_value)
    reductor.reduce_epsilons(reductor.reduce_value)
    reductor.reduce_charges(reductor.reduce_value)
    reductor.reduce_bond_eq_dist(reductor.reduce_value)
    reductor.reduce_radnpSGB(reductor.reduce_value)
    reductor.reduce_radnpType(reductor.reduce_value)
    reductor.reduce_sgbnpGamma(reductor.reduce_value)
    reductor.reduce_sgbnpType(reductor.reduce_value)
    # Persist the scaled template.
    templ_grw.write_template_to_file(template_new_name=tmpl_out_path)
def get_custom_headers(manifest_resource):
    """Generates the X-TAXII-Date-Added headers based on a manifest resource.

    Returns an empty dict when the resource contains no objects.
    """
    objects = manifest_resource.get("objects", [])
    added_dates = sorted(obj["date_added"] for obj in objects)
    if not added_dates:
        return {}
    return {
        "X-TAXII-Date-Added-First": added_dates[0],
        "X-TAXII-Date-Added-Last": added_dates[-1],
    }
def checkCulling( errs, cullStrings ) :
    """
    Removes all messages containing sub-strings listed in cullStrings. cullStrings can be either a string or a
    list of strings. If as list of strings, each string must be a sub-string in a message for the message to
    be culled.
    """
    def checkCullingMatch( message, cullStrings ) :
        # True only when EVERY cull string occurs in this message (AND semantics).
        found = True
        for cullString in cullStrings : found = found and ( cullString in message )
        return( found )
    def checkCulling2( message, cullStrings, level = 0 ) :
        # Recursively cull nested message lists; returns None when a message
        # (or an entire sub-list) should be dropped.
        if( isinstance( message, list ) ) :
            messages = []
            for msg in message :
                msg1 = checkCulling2( msg, cullStrings, level + 1 )
                if( msg1 is not None ) : messages.append( msg1 )
            # A surviving sub-list needs at least 2 entries (header + detail);
            # otherwise drop it entirely.
            if( len( messages ) < 2 ) : messages = None
            return( messages )
        else :
            if( checkCullingMatch( message, cullStrings ) ) : return( None )
            return( message )
    # Normalize a bare string into a single-element list of cull strings.
    if( isinstance( cullStrings, str ) ) : cullStrings = [ cullStrings ]
    errs2 = []
    for err in errs :
        messages = []
        if( isinstance( err.message, str ) ) :
            # Simple string message: keep the error only if it is not culled.
            if( not( checkCullingMatch( err.message, cullStrings ) ) ) : errs2.append( err )
        else :
            # List-of-messages: cull each entry, keep the error if any survive.
            for message in err.message :
                message = checkCulling2( message, cullStrings )
                if( message is not None ) :
                    messages.append( message )
            if( len( messages ) > 0 ) :
                err.message = messages
                errs2.append( err )
    return( errs2 )
def gram_matrix(x, ba, hi, wi, ch):
    """Compute the normalized Gram matrix of a feature map.

    Args:
        x: input feature tensor.
        ba: batch size; ``None`` means unknown (encoded as -1 for reshape).
        hi, wi, ch: height, width and channel count of the feature map.

    Returns:
        Gram matrix tensor divided by ``hi * wi * ch``.
    """
    if ba is None:
        ba = -1  # let reshape infer the batch dimension
    # flatten the spatial dimensions: (batch, hi*wi, ch)
    feature = K.reshape(x, [ba, int(hi * wi), ch])
    # contract over the spatial axis to get channel-channel correlations
    gram = K.batch_dot(feature, feature, axes=1)
    return gram / (hi * wi * ch)
def read_pinout_csv(csv_file, keyname="number"):
    """Read a pinout CSV file and index its rows by one column.

    Args:
        csv_file: Path to the CSV file; the first row must be a header.
        keyname: Header column whose value becomes each dict key
            (default "number"). Rows sharing a key overwrite earlier ones.

    Returns:
        dict mapping each row's ``keyname`` value to the full row dict.
    """
    # Fix: the original passed open() straight to DictReader and never closed
    # the file handle; use a context manager (newline="" per the csv docs).
    with open(csv_file, newline="") as handle:
        return {row[keyname]: row for row in csv.DictReader(handle)}
async def start(hub, ctx, name, resource_group, **kwargs):
    """
    .. versionadded:: 1.0.0
    Power on (start) a virtual machine.
    :param name: The name of the virtual machine to start.
    :param resource_group: The resource group name assigned to the
        virtual machine.
    :return: dict with the started VM's properties, or ``{"error": ...}``
        on a CloudError.
    CLI Example:
    .. code-block:: bash
        azurerm.compute.virtual_machine.start testvm testgroup
    """
    compconn = await hub.exec.azurerm.utils.get_client(ctx, "compute", **kwargs)
    try:
        # pylint: disable=invalid-name
        # start() returns a long-running-operation poller
        vm = compconn.virtual_machines.start(
            resource_group_name=resource_group, vm_name=name
        )
        # block until the power-on operation completes
        vm.wait()
        vm_result = vm.result()
        result = vm_result.as_dict()
    except CloudError as exc:
        # log and surface the cloud error instead of raising
        await hub.exec.azurerm.utils.log_cloud_error("compute", str(exc), **kwargs)
        result = {"error": str(exc)}
    return result
def Render(request, template_file, params):
    """Render network test pages by delegating to util.Render."""
    rendered = util.Render(request, template_file, params)
    return rendered
def test_scaling_load(master_count,
                      job_count,
                      single_use: bool,
                      run_delay,
                      cpu_quota,
                      memory_quota,
                      work_duration,
                      mom,
                      external_volume: bool,
                      scenario,
                      min_index,
                      max_index,
                      batch_size) -> None:
    """Launch a load test scenario. This does not verify the results
    of the test, but does ensure the instances and jobs were created.
    The installation is run in threads, but the job creation and
    launch is then done serially after all Jenkins instances have
    completed installation.
    Args:
        master_count: Number of Jenkins masters or instances
        job_count: Number of Jobs on each Jenkins master
        single_use: Mesos Single-Use Agent on (true) or off (false)
        run_delay: Jobs should run every X minute(s)
        cpu_quota: CPU quota (0.0 to disable)
        work_duration: Time, in seconds, for generated jobs to sleep
        mom: Marathon on Marathon instance name
        external_volume: External volume on rexray (true) or local volume (false)
        min_index: minimum index to begin jenkins suffixes at
        max_index: maximum index to end jenkins suffixes at
        batch_size: batch size to deploy jenkins instances in
    """
    security_mode = sdk_dcos.get_security_mode()
    # Optionally cap resource usage of the shared role before deploying.
    if mom and cpu_quota != 0.0 and memory_quota != 0.0:
        with shakedown.marathon_on_marathon(mom):
            _setup_quota(SHARED_ROLE, cpu_quota, memory_quota)
    # create marathon client
    if mom:
        with shakedown.marathon_on_marathon(mom):
            marathon_client = shakedown.marathon.create_client()
    else:
        marathon_client = shakedown.marathon.create_client()
    masters = []
    if min_index == -1 or max_index == -1:
        # No explicit index range: generate master_count random suffixes.
        masters = ["jenkins{}".format(sdk_utils.random_string()) for _ in
                   range(0, int(master_count))]
    else:
        #max and min indexes are specified
        #NOTE: using min/max will override master count
        masters = ["jenkins{}".format(index) for index in
                   range(min_index, max_index)]
    # create service accounts in parallel
    sdk_security.install_enterprise_cli()
    if mom:
        _configure_admin_router(mom, SHARED_ROLE)
    # NOTE(review): the in-loop `current = current + batch_size` statements
    # below are redundant -- the for/range already advances `current`.
    current = 0
    end = max_index - min_index
    for current in range(0, end, batch_size):
        batched_masters = masters[current:current+batch_size]
        service_account_threads = _spawn_threads(batched_masters,
                                                 _create_service_accounts,
                                                 security=security_mode)
        thread_failures = _wait_and_get_failures(service_account_threads,
                                                 timeout=SERVICE_ACCOUNT_TIMEOUT)
        current = current + batch_size
    # launch Jenkins services
    current = 0
    end = max_index - min_index
    for current in range(0, end, batch_size):
        log.info("Re-authenticating current batch load of jenkins{} - jenkins{} "
                 "to prevent auth-timeouts on scale cluster.".format(current, current+batch_size))
        dcos_login.login_session()
        batched_masters = masters[current:current+batch_size]
        # Install this batch of Jenkins instances concurrently.
        install_threads = _spawn_threads(batched_masters,
                                         _install_jenkins,
                                         event='deployments',
                                         client=marathon_client,
                                         external_volume=external_volume,
                                         security=security_mode,
                                         daemon=True,
                                         mom=mom)
        thread_failures = _wait_and_get_failures(install_threads,
                                                 timeout=DEPLOY_TIMEOUT)
        thread_names = [x.name for x in thread_failures]
        # the rest of the commands require a running Jenkins instance
        deployed_masters = [x for x in batched_masters if x not in thread_names]
        job_threads = _spawn_threads(deployed_masters,
                                     _create_jobs,
                                     jobs=job_count,
                                     single=single_use,
                                     delay=run_delay,
                                     duration=work_duration,
                                     scenario=scenario)
        _wait_on_threads(job_threads, JOB_RUN_TIMEOUT)
        # Dump accumulated timing data for this batch.
        r = json.dumps(TIMINGS)
        print(r)
        current = current + batch_size
def setup_global_logger(log_filepath=None):
    """Setup logger for logging
    Args:
        log_filepath: log file path. If not specified, only log to console
    Returns:
        logger that can log message at different level
    """
    logger = logging.getLogger(__name__)
    try:
        # Only configure once: skip if this logger already has handlers.
        if not logger.handlers:
            logger.propagate = False
            logger.setLevel(logging.INFO)
            formatter = logging.Formatter("%(levelname)s:[%(asctime)s] - %(message)s")
            # Silence noisy third-party loggers.
            logging.getLogger("tornado").propagate = False
            logging.getLogger("livereload").propagate = False
            # Add sysout handler
            ch = logging.StreamHandler()
            ch.setFormatter(formatter)
            logger.addHandler(ch)
            # Add DB handler
            logger.addHandler(LogDBHandler())
        if log_filepath:
            # File records get a richer format including source location.
            formatter = logging.Formatter(
                "%(levelname)s:[%(asctime)s] - "
                + "[%(filename)s, line-%(lineno)d] - %(message)s"
            )
            # Add file handler
            Path("logs").mkdir(parents=True, exist_ok=True)
            # Rotate the log file daily at midnight.
            fh = logging.handlers.TimedRotatingFileHandler(
                Path("logs") / log_filepath, when="midnight", interval=1
            )
            fh.suffix = "%Y%m%d"
            fh.setFormatter(formatter)
            logger.addHandler(fh)
    except Exception as ex:
        # NOTE(review): setup failures are logged and swallowed, so callers
        # may receive a partially configured logger -- confirm intended.
        logger.error(ex)
    return logger
def calculate_class_recall(conf_mat: np.array) -> np.array:
    """
    Per-class recall from a confusion matrix: true positives on the
    diagonal divided by each row's total (actual instances per class).
    """
    true_positives = np.diagonal(conf_mat)
    actual_totals = np.sum(conf_mat, axis=1)
    return true_positives / actual_totals
def findall(element, path):
    """ A helper around :attr:`lxml.etree._Element.findall` that forwards the
    element's own namespace mapping.
    """
    nsmap = element.nsmap
    return element.findall(path, namespaces=nsmap)
def proxyFromPacFiles(pacURLs=None, URL=None, log=True):
    """Attempts to locate and setup a valid proxy server from pac file URLs
    :Parameters:
        - pacURLs : list
            List of locations (URLs) to look for a pac file. This might
            come from :func:`~psychopy.web.getPacFiles` or
            :func:`~psychopy.web.getWpadFiles`.
        - URL : string
            The URL to use when testing the potential proxies within the files
    :Returns:
        - A urllib.request.ProxyHandler if successful (and this will have
          been added as an opener to the urllib)
        - False if no proxy was found in the files that allowed successful
          connection
    """
    # Fix: use `is None` / truthiness instead of `== None` / `== []`.
    if pacURLs is None:  # if given none try to find some
        pacURLs = getPacFiles()
    if not pacURLs:  # if still empty search for wpad files
        pacURLs = getWpadFiles()
    # for each file search for valid urls and test them as proxies
    for thisPacURL in pacURLs:
        if log:
            msg = 'proxyFromPacFiles is searching file:\n %s'
            logging.debug(msg % thisPacURL)
        try:
            response = urllib.request.urlopen(thisPacURL, timeout=2)
        except urllib.error.URLError:
            if log:
                logging.debug("Failed to find PAC URL '%s' " % thisPacURL)
            continue
        pacStr = response.read().decode('utf-8')
        # find the candidate PROXY strings (valid URLS), numeric and
        # non-numeric:
        pattern = r"PROXY\s([^\s;,:]+:[0-9]{1,5})[^0-9]"
        possProxies = re.findall(pattern, pacStr + '\n')
        for thisPoss in possProxies:
            proxUrl = 'http://' + thisPoss
            handler = urllib.request.ProxyHandler({'http': proxUrl})
            # Fix: `== True` -> `is True`; the strict identity check is kept
            # deliberately since tryProxy may return a non-boolean on failure.
            if tryProxy(handler) is True:
                if log:
                    logging.debug('successfully loaded: %s' % proxUrl)
                # Install the working proxy globally for urllib.
                opener = urllib.request.build_opener(handler)
                urllib.request.install_opener(opener)
                return handler
    return False
def _check_distance_metric(model):
"""Simple wrapper to ensure the distance metric is valid for CoreML conversion"""
is_valid = False
if model.metric is 'euclidean':
is_valid = True
elif model.metric is 'minkowski' and model.p == 2:
is_valid = True
# There are a number of other distance metrics supported by scikit that CoreML doesn't currently support.
if not is_valid:
print_name = ''
if _is_printable(model.metric):
print_name = model.metric
else:
print_name = getattr(model.metric, '__name__', repr(model.metric))
raise TypeError('KNeighbors distance metric not supported for CoreML conversion: {}'.format(print_name)) | 5,334,087 |
def subplots(times,nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True,
             gridspec_kw=None, **fig_kw):
    """ create figure and subplot axes with same time (x) axis
    Non-Market hours will not be included in the plot.
    Notably a custom projection is used for the time axis, and the time values
    are expected to be the same in all subplots. The Axes that are returned
    are not of the usual matplotlib Axes format, but of our custom TSeriesAxes
    type. The usual Axes methods are avaialable, in addtion to some methods
    specific to TSeriesAxes.
    Args:
        - times: the time series for all subplots. Pandas DatetimeIndex object
        expected.
        - following arguments follow matplotlib pyplot.subplots format
    Returns:
        - (fig, axes) tuple:
            * fig: matplotlib.figure.Figure object
            * axes: array of systrade.plotting.utils.TSeriesAxes objects
    """
    # The 'time_series' projection makes every subplot a TSeriesAxes.
    fig, axes = plt.subplots(nrows, ncols,
                             sharex=sharex,
                             sharey=sharey,
                             squeeze=squeeze,
                             gridspec_kw=gridspec_kw,
                             subplot_kw=dict(projection='time_series'),
                             **fig_kw)
    if nrows*ncols>1:
        # Multiple subplots: plt.subplots returned an array already.
        for ax in axes:
            ax.set_xaxis_markettime(times)
    else:
        # Single subplot: wrap the lone Axes in a 1-element array so the
        # return type is uniform for callers.
        axes_arr=np.empty((1,),dtype=utils.TSeriesAxes)
        axes_arr[0] = axes
        axes_arr[0].set_xaxis_markettime(times)
        axes = axes_arr
    return fig,axes
def test_env(testenv, agent, config):
    """
    Test of a GYM environment with an agent deciding on actions based
    on environment state. The test is repeated for the configured number
    of iterations. Status of the environment in each step is displayed
    if verbose is activated.
    Parameters
    ----------
    testenv
        GYM environment instance to test (must expose reset/step/_info
        and a MAXSTEPS attribute).
    agent
        Trained agent that will decide over actions via compute_action.
    config : dict
        Test configuration with keys 'iter' (number of iterations),
        'verbose' (bool) and 'init_freqs' (list of initial frequencies
        to cycle through on reset; may be empty).
    Returns
    -------
    history : dict
        Status history of the environment during test, keyed by
        "Iteration N" and step number.
    """
    iterations = config['iter']
    verbose = config['verbose']
    init_freqs = config['init_freqs']
    numfreqs = len(init_freqs)
    freqcount = 0
    history = {}
    # TEST ITERATIONS
    for i in range(iterations):
        # INITIAL STATUS
        if numfreqs != 0:
            # cycle through the provided initial frequencies round-robin
            state = testenv.reset( init_freqs[freqcount] )
            freqcount += 1
            freqcount %= numfreqs
        else:
            state = testenv.reset()
        # copy so later env mutations don't alter recorded history
        status = testenv._info.copy()
        history[f"Iteration {i + 1}"] = {}
        history[f"Iteration {i + 1}"][0] = status
        if verbose:
            print(f"Iteration {i + 1}")
            print("---------------")
            display_status("Step 0", status)
        # STEPS TO GOAL (OR MAXSTEPS)
        for s in range(testenv.MAXSTEPS):
            action = agent.compute_action(state)
            state, _, done, info = testenv.step(action)
            status = info.copy()
            history[f"Iteration {i + 1}"][s + 1] = status
            if verbose:
                display_status(f"Step {s + 1}", status)
            if done:
                break
    return history
def kldivergence(p, q):
    """Kullback-Leibler divergence D(P || Q) for discrete distributions.

    Parameters
    ----------
    p, q : array-like, dtype=float, shape=n
        Discrete probability distributions.

    Returns
    -------
    float
        Sum over i of p_i * log(p_i / q_i); terms where p_i == 0
        contribute 0 by convention.
    """
    # Fix: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``float`` is the documented replacement.
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
    return np.sum(np.where(p != 0, p * np.log(p / q), 0))
def read_file(file_dir: str, filename: str) -> bytes:
    """Return the raw bytes of ``filename`` located inside ``file_dir``."""
    full_path = os.path.join(file_dir, filename)
    with open(full_path, "rb") as handle:
        return handle.read()
def sortPermutations(perms, index_return=False):
    """
    Sort perms with respect (1) to their length and (2) their lexical order.

    Parameters
    ----------
    perms : sequence of sequences
        Permutations to sort; each entry is converted to a tuple.
    index_return : bool
        When True, also return the position in ``perms`` of each sorted
        permutation (duplicate permutations keep the last position).

    Returns
    -------
    list of tuple
        The sorted permutations (trailing slots stay None when ``perms``
        contains duplicates), and optionally an int ndarray of indices.
    """
    ans = [None] * len(perms)
    # Fix: use np.empty for an uninitialized array; calling the np.ndarray
    # constructor directly is discouraged by the NumPy docs.
    indices = np.empty(len(perms), dtype=int)
    ix = 0
    for n in np.sort(np.unique([len(key) for key in perms])):
        # load subset of perms with length n, keyed by tuple value
        nperms = {}
        for i, perm in enumerate(perms):
            if len(perm) == n:
                nperms[tuple(perm)] = i
        # emit this length group in lexical order
        for perm in sorted(nperms.keys()):
            ans[ix] = perm
            indices[ix] = nperms[perm]
            ix += 1
    if index_return:
        return ans, indices
    else:
        return ans
def get_laplacian(Dx, Dy):
    """
    Return the discrete Laplacian assembled from the gradient fields Dx, Dy
    (second differences along x and y, summed).
    """
    height, width = Dx.shape
    lap_x = np.zeros((height, width))
    lap_y = np.zeros((height, width))
    rows = np.atleast_2d(np.arange(0, height - 1)).T
    cols = np.arange(0, width - 1)
    lap_x[rows, cols + 1] = Dx[rows, cols + 1] - Dx[rows, cols]
    lap_y[rows + 1, cols] = Dy[rows + 1, cols] - Dy[rows, cols]
    return lap_x + lap_y
def recipes_ending(language: StrictStr, ending: StrictStr):
    """
    Show the recipe for a word-ending.
    Given an input language and an ending, present the user with
    the recipe that will be used to build grammatical cases
    for that specific ending.
    And this path operation will:
    * returns a single recipe for the ending specific in the path
    * raises HTTP 404 when the language or the ending is unknown
    """
    try:
        # language lookup is case-insensitive
        recipes = Recipes(language=language.lower())
        recipes.load()
        # NOTE(review): reaches into the private ``_dict`` of Recipes;
        # consider exposing a public accessor instead.
        if ending not in recipes._dict.keys():
            raise HTTPException(
                status_code=HTTP_404_NOT_FOUND, detail="Ending not found"
            )
        return {"language": language, "ending": ending, "recipe": recipes._dict[ending]}
    except LanguageNotFoundError:
        # unknown language -> 404 (the HTTPException above is not caught here)
        raise HTTPException(
            status_code=HTTP_404_NOT_FOUND, detail=f"Language: {language} not found."
        )
def blur(grid, blurring):
    """
    Spread probability mass across a grid using a 3x3 blurring window.

    ``blurring`` controls how much belief leaks into the 8 neighbouring
    cells (with toroidal wrap-around at the edges); a value of 0 leaves
    the grid unchanged. The result is normalized before being returned.
    """
    height = len(grid)
    width = len(grid[0])
    corner = blurring / 12.0
    edge = blurring / 6.0
    kernel = [
        [corner, edge, corner],
        [edge, 1.0 - blurring, edge],
        [corner, edge, corner],
    ]
    blurred = [[0.0] * width for _ in range(height)]
    for row in range(height):
        for col in range(width):
            value = grid[row][col]
            # distribute this cell's mass over itself and its neighbours
            for dy in (-1, 0, 1):
                for dx in (-1, 0, 1):
                    weight = kernel[dy + 1][dx + 1]
                    blurred[(row + dy) % height][(col + dx) % width] += weight * value
    return normalize(blurred)
def _row_adress(addr='1'):
    """Return the row number for a column address (e.g. '1' -> row of cell 'A1')."""
    cell = ''.join(['A', addr])
    return _cell_address(cell)[1]
def split(x, num, axis):
    """
    Splits a tensor into a list of tensors.
    :param x: [Tensor] A TensorFlow tensor object to be split.
    :param num: [int] Number of splits.
    :param axis: [int] Axis along which to be split.
    :return: [list] A list of TensorFlow tensor objects.
    """
    if tf.__version__.startswith('0'):  # 0.12 compatibility.
        # TF 0.x used the reversed argument order (axis, num, value).
        return tf.split(axis, num, x)
    else:
        return tf.split(x, num, axis)
def delete_sensor_values(request):
    """Delete values from a sensor.

    Reads parameters from the query string: 'action' ('query' to count
    matching records, anything else to delete), 'sensor' (sensor id /
    table name), 'delete_where' (criterion selector) and the associated
    'value' / 'start_date' / 'end_date' parameters.

    NOTE(review): the SQL below is assembled with f-strings from request
    parameters -- review for SQL-injection exposure and prefer
    parameterized queries where possible.
    """
    params = request.GET
    action = params.get('action')
    sensor_id = params.get('sensor')
    delete_where = params.get('delete_where')
    where_value = params.get('value')
    where_start_date = params.get('start_date')
    where_end_date = params.get('end_date')
    db = bmsdata.BMSdata()
    # qs = models.Sensor.objects.filter(sensor_id=params['sensor_from'])[0]
    # Build the WHERE clause matching the requested deletion criterion.
    where_clause = ''
    if delete_where == 'all_values':
        pass
    elif delete_where == 'value_equals':
        where_clause = f'WHERE val = {where_value}'
    elif delete_where == 'values_gt':
        where_clause = f'WHERE val > {where_value}'
    elif delete_where == 'values_lt':
        where_clause = f'WHERE val < {where_value}'
    elif delete_where == 'dates_between':
        where_clause = f'WHERE ts > {bmsapp.data_util.datestr_to_ts(where_start_date)} and ts < {bmsapp.data_util.datestr_to_ts(where_end_date)}'
    else:
        return HttpResponse(f'Invalid parameter: {delete_where}', status=406)
    if action == 'query':
        # Dry run: report how many records would be affected.
        try:
            db.cursor.execute(
                f'SELECT COUNT(*) FROM [{sensor_id}] {where_clause}')
            rec_ct = db.cursor.fetchone()[0]
        except Exception as e:
            return HttpResponse(e, status=500)
        if rec_ct == 0:
            return HttpResponse('No records found that meet the criteria!', status=406)
        else:
            return HttpResponse(f'Do you really want to delete {rec_ct:,} records from {sensor_id}?')
    else:
        # Actual deletion; dropping all values also removes the table and
        # the Sensor model row.
        try:
            db.cursor.execute(
                f'DELETE FROM [{sensor_id}] {where_clause}')
            if delete_where == 'all_values':
                db.cursor.execute(f'DROP TABLE [{sensor_id}]')
                qs = models.Sensor.objects.filter(
                    sensor_id=sensor_id)
                if len(qs) > 0:
                    qs[0].delete()
            db.conn.commit()
        except Exception as e:
            return HttpResponse(repr(e), status=500)
        return HttpResponse('Records Deleted')
def _parse_voc_xml(node):
"""
Extracted from torchvision
"""
voc_dict = {}
children = list(node)
if children:
def_dic = collections.defaultdict(list)
for dc in map(_parse_voc_xml, children):
for ind, v in dc.items():
def_dic[ind].append(v)
if node.tag == 'annotation':
def_dic['object'] = [def_dic['object']]
voc_dict = {
node.tag:
{ind: v[0] if len(v) == 1 else v
for ind, v in def_dic.items()}
}
if node.text:
text = node.text.strip()
if not children:
voc_dict[node.tag] = text
return voc_dict | 5,334,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.