| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
async def test_direction_oscillating(hass, setup_comp):
"""Test handling of direction and oscillating attributes."""
hass.states.async_set(
LIVING_ROOM_FAN_ENTITY_ID,
STATE_ON,
{
ATTR_SUPPORTED_FEATURES: FULL_SUPPORT_FEATURES,
ATTR_OSCILLATING: True,
ATTR_DIRECTION: DIRECTION_FORWARD,
ATTR_PERCENTAGE: 50,
},
)
hass.states.async_set(
PERCENTAGE_FULL_FAN_ENTITY_ID,
STATE_ON,
{
ATTR_SUPPORTED_FEATURES: FULL_SUPPORT_FEATURES,
ATTR_OSCILLATING: True,
ATTR_DIRECTION: DIRECTION_FORWARD,
ATTR_PERCENTAGE: 50,
},
)
await hass.async_block_till_done()
state = hass.states.get(FAN_GROUP)
assert state.state == STATE_ON
assert state.attributes[ATTR_FRIENDLY_NAME] == DEFAULT_NAME
assert state.attributes[ATTR_ENTITY_ID] == [*FULL_FAN_ENTITY_IDS]
assert ATTR_ASSUMED_STATE not in state.attributes
assert state.attributes[ATTR_SUPPORTED_FEATURES] == FULL_SUPPORT_FEATURES
assert ATTR_PERCENTAGE in state.attributes
assert state.attributes[ATTR_PERCENTAGE] == 50
assert state.attributes[ATTR_OSCILLATING] is True
assert state.attributes[ATTR_DIRECTION] == DIRECTION_FORWARD
    # ### Test assumed state ###
    # ##########################
    # Updating an entity with a different direction should set assumed state
hass.states.async_set(
PERCENTAGE_FULL_FAN_ENTITY_ID,
STATE_ON,
{
ATTR_SUPPORTED_FEATURES: FULL_SUPPORT_FEATURES,
ATTR_OSCILLATING: True,
ATTR_DIRECTION: DIRECTION_REVERSE,
ATTR_PERCENTAGE: 50,
},
)
await hass.async_block_till_done()
state = hass.states.get(FAN_GROUP)
assert state.state == STATE_ON
assert state.attributes[ATTR_ASSUMED_STATE] is True
assert ATTR_PERCENTAGE in state.attributes
assert state.attributes[ATTR_PERCENTAGE] == 50
assert state.attributes[ATTR_OSCILLATING] is True
assert ATTR_ASSUMED_STATE in state.attributes
# Now that everything is the same, no longer assumed state
hass.states.async_set(
LIVING_ROOM_FAN_ENTITY_ID,
STATE_ON,
{
ATTR_SUPPORTED_FEATURES: FULL_SUPPORT_FEATURES,
ATTR_OSCILLATING: True,
ATTR_DIRECTION: DIRECTION_REVERSE,
ATTR_PERCENTAGE: 50,
},
)
await hass.async_block_till_done()
state = hass.states.get(FAN_GROUP)
assert state.state == STATE_ON
assert ATTR_PERCENTAGE in state.attributes
assert state.attributes[ATTR_PERCENTAGE] == 50
assert state.attributes[ATTR_OSCILLATING] is True
assert state.attributes[ATTR_DIRECTION] == DIRECTION_REVERSE
assert ATTR_ASSUMED_STATE not in state.attributes
hass.states.async_set(
LIVING_ROOM_FAN_ENTITY_ID,
STATE_ON,
{
ATTR_SUPPORTED_FEATURES: FULL_SUPPORT_FEATURES,
ATTR_OSCILLATING: False,
ATTR_DIRECTION: DIRECTION_FORWARD,
ATTR_PERCENTAGE: 50,
},
)
hass.states.async_set(
PERCENTAGE_FULL_FAN_ENTITY_ID,
STATE_ON,
{
ATTR_SUPPORTED_FEATURES: FULL_SUPPORT_FEATURES,
ATTR_OSCILLATING: False,
ATTR_DIRECTION: DIRECTION_FORWARD,
ATTR_PERCENTAGE: 50,
},
)
await hass.async_block_till_done()
state = hass.states.get(FAN_GROUP)
assert state.state == STATE_ON
assert ATTR_PERCENTAGE in state.attributes
assert state.attributes[ATTR_PERCENTAGE] == 50
assert state.attributes[ATTR_OSCILLATING] is False
assert state.attributes[ATTR_DIRECTION] == DIRECTION_FORWARD
assert ATTR_ASSUMED_STATE not in state.attributes
| 17,300
|
def torsion_coordinate_names(zma):
""" z-matrix torsional coordinate names
(currently assumes torsional coordinates generated through x2z)
"""
name_dct = standard_names(zma)
inv_name_dct = dict(map(reversed, name_dct.items()))
geo = automol.geom.without_dummy_atoms(geometry(zma))
tors_names = automol.convert.geom.zmatrix_torsion_coordinate_names(geo)
tors_names = tuple(map(inv_name_dct.__getitem__, tors_names))
return tors_names
| 17,301
|
def test_get_screen_count(xsession: XSession):
"""Tests that the number of screens can be retrieved."""
count = xsession.get_screen_count()
    assert count is not None
    assert isinstance(count, int)
| 17,302
|
def extract_other_creditors_d(
    page: pdfplumber.pdf.Page, markers: List[Dict], creditors: List
) -> List:
    """Crop and extract address, key and acct # from the PDF
    :param page: PDF page
    :param markers: The top and bottom markers
    :param creditors: Existing list of creditors to update
    :return: The updated creditors list with address, key and account information
    """
adjust = 0 if len(markers) == 5 else 12
addy_bbox = (
0,
markers[0]["top"],
int(markers[-1]["x1"]) * 0.35,
markers[-1]["top"],
)
key_bbox = (
markers[-3]["x0"],
markers[0]["top"] - adjust,
markers[-3]["x1"],
markers[-3]["top"],
)
acct_bbox = (
markers[1]["x0"],
markers[1]["top"] - 12,
markers[1]["x1"],
markers[1]["top"],
)
address = page.crop(addy_bbox).filter(keys_and_input_text).extract_text()
key = (
page.crop(key_bbox).filter(keys_and_input_text).extract_text().strip()
)
acct = page.crop(acct_bbox).filter(keys_and_input_text).extract_text()
for creditor in creditors:
if creditor["key"] == key:
other_creditors = creditor["other_creditors"]
other_creditors.append(
{"key": key, "address": address, "acct": acct}
)
creditor["other_creditors"] = other_creditors
return creditors
| 17,303
|
async def cancel_futures(*futures: asyncio.Future):
"""
Cancels given futures and awaits on them in order to reveal exceptions.
Used in a process' teardown.
"""
for future in futures:
future.cancel()
for future in futures:
try:
await future
except asyncio.CancelledError:
pass
| 17,304
|
def coverage():
"""Generate test coverage report"""
do('FLASK_CONFIG=app.config.TestConfig %s/bin/pytest' % venv_path)
| 17,305
|
def attach_server(host, port, cert=None, key=None, chain=None):
"""Define and attach server, optionally HTTPS"""
if sabnzbd.cfg.ipv6_hosting() or "::1" not in host:
http_server = cherrypy._cpserver.Server()
http_server.bind_addr = (host, port)
if cert and key:
http_server.ssl_module = "builtin"
http_server.ssl_certificate = cert
http_server.ssl_private_key = key
http_server.ssl_certificate_chain = chain
http_server.subscribe()
| 17,306
|
def get_dicdirs(mecab_config: str = "mecab-config") -> List[Path]:
"""Get MeCab dictionary directories.
Parameters
----------
mecab_config : str
Executable path of mecab-config, by default "mecab-config".
Returns
-------
List[Path]
MeCab dictionary directories.
"""
dicdirs = []
for path in _mecab_config_dicdir(mecab_config).glob("**/dicrc"):
dicdirs.append(path.parent.resolve())
return dicdirs
| 17,307
|
def set_missing_parameters(iot):
"""Queries Azure for mising parameters like iothub hostname, keys,
and connection_string"""
if 'hostname' not in iot.config:
click.secho("Checking for IoT Hub Host Name")
hub_info, err = run_command_with_stderr_json_out(
"az iot hub show --resource-group %s --name %s" %
(iot.config['rgroup'], iot.config['iothub']))
if err:
click.secho(err)
sys.exit(1)
iot.set_config("hostname", hub_info["properties"]["hostName"])
if 'key' not in iot.config:
click.secho("Checking for Device Identity with name '%s'" % iot.config['device'])
        existing_device, err = run_command_with_stderr_json_out(
            "az iot hub device-identity show --resource-group %s --hub-name %s --device-id %s " %
            (iot.config['rgroup'], iot.config['iothub'], iot.config['device']))
        if err:
            click.secho(err)
            sys.exit(1)
        iot.set_config("key", existing_device["authentication"]["symmetricKey"]["primaryKey"])
click.secho("Checking for Connection String for device with name '%s'" % iot.config['device'])
device_info, err = run_command_with_stderr_json_out(
'az iot hub device-identity show-connection-string --resource-group %s --hub-name %s '
'--device-id %s' % (iot.config['rgroup'], iot.config['iothub'],
iot.config['device']))
if err:
click.secho(err)
sys.exit(1)
iot.set_config("cs", device_info["cs"])
click.secho("Checking for Connection string for IoT Hub with name '%s'" % iot.config['iothub'])
hub_info, err = run_command_with_stderr_json_out(
'az iot hub show-connection-string --resource-group %s --hub-name %s' %
(iot.config['rgroup'], iot.config['iothub']))
# stderr actually contains something here on success so check in opposite order
if hub_info and "cs" in hub_info:
iot.set_config("hub_cs", hub_info["cs"])
else:
click.secho(err)
sys.exit(1)
| 17,308
|
def new_instance(settings):
"""
MAKE A PYTHON INSTANCE
`settings` HAS ALL THE `kwargs`, PLUS `class` ATTRIBUTE TO INDICATE THE CLASS TO CREATE
"""
settings = set_default({}, settings)
if not settings["class"]:
Log.error("Expecting 'class' attribute with fully qualified class name")
# IMPORT MODULE FOR HANDLER
path = settings["class"].split(".")
class_name = path[-1]
path = ".".join(path[:-1])
constructor = None
try:
temp = __import__(path, globals(), locals(), [class_name], 0)
constructor = object.__getattribute__(temp, class_name)
except Exception as e:
Log.error("Can not find class {{class}}", {"class": path}, cause=e)
settings['class'] = None
    try:
        return constructor(kwargs=settings)  # MAYBE IT TAKES A KWARGS OBJECT
    except Exception:
        pass
    try:
        return constructor(**settings)
    except Exception as e:
        # NOTE: `path` IS ALREADY A DOTTED STRING HERE, SO JOIN WITH class_name DIRECTLY
        Log.error("Can not create instance of {{name}}", name=path + "." + class_name, cause=e)
| 17,309
|
def find_entry(entries, fn):
    """Find an entry that matches the given filename fn more or less."""
    entry = get_entry_by_filename(entries, fn)
    if entry is not None:
        return entry
    key = lambda f: path.splitext(f)[0]
    entry = get_entry_by_filename(entries, fn, key)
    if entry is not None:
        return entry
    key = lambda f: path.basename(path.splitext(f)[0])
    entry = get_entry_by_filename(entries, fn, key)
    if entry is not None:
        return entry
    return None
| 17,310
|
def get_type(k):
"""Takes a dict. Returns undefined if not keyed, otherwise returns the key type."""
try:
v = {
'score': '#text',
'applicant': 'str',
'applicant_sort': 'str',
'author': 'str',
'author_sort': 'str',
'brief': 'bool',
'city': 'str',
'daNumber': 'str',
'dateCommentPeriod': 'date',
'dateReplyComment': 'date',
'dateRcpt': 'date',
'disseminated': 'date',
'exParte': 'bool',
'fileNumber': 'str',
'id': 'long',
'lawfirm': 'str',
'lawfirm_sort': 'str',
'modified': 'date',
'pages': 'int',
'proceeding': 'str',
'reportNumber': 'str',
'regFlexAnalysis': 'bool',
'smallBusinessImpact': 'bool',
'stateCd': 'str',
'submissionType': 'str',
'text': 'str',
'viewingStatus': 'str',
'zip': 'str'
}[k]
    except KeyError:
        v = False
return v
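# Example: known keys map to their type string; unknown keys yield False.
# get_type('pages') -> 'int'; get_type('nonexistent') -> False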
| 17,311
|
def al(p):
"""
Given a quaternion p, return the 4x4 matrix A_L(p)
which when multiplied with a column vector q gives
the quaternion product pq.
Parameters
----------
p : numpy.ndarray
4 elements, represents quaternion
Returns
-------
numpy.ndarray
4x4 matrix describing action of quaternion multiplication
"""
return np.array([[p[0], -p[1], -p[2], -p[3]],
[p[1], p[0], -p[3], p[2]],
[p[2], p[3], p[0], -p[1]],
[p[3], -p[2], p[1], p[0]]])
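# A quick sanity check (illustrative; assumes numpy is imported as np):
# multiplying A_L(p) with q as a column vector gives the Hamilton product pq.
p = np.array([1.0, 0.0, 1.0, 0.0]) / np.sqrt(2.0)  # unit quaternion (1 + j)/sqrt(2)
q = np.array([0.0, 1.0, 0.0, 0.0])                 # the pure quaternion i
pq = al(p) @ q  # expected: [0, 1/sqrt(2), 0, -1/sqrt(2)], i.e. (i - k)/sqrt(2)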
| 17,312
|
def build_data_table(row, fields_to_try):
"""
Create HTML table for one row of data
If no fields are valid, returns empty string
"""
th_class = 'attribute_heading'
td_class = 'attribute_value'
field_names = pd.read_csv('data/field_names.csv')
output_table = """
<table>
<tbody>
"""
fields_written = 0
for field_name in fields_to_try:
if field_name in row:
field_value = row[field_name]
# Convert timestamp to human-readable string
if isinstance(field_value, datetime):
field_value = field_value.strftime('%B %-d, %Y')
            if pd.notna(field_value) and len(str(field_value)) > 0:
# If no display_name has been defined for the field_name, use the field_name as the display_name
if sum(field_names['field_name'] == field_name) == 0:
display_name = field_name
else:
display_name = field_names.loc[field_names['field_name'] == field_name, 'display_name'].values[0]
output_table += f"""
<tr>
<th class="{th_class}">{display_name}</th>
"""
if '_link' in field_name:
output_table += f'<td class="{td_class}"><a href="{field_value}">{field_value}</a></td>'
elif '_email' in field_name:
output_table += f'<td class="{td_class}"><a href="mailto:{field_value}">{field_value}</a></td>'
else:
output_table += f'<td class="{td_class}">{field_value}</td>'
output_table += '</tr>'
fields_written += 1
output_table += """
</tbody>
</table>
"""
# or could use: if any([(f in row.index) for f in fields_to_try]):
if fields_written == 0:
output_table = ''
return output_table
| 17,313
|
def publish_exploration(committer_id, exploration_id):
"""This is called by the publish_exploration_and_update_user_profiles
function in exp_services.py. It publishes an exploration and
commits changes.
It is the responsibility of the caller to check that the exploration is
valid prior to publication.
"""
_publish_activity(committer_id, exploration_id, ACTIVITY_TYPE_EXPLORATION)
| 17,314
|
def time_entry_reader(date, configuration):
"""Read the entries and return a list of entries that are apart of the date provided."""
parser = YAML(typ='rt')
date = date.date()
try:
with open(configuration['filename'], 'r') as data_file:
time_entries = parser.load(data_file).get('records', [])
except FileNotFoundError:
LOGGER.error('Cannot read file %s', configuration['filename'])
raise RuntimeError(f'Cannot read file {configuration["filename"]}')
return [te for te in time_entries if te['date'] == date]
| 17,315
|
async def test_send_write(event_loop):
"""Check feed-receive scenarios used in the library."""
STREAM_ID = 'whatever'
DATA = b'data'
def make_writer():
queue = asyncio.Queue()
async def writer(id, data):
assert id == STREAM_ID
await queue.put(data)
return writer, queue
for stream_mode in [StreamMode.WRITE, StreamMode.READ | StreamMode.WRITE]:
stream = Stream(STREAM_ID, event_loop)
writer, queue = make_writer()
stream.on_write.append(writer)
with pytest.raises(exceptions.InvalidStreamMode):
await stream.send(None)
stream.open(stream_mode)
assert stream.is_writable
await stream.send(DATA)
assert await queue.get() == DATA
with pytest.raises(TypeError):
await stream.send(None)
stream.close_sync()
with pytest.raises(exceptions.InvalidStreamState):
await stream.send(None)
| 17,316
|
def width_series(value_series, outer_average_width=5, max_value=None, method='linear'):
"""
    :param value_series: the pd.Series that contains the values
:param outer_average_width: the average width of the width series to return
:param max_value: value to use as the maximum when normalizing the series (to focus low values)
:param method: linear or surface
:return: width_series: pd.Series that contains the widths corresponding to the values
:rtype: pd.Series
"""
max_value = max_value if max_value else np.max(list(value_series.values))
    if method == 'linear':
        serie = value_series.apply(lambda x: x / max_value * outer_average_width)
    elif method == 'surface':
        serie = value_series.apply(lambda x: np.sqrt(x / max_value) * outer_average_width)
    else:
        raise ValueError("method must be 'linear' or 'surface'")
    return serie
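# Illustrative example (assumes pandas as pd and numpy as np are imported):
values = pd.Series([1.0, 2.0, 4.0])
print(width_series(values).tolist())                    # [1.25, 2.5, 5.0]
print(width_series(values, method='surface').tolist())  # sqrt-scaled widths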
| 17,317
|
def unroll_upper_triangular(matrix):
"""Converts square matrix to vector by unrolling upper triangle."""
rows, cols = matrix.shape
assert rows == cols, "Not a square matrix."
row_idx, col_idx = np.triu_indices(rows, 1)
unrolled = []
for i, j in zip(row_idx, col_idx):
unrolled.append(matrix[i][j])
assert len(unrolled) == rows * (rows - 1) // 2
return unrolled
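# Illustrative example (assumes numpy is imported as np):
m = np.array([[0, 1, 2],
              [1, 0, 3],
              [2, 3, 0]])
print(unroll_upper_triangular(m))  # [1, 2, 3]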
| 17,318
|
def fill_tuples(
tuples: Sequence[Any],
length: Optional[int] = None,
repeat: bool = False,
fill_method: str = 'bfill',
) -> Sequence[Tuple]:
"""Fill tuples so they are all the same length.
Parameters
----------
length : int, optional
Fill tuples to a fixed length. If None, fills to max length of
the non-string sequence objects given by tuples.
    repeat : bool, default False
        If True then fills missing tuple values by repeating the boundary
        value of each tuple. If False fills with None.
fill_method : {'bfill', 'ffill'}, str
Whether to forward fill or backfill the tuple values.
"""
if not length:
if not any(is_non_string_sequence(t) for t in tuples):
return tuples
length = max(len(t) for t in tuples if is_non_string_sequence(t))
new_tups = []
for tup in tuples:
tup = tuple_convert(tup)
while len(tup) < length:
if fill_method == 'bfill':
tup = (tup[0] if repeat else None,) + tup
            else:  # 'ffill'
tup += (tup[-1] if repeat else None,)
new_tups.append(tup)
return new_tups
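# Illustrative behaviour (assumes the module's tuple_convert and
# is_non_string_sequence helpers):
# fill_tuples([(1, 2), (3,)])                                   -> [(1, 2), (None, 3)]
# fill_tuples([(1, 2), (3,)], repeat=True, fill_method='ffill') -> [(1, 2), (3, 3)]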
| 17,319
|
def intersect(connection, items, ttl=30, execute=True):
"""并集计算"""
return _set_common(connection, 'sinterstore', items, ttl, execute)
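# Hypothetical usage sketch (assumes a redis-py connection and that the given
# keys hold sets; _set_common is defined elsewhere in this module):
# conn = redis.Redis()
# result = intersect(conn, ['tokens:user1', 'tokens:user2'], ttl=60)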
| 17,320
|
def batch_decode(raw_logits, use_random, decode_times):
"""
tbd
"""
size = (raw_logits.shape[1] + 7) // 8
logit_lists = []
for i in range(0, raw_logits.shape[1], size):
if i + size < raw_logits.shape[1]:
logit_lists.append(raw_logits[:, i: i + size, :])
else:
logit_lists.append(raw_logits[:, i:, :])
result_list = [decode_chunk(logit_lists[i], use_random, decode_times) for i in range(len(logit_lists))]
    return_list = []
    for chunk in result_list:
        return_list.extend(chunk)
    return return_list
| 17,321
|
def _load_schemata(obj_type: str) -> dict:
"""Load the schemata from the package, returning merged results of
other schema files if referenced in the file loaded.
:raises: FileNotFoundError
"""
schema_path = pathlib.Path(pkg_resources.resource_filename(
'pglifecycle', 'schemata/{}.yml'.format(obj_type).replace(' ', '_')))
if not schema_path.exists():
raise FileNotFoundError(
'Schema file not found for object type {!r}'.format(obj_type))
return _preprocess(yaml.load(schema_path))
| 17,322
|
def test():
    """Run the unit tests."""
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)
| 17,323
|
def waitfor(msg, status = '', spinner = None, log_level = log_levels.INFO):
"""waitfor(msg, status = '', spinner = None) -> waiter
Starts a new progress indicator which includes a spinner
if :data:`pwnlib.term.term_mode` is enabled. By default it
outputs to loglevel :data:`pwnlib.log_levels.INFO`.
Args:
msg (str): The message of the spinner.
status (str): The initial status of the spinner.
      spinner (list): This should either be a list of strings or None.
        If a list is supplied, then each element of the list
        is shown in order, with an update occurring every 0.1 second.
        Otherwise a random spinner is chosen.
log_level(int): The log level to output the text to.
Returns:
A waiter-object that can be updated using :func:`status`, :func:`done_success` or :func:`done_failure`.
"""
if context.log_level > log_level:
h = _DummyWaiter()
elif term.term_mode:
h = _TermWaiter(msg, spinner, log_level)
else:
h = _SimpleWaiter(msg, spinner, log_level)
if status:
h.status(status)
_waiter_stack.append(h)
return h
| 17,324
|
def f(i):
"""Add 2 to a value
Args:
i ([int]): integer value
Returns:
[int]: integer value
"""
return i + 2
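# Example: f(3) returns 5.
assert f(3) == 5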
| 17,325
|
def show_forecast(cmp_df, num_predictions, num_values, title):
"""Visualize the forecast."""
def create_go(name, column, num, **kwargs):
points = cmp_df.tail(num)
args = dict(name=name, x=points.index, y=points[column], mode='lines')
args.update(kwargs)
return go.Scatter(**args)
lower_bound = create_go('Lower Bound', 'yhat_lower', num_predictions,
line=dict(width=0),
marker=dict(color="red"))
upper_bound = create_go('Upper Bound', 'yhat_upper', num_predictions,
line=dict(width=0),
marker=dict(color="red"),
fillcolor='rgba(68, 68, 68, 0.3)',
fill='tonexty')
forecast = create_go('Forecast', 'yhat', num_predictions,
line=dict(color='rgb(31, 119, 180)'))
actual = create_go('Actual', 'y', num_values,
marker=dict(color="red"))
# In this case the order of the series is important because of the filling
data = [lower_bound, upper_bound, forecast, actual]
    layout = go.Layout(yaxis=dict(title='Posts'), title=title, showlegend=False)
fig = go.Figure(data=data, layout=layout)
plot(fig, show_link=False)
| 17,326
|
def build_jobs_dict(path):
"""Build a dictionary of "job_name" : [recipe list] from a directory full of job
files."""
jobfiles = nested_glob(path, '.ajp')
jobs = {}
    for jobfile in jobfiles:
        # splitext removes the extension; str.strip('.ajp') would strip
        # characters (not the suffix) and mangle names like 'japan.ajp'
        job_name = os.path.splitext(os.path.basename(jobfile))[0]
        try:
            recipe = parse_jobfile(jobfile)
            jobs[job_name] = recipe
        except Exception:
            warnings.warn("Unable to parse " + jobfile)
    return jobs
| 17,327
|
def cancel_timeout_orders(context, max_m=10):
"""实盘仿真,撤销挂单时间超过 max_m 分钟的订单。
:param context:
:param max_m: 最大允许挂单分钟数
:return:
"""
for u_order in context.unfinished_orders:
if context.ipo_shares and u_order.symbol in context.ipo_shares:
            # Skip new shares; IPO subscription orders cannot be cancelled
continue
if context.now - u_order.created_at >= timedelta(minutes=max_m):
order_cancel(u_order)
msg = "撤单通知:\n{}\n标的:{}\n".format("*" * 31, u_order.symbol)
msg += "价格:{}\n时间:{}\n".format(u_order.price, u_order.created_at.strftime(dt_fmt))
msg += "开平标志:{}\n买卖方向:{}".format(u_order.position_effect, u_order.side)
context.logger.info(msg.replace("\n", " - "))
if context.mode != MODE_BACKTEST:
push_text(msg, context.wx_key)
| 17,328
|
def write_mmcif(out_file, structure, **kwargs):
"""Write a biopython structure to an mmcif file. This function accepts any viable arguments to Bio.PDB.MMCIFIO.save() as keyword arguments.
:param out_file: Path to output mmCIF file.
:type out_file: Union[str, Path]
:param structure: Biopython object containing protein structure.
:type structure: Bio.PDB.structure
"""
io = Bio.PDB.MMCIFIO()
io.set_structure(structure)
io.save(out_file)
| 17,329
|
def get_concepts_from_kmeans(tfidf, kmeans):
"""Get kmeans cluster centers in term space.
Parameters
----------
tfidf : TfidfVectorizer
Fitted vectorizer with learned term vocabulary.
kmeans : KMeans
KMeans fitted to document-term matrix returned by tfidf.
Returns
-------
pandas.DataFrame
Columns are terms, rows are "concepts" sorted by cluster size.
"""
df = pd.DataFrame(kmeans.cluster_centers_, columns=tfidf.get_feature_names())
return df.reindex(pd.Series(kmeans.labels_).value_counts().index)
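# Hypothetical usage sketch (assumes scikit-learn; `docs` is a list of strings):
# tfidf = TfidfVectorizer().fit(docs)
# kmeans = KMeans(n_clusters=5).fit(tfidf.transform(docs))
# concepts = get_concepts_from_kmeans(tfidf, kmeans)  # rows ordered by cluster size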
| 17,330
|
def test_J ():
"""This test verifies that dJ/dt = 2*H."""
qp = phase_space_coordinates()
qp_ = qp.reshape(-1).tolist()
x,y,z = qp[0,:]
p_x,p_y,p_z = qp[1,:]
P_x_ = P_x__(*qp_)
P_y_ = P_y__(*qp_)
mu_ = mu__(x,y,z)
r_squared_ = r_squared__(x,y)
H_qp = H__(*qp_)
X_H = vorpy.symplectic.symplectic_gradient_of(H_qp, qp)
J_qp = J__(*qp_)
# Because X_H gives the vector field defining the time derivative of a solution to the dynamics,
# it follows that X_H applied to J is equal to dJ/dt (where J(t) is J(qp(t)), where qp(t) is a
# solution to Hamilton's equations).
X_H__J = vorpy.manifold.directional_derivative(X_H, J_qp, qp)
#print(f'test_J; X_H__J = {X_H__J}')
#print(f'test_J; 2*H = {sp.expand(2*H_qp)}')
actual_value = X_H__J - sp.expand(2*H_qp)
#print(f'test_J; X_H__J - 2*H = {actual_value}')
# Annoyingly, this doesn't simplify to 0 automatically, so some manual manipulation has to be done.
# Manipulate the expression to ensure the P_x and P_y terms cancel
actual_value = sp.collect(actual_value, [P_x_, P_y_])
#print(f'test_J; after collect P_x, P_y: X_H__J - 2*H = {actual_value}')
actual_value = sp.Subs(actual_value, [P_x_, P_y_], [P_x_._expanded(), P_y_._expanded()]).doit()
#print(f'test_J; after subs P_x, P_y: X_H__J - 2*H = {actual_value}')
# Manipulate the expression to ensure the mu terms cancel
actual_value = sp.factor_terms(actual_value, clear=True, fraction=True)
#print(f'test_J; after factor_terms: X_H__J - 2*H = {actual_value}')
actual_value = sp.collect(actual_value, [r_squared_])
#print(f'test_J; after collect r_squared_: X_H__J - 2*H = {actual_value}')
actual_value = sp.Subs(actual_value, [r_squared_._expanded()], [r_squared_]).doit()
#print(f'test_J; after subs r_squared: X_H__J - 2*H = {actual_value}')
actual_value = sp.Subs(actual_value, [mu_._expanded()], [mu_]).doit()
#print(f'test_J; after subs mu: X_H__J - 2*H = {actual_value}')
if actual_value != 0:
raise ValueError(f'Expected X_H__J - 2*H == 0, but actual value was {actual_value}')
print('test_J passed')
| 17,331
|
def package_proxy_relationships(
db: PartitionedDatabase,
tx: Transaction,
config,
s3,
file_manifests: List[FileManifest],
) -> Iterator[PackageProxyRelationship]:
"""
Yield all proxy package relationships in the dataset
Explodes each proxy package into multiple source files. If the package no
longer exists in the dataset, ignore it.
"""
files_by_package_id: Dict[str, List[FileManifest]] = defaultdict(list)
for f in file_manifests:
if f.source_package_id:
files_by_package_id[f.source_package_id].append(f)
for pp, record in db.get_all_package_proxies_tx(tx):
for file_manifest in files_by_package_id.get(pp.package_node_id, []):
assert file_manifest.id is not None
yield PackageProxyRelationship(
from_=record.id, to=file_manifest.id, relationship=pp.relationship_type
)
| 17,332
|
def streams_to_dataframe(streams, imcs=None, imts=None, event=None):
"""Extract peak ground motions from list of processed StationStream objects.
Note: The PGM columns underneath each channel will be variable
depending on the units of the Stream being passed in (velocity
sensors can only generate PGV) and on the imtlist passed in by
user. Spectral acceleration columns will be formatted as SA(0.3)
for 0.3 second spectral acceleration, for example.
Args:
        streams (list):
            List of processed StationStream objects.
imcs (list):
Strings designating desired components to create in table.
imts (list):
Strings designating desired PGMs to create in table.
event (ScalarEvent): Defines the focal time,
geographic location, and magnitude of an earthquake hypocenter.
Default is None.
Returns:
DataFrame: Pandas dataframe containing columns:
- STATION Station code.
- NAME Text description of station.
- LOCATION Two character location code.
- SOURCE Long form string containing source network.
- NETWORK Short network code.
- LAT Station latitude
- LON Station longitude
- DISTANCE Epicentral distance (km) (if epicentral
lat/lon provided)
- HN1 East-west channel (or H1) (multi-index with pgm columns):
- PGA Peak ground acceleration (%g).
- PGV Peak ground velocity (cm/s).
- SA(0.3) Pseudo-spectral acceleration at 0.3 seconds (%g).
- SA(1.0) Pseudo-spectral acceleration at 1.0 seconds (%g).
- SA(3.0) Pseudo-spectral acceleration at 3.0 seconds (%g).
- HN2 North-south channel (or H2) (multi-index with pgm columns):
- PGA Peak ground acceleration (%g).
- PGV Peak ground velocity (cm/s).
- SA(0.3) Pseudo-spectral acceleration at 0.3 seconds (%g).
- SA(1.0) Pseudo-spectral acceleration at 1.0 seconds (%g).
- SA(3.0) Pseudo-spectral acceleration at 3.0 seconds (%g).
- HNZ Vertical channel (or HZ) (multi-index with pgm columns):
- PGA Peak ground acceleration (%g).
- PGV Peak ground velocity (cm/s).
- SA(0.3) Pseudo-spectral acceleration at 0.3 seconds (%g).
- SA(1.0) Pseudo-spectral acceleration at 1.0 seconds (%g).
- SA(3.0) Pseudo-spectral acceleration at 3.0 seconds (%g).
- GREATER_OF_TWO_HORIZONTALS (multi-index with pgm columns):
- PGA Peak ground acceleration (%g).
- PGV Peak ground velocity (cm/s).
- SA(0.3) Pseudo-spectral acceleration at 0.3 seconds (%g).
- SA(1.0) Pseudo-spectral acceleration at 1.0 seconds (%g).
- SA(3.0) Pseudo-spectral acceleration at 3.0 seconds (%g).
"""
if imcs is None:
station_summary_imcs = DEFAULT_IMCS
else:
station_summary_imcs = imcs
if imts is None:
station_summary_imts = DEFAULT_IMTS
else:
station_summary_imts = imts
subdfs = []
for stream in streams:
if not stream.passed:
continue
if len(stream) < 3:
continue
stream_summary = StationSummary.from_stream(
stream, station_summary_imcs, station_summary_imts, event)
summary = stream_summary.summary
subdfs += [summary]
dataframe = pd.concat(subdfs, axis=0).reset_index(drop=True)
return dataframe
| 17,333
|
def timestamp_diff(time_point_unit: TimePointUnit, time_point1, time_point2) -> Expression:
"""
Returns the (signed) number of :class:`~pyflink.table.expression.TimePointUnit` between
time_point1 and time_point2.
For example,
    `timestamp_diff(TimePointUnit.DAY, lit("2016-06-15").to_date, lit("2016-06-18").to_date)`
leads to 3.
:param time_point_unit: The unit to compute diff.
:param time_point1: The first point in time.
:param time_point2: The second point in time.
:return: The number of intervals as integer value.
"""
return _ternary_op("timestampDiff", time_point_unit._to_j_time_point_unit(),
time_point1, time_point2)
| 17,334
|
def plot_solar_twins_results(star_postfix=''):
"""Plot results for 17 pairs with q-coefficients for solar twins"""
def format_pair_label(pair_label):
"""Format a pair label for printing with MNRAS ion format.
Parameters
----------
pair_label : str
A pair label of the form "4492.660Fe2_4503.480Mn1_25"
Returns
-------
dict
A dictionary containing LaTeX-formatted representations of the two
transitions in the pair label.
"""
t1, t2, order_num = pair_label.split('_')
# This mimics the look of ion labels in MNRAS.
new_label1 = f"{t1[8:-1]}" + r"\," + r"\textsc{\lowercase{" +\
f"{roman_numerals[t1[-1]]}" + r"}}" + r"\ " + f"{t1[:8]}"
new_label2 = f"{t2[8:-1]}" + r"\," + r"\textsc{\lowercase{" +\
f"{roman_numerals[t2[-1]]}" + r"}}" + r"\ " + f"{t2[:8]}"
return {'ion1': new_label1, 'ion2': new_label2}
roman_numerals = {'1': 'I', '2': 'II'}
# Get labels of the 17 pairs on the shortlist.
pairs_file = vcl.data_dir / '17_pairs.txt'
pair_labels = np.loadtxt(pairs_file, dtype=str)
# Get the 18 solar twins.
stars = {star_name: Star(star_name + star_postfix,
vcl.output_dir / star_name)
for star_name in sp1_stars}
# Set out lists of star for the top and bottom panels.
block1_stars = ('Vesta', 'HD76151', 'HD78429',
'HD140538', 'HD146233', 'HD157347')
block2_stars = ('HD20782', 'HD19467', 'HD45184',
'HD45289', 'HD171665',)
block3_stars = ('HD138573', 'HD183658', 'HD220507', 'HD222582')
block4_stars = ('HD1835', 'HD30495', 'HD78660', )
block1_width = 25
block1_ticks = 15
block2_width = 45
block2_ticks = 30
block3_width = 75
block3_ticks = 50
block4_width = 125
block4_ticks = 75
fig = plt.figure(figsize=(18, 10.5), tight_layout=True)
gs = GridSpec(ncols=20, nrows=4, figure=fig, wspace=0,
height_ratios=(len(block1_stars),
len(block2_stars),
len(block3_stars),
len(block4_stars)))
# Set the "velocity" title to be below the figure.
    fig.supxlabel('Difference between pair velocity separation and model (m/s)',
fontsize=18)
# Create a dict to hold all the axes.
axes = {}
# Create top panel (with pair labels)
# Create tick locations to put the grid at.
y_grid_locations = [y+0.5 for y in range(len(block1_stars))]
for i, label in (enumerate(pair_labels)):
ax = fig.add_subplot(gs[0, i])
ax.axvline(x=0, color='Black', linestyle='--', linewidth=1.7,
zorder=1)
# Set the limits of each axis.
ax.set_ylim(top=-0.5, bottom=len(block1_stars)-0.5)
ax.set_xlim(left=-block1_width, right=block1_width)
# Add the grid.
ax.yaxis.set_minor_locator(ticker.FixedLocator(y_grid_locations))
ax.yaxis.grid(which='minor', color='LightGray', linewidth=1.8,
linestyle=':', zorder=0)
# Remove all the ticks and labels on the y-axes (left-most will have
# them specially added back in).
ax.tick_params(axis='y', which='both', left=False, right=False,
labelleft=False, labelright=False)
ax.tick_params(axis='x', which='both', top=False, bottom=True,
labeltop=False, labelbottom=True, labelsize=12)
ax.xaxis.set_major_locator(ticker.FixedLocator(
(-block1_ticks, 0, block1_ticks)))
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
# This sets the width of the outside edges of the subaxes.
for axis in ['top', 'right', 'bottom', 'left']:
ax.spines[axis].set_linewidth(2.1)
ax.spines[axis].set_zorder(20)
# Add the tick labels for each pair at the top of the plot.
ax_twin = ax.twiny()
ax_twin.set_xlim(ax.get_xlim())
ax_twin.tick_params(top=False, labelsize=16)
t1, t2, order_num = label.split('_')
if i > 5:
ax_twin.xaxis.set_major_locator(ticker.FixedLocator((-12,)))
ax_twin.set_xticklabels(('{ion1}\n{ion2}'.format(
**format_pair_label(label)),),
fontdict={'rotation': 90,
'horizontalalignment': 'left',
'verticalalignment': 'bottom'})
elif i in (0, 2, 4):
ax_twin.xaxis.set_major_locator(ticker.FixedLocator((-11, 12)))
ax_twin.set_xticklabels((f'Order: {str(order_num)}',
'{ion1}\n{ion2}'.format(
**format_pair_label(label)),),
fontdict={'rotation': 90,
'horizontalalignment': 'left',
'verticalalignment': 'bottom'})
elif i in (1, 3, 5):
ax_twin.xaxis.set_major_locator(ticker.FixedLocator((2,)))
ax_twin.set_xticklabels((f'Order: {str(order_num)}',),
fontdict={'rotation': 90,
'horizontalalignment': 'left',
'verticalalignment': 'bottom'})
# Add axis to axes dictionary.
axes[(0, i)] = ax
# Create second panel
y_grid_locations = [y+0.5 for y in range(len(block2_stars))]
for i, label in (enumerate(pair_labels)):
ax = fig.add_subplot(gs[1, i])
ax.axvline(x=0, color='Black', linestyle='--', linewidth=1.7)
ax.set_ylim(top=-0.5, bottom=len(block2_stars)-0.5)
ax.set_xlim(left=-block2_width, right=block2_width)
ax.yaxis.set_minor_locator(ticker.FixedLocator(y_grid_locations))
ax.yaxis.grid(which='minor', color='LightGray', linewidth=1.8,
linestyle=':')
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
ax.xaxis.set_major_locator(ticker.FixedLocator(
(-block2_ticks, 0, block2_ticks)))
ax.tick_params(which='both', labelleft=False, labelbottom=True,
left=False, right=False, top=False, bottom=True,
labelsize=12)
for axis in ['top', 'right', 'bottom', 'left']:
ax.spines[axis].set_linewidth(2.1)
ax.spines[axis].set_zorder(20)
axes[(1, i)] = ax
# Create third panel
y_grid_locations = [y+0.5 for y in range(len(block3_stars))]
for i, label in (enumerate(pair_labels)):
ax = fig.add_subplot(gs[2, i])
ax.axvline(x=0, color='Black', linestyle='--', linewidth=1.7)
ax.set_ylim(top=-0.5, bottom=len(block3_stars)-0.5)
ax.set_xlim(left=-block3_width, right=block3_width)
ax.yaxis.set_minor_locator(ticker.FixedLocator(y_grid_locations))
ax.yaxis.grid(which='minor', color='LightGray', linewidth=1.8,
linestyle=':')
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
ax.xaxis.set_major_locator(ticker.FixedLocator(
(-block3_ticks, 0, block3_ticks)))
ax.tick_params(which='both', labelleft=False, labelbottom=True,
left=False, right=False, top=False, bottom=True,
labelsize=12)
for axis in ['top', 'right', 'bottom', 'left']:
ax.spines[axis].set_linewidth(2.1)
ax.spines[axis].set_zorder(20)
axes[(2, i)] = ax
# Create fourth panel
y_grid_locations = [y+0.5 for y in range(len(block4_stars))]
for i, label in (enumerate(pair_labels)):
ax = fig.add_subplot(gs[3, i])
ax.axvline(x=0, color='Black', linestyle='--', linewidth=1.7)
ax.set_ylim(top=-0.5, bottom=len(block4_stars)-0.5)
ax.set_xlim(left=-block4_width, right=block4_width)
ax.yaxis.set_minor_locator(ticker.FixedLocator(y_grid_locations))
ax.yaxis.grid(which='minor', color='LightGray', linewidth=1.8,
linestyle=':')
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
ax.xaxis.set_major_locator(ticker.FixedLocator(
(-block4_ticks, 0, block4_ticks)))
ax.tick_params(which='both', labelleft=False, labelbottom=True,
left=False, right=False, top=False, bottom=True,
labelsize=12)
for axis in ['top', 'right', 'bottom', 'left']:
ax.spines[axis].set_linewidth(2.1)
ax.spines[axis].set_zorder(20)
axes[(3, i)] = ax
# Set the left-most axes to have y-labels for star names.
for i in range(4):
axes[(i, 0)].tick_params(labelleft=True)
# Create the locations for minor ticks to put the star name labels at.
for i, block in enumerate((block1_stars, block2_stars,
block3_stars, block4_stars)):
y_ticks = [y for y in range(len(block))]
axes[(i, 0)].yaxis.set_major_locator(ticker.FixedLocator(y_ticks))
# Create the list of top stars...have to handle Vesta specially.
top_labels = ['Sun']
top_labels.extend([' '.join((x[:2], x[2:])) for x in block1_stars[1:]])
axes[(0, 0)].set_yticklabels(top_labels,
fontdict={'horizontalalignment': 'right',
'fontsize': 15})
for i, star_names in enumerate((block2_stars, block3_stars, block4_stars)):
axes[(i+1, 0)].set_yticklabels([' '.join((x[:2], x[2:]))
for x in star_names],
fontdict={
'horizontalalignment': 'right',
'fontsize': 15})
# Define colors for pre- and post- eras.
pre_color = cmr.ember(0.7)
post_color = cmr.cosmic(0.55)
# How significant to report outliers.
sigma_significance = 3
vprint(f'Looking for outliers beyond {sigma_significance} sigma')
# Create lists to hold the significance values:
pre_stat, pre_sys = [], []
post_stat, post_sys = [], []
for i, pair_label in enumerate(pair_labels):
# Create lists to hold the values and errors:
pre_values, post_values = [], []
pre_err_stat, post_err_stat = [], []
pre_err_sys, post_err_sys = [], []
# Figure out some numbers for locating things from star name.
for star_name in sp1_stars:
if star_name in block1_stars:
row = 0
j = block1_stars.index(star_name)
elif star_name in block2_stars:
row = 1
j = block2_stars.index(star_name)
elif star_name in block3_stars:
row = 2
j = block3_stars.index(star_name)
elif star_name in block4_stars:
row = 3
j = block4_stars.index(star_name)
else:
raise RuntimeError(f"{star_name} not in any list!")
star = stars[star_name]
pair_index = star.p_index(pair_label)
fiber_split_index = star.fiberSplitIndex
# Get the pre-change values.
if star.hasObsPre:
values, mask = remove_nans(star.pairModelOffsetsArray[
:fiber_split_index, pair_index], return_mask=True)
errors = star.pairModelErrorsArray[:fiber_split_index,
pair_index][mask]
plot = True
try:
value, error = weighted_mean_and_error(values, errors)
except ZeroDivisionError:
# This indicates no value for a particular 'cell', so just
# plot something there to indicate that.
axes[(row, i)].plot(0, j-0.15, color='Black', marker='x',
markersize=7, zorder=10)
plot = False
if plot:
# Compute error with sigma_** included.
sigma_s2s = star.pairSysErrorsArray[0, pair_index]
full_error = np.sqrt(error**2 + sigma_s2s**2)
sig_stat = float((value / error).value)
sig_sys = float((value / full_error).value)
pre_stat.append(sig_stat)
pre_sys.append(sig_sys)
if abs(sig_sys) > sigma_significance:
vprint(f'{star.name}: {pair_label}:'
f' (Pre) {sig_sys:.2f}')
pre_values.append(value)
pre_err_stat.append(error)
pre_err_sys.append(full_error)
if (star.name == 'HD1835') and\
(pair_label == '4759.449Ti1_4760.600Ti1_32'):
vprint('For HD 1835, 4759.449Ti1_4760.600Ti1_32:')
vprint(f'Value: {value:.3f}, error: {full_error:.3f}')
# First plot an errorbar with sigma_** included.
axes[(row, i)].errorbar(value, j-0.15,
xerr=full_error,
ecolor=pre_color,
marker='',
capsize=3,
capthick=1.5,
elinewidth=1.4,
zorder=11)
# Then plot just the star's statistical error.
axes[(row, i)].errorbar(value, j-0.15,
xerr=error,
markerfacecolor=pre_color,
markeredgecolor='Black',
ecolor=pre_color,
markeredgewidth=2,
marker='o',
markersize=9,
capsize=5,
elinewidth=4,
zorder=12)
# Get the post-change values.
if star.hasObsPost:
values, mask = remove_nans(star.pairModelOffsetsArray[
fiber_split_index:, pair_index], return_mask=True)
errors = star.pairModelErrorsArray[fiber_split_index:,
pair_index][mask]
plot = True
try:
value, error = weighted_mean_and_error(values, errors)
except ZeroDivisionError:
axes[(row, i)].plot(0, j+0.15, color='Black', marker='x',
markersize=7)
plot = False
if plot:
sigma_s2s = star.pairSysErrorsArray[1, pair_index]
full_error = np.sqrt(error**2 + sigma_s2s**2)
sig_stat = float((value / error).value)
sig_sys = float((value / full_error).value)
post_stat.append(sig_stat)
post_sys.append(sig_sys)
if abs(sig_sys) > sigma_significance:
vprint(f'{star.name}: {pair_label}:'
f' (Post) {sig_sys:.2f}')
post_values.append(value)
post_err_stat.append(error)
post_err_sys.append(full_error)
axes[(row, i)].errorbar(value, j+0.15,
xerr=full_error,
ecolor=post_color,
marker='',
capsize=4,
capthick=1.5,
elinewidth=1.5,
zorder=13)
axes[(row, i)].errorbar(value, j+0.15,
xerr=error,
markerfacecolor=post_color,
markeredgecolor='Black',
ecolor=post_color,
markeredgewidth=2,
marker='D',
markersize=8.5,
capsize=5,
elinewidth=4,
zorder=14)
# Print some metrics for the pair.
pre_val_arr = np.array(pre_values)
pre_err_arr_stat = np.array(pre_err_stat)
pre_err_arr_sys = np.array(pre_err_sys)
post_val_arr = np.array(post_values)
post_err_arr_stat = np.array(post_err_stat)
post_err_arr_sys = np.array(post_err_sys)
wm_value_pre, error_pre = weighted_mean_and_error(
pre_val_arr, pre_err_arr_sys)
wm_value_post, error_post = weighted_mean_and_error(
post_val_arr, post_err_arr_sys)
chi_2_pre_stat = fit.calc_chi_squared_nu(
pre_val_arr, pre_err_arr_stat, 1)
chi_2_pre_sys = fit.calc_chi_squared_nu(
pre_val_arr, pre_err_arr_sys, 1)
chi_2_post_stat = fit.calc_chi_squared_nu(
post_val_arr, post_err_arr_stat, 1)
chi_2_post_sys = fit.calc_chi_squared_nu(
post_val_arr, post_err_arr_sys, 1)
vprint(f'For {pair_label}:')
vprint(' Pre : Weighted mean:'
f' {wm_value_pre:.2f} ± {error_pre:.2f} m/s')
vprint(f' Pre : chi^2: {chi_2_pre_stat:.2f}, {chi_2_pre_sys:.2f}')
vprint(f' Pre : mean error: {np.mean(pre_err_arr_sys):.2f} m/s')
vprint(' Post: Weighted mean:'
f' {wm_value_post:.2f} ± {error_post:.2f} m/s')
vprint(f' Post: chi^2: {chi_2_post_stat:.2f}, {chi_2_post_sys:.2f}')
vprint(f' Post: mean error: {np.mean(post_err_arr_sys):.2f} m/s')
# Create the histogram plots for the pair.
fig_hist = plt.figure(figsize=(5.5, 5.5), tight_layout=True)
bins = np.linspace(-3, 3, num=25)
ax_hist = fig_hist.add_subplot(1, 1, 1)
ax_hist.set_xlabel(r'Significance ($\sigma$)')
ax_hist.set_ylabel('N')
ax_hist.xaxis.set_major_locator(ticker.FixedLocator((-3, -2, -1,
0, 1, 2, 3)))
ax_hist.xaxis.set_minor_locator(ticker.FixedLocator(bins))
ax_hist.yaxis.set_minor_locator(ticker.AutoMinorLocator())
# Add the pre and post distributions together here.
pre_stat.extend(post_stat)
pre_sys.extend(post_sys)
one_sigma, two_sigma = 0, 0
for x in pre_sys:
y = abs(x)
if y < 1:
one_sigma += 1
two_sigma += 1
elif y < 2:
two_sigma += 1
vprint(f'{one_sigma/len(pre_sys):.1%} of values within 1 sigma.')
vprint(f'{two_sigma/len(pre_sys):.1%} of values within 2 sigma.')
ax_hist.hist(pre_stat, color='Gray', histtype='step',
bins=bins, linewidth=1.8, label='Stat. only')
ax_hist.hist(pre_sys, color='Black', histtype='step',
bins=bins, linewidth=2.6, label='Stat. + Sys.')
ax_hist.legend(loc='upper right', fontsize=16,
shadow=True)
outfile = plots_dir / f'Pair_offsets_17_pairs{star_postfix}.pdf'
fig.savefig(str(outfile), bbox_inches='tight', pad_inches=0.01)
histfile = plots_dir / f'Pair_offsets_histograms{star_postfix}.pdf'
fig_hist.savefig(str(histfile), bbox_inches='tight', pad_inches=0.01)
# Create an excerpt of a single column.
fig_ex = plt.figure(figsize=(5, 6), tight_layout=True)
ax_ex = fig_ex.add_subplot(1, 1, 1)
y_grid_locations = [y+0.5 for y in range(len(sp1_stars))]
ax_ex.axvline(x=0, color='Black', linestyle='--', linewidth=1.7)
ax_ex.set_ylim(top=-0.5, bottom=len(sp1_stars)-0.5)
ax_ex.set_xlim(left=-40, right=40)
ax_ex.yaxis.set_minor_locator(ticker.FixedLocator(y_grid_locations))
ax_ex.yaxis.grid(which='minor', color='LightGray', linewidth=1.8,
linestyle=':')
ax_ex.xaxis.set_minor_locator(ticker.AutoMinorLocator())
# ax_ex.xaxis.set_major_locator(ticker.FixedLocator(
# [-50, -25, 0, 25, 50]))
ax_ex.tick_params(which='both', labelleft=True, labelbottom=True,
left=False, right=False, top=False, bottom=True,
labelsize=12)
for axis in ['top', 'right', 'bottom', 'left']:
ax_ex.spines[axis].set_linewidth(2.1)
ax_ex.spines[axis].set_zorder(20)
ax_ex.set_xlabel('Pair model offset (m/s)', size=15)
# Add labels to axis.
# Create the locations for major ticks to put the star name labels at.
y_ticks = [y for y in range(len(sp1_stars))]
ax_ex.yaxis.set_major_locator(ticker.FixedLocator(y_ticks))
# Create the list of top stars...have to handle Vesta specially.
ex_labels = ['Sun']
ex_labels.extend([' '.join((x[:2], x[2:])) for x in sp1_stars[1:]])
ax_ex.set_yticklabels(ex_labels,
fontdict={'horizontalalignment': 'right',
'fontsize': 15})
# Set the pair label to use.
    # pair_label = pair_labels[10]  # 6138--6139
    pair_label = pair_labels[16]
tqdm.write(f'Using pair {pair_label} for excerpt')
for j, star_name in enumerate(sp1_stars):
star = stars[star_name]
pair_index = star.p_index(pair_label)
fiber_split_index = star.fiberSplitIndex
# Get the pre-change values.
if star.hasObsPre:
values, mask = remove_nans(star.pairModelOffsetsArray[
:fiber_split_index, pair_index], return_mask=True)
errors = star.pairModelErrorsArray[:fiber_split_index,
pair_index][mask]
try:
value, error = weighted_mean_and_error(values, errors)
except ZeroDivisionError:
# This indicates no value for a particular 'cell', so just
# plot something there to indicate that.
ax_ex.plot(0, j, color='Black', marker='x',
markersize=7, zorder=10)
continue
# Compute error with sigma_** included.
sigma_s2s = star.pairSysErrorsArray[0, pair_index]
full_error = np.sqrt(error**2 + sigma_s2s**2)
significance = abs(value / full_error).value
if significance > sigma_significance:
vprint(f'{star.name}: {pair_label}:'
f' (Pre) {significance:.2f}')
# First plot an errorbar with sigma_** included.
ax_ex.errorbar(value, j-0.15,
xerr=full_error,
ecolor=pre_color,
marker='',
capsize=3,
capthick=1.5,
elinewidth=1.4,
zorder=11)
# Then plot just the star's statistical error.
ax_ex.errorbar(value, j-0.15,
xerr=error,
markerfacecolor=pre_color,
markeredgecolor='Black',
ecolor=pre_color,
markeredgewidth=2, # controls capthick
marker='o',
markersize=9,
capsize=5,
elinewidth=4,
zorder=12)
# Get the post-change values.
if star.hasObsPost:
values, mask = remove_nans(star.pairModelOffsetsArray[
fiber_split_index:, pair_index], return_mask=True)
errors = star.pairModelErrorsArray[fiber_split_index:,
pair_index][mask]
try:
value, error = weighted_mean_and_error(values, errors)
except ZeroDivisionError:
ax_ex.plot(0, j, color='Black', marker='x',
markersize=7)
continue
sigma_s2s = star.pairSysErrorsArray[1, pair_index]
full_error = np.sqrt(error**2 + sigma_s2s**2)
significance = abs(value / full_error).value
if significance > sigma_significance:
vprint(f'{star.name}: {pair_label}:'
f' (Post) {significance:.2f}')
ax_ex.errorbar(value, j+0.15,
xerr=full_error,
ecolor=post_color,
marker='',
capsize=4,
capthick=1.5,
elinewidth=1.5,
zorder=13)
ax_ex.errorbar(value, j+0.15,
xerr=error,
markerfacecolor=post_color,
markeredgecolor='Black',
ecolor=post_color,
markeredgewidth=2,
marker='D',
markersize=8.5,
capsize=5,
elinewidth=4,
zorder=14)
outfile = plots_dir /\
f'Pair_offsets_17_pairs_excerpt_{pair_label.replace(".", "_")}.pdf'
fig_ex.savefig(str(outfile), bbox_inches='tight', pad_inches=0.01)
| 17,335
|
def waitVRText(msg='Text', color=[1.0, 1.0, 1.0], distance=2.0, scale=0.05, keys=' ', controller=None):
""" Display head-locked message in VR and wait for key press.
Args:
msg (str): Message text
        color: RGB 3-tuple of color values
distance (float): Z rendering distance from MainView
scale (float): Text node scaling factor
keys (str): Key code(s) to dismiss message (see viztask.waitKeyDown)
controller (sensor): Specify a controller sensor to also dismiss on button press
Returns: Vizard keypress event
"""
text = addHeadLockedText(msg=msg,
color=color,
distance=distance,
scale=scale)
if controller is not None:
event = yield viztask.waitAny([viztask.waitKeyDown(keys), viztask.waitSensorDown(controller, None)])
else:
event = yield viztask.waitKeyDown(keys)
text.remove()
viztask.returnValue(event)
| 17,336
|
def equalize(pil_img: Image.Image, level: float):
"""Equalize an image.
.. seealso:: :func:`PIL.ImageOps.equalize`.
Args:
pil_img (Image.Image): The image.
level (float): The intensity.
"""
del level # unused
return ImageOps.equalize(pil_img)
| 17,337
|
def lightfm_trainer(
    train: np.ndarray, loss: str, n_components: int, lam: float
) -> LightFM:
    """Train a LightFM model and return it."""
# detect and init the TPU
tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect()
# instantiate a distribution strategy
tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu)
# instantiating the model in the strategy scope creates the model on the TPU
with tpu_strategy.scope():
# train model normally
model = LightFM(
loss=loss,
user_alpha=lam,
item_alpha=lam,
no_components=n_components,
learning_rate=0.001,
random_state=12345,
)
dataset = Dataset()
dataset.fit(train[:, 0], train[:, 1])
(interactions, weights) = dataset.build_interactions(
((x[0], x[1], 1) for x in train[train[:, 2] == 1])
)
model.fit(interactions, epochs=100)
return model
| 17,338
|
def get_wspd_ts(path, storm, res, shpmask):
"""
Extracts the U and V component and returns the wind speed timeseries of storm_dict
Arguments:
path (str): Path containing data to load
storm (str): Name of storm
res (str): Resolution of data
Returns:
Pandas dataframe with time index
"""
ufile = f'{path}/ua.T1Hpoint.UMRA2T.*.{storm}.{res}km.nc'
vfile = f'{path}/va.T1Hpoint.UMRA2T.*.{storm}.{res}km.nc'
ucube = iris.load_cube(ufile, 'x_wind')
vcube = iris.load_cube(vfile, 'y_wind')
ucube = ucube.intersection(longitude=(75, 100), latitude=(10, 25))
vcube = vcube.intersection(longitude=(75, 100), latitude=(10, 25))
ws_ifunc = iris.analysis.maths.IFunc(calc_wspd, ws_units_func)
ws_cube = ws_ifunc(ucube, vcube, new_name='wind speed')
try:
mwspd = shpmask.mask_cube(ws_cube)
    except Exception:
        print("Can't mask with shape! Masked over lon-lat box instead...")
        mwspd = ws_cube
cubedata = []
timedata = []
for subcube in mwspd.slices_over('forecast_reference_time'):
# extracting the time
tcoord = subcube.coord('time')
units = tcoord.units
tdata = [units.num2date(point) for point in tcoord.points]
cube = subcube.collapsed(['latitude', 'longitude'], iris.analysis.MAX)
cubedata.append(cube.data.filled())
timedata.append(tdata)
# Convert to Pandas Dataframe with unified time index
    s = [pd.Series(data=cubedata[i], index=timedata[i])
         for i in range(np.shape(timedata)[0])]
return pd.DataFrame(s).T
| 17,339
|
def _read_output_file(path):
"""Read Stan csv file to ndarray."""
comments = []
data = []
columns = None
with open(path, "rb") as f_obj:
# read header
for line in f_obj:
if line.startswith(b"#"):
comments.append(line.strip().decode("utf-8"))
continue
columns = {key: idx for idx, key in enumerate(line.strip().decode("utf-8").split(","))}
break
# read data
for line in f_obj:
line = line.strip()
if line.startswith(b"#"):
comments.append(line.decode("utf-8"))
continue
if line:
data.append(np.array(line.split(b","), dtype=np.float64))
return columns, np.array(data, dtype=np.float64), comments
| 17,340
|
def test_get_node_by_attached_volume(get_volume, get_attached_volume):
"""
Check basic consistency in platform handling.
"""
worker_id = get_attached_volume
logger.info(f"volume is attached to node: {worker_id}")
| 17,341
|
async def test_light_turn_on_service(hass, mock_bridge):
"""Test calling the turn on service on a light."""
mock_bridge.mock_light_responses.append(LIGHT_RESPONSE)
await setup_bridge(hass, mock_bridge)
light = hass.states.get('light.hue_lamp_2')
assert light is not None
assert light.state == 'off'
updated_light_response = dict(LIGHT_RESPONSE)
updated_light_response['2'] = LIGHT_2_ON
mock_bridge.mock_light_responses.append(updated_light_response)
await hass.services.async_call('light', 'turn_on', {
'entity_id': 'light.hue_lamp_2',
'brightness': 100,
'color_temp': 300,
}, blocking=True)
# 2x light update, 1 turn on request
assert len(mock_bridge.mock_requests) == 3
assert mock_bridge.mock_requests[1]['json'] == {
'bri': 100,
'on': True,
'ct': 300,
'effect': 'none',
'alert': 'none',
}
assert len(hass.states.async_all()) == 3
light = hass.states.get('light.hue_lamp_2')
assert light is not None
assert light.state == 'on'
| 17,342
|
def run_pull_in_parallel(
dry_run: bool,
parallelism: int,
image_params_list: Union[List[BuildCiParams], List[BuildProdParams]],
python_version_list: List[str],
verbose: bool,
verify_image: bool,
tag_as_latest: bool,
wait_for_image: bool,
extra_pytest_args: Tuple,
):
"""Run image pull in parallel"""
get_console().print(
f"\n[info]Pulling with parallelism = {parallelism} for the images: {python_version_list}:"
)
pool = mp.Pool(parallelism)
poll_time = 10.0
if not verify_image:
results = [
pool.apply_async(
run_pull_image, args=(image_param, dry_run, verbose, wait_for_image, tag_as_latest, poll_time)
)
for image_param in image_params_list
]
else:
results = [
pool.apply_async(
run_pull_and_verify_image,
args=(
image_param,
dry_run,
verbose,
wait_for_image,
tag_as_latest,
poll_time,
extra_pytest_args,
),
)
for image_param in image_params_list
]
check_async_run_results(results)
pool.close()
| 17,343
|
def combinationShape(*args, **kwargs):
"""
Flags:
- addDriver : add (bool) []
- allDrivers : ald (bool) []
- blendShape : bs (unicode) []
- combinationTargetIndex : cti (int) []
- combinationTargetName : ctn (unicode) []
- combineMethod : cm (int) []
- driverTargetIndex : dti (int) []
- driverTargetName : dtn (unicode) []
- exist : ex (bool) []
- removeDriver : rmd (bool) []
Derived from mel command `maya.cmds.combinationShape`
"""
pass
| 17,344
|
def _transform_data(raw_df, cols_config):
"""
Applies required transformations to the raw dataframe
    :returns : Transformed dataframe ready to be exported/loaded
    """
    # Perform column and dtype checks; warn (but continue) on inconsistencies
    df = raw_df
    if not check_columns(raw_df, cols_config):
        logger.warning("Inconsistencies found during column check")
# Apply transformations
df = convert_dates(df)
df = get_duration(df)
df = remove_negatives(df)
df = drop_columns(df)
return df
| 17,345
|
def dcmToSimpleITK(dcmDirectory):
"""Return a simple ITK image from a pile of dcm files. The returned sITK image has been rescaled based on the
value of the rescale slope on the dicom tag. Array-like data of the 3D image can be obtained with the
GetArrayFromImage() method"""
list_dcmFiles = []
for directory, subDirectory, list_dcmFileNames in os.walk(dcmDirectory):
for dcmFile in list_dcmFileNames:
if '.dcm' in dcmFile.lower():
list_dcmFiles.append(os.path.join(directory, dcmFile))
dcmImage = [pydicom.dcmread(dcmSliceFile) for dcmSliceFile in list_dcmFiles]
    voxel_ndarray, _ = dicom_numpy.combine_slices(dcmImage)
    sITK_image = sitk.GetImageFromArray(voxel_ndarray)
    return sITK_image
| 17,346
|
def main():
"""Main entry point."""
base_parser = argparse.ArgumentParser(add_help=False)
base_parser.add_argument(
'file_name', metavar='FILENAME', type=str, help='spreadsheet file')
base_parser.add_argument(
'-o', dest='output_handle', metavar='OUTPUT',
type=argparse.FileType('w'), default=sys.stdout, help='output file')
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=usage[0], epilog=usage[1])
parser.add_argument('-v', action='version', version=version(parser.prog))
subparsers = parser.add_subparsers(dest='subcommand')
subparser = subparsers.add_parser(
'read_input', parents=[base_parser],
description=doc_split(read_input))
subparser.set_defaults(func=read_input)
subparser = subparsers.add_parser(
'read_output', parents=[base_parser],
description=doc_split(read_output))
subparser.set_defaults(func=read_output)
subparser = subparsers.add_parser(
'process', parents=[base_parser],
description=doc_split(process))
subparser.add_argument(
'data', metavar='DATA', type=str, help='data in JSON format')
subparser.set_defaults(func=process)
try:
args = parser.parse_args()
except IOError as error:
parser.error(error)
if not args.subcommand:
parser.error('too few arguments')
try:
args.func(**{k: v for k, v in vars(args).items()
if k not in ('func', 'subcommand')})
except ValueError as error:
parser.error(error)
| 17,347
|
def on_new_follower_added(sender, **kwargs):
"""called when someone is followed"""
    if kwargs['action'] == "post_add":
for person_i_have_followed in kwargs['pk_set']:
followers_updated_signal.send(sender=ProfilesSignalSender,
who_was_followed=Profile.objects.get(
pk=person_i_have_followed),
who_followed=kwargs['instance'])
| 17,348
|
def print_percents(x, y):
"""Print percentage of x is y"""
print(str(y) + ' is ' + str(percents(x, y)) + '% of ' + str(x))
| 17,349
|
def mean_and_std(values):
"""Compute mean standard deviation"""
size = len(values)
mean = sum(values)/size
s = 0.0
for v in values:
s += (v - mean)**2
std = math.sqrt((1.0/(size-1)) * s)
return mean, std
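# Quick check (illustrative): the standard deviation here uses n - 1 (sample std).
m, s = mean_and_std([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0])
print(m, s)  # 5.0, ~2.138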
| 17,350
|
def blacken(
color: Color, amount: FloatOrFloatIterable
) -> Union[Color, List[Color]]:
"""
Return a color or colors amount fraction or fractions of the way from
`color` to `black`.
:param color: The existing color.
:param amount: The proportion to blacken by.
"""
return cross_fade(from_color=color, to_color='black',
amount=amount)
| 17,351
|
def deleteAllProxyServers():
"""Delete all proxy servers - raises exception on error"""
    deleteServersOfType("PROXY_SERVER")
| 17,352
|
def add_line(preso, x1, y1, x2, y2, width="3pt", color="red"):
"""
Arrow pointing up to right:
context.xml:
office:automatic-styles/
<style:style style:name="gr1" style:family="graphic" style:parent-style-name="objectwithoutfill">
<style:graphic-properties
draw:marker-end="Arrow"
draw:marker-end-width="0.3cm"
draw:fill="none"
draw:textarea-vertical-align="middle"/>
</style:style>
3pt width color red
<style:style style:name="gr2" style:family="graphic" style:parent-style-name="objectwithoutfill">
<style:graphic-properties
svg:stroke-width="0.106cm"
svg:stroke-color="#ed1c24"
draw:marker-start-width="0.359cm"
draw:marker-end="Arrow"
draw:marker-end-width="0.459cm"
draw:fill="none"
draw:textarea-vertical-align="middle"
fo:padding-top="0.178cm"
fo:padding-bottom="0.178cm"
fo:padding-left="0.303cm"
fo:padding-right="0.303cm"/>
</style:style>
...
office:presentation/draw:page
<draw:line draw:style-name="gr1" draw:text-style-name="P2" draw:layer="layout" svg:x1="6.35cm" svg:y1="10.16cm" svg:x2="10.668cm" svg:y2="5.842cm"><text:p/></draw:line>
"""
marker_end_ratio = 0.459 / 3 # .459cm/3pt
marker_start_ratio = 0.359 / 3 # .359cm/3pt
stroke_ratio = 0.106 / 3 # .106cm/3pt
w = float(width[0 : width.index("pt")])
sw = w * stroke_ratio
mew = w * marker_end_ratio
msw = w * marker_start_ratio
attribs = {
"svg:stroke-width": "{}cm".format(sw),
"svg:stroke-color": color, # "#ed1c24",
"draw:marker-start-width": "{}cm".format(msw),
"draw:marker-end": "Arrow",
"draw:marker-end-width": "{}cm".format(mew),
"draw:fill": "none",
"draw:textarea-vertical-align": "middle",
}
style = LineStyle(**attribs)
# node = style.style_node()
preso.add_style(style)
line_attrib = {
"draw:style-name": style.name,
"draw:layer": "layout",
"svg:x1": x1,
"svg:y1": y1,
"svg:x2": x2,
"svg:y2": y2,
}
line_node = el("draw:line", attrib=line_attrib)
preso.slides[-1]._page.append(line_node)
| 17,353
|
def llf_gradient_sigma_neq_gamma(history, sum_less_equal=True):
"""
Calculate the gradient of the log-likelihood function symbolically.
Parameters
----------
sum_less_equal : bool, default: True
        This arg is passed to :func:`llf_sigma_neq_gamma`.
Returns
-------
gradient : sympy.Array
An array containing four entries. The first (second) [third]
{fourth} entry is the derivative of the log-likelihood function
w.r.t. beta (sigma) [gamma] {N} parameter.
"""
beta, sigma, gamma, n = symbols("beta sigma gamma n")
return derive_by_array(
llf_sigma_neq_gamma(history, sum_less_equal),
[beta, sigma, gamma, n]
)
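
# A self-contained sketch of the derive_by_array pattern used above, with a
# toy expression standing in for the model's log-likelihood:
from sympy import derive_by_array, symbols

beta, sigma = symbols("beta sigma")
toy_llf = beta**2 * sigma + sigma**3
gradient = derive_by_array(toy_llf, [beta, sigma])
# gradient == [2*beta*sigma, beta**2 + 3*sigma**2]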
| 17,354
|
def test_base__Calendar__update__1():
"""It does nothing.
Test is only here to complete test coverage.
"""
calendar = Calendar(None, None, None)
calendar.update()
| 17,355
|
def test_unwrap(service):
"""unwrapping args from XML."""
assert service.unwrap_arguments(DUMMY_VALID_RESPONSE) == {
"CurrentLEDState": "On",
"Unicode": "μИⅠℂ☺ΔЄ💋",
}
| 17,356
|
def main():
"""
docstring
"""
loop = asyncio.get_event_loop()
task_function1 = asyncio.ensure_future(get_file_extension())
task_function2 = asyncio.ensure_future(get_filenames())
loop.run_forever()
| 17,357
|
def selection_support_df(df, combinations, min_support):
"""
    Select the combinations whose support meets a minimum threshold.
Parameters
----------
df : pandas.DataFrame
data to be selected.
for example :
= | banana | mango | apple |
| 1 | 1 | 1 |
| 1 | 0 | 0 |
| 1 | 1 | 0 |
combinations : list
combinations of df columns.
for example :
= [("apple", "apple"), ("banana", "apple"), ("mango", "apple")
("apple", "banana", "apple"), ("apple", "mango", "apple"),
("banana", "mango", "apple"), ("apple",), ...]
min_support : float
        minimum support required for a combination to be selected
for example :
= 0.5
Returns
-------
combinations and supports.
for example :
= [("banana", "mango", "apple"), ...]
= [0.1, ...]
"""
selected_supports = []
selected_combinations = []
columns = df.columns
n_rows = df.shape[0]
for combination in combinations:
position = position_itemset(combination, columns)
position_columns = np.array(columns[position])
length_combination = len(combination)
combination_array = np.array(df.loc[:, position_columns])
check_array = np.where(length_combination == combination_array.sum(axis=1))[0]
length_check_array = len(check_array)
support = cal_support(length_check_array, n_rows)
if support >= min_support:
selected_combinations.append(combination)
selected_supports.append(support)
return selected_combinations, selected_supports
| 17,358
|
def empty_trie_tree():
"""Empty trie tree fixture."""
from trie import TrieTree
return TrieTree()
| 17,359
|
def check_reference_search_filter_results(response, expected_hits, expected_req_nums):
"""Check if all expected results are there."""
hits = response.json["hits"]["hits"]
reported_hits = int(response.json["hits"]["total"])
req_numbers = [r["number"] for r in response.json["hits"]["hits"]]
assert len(req_numbers) == len(expected_req_nums)
for num in expected_req_nums:
assert num in req_numbers
assert reported_hits == len(hits) == expected_hits
| 17,360
|
def bind_context_to_node(context, node):
"""Give a context a boundnode
to retrieve the correct function name or attribute value
with from further inference.
Do not use an existing context since the boundnode could then
be incorrectly propagated higher up in the call stack.
:param context: Context to use
:type context: Optional(context)
:param node: Node to do name lookups from
:type node NodeNG:
:returns: A new context
:rtype: InferenceContext
"""
context = copy_context(context)
context.boundnode = node
return context
| 17,361
|
def get_weight_matrix(file_handle):
"""
Read each line in file_handle and return the weight matrix as a dict,
in which each key is the original node name, and each value is a nested
dict, whose keys are gene systematic names, and values are weights.
"""
weight_matrix = dict()
for line_num, line in enumerate(file_handle, start=1):
tokens = line.strip().split('\t')
# The first line includes node names only
if line_num == 1:
num_columns = len(tokens)
nodes = tokens[1:]
for node_name in nodes:
weight_matrix[node_name] = dict()
else: # read data lines
# Validate the number of columns in each line
if num_columns != len(tokens):
raise Exception(f"Incorrect number of columns on line {line_num}")
gene_name = tokens[0]
weights = [float(x) for x in tokens[1:]]
for idx, w in enumerate(weights):
node_name = nodes[idx]
weight_matrix[node_name][gene_name] = w
return weight_matrix
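
# Usage sketch with an in-memory handle; the node and gene names are made up.
from io import StringIO

handle = StringIO(
    "gene\tnode1\tnode2\n"
    "geneA\t0.1\t0.2\n"
    "geneB\t0.3\t0.4\n"
)
matrix = get_weight_matrix(handle)
# matrix == {'node1': {'geneA': 0.1, 'geneB': 0.3},
#            'node2': {'geneA': 0.2, 'geneB': 0.4}}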
| 17,362
|
def cli(env, date_min, date_max, obj_event, obj_id, obj_type, utc_offset, metadata):
"""Get Event Logs"""
mgr = SoftLayer.EventLogManager(env.client)
usrmgr = SoftLayer.UserManager(env.client)
request_filter = mgr.build_filter(date_min, date_max, obj_event, obj_id, obj_type, utc_offset)
logs = mgr.get_event_logs(request_filter)
if logs is None:
env.fout('None available.')
return
if metadata and 'metadata' not in COLUMNS:
COLUMNS.append('metadata')
table = formatting.Table(COLUMNS)
if metadata:
table.align['metadata'] = "l"
for log in logs:
user = log['userType']
if user == "CUSTOMER":
user = usrmgr.get_user(log['userId'], "mask[username]")['username']
if metadata:
try:
metadata_data = json.dumps(json.loads(log['metaData']), indent=4, sort_keys=True)
if env.format == "table":
metadata_data = metadata_data.strip("{}\n\t")
except ValueError:
metadata_data = log['metaData']
table.add_row([log['eventName'], log['label'], log['objectName'],
log['eventCreateDate'], user, metadata_data])
else:
table.add_row([log['eventName'], log['label'], log['objectName'],
log['eventCreateDate'], user])
env.fout(table)
| 17,363
|
def search_remove(row_id):
"""Remove a search item"""
LOG.debug('Removing search item with ID {}', row_id)
G.LOCAL_DB.delete_search_item(row_id)
common.json_rpc('Input.Down') # Avoids selection back to the top
common.container_refresh()
| 17,364
|
def test_primitive_generators_return_consistent_values_when_reset(g):
"""
Primitive generators produce the same sequence when reset with the same seed.
"""
g.reset(seed=12345)
items1 = list(g.generate(num=10))
g.reset(seed=12345)
items2 = list(g.generate(num=10))
g.reset(seed=99999)
items3 = list(g.generate(num=10))
g.reset(seed=99999)
items4 = list(g.generate(num=10))
assert items1 == items2
assert items3 == items4
if not isinstance(g, Constant):
assert items1 != items3
| 17,365
|
from datetime import timedelta


def get_uptime():
    """
    Get the system uptime from /proc/uptime as a human-readable string.
    """
try:
with open('/proc/uptime', 'r') as f:
uptime_seconds = float(f.readline().split()[0])
uptime_time = str(timedelta(seconds=uptime_seconds))
data = uptime_time.split('.', 1)[0]
except Exception as err:
data = str(err)
return data
| 17,366
|
def measure_list_for_upcoming_elections_retrieve_api_view(request): # measureListForUpcomingElectionsRetrieve
"""
Ask for all measures for the elections in google_civic_election_id_list
:param request:
:return:
"""
status = ""
google_civic_election_id_list = request.GET.getlist('google_civic_election_id_list[]')
state_code = request.GET.get('state_code', '')
    # We will need all measures for all upcoming elections so we can search
    # the HTML of the possible voter guide for these names
measure_list_light = []
results = retrieve_measure_list_for_all_upcoming_elections(google_civic_election_id_list,
limit_to_this_state_code=state_code)
if results['measure_list_found']:
measure_list_light = results['measure_list_light']
expand_results = add_measure_name_alternatives_to_measure_list_light(measure_list_light)
if expand_results['success']:
measure_list_light = expand_results['measure_list_light']
google_civic_election_id_list = results['google_civic_election_id_list']
status += results['status']
success = results['success']
json_data = {
'status': status,
'success': success,
'google_civic_election_id_list': google_civic_election_id_list,
'measure_list': measure_list_light,
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
| 17,367
|
def check_template_path(path):
"""
    Argument checker: verify that the template exists and return its content.
"""
try:
with open(path) as template:
tmp = template.read()
return tmp
    except OSError:
raise argparse.ArgumentTypeError("Invalid template path!")
| 17,368
|
def get_required_flowlist_fields():
"""
Gets required field names for Flow List.
:return:list of required fields
"""
from fedelemflowlist.globals import flow_list_fields
required_fields = []
for k, v in flow_list_fields.items():
if v[1]['required']:
required_fields.append(k)
return required_fields
| 17,369
|
def sparse_table_function(*, index, data) -> callable:
"""
The very simplest Python-ish "sparse matrix", and plenty fast on modern hardware, for the
size of tables this module will probably ever see, is an ordinary Python dictionary from
<row,column> tuples to significant table entries. There are better ways if you program
closer to the bare metal, but this serves the purpose.
This routine unpacks "compressed-sparse-row"-style data into an equivalent Python dictionary,
then returns a means to query said dictionary according to the expected 2-dimensional interface.
"""
hashmap = {}
for row_id, (Cs, Ds) in enumerate(zip(index, data)):
if isinstance(Ds, int): # All non-blank cells this row have the same value:
for column_id in Cs: hashmap[row_id, column_id] = Ds
else:
for column_id, d in zip(Cs, Ds) if Cs else enumerate(Ds):
hashmap[row_id, column_id] = d
return lambda R, C: hashmap.get((R, C))
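
# Usage sketch: two compressed-sparse rows. Row 0 stores distinct values in
# columns 1 and 3; row 1 stores the same value (7) in columns 0 and 2.
lookup = sparse_table_function(index=[[1, 3], [0, 2]], data=[[10, 30], 7])
assert lookup(0, 3) == 30
assert lookup(1, 0) == 7
assert lookup(0, 0) is None  # blank cells come back as None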
| 17,370
|
async def _get_db_connection() -> asyncpg.Connection:
"""
Initialise database connection.
On failure, retry multiple times. When the DB starts in parallel with the app (with Compose),
it may not yet be ready to take connections.
"""
log.info("Creating DB connection")
n_attempts = 3
for attempt in range(1, n_attempts + 1):
try:
return await asyncpg.connect(connection_string)
except ConnectionError:
log.info(f"Failed to connect to DB (attempt: {attempt}/{n_attempts})")
if attempt >= n_attempts:
raise
await asyncio.sleep(5)
| 17,371
|
def get_view_content(view):
""" Returns view content as string. """
return utils.execute_in_sublime_main_thread(lambda: view.substr(sublime.Region(0, view.size())))
| 17,372
|
def signed_byte8(x: IntVar) -> Int8:
"""Implementation for `SBYTE8`."""
return signed_byte_n(x, 8)
| 17,373
|
def join(zma1, zma2, join_key_mat, join_name_mat, join_val_dct):
""" join two z-matrices together
"""
syms1 = symbols(zma1)
syms2 = symbols(zma2)
natms1 = count(zma1)
natms2 = count(zma2)
key_mat1 = numpy.array(key_matrix(zma1))
key_mat2 = numpy.array(key_matrix(zma2, shift=natms1)) # note the shift
name_mat1 = numpy.array(name_matrix(zma1))
name_mat2 = numpy.array(name_matrix(zma2))
val_dct1 = values(zma1)
val_dct2 = values(zma2)
join_natms = min(natms2, 3)
assert len(join_key_mat) == len(join_name_mat) == join_natms
join_key_mat = numpy.array(join_key_mat, dtype=numpy.object_)
join_name_mat = numpy.array(join_name_mat, dtype=numpy.object_)
# make sure we aren't overwriting values -- the constructor should take
# care of the rest of the necessary validation
    assert numpy.all(numpy.equal(join_key_mat, None) ==
                     numpy.equal(join_name_mat, None))
join_idxs = numpy.not_equal(join_key_mat, None)
assert numpy.all(numpy.equal(key_mat2[:3][join_idxs], None))
assert numpy.all(numpy.equal(name_mat2[:3][join_idxs], None))
key_mat2[:3][join_idxs] = join_key_mat[join_idxs]
name_mat2[:3][join_idxs] = join_name_mat[join_idxs]
syms = tuple(itertools.chain(syms1, syms2))
key_mat = tuple(itertools.chain(key_mat1, key_mat2))
name_mat = tuple(itertools.chain(name_mat1, name_mat2))
# Could be made to allow for joins with common zma1 and zma2 names (for
# symmetry constraints). Not sure if we really want that.
val_dct = val_dct1.copy()
assert not set(val_dct.keys()) & set(val_dct2.keys())
assert not set(val_dct.keys()) & set(join_val_dct.keys())
val_dct.update(val_dct2)
val_dct.update(join_val_dct)
return automol.create.zmatrix.from_data(syms, key_mat, name_mat, val_dct)
| 17,374
|
def step_see_named_query_deleted(context):
"""
Wait to see query deleted.
"""
wrappers.expect_pager(context, "foo: Deleted\r\n", timeout=1)
| 17,375
|
import networkx as nx


def multi_to_weighted(G: nx.MultiDiGraph):
    """
    Convert a multidigraph into a weighted digraph whose edge weights count
    the parallel edges between each pair of nodes.
    """
nG = nx.DiGraph(G)
# nG.add_nodes_from(G.nodes)
nG.name = G.name + "_weighted_nomulti"
    edge_weights = {(u, v): 0 for u, v, k in G.edges(keys=True)}
    for u, v, key in G.edges(keys=True):
        edge_weights[(u, v)] += 1
# nG.add_edges_from(edge_weights.keys())
nx.set_edge_attributes(nG, edge_weights, "weight")
return nG
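
# Usage sketch: two parallel a->b edges collapse into one edge of weight 2.
mg = nx.MultiDiGraph(name="demo")
mg.add_edge("a", "b")
mg.add_edge("a", "b")
mg.add_edge("b", "c")
wg = multi_to_weighted(mg)
# wg["a"]["b"]["weight"] == 2, wg["b"]["c"]["weight"] == 1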
| 17,376
|
def RetentionInDaysMatch(days):
"""Test whether the string matches retention in days pattern.
Args:
days: string to match for retention specified in days format.
Returns:
Returns a match object if the string matches the retention in days
pattern. The match object will contain a 'number' group for the duration
in number of days. Otherwise, None is returned.
"""
return _RETENTION_IN_DAYS().match(days)
| 17,377
|
def _extract_result_details(pipx_output: str) -> Tuple[str, str, str]:
""" Extracts name and version from pipx's stdout """
match = re.search(r'installed package(.*),(.*)\n.*\n.*?-(.*)', pipx_output)
if match:
package, python_version, plugin_name = map(str.strip, match.groups())
return plugin_name.replace('.exe', ''), package, python_version
    raise PluginManagementFatalException('Failed to find package information in install log!')
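
# Usage sketch against the general shape of pipx's install output; the text
# below is illustrative, not a captured log.
sample_output = (
    "installed package demo-plugin 1.0.0, installed using Python 3.10.4\n"
    "  These apps are now globally available\n"
    "    - demo-plugin\n"
)
# _extract_result_details(sample_output)
# -> ('demo-plugin', 'demo-plugin 1.0.0', 'installed using Python 3.10.4')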
| 17,378
|
def plot_2d_comp_multinom(model, data, vmin=None, vmax=None,
resid_range=None, fig_num=None,
pop_ids=None, residual='Anscombe',
adjust=True):
"""
    Multinomial comparison between 2d model and data.
model: 2-dimensional model SFS
data: 2-dimensional data SFS
vmin, vmax: Minimum and maximum values plotted for sfs are vmin and
vmax respectively.
resid_range: Residual plot saturates at +- resid_range.
    fig_num: Clear and use figure fig_num for display. If None, a new figure
             window is created.
pop_ids: If not None, override pop_ids stored in Spectrum.
residual: 'Anscombe' for Anscombe residuals, which are more normally
distributed for Poisson sampling. 'linear' for the linear
residuals, which can be less biased.
adjust: Should method use automatic 'subplots_adjust'? For advanced
manipulation of plots, it may be useful to make this False.
This comparison is multinomial in that it rescales the model to optimally
fit the data.
"""
model = Inference.optimally_scaled_sfs(model, data)
plot_2d_comp_Poisson(model, data, vmin=vmin, vmax=vmax,
resid_range=resid_range, fig_num=fig_num,
pop_ids=pop_ids, residual=residual,
adjust=adjust)
| 17,379
|
def varatts(w_nc_var, varname, tres, vres):
"""Add attibutes to the variables, depending on name and time res.
Arguments:
- w_nc_var: a variable object;
- varname: the name of the variable, among ta, ua, va and wap;
- tres: the time resolution (daily or annual);
- vres: the vertical resolution (pressure levels or vert. integr.).
@author: Chris Slocum (2014), modified by Valerio Lembo (2018).
"""
if tres == 0:
tatt = "Daily\nM"
elif tres == 1:
tatt = "Annual mean\nM"
if vres == 0:
vatt = "Pressure levels\n"
elif vres == 1:
vatt = "Vertically integrated\n"
if varname == 'a':
w_nc_var.setncatts({
'long_name': "Available Potential Energy",
'units': "W m-2",
'level_desc': vatt,
'var_desc': "APE -> KE",
'statistic': tatt
})
elif varname == 'ek':
w_nc_var.setncatts({
'long_name': "Kinetic Energy",
'units': "W m-2",
'level_desc': vatt,
'var_desc': "APE -> KE",
'statistic': tatt
})
elif varname == 'a2k':
w_nc_var.setncatts({
'long_name': "Conversion between APE and KE",
'units': "W m-2",
'level_desc': vatt,
'var_desc': "APE <-> KE",
'statistic': tatt
})
elif varname == 'k':
w_nc_var.setncatts({
'long_name': "Kinetic Energy",
'units': "W m-2",
'level_desc': vatt,
'var_desc': "APE -> KE",
'statistic': tatt
})
| 17,380
|
def get_pwr_SXT(sxt_los, plasma, emiss, num_pts=100, labels=labels_full):
    """
    Integrate the emissivity along each soft-x-ray line of sight and return
    the line-integrated power for each channel label.
    """
pwr_int = {}
for ll in labels:
# Get the appropriate database label
filt = ll.split()[1]
pix_los = sxt_los[ll]
# Get the spatial points along the line of sight
num_pixels = len(pix_los)
ell_pts = np.linspace(-0.5, 0.5, num=num_pts)
xs = np.zeros([num_pixels, num_pts])
ys = np.zeros([num_pixels, num_pts])
for index,los in enumerate(pix_los):
#xs[index,:], ys[index,:] = list(zip(*[los.get_xy(ell) for ell in ell_pts]))
xs[index,:], ys[index,:] = los.get_xy(ell_pts)
# Evaluate the profiles
Te_xs = np.maximum(plasma.Te(xs, ys), 10.0)
ne_xs = np.maximum(plasma.ne(xs, ys), 1e15)
n0_xs = plasma.n0(xs, ys)
pts = list( zip( Te_xs.ravel(), ne_xs.ravel()/1e19, n0_xs.ravel()/1e14 ) )
        # Evaluate deuterium using quasi-neutrality
nZ_xs = {ion:plasma.nZ[ion](xs,ys) for ion in plasma.impurities}
nZ_xs['D'] = plasma.nD(xs, ys)
# Calculate the emission array
emiss_xs = np.zeros(xs.shape)
emiss_xs = ne_xs*nZ_xs['D']*np.reshape(emiss['D'][filt](pts), xs.shape)
for ion in plasma.impurities:
emiss_xs += ne_xs*nZ_xs[ion]*np.reshape(emiss[ion][filt](pts), xs.shape)
# Integrate with the trapezoidal rule
dl = np.ones([num_pts,1])*(ell_pts[1] - ell_pts[0])
dl[0] *= 0.5
dl[-1] *= 0.5
pwr_int[ll] = np.squeeze(np.dot(emiss_xs, dl))
return pwr_int
| 17,381
|
def get_mstp_port(auth):
"""
Function to get list of mstp port status
:param auth: AOSSAuth class object returned by pyarubaoss.auth
:return list of mstp port status
:rtype dict
"""
url_mstp_port = "http://" + auth.ipaddr + "/rest/"+auth.version+"/mstp/port"
try:
r = requests.get(url_mstp_port, headers=auth.cookie)
mstp_port = json.loads(r.text)['mstp_port_element']
return mstp_port
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + " get_mstp_port: An Error has occured"
| 17,382
|
def _process_tree(tree, nstag):
"""Process XML tree for a record and return a dictionary for our standard
"""
rec = OrderedDict()
for key, tag_, getall, trans1_, transall_ in [
('author', 'creatorName', True, None, None),
('name', "title[@titleType='AlternativeTitle']", False, None, None),
('title', "title", False, _unwrap, None),
# actually it seems we have no title but "ShortDescription"!!! TODO
('doap:shortdesc', "title", False, _unwrap, None), # duplicate for now
('description', 'description', True, _unwrap, _merge),
('doap:Version', 'version', False, None, None),
('sameAs', "identifier[@identifierType='DOI']", False, None, None),
        # conflicts with our notion for having a "type" to be internal and to demarcate a Dataset
# here might include the field e.g. Dataset/Neurophysiology, so skipping for now
# ('type', "resourceType[@resourceTypeGeneral='Dataset']", False, None, None),
('citation', "relatedIdentifier", True, None, None),
('keywords', "subject", True, None, None),
('formats', "format", True, None, None),
]:
trans1 = trans1_ or (lambda x: x)
text = lambda x: trans1(x.text.strip())
tag = nstag(tag_)
try:
if getall:
value = list(map(text, tree.findall(tag)))
else:
value = text(tree.find(tag))
except AttributeError:
continue
if not value or value == ['']:
continue
if transall_:
value = transall_(value)
rec[key] = value
return rec
| 17,383
|
def prepare_all_predictions(
data,
uid_map,
iid_map,
interactions,
model,
num_threads,
user_features=None,
item_features=None,
):
"""Function to prepare all predictions for evaluation.
Args:
data (pandas df): dataframe of all users, items and ratings as loaded
uid_map (dict): Keys to map internal user indices to external ids.
iid_map (dict): Keys to map internal item indices to external ids.
interactions (np.float32 coo_matrix): user-item interaction
model (LightFM instance): fitted LightFM model
num_threads (int): number of parallel computation threads
user_features (np.float32 csr_matrix): User weights over features
item_features (np.float32 csr_matrix): Item weights over features
Returns:
pandas.DataFrame: all predictions
"""
users, items, preds = [], [], [] # noqa: F841
item = list(data.itemID.unique())
for user in data.userID.unique():
user = [user] * len(item)
users.extend(user)
items.extend(item)
all_predictions = pd.DataFrame(data={"userID": users, "itemID": items})
all_predictions["uid"] = all_predictions.userID.map(uid_map)
all_predictions["iid"] = all_predictions.itemID.map(iid_map)
dok_weights = interactions.todok()
all_predictions["rating"] = all_predictions.apply(
lambda x: dok_weights[x.uid, x.iid], axis=1
)
all_predictions = all_predictions[all_predictions.rating < 1].reset_index(drop=True)
all_predictions = all_predictions.drop("rating", axis=1)
all_predictions["prediction"] = all_predictions.apply(
lambda x: model.predict(
user_ids=x["uid"],
item_ids=[x["iid"]],
user_features=user_features,
item_features=item_features,
num_threads=num_threads,
)[0],
axis=1,
)
return all_predictions[["userID", "itemID", "prediction"]]
| 17,384
|
def touch(v: Union[Callable, str], default=None):
"""
Touch a function or an expression `v`, see if it causes exception.
If not, output the result, otherwise, output `default`.
Note:
Use `default = pycamia.functions.identity_function` (or write one yourself)
to return the exception object.
Example:
----------
>>> a = 0
    >>> touch(lambda: 1/a, default='fail')
    'fail'
"""
if not callable(default):
default = const_function(default)
if isinstance(v, str):
local_vars = get_environ_locals()
local_vars.update(locals())
locals().update(local_vars)
try: return eval(v)
except Exception as e: return default(e)
else:
try: return v()
except Exception as e: return default(e)
| 17,385
|
def q_mult(q1, q2):
"""Quaternion multiplication"""
w1, x1, y1, z1 = q1
w2, x2, y2, z2 = q2
w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2
x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2
y = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2
z = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2
return w, x, y, z
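
# Usage sketch: multiplying by the identity quaternion (1, 0, 0, 0) leaves
# the other operand unchanged.
identity = (1.0, 0.0, 0.0, 0.0)
q = (0.7071, 0.7071, 0.0, 0.0)  # roughly a 90-degree rotation about x
assert q_mult(identity, q) == q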
| 17,386
|
import math


def round_repeats(repeats, global_params):
"""Calculate module's repeat number of a block based on depth multiplier.
Use depth_coefficient of global_params.
Args:
repeats (int): num_repeat to be calculated.
global_params (namedtuple): Global params of the model.
Returns:
new repeat: New repeat number after calculating.
"""
multiplier = global_params.depth_coefficient
if not multiplier:
return repeats
# follow the formula transferred from official TensorFlow implementation
return int(math.ceil(multiplier * repeats))
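
# Usage sketch with a stand-in for the model's global_params; only the
# depth_coefficient field is consulted here.
from collections import namedtuple

GlobalParams = namedtuple("GlobalParams", ["depth_coefficient"])
assert round_repeats(4, GlobalParams(depth_coefficient=1.2)) == 5  # ceil(4.8)
assert round_repeats(4, GlobalParams(depth_coefficient=None)) == 4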
| 17,387
|
def estimate_label_width(labels):
"""
Given a list of labels, estimate the width in pixels
and return in a format accepted by CSS.
Necessarily an approximation, since the font is unknown
and is usually proportionally spaced.
"""
max_length = max([len(l) for l in labels])
return "{0}px".format(max(60,int(max_length*7.5)))
| 17,388
|
def update_user(user_id):
"""
:Route: PUT /<user_id>?active=false&admin=true&password=str&first_name=Katrina&last_name=Wijaya&email=mappeningdevx@gmail.com
:Description: Updates user with id `user_id`. Updates any optional fields that are set as query parameters.
:param user_id: The int ID of a specific user
:type user_id: int
:param active: An optional query component/parameter to update whether or not a user is active. If true, user has an activated account that they can log in to, otherwise account will be rejected/suspended from use
:type active: boolean or None
:param admin: An optional query component/parameter to update whether or not a user has admin permissions. All admins have same permissions so maybe should create a super admin.
:type admin: boolean or None
:param password: An optional query component/parameter to update the password for a user. TODO: actually supporting passwords/salting/hashing.
:type password: str or None
:param first_name: An optional query component/parameter to update the user's first name. Does not modify full name stored in database.
:type first_name: str or None
:param last_name: An optional query component/parameter to update the user's last name. Does not modify full name stored in database.
:type last_name: str or None
:param email: An optional query component/parameter to update the user's email. TODO: email verification.
:type email: str or None
:return: JSON of updated user or an error message
:Requires: Admin permissions
"""
active = request.args.get('active')
admin = request.args.get('admin')
password = request.args.get('password')
first_name = request.args.get('first_name')
last_name = request.args.get('last_name')
email = request.args.get('email')
# Check if user already exists in collection
user = user_utils.get_user(user_id)
if user:
        # Update access/update/login time (datetime.now() is server-local time)
user['account']['time_updated'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# Update all fields as passed in via optional parameters
if active and active.lower() == "true": user['account']['is_active'] = True
if active and active.lower() == "false": user['account']['is_active'] = False
if admin and admin.lower() == "true": user['account']['is_admin'] = True
if admin and admin.lower() == "false": user['account']['is_admin'] = False
if password: user['account']['password_hash'] = password # TODO: implement hashing/salting/do this better
if first_name: user['personal_info']['first_name'] = first_name
if last_name: user['personal_info']['last_name'] = last_name
if email: user['personal_info']['email'] = email
# Update database entry
users_collection.replace_one({ "account.id": str(user_id) }, user.copy())
return jsonify(user_utils.get_user(user_id))
return "No such user with id " + str(user_id) + " found!"
| 17,389
|
def process_record(db_conn, cf_number, reclabel, rectext):
""" Process the council file summary information entry; this is a
single field label and value from the HTML content.
:param db_conn: Active database connection
:param cf_number: Council file number, format a zero-padded yy-nnnn
:param reclabel: The HTML field label - the label of the section to be mapped
to the database field.
:param rectext: The value of the field to be added to the database
"""
if reclabel in cf_fields:
db_field = cf_fields[reclabel]
        rectext = clean_field_for_database(db_field, rectext)
update_council_file_field(db_conn, cf_number, db_field, rectext)
print('We have label {label} with {text}'.format(label=reclabel, text=rectext))
| 17,390
|
async def test_list_users_without_db_entries(client: AsyncClient, app: FastAPI) -> None:
"""No users should be returned when the database is empty."""
response = await client.get(app.url_path_for("get_users"))
assert response.status_code == 200
users = response.json()
assert not users
| 17,391
|
def install_package(pkg, directory, python_version, pip_args):
"""Downloads wheel for a package. Assumes python binary provided has
pip and wheel package installed.
:param pkg: package name
:param directory: destination directory to download the wheel file in
    :param python_version: python version passed to pip's --python-version flag
:param pip_args: extra pip args sent to pip
:returns: path to the wheel file
:rtype: str
"""
pip_args = [
"--isolated",
"--disable-pip-version-check",
"--target",
directory,
"--no-deps",
"--ignore-requires-python",
"--python-version",
python_version,
pkg,
] + pip_args
cmd = InstallCommand()
cmd.run(*cmd.parse_args(pip_args))
# need dist-info directory for pkg_resources to be able to find the packages
dist_info = glob.glob(os.path.join(directory, "*.dist-info"))[0]
# fix namespace packages by adding proper __init__.py files
namespace_packages = os.path.join(dist_info, "namespace_packages.txt")
if os.path.exists(namespace_packages):
with open(namespace_packages) as nspkg:
for line in nspkg.readlines():
namespace = line.strip().replace(".", os.sep)
if namespace:
nspkg_init = os.path.join(directory, namespace, "__init__.py")
                    with open(nspkg_init, "w") as init_file:  # avoid shadowing the outer handle
                        init_file.write(
                            "__path__ = __import__('pkgutil').extend_path(__path__, __name__)"
                        )
return pkginfo.Wheel(dist_info)
| 17,392
|
def ssh_encrypt_text(ssh_public_key, text):
"""Encrypt text with an ssh public key.
If text is a Unicode string, encode it to UTF-8.
"""
if isinstance(text, six.text_type):
text = text.encode('utf-8')
try:
pub_bytes = ssh_public_key.encode('utf-8')
pub_key = serialization.load_ssh_public_key(
pub_bytes, backends.default_backend())
return pub_key.encrypt(text, padding.PKCS1v15())
except Exception as exc:
raise exception.EncryptionFailure(reason=six.text_type(exc))
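
# Usage sketch: generate a throwaway RSA key with `cryptography`, serialize
# its public half to OpenSSH format, and encrypt a short message. The key
# size and message are illustrative; this reuses the `serialization` import
# the function above already depends on.
from cryptography.hazmat.primitives.asymmetric import rsa

throwaway = rsa.generate_private_key(public_exponent=65537, key_size=2048)
ssh_pub = throwaway.public_key().public_bytes(
    encoding=serialization.Encoding.OpenSSH,
    format=serialization.PublicFormat.OpenSSH,
).decode('utf-8')
ciphertext = ssh_encrypt_text(ssh_pub, u'secret')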
| 17,393
|
def add_missing_init_files_from_path(
paths: List[str],
folders_to_ignore: List[str],
source_extensions: List[str],
folder_trees_to_ignore: List[str],
recursive: bool,
) -> bool:
"""
    Add missing __init__.py files to the specified root directories and subdirectories that contain at least one python
    module (the python module does not have to be in the directory directly; it can be in a subdirectory).
Parameters
----------
paths
List of root path containing the python code.
folders_to_ignore
List of folders paths that will be excluded. The folder path should be relative to the source_tree. Their subdirectories will NOT be excluded.
source_extensions
Files with these extensions will be considered python source code.
folder_trees_to_ignore
List of folders names that will be excluded. Their subdirectories will ALSO be excluded.
recursive
Recursively add missing __init__.py to the subfolders as well.
Returns
-------
bool
True if some init files were added, false otherwise.
"""
init_file_added = False
for path in paths:
if not os.path.exists(path):
exit(f"Cannot find path {path}")
root_directory = path if os.path.isdir(path) else os.path.dirname(path)
if root_directory == "":
root_directory = "."
if add_missing_init_files(
root_directory,
folders_to_ignore,
source_extensions,
folder_trees_to_ignore,
recursive,
):
init_file_added = True
return init_file_added
| 17,394
|
def naildown_entity(entity_class, entity_dict, entity, state, module, check_missing=None):
""" Ensure that a given entity has a certain state """
changed, changed_entity = False, entity
if state == 'present_with_defaults':
if entity is None:
changed, changed_entity = create_entity(entity_class, entity_dict, module)
elif state == 'present':
if entity is None:
changed, changed_entity = create_entity(entity_class, entity_dict, module)
else:
changed, changed_entity = update_entity(entity, entity_dict, module, check_missing)
elif state == 'copied':
new_entity = entity_class(name=entity_dict['new_name'], organization=entity_dict['organization']).search()
if entity is not None and len(new_entity) == 0:
changed, changed_entity = copy_entity(entity, entity_dict, module)
elif len(new_entity) == 1:
changed_entity = new_entity[0]
elif state == 'absent':
if entity is not None:
changed, changed_entity = delete_entity(entity, module)
else:
module.fail_json(msg='Not a valid state: {}'.format(state))
return changed, changed_entity
| 17,395
|
def get_all_admins():
"""
Returns a queryset of all active admin users.
"""
current_admins = User.objects.filter(is_admin=True, is_active=True)
return current_admins
| 17,396
|
import numpy as np


def htx_numpy(h, x):
    """ Convolution of reversed h with each line of x. Numpy implementation.
Parameters
----------
h : array, shape (n_time_hrf), HRF
x : array, shape (n_samples, n_time), neural activity signals
    Returns
    -------
h_conv_x : array, shape (n_samples, n_time_valid), convolved signals
"""
n_samples, _ = x.shape
return np.r_[[np.convolve(h[::-1], x[i], mode='valid')
for i in range(n_samples)]]
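
# Usage sketch: with a length-3 kernel and length-10 signals, 'valid'
# convolution leaves 10 - 3 + 1 = 8 time points per sample.
h = np.ones(3)
x = np.random.randn(4, 10)
assert htx_numpy(h, x).shape == (4, 8)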
| 17,397
|
def test_start_xql_query_valid(mocker):
"""
Given:
- A valid query to search.
When:
- Calling start_xql_query function.
Then:
- Ensure the returned execution_id is correct.
"""
args = {
'query': 'test_query',
'time_frame': '1 year'
}
mocker.patch.object(CLIENT, 'start_xql_query', return_value='execution_id')
response = XQLQueryingEngine.start_xql_query(CLIENT, args=args)
assert response == 'execution_id'
| 17,398
|
async def test_loop():
"""Test loop usage is handled correctly."""
async with Sonarr(HOST, API_KEY) as sonarr:
assert isinstance(sonarr, Sonarr)
| 17,399
|