def p_ObjectLiteral(p):
"""ObjectLiteral : '{' PropertyNameAndValueList '}'
| '{' '}'"""
p[0] = "ObjectLiteral"
p[0] = list(p)
| 5,339,500
|
def build_docs_for_packages(
current_packages: List[str],
docs_only: bool,
spellcheck_only: bool,
for_production: bool,
jobs: int,
verbose: bool,
) -> Tuple[Dict[str, List[DocBuildError]], Dict[str, List[SpellingError]]]:
"""Builds documentation for all packages and combines errors."""
all_build_errors: Dict[str, List[DocBuildError]] = defaultdict(list)
all_spelling_errors: Dict[str, List[SpellingError]] = defaultdict(list)
with with_group("Cleaning documentation files"):
for package_name in current_packages:
console.print(f"[info]{package_name:60}:[/] Cleaning files")
builder = AirflowDocsBuilder(package_name=package_name, for_production=for_production)
builder.clean_files()
if jobs > 1:
run_in_parallel(
all_build_errors,
all_spelling_errors,
current_packages,
docs_only,
for_production,
jobs,
spellcheck_only,
verbose,
)
else:
run_sequentially(
all_build_errors,
all_spelling_errors,
current_packages,
docs_only,
for_production,
spellcheck_only,
verbose,
)
return all_build_errors, all_spelling_errors
| 5,339,501
|
def GetWorkingInTime():
""" Get the start frame of the working range.
The in time is guaranteed to be less than or equal to the out time.
@rtype: C{number}
"""
pass
| 5,339,502
|
def _inspect_output_dirs_test(ctx):
"""Test verifying output directories used by a test."""
env = analysistest.begin(ctx)
# Assert that the output bin dir observed by the aspect added by analysistest
# is the same as those observed by the rule directly, even when that's
# under a config transition and therefore not the same as the bin dir
# used by the test rule.
bin_path = analysistest.target_bin_dir_path(env)
target_under_test = analysistest.target_under_test(env)
asserts.false(env, not bin_path, "bin dir path not found.")
asserts.false(
env,
bin_path == ctx.bin_dir.path,
"bin dir path expected to differ between test and target_under_test.",
)
asserts.equals(env, bin_path, target_under_test[_OutputDirInfo].bin_path)
return analysistest.end(env)
| 5,339,503
|
def wait(object_refs, num_returns=1, timeout=None):
"""Return a list of IDs that are ready and a list of IDs that are not.
This method is identical to `ray.wait` except it adds support for tuples
and ndarrays.
Args:
object_refs (List[ObjectRef], Tuple(ObjectRef), np.array(ObjectRef)):
List like of object refs for objects that may or may not be ready.
Note that these IDs must be unique.
num_returns (int): The number of object refs that should be returned.
timeout (float): The maximum amount of time in seconds to wait before
returning.
Returns:
A list of object refs that are ready and a list of the remaining object
IDs.
"""
if isinstance(object_refs, (tuple, np.ndarray)):
return ray.wait(
list(object_refs), num_returns=num_returns, timeout=timeout)
return ray.wait(object_refs, num_returns=num_returns, timeout=timeout)
| 5,339,504
|
def test_nameserver_agents(nsproxy):
"""
Test the agents() method, which should return a list with the names of
the registered agents.
"""
# No agents registered
agents = nsproxy.agents()
assert len(agents) == 0
# One agent registered
run_agent('Agent0')
agents = nsproxy.agents()
assert len(agents) == 1
# Two agents registered
run_agent('Agent1')
agents = nsproxy.agents()
assert len(agents) == 2
assert 'Agent0' in agents
assert 'Agent1' in agents
| 5,339,505
|
def test_line_to_cols():
"""Test convert.line_to_cols"""
line = ["ID", "RA", "dec", "test1", "test2"]
actual_cols = convert.line_to_cols(line)
expected_cols = list(line)  # copy so mutating the expectations does not alias the input
expected_cols[0] = "id"
expected_cols[1] = "ra"
assert expected_cols == actual_cols
| 5,339,506
|
def train(
data,
feature_names,
tagset,
epochs,
optimizer,
score_func=perceptron_score,
step_size=1,
):
"""
Trains the model on the data and returns the parameters
:param data: Array of dictionaries representing the data. One dictionary for each data point (as created by the
make_data_point function).
:param feature_names: Array of Strings. The list of feature names.
:param tagset: Array of Strings. The list of tags.
:param epochs: Int. The number of epochs to train.
:param optimizer: Callable used to update the parameters.
:param score_func: Scoring function used when decoding (default: perceptron_score).
:param step_size: Step size passed to the optimizer.
:return: FeatureVector. The learned parameters.
"""
parameters = FeatureVector({}) # creates a zero vector
gradient = get_gradient(
data, feature_names, tagset, parameters, score_func
)
def training_observer(epoch, parameters):
"""
Evaluates the parameters on the development data, and writes out the parameters to a 'model.iter'+epoch and
the predictions to 'ner.dev.out'+epoch.
:param epoch: int. The epoch
:param parameters: Feature Vector. The current parameters
:return: Double. F1 on the development data
"""
(_, _, f1) = evaluate(
dev_data, parameters, feature_names, tagset, score_func
)
return f1
sample_num = len(data)  # assumption: the optimizer expects the number of training examples here
# return the final parameters
return optimizer(
sample_num,
epochs,
gradient,
parameters,
training_observer,
step_size=step_size,
)
| 5,339,507
|
def summed_timeseries(timeseries):
"""
Sum the value columns per timestamp for a timeseries that carries several values per timestamp.
:param timeseries: list of rows, each of the form [timestamp, value1, value2, ...]
:return: list of [timestamp, formatted_sum] pairs
"""
sum_timeseries = []
for i in range(len(timeseries)):
if len(timeseries[i])>1:
sum_timeseries.append([timeseries[i][0], '%.3f' % (sum(timeseries[i][1:]))])
return sum_timeseries
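# Usage sketch (hypothetical data, not from the original source): each row is
# [timestamp, value1, value2, ...]; rows with at least one value are summed.
example_ts = [[1000, 1.0, 2.0, 3.0], [1060, 0.5, 0.5], [1120, 4.0]]
print(summed_timeseries(example_ts))  # -> [[1000, '6.000'], [1060, '1.000'], [1120, '4.000']]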
| 5,339,508
|
def path_check(path_to_check):
"""
Check that the path given as a parameter is a valid absolute path.
:param path_to_check: string which has to be checked
:type path_to_check: str
:return: True if it is a valid absolute path, False otherwise
:rtype: boolean
"""
path = pathlib.Path(path_to_check)
return path.is_absolute()
| 5,339,509
|
def _path2list(
path: Union[str, Sequence[str]],
boto3_session: boto3.Session,
s3_additional_kwargs: Optional[Dict[str, Any]],
last_modified_begin: Optional[datetime.datetime] = None,
last_modified_end: Optional[datetime.datetime] = None,
suffix: Union[str, List[str], None] = None,
ignore_suffix: Union[str, List[str], None] = None,
ignore_empty: bool = False,
) -> List[str]:
"""Convert Amazon S3 path to list of objects."""
_suffix: Optional[List[str]] = [suffix] if isinstance(suffix, str) else suffix
_ignore_suffix: Optional[List[str]] = [ignore_suffix] if isinstance(ignore_suffix, str) else ignore_suffix
if isinstance(path, str): # prefix
paths: List[str] = list_objects(
path=path,
suffix=_suffix,
ignore_suffix=_ignore_suffix,
boto3_session=boto3_session,
last_modified_begin=last_modified_begin,
last_modified_end=last_modified_end,
ignore_empty=ignore_empty,
s3_additional_kwargs=s3_additional_kwargs,
)
elif isinstance(path, list):
if last_modified_begin or last_modified_end:
raise exceptions.InvalidArgumentCombination(
"Specify a list of files or (last_modified_begin and last_modified_end)"
)
paths = path if _suffix is None else [x for x in path if x.endswith(tuple(_suffix))]
paths = paths if _ignore_suffix is None else [x for x in paths if x.endswith(tuple(_ignore_suffix)) is False]
else:
raise exceptions.InvalidArgumentType(f"{type(path)} is not a valid path type. Please, use str or List[str].")
return paths
| 5,339,510
|
def e_greedy_normal_noise(mags, e):
"""Epsilon-greedy noise
If e>0 then with probability(adding noise) = e, multiply mags by a normally-distributed
noise.
:param mags: input magnitude tensor
:param e: epsilon (real scalar s.t. 0 <= e <=1)
:return: noise-multiplier.
"""
if e and uniform(0, 1) <= e:
# msglogger.info("%sRankedStructureParameterPruner - param: %s - randomly choosing channels",
# threshold_type, param_name)
return torch.randn_like(mags)
return 1
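# Minimal usage sketch, assuming `torch` and `random.uniform` are imported at module
# level as the function above implies; with probability e the magnitudes get Gaussian noise.
import torch
mags = torch.abs(torch.randn(8))
noisy_mags = mags * e_greedy_normal_noise(mags, e=0.5)  # either Gaussian-scaled or unchanged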
| 5,339,511
|
def beam_area(*args):
"""
Calculate the Gaussian beam area.
Parameters
----------
args: float
FWHM of the beam.
If args is a single argument, a symmetrical beam is assumed.
If args has two arguments, the two arguments are bmaj and bmin,
the width of the major and minor axes of the beam in that order.
Return
------
out: float
Beam area. No unit conversion is performed, i.e. the unit will depend
on the input arguments. For example, beam width in degrees will return
the beam area in square degrees. Likewise, beam width in pixels will
return the beam area in pixels.
"""
if len(args) > 2:
raise ValueError('Input argument must be a single beam width for a '
'symmetrical beam, or widths of the major and minor '
'axes of the beam.')
if len(args) == 2:
bmaj, bmin = args
else:
bmaj = args[0]
bmin = bmaj
return np.pi * bmaj * bmin / (4 * np.log(2))
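# Usage sketch: the Gaussian beam area is pi * bmaj * bmin / (4 * ln 2), in whatever
# unit squared the widths are given in (values below are hypothetical).
area_symmetric = beam_area(0.5)        # 0.5-degree FWHM, symmetrical beam -> square degrees
area_elliptical = beam_area(0.6, 0.4)  # bmaj, bmin in pixels -> square pixels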
| 5,339,512
|
def enerpi_daemon_logger(with_pitemps=False):
"""
Direct entry point to the ENERPI Logger using the configuration in DATA_PATH/config_enerpi.ini.
Used to start ENERPI as a daemon via 'enerpi-daemon start|stop|restart'
(together with enerpiweb, as user www-data: 'sudo -u www-data %(path_env_bin)/enerpi-daemon start')
:param with_pitemps: :bool: Logs RPI temperature every 3 seconds
"""
sleep(2)
set_logging_conf(FILE_LOGGING, LOGGING_LEVEL, with_initial_log=False)
timer_temps = show_pi_temperature(with_pitemps, 3)
enerpi_logger(path_st=HDF_STORE_PATH, is_demo=False, timeout=None, verbose=False,
delta_sampling=SENSORS.delta_sec_data, roll_time=SENSORS.rms_roll_window_sec,
sampling_ms=SENSORS.ts_data_ms)
if timer_temps is not None:
log('Stopping RPI TEMPS sensing from enerpi_main_logger...', 'debug', False, True)
timer_temps.cancel()
| 5,339,513
|
def get_settlement_amounts(
participant1,
participant2
):
""" Settlement algorithm
Calculates the token amounts to be transferred to the channel participants when
a channel is settled.
!!! Don't change this unless you really know what you are doing.
"""
total_available_deposit = (
participant1.deposit +
participant2.deposit -
participant1.withdrawn -
participant2.withdrawn
)
participant1_amount = (
participant1.deposit +
participant2.transferred -
participant1.withdrawn -
participant1.transferred
)
participant1_amount = max(participant1_amount, 0)
participant1_amount = min(participant1_amount, total_available_deposit)
participant2_amount = total_available_deposit - participant1_amount
participant1_locked = min(participant1_amount, participant1.locked)
participant2_locked = min(participant2_amount, participant2.locked)
participant1_amount = max(participant1_amount - participant1.locked, 0)
participant2_amount = max(participant2_amount - participant2.locked, 0)
assert total_available_deposit == (
participant1_amount +
participant2_amount +
participant1_locked +
participant2_locked
)
return SettlementValues(
participant1_balance=participant1_amount,
participant2_balance=participant2_amount,
participant1_locked=participant1_locked,
participant2_locked=participant2_locked,
)
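# Illustrative sketch only: `_Participant` below is a hypothetical stand-in for the
# project's real channel-participant type; it just shows that the settled balances plus
# locked amounts always add up to the total available deposit.
from collections import namedtuple
_Participant = namedtuple("_Participant", "deposit withdrawn transferred locked")
p1 = _Participant(deposit=100, withdrawn=10, transferred=30, locked=5)
p2 = _Participant(deposit=80, withdrawn=0, transferred=50, locked=0)
settlement = get_settlement_amounts(p1, p2)  # balances 105 and 60, locked 5 and 0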
| 5,339,514
|
def cli_parse_clippings():
"""解析clippings文本(对于此模块的功能还没有明确)
"""
question = [
inquirer.Path("file_path", path_type=inquirer.Path.FILE, exists=True,
message="Kindle Clippings文件路径"),
]
answer = inquirer.prompt(question, theme=GreenPassion())
# After obtaining the clippings file path, start processing
for line in ParseFile().parse_file(answer['file_path']):
print(line)
print("")
| 5,339,515
|
def test_attribute_passing_local(sb):
"""Should succeed everywhere as attribute passing needs no dependencies."""
sb.local = True
sb.test()
| 5,339,516
|
def print_ranked_scores(obs, scores):
"""Returns numpy array with data points labelled as outliers
Parameters
----------
data:
no_of_clusters: numpy array like data_point
score: numpy like data
"""
| 5,339,517
|
def test_url_req_case_mismatch_file_index(script, data):
"""
tar ball url requirements (with no egg fragment), that happen to have upper
case project names, should be considered equal to later requirements that
reference the project name using lower case.
tests/data/packages3 contains Dinner-1.0.tar.gz and Dinner-2.0.tar.gz
'requiredinner' has install_requires = ['dinner']
This test is similar to test_url_req_case_mismatch_no_index; that test
tests behaviour when using "--no-index -f", while this one does the same
test when using "--index-url". Unfortunately this requires a different
set of packages as it requires a prepared index.html file and
subdirectory-per-package structure.
"""
Dinner = '/'.join((data.find_links3, 'dinner', 'Dinner-1.0.tar.gz'))
result = script.pip(
'install', '--index-url', data.find_links3, Dinner, 'requiredinner'
)
# only Dinner-1.0.tar.gz should get installed.
egg_folder = script.site_packages / 'Dinner-1.0-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
egg_folder = script.site_packages / 'Dinner-2.0-py%s.egg-info' % pyversion
assert egg_folder not in result.files_created, str(result)
| 5,339,518
|
def timefunc(f):
"""Simple timer function to identify slow spots in algorithm.
Just import function and put decorator @timefunc on top of definition of any
function that you want to time.
"""
def f_timer(*args, **kwargs):
start = time.time()
result = f(*args, **kwargs)
end = time.time()
print(f.__name__, 'took {:.2f} seconds'.format(end - start))
return result, (end - start)
return f_timer
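# Usage sketch: the decorated function returns a (result, elapsed_seconds) tuple
# and prints the timing, as defined above.
import time

@timefunc
def slow_square(x):
    time.sleep(0.1)
    return x * x

value, elapsed = slow_square(7)  # prints e.g. "slow_square took 0.10 seconds"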
| 5,339,519
|
def rdr_geobox(rdr) -> GeoBox:
""" Construct GeoBox from opened dataset reader.
"""
h, w = rdr.shape
return GeoBox(w, h, rdr.transform, rdr.crs)
| 5,339,520
|
def sequence(lst: Block[Result[_TSource, _TError]]) -> Result[Block[_TSource], _TError]:
"""Execute a sequence of result returning commands and collect the
sequence of their response."""
return traverse(identity, lst)
| 5,339,521
|
def keyword_dct_from_block(block, formatvals=True):
""" Take a section with keywords defined and build
a dictionary for the keywords
assumes a block that is a list of key-val pairs
"""
key_dct = None
if block is not None:
block = ioformat.remove_whitespace(block)
key_val_blocks = keyword_value_blocks(block)
if key_val_blocks is not None:
key_dct = {}
for key, val in key_val_blocks:
if formatvals:
formtd_key, formtd_val = format_keyword_values(key, val)
key_dct[formtd_key] = formtd_val
else:
key_dct[key] = val
return key_dct
| 5,339,522
|
def test_resolutions2ints_lists(resolutions, results):
"""Test transformation of resolutions to integer resolutions."""
assert results == utils.resolutions2ints(resolutions)
| 5,339,523
|
def get_geocode(args):
"""
Returns GPS coordinates from Google Maps for a given location.
"""
result = Geocoder.geocode(args.address)
lat, lon = result[0].coordinates
lat = round(lat, 6)
lon = round(lon, 6)
return (lat, lon)
| 5,339,524
|
def test_gui(mock_open_gui, check_no_board_connected):
"""Test the gui command."""
runner = CliRunner()
result = runner.invoke(cli.gui)
assert result.exit_code == 0, "Exit code 0"
assert mock_open_gui.call_count == 1, "open_gui() function called"
| 5,339,525
|
def mice(data, **kwargs):
"""Multivariate Imputation by Chained Equations
Reference:
Buuren, S. V., & Groothuis-Oudshoorn, K. (2011). Mice: Multivariate
Imputation by Chained Equations in R. Journal of Statistical Software,
45(3). doi:10.18637/jss.v045.i03
Implementation follows the main idea from the paper above. Differs in
decision of which variable to regress on (here, I choose it at random).
Also differs in stopping criterion (here the model stops after change in
prediction from previous prediction is less than 10%).
Parameters
----------
data: numpy.ndarray
Data to impute.
Returns
-------
numpy.ndarray
Imputed data.
"""
null_xy = find_null(data)
# Add a column of zeros to the index values
null_xyv = np.append(null_xy, np.zeros((np.shape(null_xy)[0], 1)), axis=1)
null_xyv = [[int(x), int(y), v] for x, y, v in null_xyv]
temp = []
cols_missing = set([y for _, y, _ in null_xyv])
# Step 1: Simple Imputation, these are just placeholders
for x_i, y_i, value in null_xyv:
# Column containing nan value without the nan value
col = data[:, [y_i]][~np.isnan(data[:, [y_i]])]
new_value = np.mean(col)
data[x_i][y_i] = new_value
temp.append([x_i, y_i, new_value])
null_xyv = temp
# Step 5: Repeat steps 2 - 4 until every imputed value has converged
converged = [False] * len(null_xyv)
while not all(converged):
# Step 2: Placeholders are set back to missing for one variable/column
dependent_col = int(np.random.choice(list(cols_missing)))
missing_xs = [int(x) for x, y, value in null_xyv if y == dependent_col]
# Step 3: Perform linear regression using the other variables
x_train, y_train = [], []
for x_i in (x_i for x_i in range(len(data)) if x_i not in missing_xs):
x_train.append(np.delete(data[x_i], dependent_col))
y_train.append(data[x_i][dependent_col])
model = LinearRegression()
model.fit(x_train, y_train)
# Step 4: Missing values for the missing variable/column are replaced
# with predictions from our new linear regression model
temp = []
# For null indices with the dependent column that was randomly chosen
for i, (x_i, y_i, value) in enumerate(null_xyv):
if y_i == dependent_col:
# Row 'x' without the nan value
new_value = model.predict(np.delete(data[x_i], dependent_col).reshape(1, -1))
data[x_i][y_i] = new_value
temp.append([x_i, y_i, new_value])
delta = (new_value-value)/value
if delta < 0.1:
converged[i] = True
null_xyv = temp
return data
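# Hedged usage sketch: impute a small matrix with missing entries. Assumes the
# module-level imports used above (numpy as np, find_null, LinearRegression) are available.
import numpy as np
toy = np.array([[1.0, 2.0, np.nan],
                [2.0, np.nan, 6.0],
                [3.0, 6.0, 9.0],
                [4.0, 8.0, 12.0]])
imputed = mice(toy)  # NaNs replaced by regression-based estimates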
| 5,339,526
|
def SaveFlagValues():
"""Returns copy of flag values as a dict.
Returns:
Dictionary mapping keys to values. Keys are flag names, values are
corresponding __dict__ members. E.g. {'key': value_dict, ...}.
"""
if hasattr(flags, '_FlagValues'): # pylint:disable=protected-access
# In OSS code we use tensorflow/python/platform/flags.py:_FlagValues
# which is not iterable.
flag_dict = FLAGS.__dict__['__flags']
# Make a shallow copy of the flags.
return {name: flag_dict[name] for name in flag_dict}
else:
# FLAGS is iterable and provides __getitem__.
return {name: _CopyFlagDict(FLAGS[name]) for name in FLAGS}
| 5,339,527
|
def _decompile_marketplace_bp(mpi_name, version, project, name, source, with_secrets):
"""Decompiles marketplace manager blueprint"""
decompile_marketplace_bp(
name=mpi_name,
version=version,
project=project,
bp_name=name,
app_source=source,
with_secrets=with_secrets,
)
| 5,339,528
|
def PBH_RULE_update_field_set(
db,
table_name,
rule_name,
priority,
gre_key,
ether_type,
ip_protocol,
ipv6_next_header,
l4_dst_port,
inner_ether_type,
hash,
packet_action,
flow_counter
):
""" Set object field in PBH_RULE table """
ctx = click.get_current_context()
table_name_validator(ctx, db.cfgdb_pipe, table_name)
rule_name_validator(ctx, db.cfgdb_pipe, table_name, rule_name)
table = str(PBH_RULE_CDB)
key = (str(table_name), str(rule_name))
data = {}
if priority is not None:
data[PBH_RULE_PRIORITY] = priority
if gre_key is not None:
data[PBH_RULE_GRE_KEY] = gre_key
if ether_type is not None:
data[PBH_RULE_ETHER_TYPE] = ether_type
if ip_protocol is not None:
data[PBH_RULE_IP_PROTOCOL] = ip_protocol
if ipv6_next_header is not None:
data[PBH_RULE_IPV6_NEXT_HEADER] = ipv6_next_header
if l4_dst_port is not None:
data[PBH_RULE_L4_DST_PORT] = l4_dst_port
if inner_ether_type is not None:
data[PBH_RULE_INNER_ETHER_TYPE] = inner_ether_type
if hash is not None:
hash_validator(ctx, db.cfgdb_pipe, hash)
data[PBH_RULE_HASH] = hash
if packet_action is not None:
data[PBH_RULE_PACKET_ACTION] = packet_action
if flow_counter is not None:
data[PBH_RULE_FLOW_COUNTER] = flow_counter
if not data:
exit_with_error("Error: Failed to update PBH rule: options are not provided", fg="red")
cap = pbh_capabilities_query(db.db, PBH_RULE_CAPABILITIES_KEY)
if cap is None:
exit_with_error("Error: Failed to query PBH rule capabilities: configuration is not available", fg="red")
try:
update_entry(db.cfgdb_pipe, cap, table, key, data)
if data.get(PBH_RULE_FLOW_COUNTER, "") == "DISABLED":
update_pbh_counters(table_name, rule_name)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
| 5,339,529
|
def estimate_purity_err(dim: int, op_expect: np.ndarray, op_expect_var: np.ndarray, renorm=True):
"""
Propagate the observed variance in operator expectation to an error estimate on the purity.
This assumes that each operator expectation is independent.
:param dim: dimension of the Hilbert space
:param op_expect: array of estimated expectations of each operator being measured
:param op_expect_var: array of estimated variance for each operator expectation
:param renorm: flag that provides error for the renormalized purity
:return: purity given the operator expectations
"""
# TODO: incorporate covariance of observables estimated simultaneously;
# see covariances_of_all_iz_obs
# TODO: check validity of approximation |op_expect| >> 0, and functional form below (squared?)
var_of_square_op_expect = (2 * np.abs(op_expect)) ** 2 * op_expect_var
# TODO: check if this adequately handles |op_expect| >\> 0
need_second_order = np.isclose([0.] * len(var_of_square_op_expect), var_of_square_op_expect,
atol=1e-6)
var_of_square_op_expect[need_second_order] = op_expect_var[need_second_order]**2
purity_var = (1 / dim) ** 2 * (np.sum(var_of_square_op_expect))
if renorm:
purity_var = (dim / (dim - 1.0)) ** 2 * purity_var
return np.sqrt(purity_var)
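# Hedged usage sketch with made-up numbers: propagate the variance of three operator
# expectations (e.g. single-qubit Paulis, dim=2) to an error bar on the purity.
import numpy as np
op_expect = np.array([0.9, -0.1, 0.05])
op_expect_var = np.array([1e-3, 2e-3, 1e-3])
purity_err = estimate_purity_err(dim=2, op_expect=op_expect, op_expect_var=op_expect_var)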
| 5,339,530
|
def test_linked_list_can_remove_value():
"""Test value can be removed."""
from linked_list import LinkedList
with pytest.raises(ValueError):
l = LinkedList()
for i in range(10):
l.push(i)
l.remove(l.search(6))
| 5,339,531
|
def get_next_code(seen, server_ticket=0):
"""Find next unused assertion code.
Called by: SConstruct and main()
Since SConstruct calls us, codes[] must be global OR WE REPARSE EVERYTHING
"""
if not codes:
(_, _, seen) = read_error_codes()
if server_ticket:
# Each SERVER ticket is allocated 100 error codes ranging from TICKET_00 -> TICKET_99.
def generator(seen, ticket):
avail_codes = list(range(ticket * 100, (ticket + 1) * 100))
avail_codes.reverse()
while avail_codes:
code = avail_codes.pop()
if str(code) in seen:
continue
yield code
return "No more available codes for ticket. Ticket: {}".format(ticket)
return generator(seen, server_ticket)
# No server ticket. Return a generator that counts starting at highest + 1.
highest = reduce(lambda x, y: max(int(x), int(y)), (loc.code for loc in codes))
return iter(range(highest + 1, MAXIMUM_CODE))
| 5,339,532
|
async def test_fixture_env_home_uses_default(env_home):
"""Provide xdg home directories if not set in the environemnt."""
unset_env(env_home)
config = await get_xdg_home()
home = await Path.home()
assert str(config['XDG_CONFIG_HOME']) == str(home / '.config')
assert str(config['XDG_CACHE_HOME']) == str(home / '.cache')
assert str(config['XDG_DATA_HOME']) == str(home / '.local' / 'share')
assert str(config['XDG_RUNTIME_DIR']) == str(home / '.local' / 'run')
# TODO: log warning if no XDG_RUNTIME_DIR  # pylint: disable=W0511
| 5,339,533
|
def getEnergyUsage():
"""Query plug for energy usage data. Runs as async task.
:return: json with device energy data
"""
energy_data = asyncio.run(plug.get_emeter_realtime())
return energy_data
| 5,339,534
|
def collect_metrics(logger, settings, encrypt_key, collectors):
"""
This function fetches, encrypts and persists metrics data for each collector in the collector list. It first
fetches and encrypts the data for each collector. Then, the data is persisted to either a local or a remote
database, depending on the configured value.
:param logger: the logger instance for writing the state of the software
:param settings: dictionary of configured values
:param encrypt_key: public key use in the encryption
:param collectors: list of collectors
:return:
"""
try:
logger.info("Begin fetching metrics data from other collects")
print("Start fetching metrics data")
for c in collectors:
c.fetch_metrics()
print("End fetching metrics data")
logger.info("End fetching metrics data")
logger.info("Begin persisting fetched metrics")
logger.debug("To store metrics on local: " + str(settings["to_store_local"]))
# Store a copy of metrics data if the user want them.
if settings["to_store_local"]:
print("Start storing metrics data to local data directory")
persist_local(logger, settings.get("root_dir") + settings["local_store_dir"], collectors)
print("End storing metrics data to local data directory")
print("Start storing metrics data to remote database")
persist_database(logger, settings, encrypt_key, collectors)
print("End storing metrics data to remote database")
logger.info("Finish persisting fetched metrics")
reset_collectors(logger, collectors, settings["to_store_local"])
except AccessDenied as ad:
logger.error("Access denied for fetch data from psutil library")
logger.error(ad)
except FileNotFoundError as fnfe:
logger.error("Dependent file not found.")
logger.error(fnfe)
except Exception as e:
logger.error(e.args[0])
| 5,339,535
|
def get_sequence_from_kp(midi):
"""
Get the reduced chord sequence from a KP-corpus file.
Parameters
==========
midi : pretty_midi
A pretty_midi object representing the piece to parse.
Returns
=======
chords : list
The reduced chord sequence from the given piece.
times : list
The time of each chord in chords.
"""
def convert_chord_kp(chord):
"""
Convert the given chord from a string (read from the KP-corpus), to a tonic and quality.
Parameters
==========
chord : string
A string representation of a chord.
Returns
=======
tonic : int
The tonic of the chord, where 0 represents C. A chord with no tonic returns None here.
quality : int
The quality of chord, where 0 is major, 1 is minor, and 2 is diminished. Others are None.
"""
global tonic_map, accidental_map
chord = chord.split('_')
tonic = tonic_map[chord[0][0]]
if tonic is not None:
for accidental in chord[0][1:]:
tonic += accidental_map[accidental]
tonic %= 12
quality = quality_map[chord[1]]
return tonic, quality
return get_reduced_chord_sequence([convert_chord_kp(lyric.text) for lyric in midi.lyrics],
[lyric.time for lyric in midi.lyrics])
| 5,339,536
|
def step_payload():
"""
We define the type of payload we wish to send and create the final exploit file.
"""
global current_step
current_step = 5
show_step_banner('[5] Creating payload')
# Set IP -----------------
global connect_ip
show_prompt_text('Enter your IP (hit Enter to use current value {}):'.format(connect_ip))
user_input = get_input(ip_valid)
if user_input != '':
connect_ip = user_input
# Set port -----------------
global connect_port
show_prompt_text('Enter the port to listen on (hit Enter to use current value {}):'.format(connect_port))
user_input = get_input(port_valid)
if user_input != '':
connect_port = user_input
# Set architecture -----------------
global arch
show_prompt_text('Enter the target architecture (hit Enter to use current value {}):'.format(arch))
user_input = get_input(arch_valid)
if user_input != '':
arch = 'x' + user_input
# Set platform -----------------
global platform
show_prompt_text('Enter the target platform (hit Enter to use current value {}):'.format(platform))
user_input = get_input(platform_valid)
if user_input != '':
platform = user_input
# Set payload -----------------
global payload
while True:
show_prompt_text('Enter payload type (current value {}):'.format(payload))
show_prompt_text('Show all available with {}show payloads{}'.format(BOLD, FORMAT_END))
user_input = get_input(payload_valid)
if user_input == 'show payloads':
show_payloads()
continue
else:
# Create payload -----------------
payload = user_input
payload_ok = create_payload()
if payload_ok and bo_type == 'local':
dump_local_exploit()
elif payload_ok and bo_type == 'remote':
update_remote_exploit()
run_remote_exploit()
show_prompt_text('Did your exploit work? If not, try sending a different payload.')
show_prompt_text(
'Enter {}again{} to try again. Hit Enter if everything worked fine.'.format(BOLD, FORMAT_END))
user_input = get_input(check_text)
if user_input == '':
break
else:
continue
# Finally show prompt till user exits
get_input(generic_check)
| 5,339,537
|
def get_bw_range(features):
"""
Get the rule-of-thumb bandwidth and a range of bandwidths on a log scale for the Gaussian RBF kernel.
:param features: Features to use to obtain the bandwidths.
:return: Tuple consisting of:
* rule_of_thumb_bw: Computed rule-of-thumb bandwidth.
* bws: List of bandwidths on a log scale.
"""
dists = sklearn.metrics.pairwise.pairwise_distances(features).reshape(-1)
rule_of_thumb_bw = np.median(dists)
gammas = np.logspace(np.log(0.5/np.percentile(dists, 99)**2), np.log(0.5/np.percentile(dists, 1)**2), 10, base=np.e)
bws = np.sqrt(1/(2*gammas))
return rule_of_thumb_bw, bws
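# Usage sketch: rule-of-thumb bandwidth plus a log-spaced bandwidth grid for a random
# (hypothetical) feature matrix; assumes sklearn and numpy are imported as above.
import numpy as np
features = np.random.RandomState(0).randn(100, 5)
rule_of_thumb_bw, bws = get_bw_range(features)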
| 5,339,538
|
def _register_jsonschema(js_schema, model_id):
"""
Makes a jsonschema known to this viz
This was added to benefit ONAP/DCAE and is lightly tested at best.
TODO: Does not inject APV keys into messages.
"""
# import here to avoid circular dependency
from acumos_proto_viewer import data
_logger.info("_register_jsonschema: registering for model_id {0}".format(model_id))
data.jsonschema_data_structure[model_id] = {}
_inject_apv_keys_into_schema(js_schema["properties"])
data.jsonschema_data_structure[model_id]["json_schema"] = js_schema
| 5,339,539
|
def gen_endpoint(endpoint_name, endpoint_config_name):
"""
Generate the endpoint resource
"""
endpoint = {
"SagemakerEndpoint": {
"Type": "AWS::SageMaker::Endpoint",
"DependsOn": "SagemakerEndpointConfig",
"Properties": {
"EndpointConfigName": {
"Fn::GetAtt": ["SagemakerEndpointConfig", "EndpointConfigName"]
},
"EndpointName": endpoint_name,
"RetainAllVariantProperties": False,
},
},
}
return endpoint
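# Usage sketch: render the generated endpoint resource as a CloudFormation JSON fragment
# (the names below are hypothetical).
import json
resource = gen_endpoint("my-endpoint", "my-endpoint-config")
print(json.dumps(resource, indent=2))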
| 5,339,540
|
def load_config():
""" Load configuration and set debug flag for this environment """
# Load global configuration
config = yaml.safe_load(open(os.path.abspath('./conf/global.yaml'), 'r').read())
# Detect development or production environment and configure accordingly
if os.environ['SERVER_SOFTWARE'].startswith('Dev'):
conf_f = open(os.path.abspath('./conf/development.yaml'), 'r')
else:
conf_f = open(os.path.abspath('./conf/production.yaml'), 'r')
# Environment-specific settings override the global ones
config.update(yaml.safe_load(conf_f.read()))
return config
| 5,339,541
|
def read_dataframe(df, smiles_column, name_column, data_columns=None):
"""Read molecules from a dataframe.
Parameters
----------
df : pandas.DataFrame
Dataframe to read molecules from.
smiles_column : str
Key of column containing SMILES strings or rdkit Mol objects.
name_column : str
Key of column containing molecule name strings.
data_columns : list, optional
A list of column keys containing data to retain
in molecule graph nodes. The default is None.
Returns
-------
DataFrameMolSupplier
"""
return DataFrameMolSupplier(df, smiles_column, name_column, data_columns)
| 5,339,542
|
def dummy_awsbatch_cluster_config(mocker):
"""Generate dummy cluster."""
image = Image(os="alinux2")
head_node = dummy_head_node(mocker)
compute_resources = [
AwsBatchComputeResource(name="dummy_compute_resource1", instance_types=["dummyc5.xlarge", "optimal"])
]
queue_networking = AwsBatchQueueNetworking(subnet_ids=["dummy-subnet-1"], security_groups=["sg-1", "sg-2"])
queues = [AwsBatchQueue(name="queue1", networking=queue_networking, compute_resources=compute_resources)]
scheduling = AwsBatchScheduling(queues=queues)
# shared storage
shared_storage: List[Resource] = []
shared_storage.append(dummy_fsx())
shared_storage.append(dummy_ebs("/ebs1"))
shared_storage.append(dummy_ebs("/ebs2", volume_id="vol-abc"))
shared_storage.append(dummy_ebs("/ebs3", raid=Raid(raid_type=1, number_of_volumes=5)))
shared_storage.append(dummy_efs("/efs1", file_system_id="fs-efs-1"))
shared_storage.append(dummy_raid("/raid1"))
cluster = _DummyAwsBatchClusterConfig(
image=image, head_node=head_node, scheduling=scheduling, shared_storage=shared_storage
)
cluster.custom_s3_bucket = "s3://dummy-s3-bucket"
cluster.additional_resources = "https://additional.template.url"
cluster.config_version = "1.0"
cluster.iam = ClusterIam()
cluster.tags = [Tag(key="test", value="testvalue")]
return cluster
| 5,339,543
|
def float_or_none(val, default=None):
"""
Arguments:
- `x`:
"""
if val is None:
return default
else:
try:
ret = float(val)
except ValueError:
ret = default
return ret
| 5,339,544
|
def user_get_year_rating(user_id: int):
"""
Get the stored (year, rating) pair for the given user.
:param user_id: id of the user in the users table
:return: tuple(year, rating) or None
"""
try:
con = psconnect(db_url, sslmode='require')
cursor = con.cursor()
cursor.execute("SELECT year,rating FROM users WHERE uid = %s", (user_id,))
result = cursor.fetchone()
con.close()
return result
except psError as e:
print(e)
| 5,339,545
|
def msg_with_data(config, filter_):
"""Creates :py:class:`pymco.message.Message` instance with some data."""
# Importing here since py-cov will ignore code imported on conftest files
# imports
from pymco import message
with mock.patch('time.time') as time:
with mock.patch('hashlib.sha1') as sha1:
time.return_value = ctxt.MSG['msgtime']
sha1.return_value.hexdigest.return_value = ctxt.MSG['requestid']
body = {
':action': 'runonce',
':data': {':noop': True, ':process_results': True},
':ssl_msgtime': 1421878604,
':ssl_ttl': 60,
}
return message.Message(body=body,
agent='puppet',
filter_=filter_,
config=config)
| 5,339,546
|
def csrf_protect(remainder, params):
"""
Perform CSRF protection checks. Performs checks to determine if submitted
form data matches the token in the cookie. It is assumed that the GET
request handler successfully set the token for the request and that the
form was instrumented with a CSRF token field. Use the
:py:func:`~csrf_token` decorator to do this.
Generally, the handler does not need to do anything
CSRF-protection-specific. All it needs is the decorator::
@csrf_protect
@expose()
def protected_post_handler():
return 'OK!'
"""
secret, token_name, path, expires, handler = _get_conf()
cookie_token = tg.request.signed_cookie(token_name, secret=secret.decode('ascii'))
if not cookie_token:
handler('csrf cookie not present')
form_token = tg.request.args_params.get(token_name)
if not form_token:
handler('csrf input not present')
if form_token != cookie_token:
tg.response.delete_cookie(token_name, path=path)
handler('cookie and input mismatch')
_validate_csrf(form_token)
| 5,339,547
|
def get_image_names():
"""
Returns (image_names, covid_image_names, normal_image_names,
virus_image_names), where each is a list of image names
"""
image_names = os.listdir(DEFAULT_IMG_PATH_UNEDITED)
# Remove directories
image_names.remove("COVID-19")
image_names.remove("Normal")
image_names.remove("ViralPneumonia")
covid_image_names = os.listdir(COVID_IMG_PATH_UNEDITED)
normal_image_names = os.listdir(NORMAL_IMG_PATH_UNEDITED)
virus_image_names = os.listdir(VIRUS_IMG_PATH_UNEDITED)
return image_names, covid_image_names, normal_image_names, virus_image_names
| 5,339,548
|
def api_connect_wifi():
""" Connect to the specified wifi network """
res = network.wifi_connect()
return jsonify(res)
| 5,339,549
|
async def get_sinks_metadata(sinkId: str) -> List: # pylint: disable=unused-argument
"""Get metadata attached to sinks
This adapter does not implement metadata. Therefore this will always result
in an empty list!
"""
return []
| 5,339,550
|
def fn_lin(x_np, *, multiplier=3.1416):
""" Linear function """
return x_np * multiplier
| 5,339,551
|
def task1(outfile, extra):
"""
First task
"""
# N.B. originate works with an extra parameter
helper(None, outfile)
| 5,339,552
|
def get_MACD(df, column='Close'):
"""Function to get the EMA of 12 and 26"""
df['EMA-12'] = df[column].ewm(span=12, adjust=False).mean()
df['EMA-26'] = df[column].ewm(span=26, adjust=False).mean()
df['MACD'] = df['EMA-12'] - df['EMA-26']
df['Signal'] = df['MACD'].ewm(span=9, adjust=False).mean()
df['Histogram'] = df['MACD'] - df['Signal']
return df
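# Usage sketch with a hypothetical price series: get_MACD adds the EMA-12/26, MACD,
# Signal and Histogram columns in place and returns the frame.
import numpy as np
import pandas as pd
prices = pd.DataFrame({"Close": np.linspace(100, 120, 60) + np.random.RandomState(0).randn(60)})
prices = get_MACD(prices)
print(prices[["MACD", "Signal", "Histogram"]].tail())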
| 5,339,553
|
def from_pyGraphviz_agraph(A, create_using=None):
"""Returns a EasyGraph Graph or DiGraph from a PyGraphviz graph.
Parameters
----------
A : PyGraphviz AGraph
A graph created with PyGraphviz
create_using : EasyGraph graph constructor, optional (default=None)
Graph type to create. If graph instance, then cleared before populated.
If `None`, then the appropriate Graph type is inferred from `A`.
Examples
--------
>>> K5 = eg.complete_graph(5)
>>> A = eg.to_pyGraphviz_agraph(K5)
>>> G = eg.from_pyGraphviz_agraph(A)
Notes
-----
The Graph G will have a dictionary G.graph_attr containing
the default graphviz attributes for graphs, nodes and edges.
Default node attributes will be in the dictionary G.node_attr
which is keyed by node.
Edge attributes will be returned as edge data in G. With
edge_attr=False the edge data will be the Graphviz edge weight
attribute or the value 1 if no edge weight attribute is found.
"""
if create_using is None:
if A.is_directed():
if A.is_strict():
create_using = eg.DiGraph
else:
create_using = eg.MultiDiGraph
else:
if A.is_strict():
create_using = eg.Graph
else:
create_using = eg.MultiGraph
# assign defaults
N = eg.empty_graph(0, create_using)
if A.name is not None:
N.name = A.name
# add graph attributes
N.graph.update(A.graph_attr)
# add nodes, attributes to N.node_attr
for n in A.nodes():
str_attr = {str(k): v for k, v in n.attr.items()}
N.add_node(str(n), **str_attr)
# add edges, assign edge data as dictionary of attributes
for e in A.edges():
u, v = str(e[0]), str(e[1])
attr = dict(e.attr)
str_attr = {str(k): v for k, v in attr.items()}
if not N.is_multigraph():
if e.name is not None:
str_attr["key"] = e.name
N.add_edge(u, v, **str_attr)
else:
N.add_edge(u, v, key=e.name, **str_attr)
# add default attributes for graph, nodes, and edges
# hang them on N.graph_attr
N.graph["graph"] = dict(A.graph_attr)
N.graph["node"] = dict(A.node_attr)
N.graph["edge"] = dict(A.edge_attr)
return N
| 5,339,554
|
def get_textbox_rectangle_from_pane(pane_rectangle: GeometricRectangle, texts: Collection[str],
direction: str) -> GeometricRectangle:
"""
Args:
pane_rectangle: rectangle of the pane to subdivide
texts: collection of labels, one per text box
direction: 'right' to split the width across boxes, 'down' to split the height
Returns:
A GeometricRectangle anchored at the pane's top-left corner, with the width or
height divided by the number of text boxes.
"""
num_boxes: int = len(texts)
dimensions = copy.deepcopy(pane_rectangle.dimensions)
if direction == 'right':
dimensions.width /= num_boxes
elif direction == 'down':
dimensions.height /= num_boxes
else:
raise InvalidDirectionError(f'direction must be "right" or "down": {direction}')
return GeometricRectangle(top_left=pane_rectangle.top_left,
dimensions=dimensions)
| 5,339,555
|
def spawn_shell(shell_cmd):
"""Spawn a shell process with the provided command line. Returns the Pexpect object."""
return pexpect.spawn(shell_cmd[0], shell_cmd[1:], env=build_shell_env())
| 5,339,556
|
def analyze_image(data, err, seg, tab, athresh=3.,
robust=False, allow_recenter=False,
prefix='', suffix='', grow=1,
subtract_background=False, include_empty=False,
pad=0, dilate=0, make_image_cols=True):
"""
SEP/SExtractor analysis on arbitrary image
Parameters
----------
data : array
Image array
err : array
RMS error array
seg : array
Segmentation array
tab : `~astropy.table.Table`
Table output from `sep.extract` where `id` corresponds to segments in
`seg`. Requires at least columns of
``id, xmin, xmax, ymin, ymax`` and ``x, y, flag`` if want to use
`robust` estimators
athresh : float
Analysis threshold
prefix, suffix : str
Prefix and suffix to add to output table column names
Returns
-------
tab : `~astropy.table.Table`
Table with columns
``id, x, y, x2, y2, xy, a, b, theta``
``flux, background, peak, xpeak, ypeak, npix``
"""
from collections import OrderedDict
import sep
from grizli import utils, prep
yp, xp = np.indices(data.shape) - 0.5*(grow == 2)
# Output data
new = OrderedDict()
idcol = choose_column(tab, ['id','number'])
ids = tab[idcol]
new[idcol] = tab[idcol]
for k in ['x','y','x2','y2','xy','a','b','theta','peak','flux','background']:
if k in tab.colnames:
new[k] = tab[k].copy()
else:
new[k] = np.zeros(len(tab), dtype=np.float32)
for k in ['xpeak','ypeak','npix','flag']:
if k in tab.colnames:
new[k] = tab[k].copy()
else:
new[k] = np.zeros(len(tab), dtype=int)
for id_i in tqdm(ids):
ix = np.where(tab[idcol] == id_i)[0][0]
xmin = tab['xmin'][ix]-1-pad
ymin = tab['ymin'][ix]-1-pad
slx = slice(xmin, tab['xmax'][ix]+pad+2)
sly = slice(ymin, tab['ymax'][ix]+pad+2)
seg_sl = seg[sly, slx] == id_i
if include_empty:
seg_sl |= seg[sly, slx] == 0
if dilate > 0:
seg_sl = nd.binary_dilation(seg_sl, iterations=dilate)
if seg_sl.sum() == 0:
new['flag'][ix] |= 1
continue
if grow > 1:
sh = seg_sl.shape
seg_gr = np.zeros((sh[0]*grow, sh[1]*grow), dtype=bool)
for i in range(grow):
for j in range(grow):
seg_gr[i::grow, j::grow] |= seg_sl
seg_sl = seg_gr
xmin = xmin*grow
ymin = ymin*grow
slx = slice(xmin, (tab['xmax'][ix]+2+pad)*grow)
sly = slice(ymin, (tab['ymax'][ix]+2+pad)*grow)
if subtract_background:
if subtract_background == 2:
# Linear model
x = xp[sly, slx] - xmin
y = yp[sly, slx] - ymin
A = np.array([x[~seg_sl]*0.+1, x[~seg_sl], y[~seg_sl]])
b = data[sly,slx][~seg_sl]
lsq = np.linalg.lstsq(A.T, b)
back_level = lsq[0][0]
A = np.array([x[seg_sl]*0.+1, x[seg_sl], y[seg_sl]]).T
back_xy = A.dot(lsq[0])
else:
# Median
back_level = np.median(data[sly, slx][~seg_sl])
back_xy = back_level
else:
back_level = 0.
back_xy = back_level
dval = data[sly, slx][seg_sl] - back_xy
ival = err[sly, slx][seg_sl]
rv = dval.sum()
imax = np.argmax(dval)
peak = dval[imax]
x = xp[sly, slx][seg_sl] - xmin
y = yp[sly, slx][seg_sl] - ymin
xpeak = x[imax] + xmin
ypeak = y[imax] + ymin
thresh_sl = (dval > athresh*ival) & (ival >= 0)
new['npix'][ix] = thresh_sl.sum()
new['background'][ix] = back_level
if new['npix'][ix] == 0:
new['flag'][ix] |= 2
new['x'][ix] = np.nan
new['y'][ix] = np.nan
new['xpeak'][ix] = xpeak
new['ypeak'][ix] = ypeak
new['peak'][ix] = peak
new['flux'][ix] = rv
new['x2'][ix] = np.nan
new['y2'][ix] = np.nan
new['xy'][ix] = np.nan
new['a'][ix] = np.nan
new['b'][ix] = np.nan
new['theta'][ix] = np.nan
continue
cval = dval[thresh_sl]
rv = cval.sum()
x = x[thresh_sl]
y = y[thresh_sl]
mx = (x*cval).sum()
my = (y*cval).sum()
mx2 = (x*x*cval).sum()
my2 = (y*y*cval).sum()
mxy = (x*y*cval).sum()
xm = mx/rv
ym = my/rv
xm2 = mx2/rv - xm**2
ym2 = my2/rv - ym**2
xym = mxy/rv - xm*ym
if robust:
if 'flag' in tab.colnames:
flag = tab['flag'][ix] & sep.OBJ_MERGED
else:
flag = False
if flag | (robust > 1):
if allow_recenter:
xn = xm
yn = ym
else:
xn = tab['x'][ix]-xmin
yn = tab['y'][ix]-ymin
xm2 = mx2 / rv + xn*xn - 2*xm*xn
ym2 = my2 / rv + yn*yn - 2*ym*yn
xym = mxy / rv + xn*yn - xm*yn - xn*ym
xm = xn
ym = yn
temp2 = xm2*ym2-xym*xym
if temp2 < 0.00694:
xm2 += 0.0833333
ym2 += 0.0833333
temp2 = xm2*ym2-xym*xym;
temp = xm2 - ym2
if np.abs(temp) > 0:
theta = np.clip(np.arctan2(2.0*xym, temp)/2.,
-np.pi/2.+1.e-5, np.pi/2.-1.e-5)
else:
theta = np.pi/4
temp = np.sqrt(0.25*temp*temp+xym*xym);
pmy2 = pmx2 = 0.5*(xm2+ym2);
pmx2 += temp
pmy2 -= temp
amaj = np.sqrt(pmx2)
amin = np.sqrt(pmy2)
new['x'][ix] = xm+xmin
new['y'][ix] = ym+ymin
new['xpeak'][ix] = xpeak
new['ypeak'][ix] = ypeak
new['peak'][ix] = peak
new['flux'][ix] = rv
new['x2'][ix] = xm2
new['y2'][ix] = ym2
new['xy'][ix] = xym
new['a'][ix] = amaj
new['b'][ix] = amin
new['theta'][ix] = theta
new['flag'] |= ((~np.isfinite(new['a'])) | (new['a'] <= 0))*4
new['flag'] |= ((~np.isfinite(new['b'])) | (new['b'] <= 0))*8
newt = utils.GTable()
for k in new:
newt[f'{prefix}{k}{suffix}'] = new[k]
if make_image_cols:
newt['a_image'] = newt['a']
newt['b_image'] = newt['b']
newt['theta_image'] = newt['theta']
newt['x_image'] = newt['x']+1
newt['y_image'] = newt['y']+1
return newt
| 5,339,557
|
def qr(tag):
"""
Called by an AJAX request for the cipherwallet QR code.
This action is typically invoked by your web page containing the form, through the code
in cipherwallet.js, to obtain the image with the QR code to display.
It returns the image itself, with an 'image/png' content type, so you can use
the URL of this page as a 'src=...' attribute for the <img> tag.
"""
# default timeout values, do not modify because they must stay in sync with the API
DEFAULT_TTL = {
OP_SIGNUP: 120,
OP_LOGIN: 60,
OP_CHECKOUT: 300,
OP_REGISTRATION: 30,
}
# create an unique session identifier, 8 random characters, and postfix it with the QR code tag
# the qr code tag is useful to distinguish multiple QR codes on the same page
if re.compile("[a-zA-Z0-9.:_-]+").match(tag) is None:
raise CipherwalletError(400, "Bad request")
cw_session = "".join(random.choice(ALPHABET) for _ in range(8)) + "-" + tag
# get the user data request template; templates for each type of request are pre-formatted
# and stored in the constants file, in the qr_requests variable
try:
rq_def = qr_requests[tag]
except Exception:
raise CipherwalletError(501, "Not implemented")
# set the time-to-live of the cipherwallet session in the temporary storage
cw_session_ttl = rq_def.get('qr_ttl', DEFAULT_TTL[rq_def['operation']])
if tmp_datastore.cw_session_data(cw_session, 'qr_expires', 1 + cw_session_ttl + int(time.time())) is None:
raise CipherwalletError(500, "Internal server error")
# for registration QR code requests, we also save the current user ID in the short term storage
if rq_def['operation'] == OP_REGISTRATION:
uid = hooks.get_user_id_for_current_session() # you MUST implement this function in hooks.py
if uid is None:
raise CipherwalletError(401, "Unauthorized")
else:
tmp_datastore.cw_session_data(cw_session, 'user_id', uid);
# prepare request to the API
method = "POST";
resource = "/{0}/{1}.png".format(tag, cw_session)
request_params = {}
if rq_def.get('qr_ttl'): request_params['ttl'] = rq_def['qr_ttl']
if rq_def.get('callback_url'): request_params['push_url'] = rq_def['callback_url']
if rq_def['operation'] not in [ OP_LOGIN, OP_REGISTRATION, ]:
display = rq_def.get('display')
if hasattr(display, '__call__'):
request_params['display'] = display()
elif type(display) == type(""):
request_params['display'] = rq_def['display']
# should do the same thing for the service params
# create CQR headers and the query string
api_rq_headers = cqr.auth(
CUSTOMER_ID, API_SECRET, method, resource, request_params or "", H_METHOD
)
# some extra headers we need
api_rq_headers['Content-Type'] = "application/x-www-form-urlencoded";
#api_rq_headers['Content-Length'] = len(request_params);
# get the QR image from the API and send it right back to the browser
api_rp = requests.post(API_URL + resource, headers=api_rq_headers, data=request_params)
content = api_rp.content if api_rp.status_code == 200 \
else open(os.path.dirname(os.path.realpath(__file__)) + "/1x1.png", "rb").read()
return content, cw_session
| 5,339,558
|
def get_experiment_table(faultgroup, faultname, tablename):
"""
Get any table from a faultgroup
"""
node = faultgroup._f_get_child(faultname)
table = node._f_get_child(tablename)
return pd.DataFrame(table.read())
| 5,339,559
|
def kjunSeedList(baseSeed, n):
"""
Generates n seeds.
Due to the way it generates the seeds, do not use an n that is too large.
"""
assert n <= 100000
rs = ra.RandomState(baseSeed)
randVals = rs.randint(np.iinfo(np.uint32).max+1, size=n)
return randVals
| 5,339,560
|
def main():
"""
TODO:
"""
####################
print('Welcome to stanCode\"Anagram Generator\"( or -1 to quit)')
read_dictionary()
start, end = 0, 0
while True:
word = input('Find anagrams for: ')
if word == EXIT:
break
else:
start = time.time()
find_anagrams(word)
end = time.time()
####################
print('----------------------------------')
print(f'The speed of your anagram algorithm: {end-start} seconds.')
| 5,339,561
|
def filter_sharpen(image):
"""Apply a sharpening filter kernel to the image.
This is the same as using PIL's ``PIL.ImageFilter.SHARPEN`` kernel.
Added in 0.4.0.
**Supported dtypes**:
* ``uint8``: yes; fully tested
* ``uint16``: no
* ``uint32``: no
* ``uint64``: no
* ``int8``: no
* ``int16``: no
* ``int32``: no
* ``int64``: no
* ``float16``: no
* ``float32``: no
* ``float64``: no
* ``float128``: no
* ``bool``: no
Parameters
----------
image : ndarray
The image to modify.
Returns
-------
ndarray
Sharpened image.
"""
return _filter_by_kernel(image, PIL.ImageFilter.SHARPEN)
| 5,339,562
|
async def zha_client(hass, config_entry, zha_gateway, hass_ws_client):
"""Test zha switch platform."""
# load the ZHA API
async_load_api(hass)
# create zigpy device
await async_init_zigpy_device(
hass,
[general.OnOff.cluster_id, general.Basic.cluster_id],
[],
None,
zha_gateway,
)
await async_init_zigpy_device(
hass,
[general.OnOff.cluster_id, general.Basic.cluster_id, general.Groups.cluster_id],
[],
zigpy.profiles.zha.DeviceType.ON_OFF_LIGHT,
zha_gateway,
manufacturer="FakeGroupManufacturer",
model="FakeGroupModel",
ieee="01:2d:6f:00:0a:90:69:e8",
)
# load up switch domain
await hass.config_entries.async_forward_entry_setup(config_entry, DOMAIN)
await hass.async_block_till_done()
await hass.config_entries.async_forward_entry_setup(config_entry, light_domain)
await hass.async_block_till_done()
return await hass_ws_client(hass)
| 5,339,563
|
def summary_selector(summary_models=None):
"""
Creates a function that takes as input a dict of summaries:
{'T5': [str] summary_generated_by_T5, ..., 'KW': [str] summary_generated_by_KW}
and randomly returns a summary that has been generated by one of the models in summary_models.
If summary_models is None, no summary is used.
:param summary_models: list of str(SummarizerModel)
:return: function [dict] -> [str]
"""
if summary_models is None or len(summary_models) == 0 or \
(len(summary_models) == 1 and summary_models[0] == ""):
return lambda x: ""
summary_model = random.choice(summary_models)
return lambda summaries_dict: summaries_dict[summary_model]
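# Usage sketch: build a selector over two hypothetical summarizer outputs; summary_selector
# picks one model at random and the returned function reads that key from the dict.
selector = summary_selector(["T5", "KW"])
summaries = {"T5": "summary generated by T5", "KW": "summary generated by KW"}
print(selector(summaries))  # prints one of the two summaries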
| 5,339,564
|
def handle_collectd(root_dir):
"""Generate figure for each plugin for each hoster."""
result = collections.defaultdict(lambda: collections.defaultdict(dict))
for host in natsorted(root_dir.iterdir()):
for plugin in natsorted(host.iterdir()):
stats_list = natsorted(
[fname for fname in plugin.iterdir() if fname.suffix == ".rrd"]
)
title = plugin.name
result[host.name][plugin.name] = {
"daily": rrd2svg(
stats_list,
f"{title} - by day",
start_time=datetime.datetime.now() - datetime.timedelta(days=1),
),
"monthly": rrd2svg(
stats_list,
f"{title} - by month",
start_time=datetime.datetime.now() - datetime.timedelta(weeks=4),
),
}
if len(result[host.name]) > 20:
break
return result
| 5,339,565
|
def mfa_delete_token(token_name):
""" Deletes an MFA token file from the .ndt subdirectory in the user's
home directory """
os.remove(get_ndt_dir() + '/mfa_' + token_name)
| 5,339,566
|
def create_policy_case_enforcement(repository_id, blocking, enabled,
organization=None, project=None, detect=None):
"""Create case enforcement policy.
"""
organization, project = resolve_instance_and_project(
detect=detect, organization=organization, project=project)
policy_client = get_policy_client(organization)
configuration = create_configuration_object(repository_id, None, blocking, enabled,
'40e92b44-2fe1-4dd6-b3d8-74a9c21d0c6e',
['enforceConsistentCase'],
['true'])
return policy_client.create_policy_configuration(configuration=configuration, project=project)
| 5,339,567
|
def svn_fs_new(*args):
"""svn_fs_new(apr_hash_t fs_config, apr_pool_t pool) -> svn_fs_t"""
return _fs.svn_fs_new(*args)
| 5,339,568
|
def cam_pred(prefix, data_dir):
"""
"""
groundtruth_dict = read(os.path.join('../data/contrast_dataset', 'groundtruth.txt'))
cam = CAM(model=load_pretrained_model(prefix, 'resnet'))
if data_dir == '../data/split_contrast_dataset':
normalize = transforms.Normalize(mean=[0.7432, 0.661, 0.6283],
std=[0.0344, 0.0364, 0.0413])
print('load custom-defined skin dataset successfully!!!')
else:
raise ValueError('unsupported data_dir: {}'.format(data_dir))
preprocess = transforms.Compose([
transforms.ToTensor(),
normalize,
])
pred_results = []
y_true = []
idx = 0
for phase in ['train', 'val']:
path = os.path.join(data_dir, phase)
for name in os.listdir(path):
if 'lesion' not in name:
continue
abs_path = os.path.join(path, name)
img_pil = Image.open(abs_path)
img_tensor = preprocess(img_pil).unsqueeze(0)
heatmap = cam(img_tensor)
idx += 1
if idx % 50 == 0:
print('[%d/%d]' % (idx, len(os.listdir(path))))
heatmap = np.float32(heatmap) / 255
pred_results.extend(heatmap.reshape(-1))
y_true.extend(get_true(groundtruth_dict, name).reshape(-1).tolist())
print(idx)
return pred_results, y_true
| 5,339,569
|
def read_history_file(
store,
src_file,
store_file,
ignore_file=None,
mark_read=True):
"""Read in the history files."""
commands = _get_unread_commands(src_file)
output = []
if ignore_file:
ignore_rules = IgnoreRules.create_ignore_rule(ignore_file)
else:
ignore_rules = IgnoreRules()
# get the max count
current_time = time.time()
for line in commands:
current_time += 1
command = Command(line, current_time)
if ignore_rules.is_match(command.get_unique_command_id()):
continue
store.add_command(command)
output.append(command.get_unique_command_id())
if mark_read:
with open(store_file, 'a') as command_filestore:
for command_str in output:
command_filestore.write(command_str + '\n')
with open(src_file, "a") as myfile:
myfile.write(PROCESSED_TO_TAG + "\n")
| 5,339,570
|
def least_similar(sen, voting_dict):
"""
Find senator with voting record least similar, excluding the senator passed
:param sen: senator last name
:param voting_dict: dictionary of voting record by last name
:return: senator last name with least similar record, in case of a tie chooses first alphabetically
>>> voting_dict = create_voting_dict(list(open('voting_record_dump109.txt')))
>>> least_similar('Mikulski', voting_dict)
'Inhofe'
>>> least_similar('Santorum', voting_dict) # 2.12.5
'Feingold'
"""
return specifier_similar(sen, voting_dict, '<')['sen']
| 5,339,571
|
def _send_req(wait_sec, url, req_gen, retry_result_code=None):
""" Helper function to send requests and retry when the endpoint is not ready.
Args:
wait_sec: int, max time to wait and retry in seconds.
url: str, url to send the request, used only for logging.
req_gen: lambda, no parameter function to generate requests.Request for the
function to send to the endpoint.
retry_result_code: int (optional), status code to match or retry the request.
Returns:
requests.Response
"""
def retry_on_error(e):
return isinstance(e, (SSLError, ReqConnectionError))
# generates function to see if the request needs to be retried.
# if param `code` is None, will not retry and directly pass back the response.
# Otherwise will retry if status code is not matched.
def retry_on_result_func(code):
if code is None:
return lambda _: False
return lambda resp: not resp or resp.status_code != code
@retry(stop_max_delay=wait_sec * 1000, wait_fixed=10 * 1000,
retry_on_exception=retry_on_error,
retry_on_result=retry_on_result_func(retry_result_code))
def _send(url, req_gen):
resp = None
logging.info("sending request to %s", url)
try:
resp = req_gen()
except Exception as e:
logging.warning("%s: request with error: %s", url, e)
raise e
return resp
return _send(url, req_gen)
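# Hedged usage sketch: poll a (hypothetical) endpoint for up to 60 seconds until it
# returns HTTP 200; assumes `requests` is imported at module level as the code above implies.
import requests
health_url = "http://localhost:8080/healthz"
resp = _send_req(60, health_url, lambda: requests.get(health_url, timeout=5), retry_result_code=200)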
| 5,339,572
|
def load_file(path, types = None):
"""
Load the file at `path` if its extension is in the `types` list.
----
:param path: file path
:param types: list of allowed file extensions, such as ['txt', 'xlsx']; if None, all files are loaded; files whose extension is not in the list are skipped
:return: a list of the form [path, data], or None if the file is skipped or its format is unsupported
"""
ext = path.split(".")[-1]
if types is not None:
if ext not in types: # filter this file
return None
if ext == "txt":
return [path, __load_txt(path)]
else:
print("pyftools: format", ext, "not support!")
return None
| 5,339,573
|
def cdo_spatial_cut(path, file_includes, new_file_includes, lonmin, lonmax, latmin, latmax):
"""
Loops through the given directory and executes "cdo -sellonlatbox,lonmin,lonmax,latmin,latmax *file_includes* fileout.nc", appending "spatial_cut_*new_file_includes*" to the end of each filename.
"""
for name in os.listdir(path):
if file_includes in name and 'spatial_cut' not in name:
name_new = f"{''.join(name.split('.')[:-1])}_spatial_cut_{new_file_includes}.{name.split('.')[-1]}"
print(f'extracting region: {name} to {name_new} in {path} ...')
os.system(f'cdo -sellonlatbox,{lonmin},{lonmax},{latmin},{latmax} {pjoin(path, name)} {pjoin(path, name_new)}')
| 5,339,574
|
def get_dense_labels_map(values, idx_dtype='uint32'):
"""
convert unique values into dense int labels [0..n_uniques]
:param array values: (n,) dtype array
:param dtype? idx_dtype: (default: 'uint32')
:returns: tuple(
labels2values: (n_uniques,) dtype array,
values2labels: HashMap(dtype->int),
)
"""
# get unique values
unique_values = unique(values)
# build labels from 0 to n_uniques
labels = numpy.arange(unique_values.shape[0], dtype=idx_dtype)
# build small hashmap with just the unique items
values2labels = Hashmap(unique_values, labels)
return unique_values, values2labels
| 5,339,575
|
def delete_debug_file_from_orchestrator(
self,
filename: str,
) -> bool:
"""Delete debug file from Orchestrator
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - debugFiles
- POST
- /debugFiles/delete
:param filename: Name of debug file to delete from Orchestrator
:type filename: str
:return: Returns True/False based on successful call
:rtype: bool
"""
data = {"fileName": filename}
return self._post(
"/debugFiles/delete",
data=data,
return_type="bool",
)
| 5,339,576
|
def _replace_fun_unescape(m: Match[str]) -> str:
""" Decode single hex/unicode escapes found in regex matches.
Supports single hex/unicode escapes of the form ``'\\xYY'``,
``'\\uYYYY'``, and ``'\\UYYYYYYYY'`` where Y is a hex digit. Only
decodes if there is an odd number of backslashes.
.. versionadded:: 0.2
Parameters
----------
m : regex match
Returns
-------
c : str
The unescaped character.
"""
slsh = b'\\'.decode('ascii')
s = m.group(0)
count = s.count(slsh)
if count % 2 == 0:
return s
else:
c = chr(int(s[(count + 1):], base=16))
return slsh * (count - 1) + c
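# Usage sketch (hedged): the regex below is an assumption about how this
# replacer might be wired up; the real module may use a different pattern.
import re

_escape_re = re.compile(r'\\+(?:x[0-9a-fA-F]{2}|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8})')
print(_escape_re.sub(_replace_fun_unescape, r'name\x41\u00e9'))  # -> 'nameAé'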
| 5,339,577
|
def delete_clip(cid: int) -> None:
"""
Deletes the specified clip.
:param cid: The clip's id.
"""
try:
Clip.objects.get(id=cid).delete()
except Clip.DoesNotExist:
pass
| 5,339,578
|
def predict_next_location(game_data, ship_name):
"""
Predict the next location of a space ship.
Parameters
----------
    game_data: data of the game (dict).
    ship_name: name of the spaceship whose next location is predicted (str).
    Return
    ------
    predicted_location: predicted location of the spaceship (tuple(int, int)).
Version
-------
Specification: Nicolas Van Bossuyt (v1. 19/03/17).
Implementation: Nicolas Van Bossuyt (v1. 19/03/17).
Bayron Mahy (v2. 22/03/17).
"""
ship_location = game_data['ships'][ship_name]['location']
ship_facing = game_data['ships'][ship_name]['facing']
ship_speed = game_data['ships'][ship_name]['speed']
return next_location(ship_location, ship_facing, ship_speed, game_data['board_size'])
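# Usage sketch (hedged): game_data below only contains the keys this function
# reads; the ship name and values are illustrative, and next_location is the
# helper from the same game module.
game_data = {
    'board_size': (20, 20),
    'ships': {
        'fighter_1': {'location': (5, 7), 'facing': (1, 0), 'speed': 2},
    },
}
predicted = predict_next_location(game_data, 'fighter_1')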
| 5,339,579
|
def mux_video_audio(videofile_path, audiofile_path, output_video_name):
"""
Add audio to video file
@param videofile_path: str - Input video file path
@param audiofile_path: str - Input audio file path
@param output_video_name: str - Output video file path
"""
input_video = ffmpeg.input(videofile_path)
input_audio = ffmpeg.input(audiofile_path)
ffmpeg.concat(input_video, input_audio, v=1, a=1).output(output_video_name, vcodec="libx264").overwrite_output().run()
| 5,339,580
|
def main(argv):
"""Find and report approximate size info for a particular built package."""
commandline.RunInsideChroot()
parser = _get_parser()
opts = parser.parse_args(argv)
opts.Freeze()
db = portage_util.PortageDB(root=opts.root)
if opts.packages:
installed_packages = portage_util.GenerateInstalledPackages(db, opts.root,
opts.packages)
else:
installed_packages = db.InstalledPackages()
results = generate_package_size_report(db, opts.root, opts.image_type,
opts.partition_name,
installed_packages)
print(json.dumps(results))
| 5,339,581
|
def get_absolute_filename(user_inputted_filename: str) -> Path:
"""Clean up user inputted filename path, wraps os.path.abspath, returns Path object"""
filename_location = Path(os.path.abspath(user_inputted_filename))
return filename_location
| 5,339,582
|
def check_answer(guess, a_follower, b_follower):
    """Check if the user guessed the correct option"""
if a_follower > b_follower:
return guess == "a"
else:
return guess == "b"
| 5,339,583
|
def command_line_arg_parser():
"""
Command line argument parser. Encrypts by default. Decrypts when --decrypt flag is passed in.
"""
parser = argparse.ArgumentParser(description='Parses input args')
parser.add_argument('input_file', type=str,
help='Path to input file location')
    # nargs='?' makes the positional optional so the documented default applies.
    parser.add_argument('output_file', type=str, nargs='?', default='./output_data',
                        help='Path to output file location (default: ./output_data)')
parser.add_argument('key_file', type=str,
help='Path to public or private key file')
    parser.add_argument('--decrypt', dest='decrypt', action='store_true',
                        help='Decrypt instead of encrypt (requires a private key file)')
return parser
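# Usage sketch (hedged): file names are illustrative; passing an explicit
# argv list keeps the example self-contained.
args = command_line_arg_parser().parse_args(
    ["input.bin", "output.bin", "key.pem", "--decrypt"])
print(args.input_file, args.output_file, args.key_file, args.decrypt)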
| 5,339,584
|
def x(ctx, command):
"""run command on all containers (one for each deployment) within current
namespace. only show output when command succeeds
\b
examples:
\b
lain admin x -- bash -c 'pip3 freeze | grep -i requests'
"""
res = kubectl('get', 'po', '--no-headers', capture_output=True)
ctx.obj['silent'] = True
deploy_names = set()
for line in ensure_str(res.stdout).splitlines():
podname, *_ = line.split()
deploy_name = tell_pod_deploy_name(podname)
if deploy_name in deploy_names:
continue
deploy_names.add(deploy_name)
res = kubectl(
'exec',
'-it',
podname,
'--',
*command,
check=False,
timeout=None,
capture_output=True,
)
if rc(res):
stderr = ensure_str(res.stderr)
# abort execution in the case of network error
if 'unable to connect' in stderr.lower() or 'timeout' in stderr:
error(stderr, exit=1)
continue
echo(f'command succeeds for {podname}')
echo(res.stdout)
| 5,339,585
|
async def output(ctx, metadata):
"""Create outputs - such as Ansible inventory."""
ctx.obj.init_metadata(metadata)
await generate_outputs(ctx)
| 5,339,586
|
def respond(variables, Body=None, Html=None, **kwd):
"""
Does the grunt work of cooking up a MailResponse that's based
    on a template. The only difference between the lamson.mail.MailResponse
    class and this (apart from the variables passed to a template) is that
    instead of giving actual Body or Html parameters with contents,
you give the name of a template to render. The kwd variables are
the remaining keyword arguments to MailResponse of From/To/Subject.
For example, to render a template for the body and a .html for the Html
attachment, and to indicate the From/To/Subject do this:
msg = view.respond(locals(), Body='template.txt',
Html='template.html',
From='test@test.com',
To='receiver@test.com',
Subject='Test body from "%(dude)s".')
In this case you're using locals() to gather the variables needed for
the 'template.txt' and 'template.html' templates. Each template is
setup to be a text/plain or text/html attachment. The From/To/Subject
are setup as needed. Finally, the locals() are also available as
simple Python keyword templates in the From/To/Subject so you can pass
in variables to modify those when needed (as in the %(dude)s in Subject).
"""
assert Body or Html, "You need to give either the Body or Html template of the mail."
for key in kwd:
kwd[key] = kwd[key] % variables
msg = mail.MailResponse(**kwd)
if Body:
msg.Body = render(variables, Body)
if Html:
msg.Html = render(variables, Html)
return msg
| 5,339,587
|
def canonical_for_code_system(jcs: Dict) -> str:
    """get the canonical URL for a code system entry from the ART-DECOR JSON. Prefer FHIR URIs over the generic OID URI.
Args:
jcs (Dict): the dictionary describing the code system
Returns:
str: the canonical URL
"""
if "canonicalUriR4" in jcs:
return jcs["canonicalUriR4"]
else:
return jcs["canonicalUri"]
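# Usage sketch (hedged): a minimal ART-DECOR-style entry with illustrative URIs;
# the FHIR R4 canonical is preferred over the generic OID URI.
jcs = {"canonicalUri": "urn:oid:2.16.840.1.113883.6.1",
       "canonicalUriR4": "http://loinc.org"}
assert canonical_for_code_system(jcs) == "http://loinc.org"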
| 5,339,588
|
def correspdesc_source(data):
"""
extract @source from TEI elements <correspDesc>
"""
correspdesc_data = correspdesc(data)
try:
return [cd.attrib["source"].replace("#", "") for cd in correspdesc_data]
except KeyError:
pass
try:
return [cd.attrib[ns_cs("source")].replace("#", "") for cd in correspdesc_data]
except KeyError:
pass
return []
| 5,339,589
|
def leaders(Z, T):
"""
(L, M) = leaders(Z, T):
For each flat cluster j of the k flat clusters represented in the
n-sized flat cluster assignment vector T, this function finds the
lowest cluster node i in the linkage tree Z such that:
* leaf descendents belong only to flat cluster j (i.e. T[p]==j
for all p in S(i) where S(i) is the set of leaf ids of leaf
nodes descendent with cluster node i)
* there does not exist a leaf that is not descendent with i
that also belongs to cluster j (i.e. T[q]!=j for all q not in S(i)).
If this condition is violated, T is not a valid cluster assignment
vector, and an exception will be thrown.
Two k-sized numpy vectors are returned, L and M. L[j]=i is the linkage
cluster node id that is the leader of flat cluster with id M[j]. If
i < n, i corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
"""
Z = np.asarray(Z)
T = np.asarray(T)
    # NumPy removed the np.int alias; accept any integer dtype instead.
    if not isinstance(T, np.ndarray) or not np.issubdtype(T.dtype, np.integer):
        raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype=np.int32)
M = np.zeros((kk,), dtype=np.int32)
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _cluster_wrap.leaders_wrap(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError('T is not a valid assignment vector. Error found when examining linkage node %d (< 2n-1).' % s)
return (L, M)
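# Usage sketch (hedged): random data for illustration only; modern SciPy exposes
# the same interface as scipy.cluster.hierarchy.leaders, and linkage/fcluster
# produce the Z and T arguments expected here.
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster

X = np.random.rand(10, 2)                     # 10 observations in 2-D
Z = linkage(X, method='single')               # (n - 1) x 4 linkage matrix
T = fcluster(Z, t=0.5, criterion='distance')  # flat cluster labels, length n
L, M = leaders(Z, T)                          # L[j] is the leader node of flat cluster M[j]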
| 5,339,590
|
def plot_visualize_mft_sources(fwdmag, stcdata, tmin, tstep,
subject, subjects_dir):
"""
Plot the MFT sources at time point of peak.
Parameters
----------
fwdmag: forward solution
stcdata: stc with ||cdv|| (point sequence as in fwdmag['source_rr'])
tmin, tstep, subject: passed to mne.SourceEstimate()
"""
print("##### Attempting to plot:")
# cf. decoding/plot_decoding_spatio_temporal_source.py
vertices = [s['vertno'] for s in fwdmag['src']]
if len(vertices) == 1:
vertices = [fwdmag['src'][0]['vertno'][fwdmag['src'][0]['rr'][fwdmag['src'][0]['vertno']][:, 0] <= -0.],
fwdmag['src'][0]['vertno'][fwdmag['src'][0]['rr'][fwdmag['src'][0]['vertno']][:, 0] > -0.]]
elif len(vertices) > 2:
warnings.warn('plot_visualize_mft_sources(): Cannot handle more than two sources spaces')
return
stc_feat = SourceEstimate(stcdata, vertices=vertices,
tmin=tmin, tstep=tstep, subject=subject)
itmaxsum = np.argmax(np.sum(stcdata, axis=0))
twmin = tmin + tstep * float(itmaxsum - stcdata.shape[1] / 20)
twmax = tmin + tstep * float(itmaxsum + stcdata.shape[1] / 20)
for ihemi, hemi in enumerate(['lh', 'rh', 'both']):
brain = stc_feat.plot(surface='white', hemi=hemi, subjects_dir=subjects_dir,
transparent=True, clim='auto')
# use peak getter to move visualization to the time point of the peak
print("Restricting peak search to [%fs, %fs]" % (twmin, twmax))
if hemi == 'both':
brain.show_view('parietal')
vertno_max, time_idx = stc_feat.get_peak(hemi=None, time_as_index=True,
tmin=twmin, tmax=twmax)
else:
brain.show_view('lateral')
vertno_max, time_idx = stc_feat.get_peak(hemi=hemi, time_as_index=True,
tmin=twmin, tmax=twmax)
print("hemi=%s: setting time_idx=%d" % (hemi, time_idx))
brain.set_data_time_index(time_idx)
if hemi == 'lh' or hemi == 'rh':
# draw marker at maximum peaking vertex
brain.add_foci(vertno_max, coords_as_verts=True, hemi=hemi, color='blue',
scale_factor=0.6)
if len(fwdmag['src']) > ihemi:
fwds = fwdmag['src'][ihemi]
comax = fwds['rr'][vertno_max]
print("hemi=%s: vertno_max=%d, time_idx=%d fwdmag['src'][%d]['rr'][vertno_max] = " % \
(hemi, vertno_max, time_idx, ihemi), comax)
offsets = np.append([0], [s['nuse'] for s in fwdmag['src']])
if hemi == 'lh':
ifoci = [np.nonzero([stcdata[0:offsets[1], time_idx] >= 0.25 * np.max(stcdata[:, time_idx])][0])]
elif len(fwdmag['src']) > 1:
ifoci = [np.nonzero([stcdata[offsets[1]:, time_idx] >= 0.25 * np.max(stcdata[:, time_idx])][0])]
vfoci = fwds['vertno'][ifoci[0][0]]
cfoci = fwds['rr'][vfoci]
print("Coords of %d sel. vfoci: " % cfoci.shape[0])
print(cfoci)
print("vfoci: ")
print(vfoci)
print("brain.geo[%s].coords[vfoci] : " % hemi)
print(brain.geo[hemi].coords[vfoci])
mrfoci = np.zeros(cfoci.shape)
invmri_head_t = invert_transform(fwdmag['info']['mri_head_t'])
mrfoci = apply_trans(invmri_head_t['trans'], cfoci, move=True)
print("mrfoci: ")
print(mrfoci)
# Just some blops along the coordinate axis:
                # This will not yield reasonable results with an inflated brain.
# bloblist = np.zeros((300,3))
# for i in xrange(100):
# bloblist[i,0] = float(i)
# bloblist[i+100,1] = float(i)
# bloblist[i+200,2] = float(i)
# mrblobs = apply_trans(invmri_head_t['trans'], bloblist, move=True)
# brain.add_foci(mrblobs, coords_as_verts=False, hemi=hemi, color='yellow', scale_factor=0.3)
brain.save_image('testfig_map_%s.png' % hemi)
brain.close()
| 5,339,591
|
def wmca(instance: bot, message: trigger) -> None:
"""Expand a link to Wikimedia CentralAuth."""
try:
instance.say(
f'https://meta.wikimedia.org/wiki/Special:CentralAuth/{message.group(2).replace(" ", "_")}',
)
except AttributeError:
instance.say('Syntax: .wmca example', message.sender)
| 5,339,592
|
def create_unique_views(rows: list, fields: List[str]):
    """Create one unique view per distinct combination of the given fields; by default the key is the whole row of selected field values"""
views = {}
for r in rows:
values = [r[cname] for cname in fields]
if any(isinstance(x, list) for x in values):
if all(isinstance(x, list) for x in values) and len({len(x) for x in values}) == 1:
# all its value is in a list
for j in range(len(values[0])):
key = ",".join(str(values[i][j]) for i in range(len(values)))
views[key] = [values[i][j] for i in range(len(values))]
else:
# assert False
key = ",".join((str(x) for x in values))
views[key] = values
else:
key = ",".join((str(x) for x in values))
views[key] = values
views = [{cname: r[i] for i, cname in enumerate(fields)} for r in views.values()]
return views
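# Usage sketch (hedged): illustrative rows; equal-length list-valued fields are
# zipped element-wise into separate views, scalar fields form one view per row.
rows = [
    {"name": ["a", "b"], "value": [1, 2], "other": "x"},
    {"name": "a", "value": 1, "other": "y"},
]
views = create_unique_views(rows, ["name", "value"])
# -> [{"name": "a", "value": 1}, {"name": "b", "value": 2}]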
| 5,339,593
|
def check_shape_function(invocations: List[Invocation]):
"""Decorator that automatically tests a shape function.
The shape function, which is expected to be named systematically with
`〇` instead of `.`, is tested against the corresponding op in
`torch.ops.*` function using the given invocations.
"""
def decorator(f):
# `torch.ops.*` functions are overloaded already, so we don't need
# to pass in the overload name.
ns, unqual = f.__name__.split("〇")[:2]
op = getattr(getattr(torch.ops, ns), unqual)
for invocation in invocations:
shape_fn_error, op_error = None, None
try:
result_shapes = _normalize_multiple_results_to_list(f(
*invocation.to_shape_function_args(),
**invocation.kwargs))
except Exception as e:
shape_fn_error = f"{e}"
try:
golden_results = _normalize_multiple_results_to_list(op(
*invocation.to_real_op_args(),
**invocation.kwargs))
except Exception as e:
op_error = f"{e}"
def report(error_message: str):
raise ValueError(f"For shape function {f.__name__!r} with invocation {invocation}: {error_message}")
# Check for error behavior.
if invocation.is_expected_to_raise_exception():
if shape_fn_error is None and op_error is None:
report(f"Expected to raise an exception, but neither shape function nor op raised an exception")
if shape_fn_error is None:
report(f"Op raised error {op_error!r}, but shape function did not.")
if op_error is None:
report(f"Shape function raised error {shape_fn_error!r}, but op did not.")
else:
if shape_fn_error is not None and op_error is not None:
report(f"Both shape function and op raised errors, but were not expected to. Shape function raised error {shape_fn_error!r} and op raised error {op_error!r}.")
if shape_fn_error is not None:
report(f"Shape function raised error {shape_fn_error!r} but op did not raise any error.")
if op_error is not None:
report(f"Op raised error {op_error!r} but shape function did not raise any error.")
if shape_fn_error is not None or op_error is not None:
# If both raised errors, then that is good -- the shape function
# and the real op should agree on the erroneous cases.
# The exact error message might differ though.
if shape_fn_error is not None and op_error is not None:
continue
# Check for matching results.
if len(result_shapes) != len(golden_results):
report(f"Expected {len(golden_results)} result shapes, got {len(result_shapes)}")
for result_shape, golden_result in zip(result_shapes, golden_results):
for dimension_size, golden_dimension_size in zip(result_shape, golden_result.shape):
if dimension_size != golden_dimension_size:
report(f"Expected result shape {golden_result.shape}, got {result_shape}")
return f
return decorator
| 5,339,594
|
def _prepare_images(ghi, clearsky, daytime, interval):
"""Prepare data as images.
Performs pre-processing steps on `ghi` and `clearsky` before
returning images for use in the shadow detection algorithm.
Parameters
----------
ghi : Series
Measured GHI. [W/m^2]
clearsky : Series
Expected clearsky GHI. [W/m^2]
daytime : Series
Boolean series with True for daytime and False for night.
interval : int
Time between data points in `ghi`. [minutes]
Returns
-------
ghi_image : np.ndarray
Image form of `ghi`
clearsky_image : np.ndarray
Image form of `clearsky`
clouds_image : np.ndarray
Image of the cloudy periods in `ghi`
image_times : pandas.DatetimeIndex
Index for the data included in the returned images. Leading
and trailing days with incomplete data are not included in the
image, these times are needed to build a Series from the image
later on.
"""
# Fill missing times by interpolation. Missing data at the
# beginning or end of the series is not filled in, and will be
# excluded from the images used for shadow detection.
image_width = 1440 // interval
ghi = ghi.interpolate(limit_area='inside')
# drop incomplete days.
ghi = ghi[ghi.resample('D').transform('count') == image_width]
image_times = ghi.index
ghi_image = _to_image(ghi.to_numpy(), image_width)
scaled_ghi = (ghi * 1000) / np.max(_smooth(ghi_image))
scaled_clearsky = (clearsky * 1000) / clearsky.max()
scaled_clearsky = scaled_clearsky.reindex_like(scaled_ghi)
daytime = daytime.reindex_like(scaled_ghi)
# Detect clouds.
window_size = 50 // interval
clouds = _detect_clouds(scaled_ghi, scaled_clearsky, window_size)
cloud_mask = _to_image(clouds.to_numpy(), image_width)
# Interpolate across days (i.e. along columns) to remove clouds
# replace clouds with nans
#
# This could probably be done directly with scipy.interpolate.inter1d,
# but the easiest approach is to turn the image into a dataframe and
# interpolate along the columns.
cloudless_image = ghi_image.copy()
cloudless_image[cloud_mask] = np.nan
clouds_image = ghi_image.copy()
clouds_image[~cloud_mask] = np.nan
ghi_image = pd.DataFrame(cloudless_image).interpolate(
axis=0,
limit_direction='both'
).to_numpy()
# set night to nan
ghi_image[~_to_image(daytime.to_numpy(), image_width)] = np.nan
return (
ghi_image,
_to_image(scaled_clearsky.to_numpy(), image_width),
clouds_image,
image_times
)
| 5,339,595
|
def interleaved_code(modes: int) -> BinaryCode:
""" Linear code that reorders orbitals from even-odd to up-then-down.
In up-then-down convention, one can append two instances of the same
code 'c' in order to have two symmetric subcodes that are symmetric for
spin-up and -down modes: ' c + c '.
In even-odd, one can concatenate with the interleaved_code
to have the same result:' interleaved_code * (c + c)'.
    This code changes the order of modes from (0, 1, 2, ..., modes-1)
    to (0, modes/2, 1, modes/2+1, ..., modes-1, modes/2 - 1).
n_qubits = n_modes.
Args: modes (int): number of modes, must be even
Returns (BinaryCode): code that interleaves orbitals
"""
if modes % 2 == 1:
raise ValueError('number of modes must be even')
else:
mtx = numpy.zeros((modes, modes), dtype=int)
for index in numpy.arange(modes // 2, dtype=int):
mtx[index, 2 * index] = 1
mtx[modes // 2 + index, 2 * index + 1] = 1
return BinaryCode(mtx, linearize_decoder(mtx.transpose()))
| 5,339,596
|
def nearest_neighbors(data, args):
"""
    Nearest neighbors
"""
from sklearn.neighbors import NearestNeighbors
nbrs = NearestNeighbors(**args)
nbrs.fit(data)
    # Compute the nearest-neighbor indices and distances for the test data:
# distances, indices = nbrs.kneighbors(test_data)
return nbrs
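# Usage sketch (hedged): random data for illustration; `args` is passed straight
# through to sklearn's NearestNeighbors constructor.
import numpy as np

train = np.random.rand(100, 3)
nbrs = nearest_neighbors(train, {"n_neighbors": 5, "algorithm": "ball_tree"})
distances, indices = nbrs.kneighbors(np.random.rand(10, 3))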
| 5,339,597
|
def test_selective_sync(m: Maestral) -> None:
"""
    Tests :meth:`Maestral.exclude_item`, :meth:`Maestral.include_item`,
:meth:`Maestral.excluded_status` and :meth:`Maestral.excluded_items`.
"""
dbx_dirs = [
"/selective_sync_test_folder",
"/independent_folder",
"/selective_sync_test_folder/subfolder_0",
"/selective_sync_test_folder/subfolder_1",
]
local_dirs = [m.to_local_path(dbx_path) for dbx_path in dbx_dirs]
# create folder structure
for path in local_dirs:
os.mkdir(path)
wait_for_idle(m)
# exclude "/selective_sync_test_folder" from sync
m.exclude_item("/selective_sync_test_folder")
wait_for_idle(m)
# check that local items have been deleted
assert not osp.exists(m.to_local_path("/selective_sync_test_folder"))
# check that `Maestral.excluded_items` only contains top-level folder
assert "/selective_sync_test_folder" in m.excluded_items
assert "/selective_sync_test_folder/subfolder_0" not in m.excluded_items
assert "/selective_sync_test_folder/subfolder_1" not in m.excluded_items
# check that `Maestral.excluded_status` returns the correct values
assert m.excluded_status("") == "partially excluded"
assert m.excluded_status("/independent_folder") == "included"
for dbx_path in dbx_dirs:
if dbx_path != "/independent_folder":
assert m.excluded_status(dbx_path) == "excluded"
# include folder in sync, check that it worked
m.include_item("/selective_sync_test_folder")
wait_for_idle(m)
assert osp.exists(m.to_local_path("/selective_sync_test_folder"))
assert "/selective_sync_test_folder" not in m.excluded_items
for dbx_path in dbx_dirs:
assert m.excluded_status(dbx_path) == "included"
# test excluding a non-existent folder
with pytest.raises(NotFoundError):
m.exclude_item("/bogus_folder")
# check for fatal errors
assert not m.fatal_errors
| 5,339,598
|
def register_connection(
alias,
db=None,
name=None,
host=None,
port=None,
read_preference=READ_PREFERENCE,
username=None,
password=None,
authentication_source=None,
authentication_mechanism=None,
**kwargs
):
"""Register the connection settings.
    :param alias: the name that will be used to refer to this connection
        throughout MongoEngine
    :param db: the name of the database to use, for compatibility with connect
    :param name: the name of the specific database to use
    :param host: the host name of the :program:`mongod` instance to connect to
    :param port: the port that the :program:`mongod` instance is running on
    :param read_preference: The read preference for the collection
    :param username: username to authenticate with
    :param password: password to authenticate with
    :param authentication_source: database to authenticate against
    :param authentication_mechanism: database authentication mechanisms.
        By default, use SCRAM-SHA-1 with MongoDB 3.0 and later,
        MONGODB-CR (MongoDB Challenge Response protocol) for older servers.
    :param is_mock: explicitly use mongomock for this connection
        (can also be done by using `mongomock://` as db host prefix)
    :param kwargs: ad-hoc parameters to be passed into the pymongo driver,
        for example maxpoolsize, tz_aware, etc. See the documentation
        for pymongo's `MongoClient` for a full list.
    .. versionchanged:: 0.10.6 - added mongomock support
"""
conn_settings = _get_connection_settings(
db=db,
name=name,
host=host,
port=port,
read_preference=read_preference,
username=username,
password=password,
authentication_source=authentication_source,
authentication_mechanism=authentication_mechanism,
**kwargs
)
_connection_settings[alias] = conn_settings
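# Usage sketch (hedged): alias, database name, host and credentials are
# illustrative; this only records the settings, a client is created later
# when the connection is actually opened.
register_connection(
    "analytics",
    name="analytics_db",
    host="localhost",
    port=27017,
    username="reader",
    password="secret",
    authentication_source="admin",
)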
| 5,339,599
|