| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def _registry_munger(desired_registries_dict, current_registries_json_dict):
"""Generator for iterating through the union of the desired & current registry configurations.
Yields a tuple containing the registry name, desired configuration, & current configuration."""
for key in desired_registries_dict.keys() | current_registries_json_dict.keys():
yield (key, desired_registries_dict.get(key, None), current_registries_json_dict.get(key, None))
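# Hedged usage sketch (not part of the original source): walk the union of a desired
# and a current registry mapping. A key present on only one side yields None for the
# missing side, which makes add/remove/update decisions straightforward.
desired = {"docker.io": {"mirror": "https://mirror.example"}, "quay.io": {}}
current = {"docker.io": {"mirror": "https://old.example"}, "gcr.io": {}}
for name, wanted, existing in _registry_munger(desired, current):
    if wanted is None:
        print(f"{name}: remove")   # only in the current configuration
    elif existing is None:
        print(f"{name}: add")      # only in the desired configuration
    elif wanted != existing:
        print(f"{name}: update")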
| 5,342,200
|
def hdi_of_mcmc(sample_vec, cred_mass=0.95):
"""
Highest density interval of sample.
"""
assert len(sample_vec), 'need points to find HDI'
sorted_pts = np.sort(sample_vec)
ci_idx_inc = int(np.floor(cred_mass * len(sorted_pts)))
n_cis = len(sorted_pts) - ci_idx_inc
ci_width = sorted_pts[ci_idx_inc:] - sorted_pts[:n_cis]
min_idx = np.argmin(ci_width)
hdi_min = sorted_pts[min_idx]
hdi_max = sorted_pts[min_idx + ci_idx_inc]
return hdi_min, hdi_max
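# Hedged usage sketch: for a standard normal sample, the 95% highest density interval
# should land near (-1.96, 1.96). Assumes numpy, which the function above already uses.
import numpy as np

rng = np.random.default_rng(0)
sample = rng.normal(loc=0.0, scale=1.0, size=100_000)
lo, hi = hdi_of_mcmc(sample, cred_mass=0.95)
print(f"95% HDI: ({lo:.2f}, {hi:.2f})")  # roughly (-1.96, 1.96)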
| 5,342,201
|
def compare_AlphaFz(sq_amp,sq_amp_baseline):
"""
Compare the baseline alpha squared amplitude with that of a single epoch.
Parameters
----------
sq_amp: float
Alpha squared amplitude (Fz) from a single epoch
    sq_amp_baseline: float
Baseline alpha squared amplitude (Fz)
Returns
-------
feedback_val: float
Feedback value for stimulus presentation [-1,1]
"""
relative_error = (sq_amp-sq_amp_baseline)/sq_amp_baseline
feedback_val = relative_error
if feedback_val>1:
feedback_val = 1
elif feedback_val<-1:
feedback_val = -1
return feedback_val
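# Hedged usage sketch: the feedback value is the relative change against baseline,
# clipped to the [-1, 1] range expected for stimulus presentation.
print(compare_AlphaFz(sq_amp=15.0, sq_amp_baseline=10.0))  # 0.5  (50% above baseline)
print(compare_AlphaFz(sq_amp=30.0, sq_amp_baseline=10.0))  # 1.0  (clipped)
print(compare_AlphaFz(sq_amp=2.0, sq_amp_baseline=10.0))   # -0.8 (80% below baseline)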
| 5,342,202
|
def get_filtered_acc_gas(database_year, start_year, end_year):
"""Returns gas avoided costs data
Parameters
----------
database_year: str
The year corresponding to the database that contains the avoided costs data.
Requires that year's database to have already been downloaded
using the `flexvalue downloaded-avoided-costs-data-db --year 20XX` command.
start_year: int
Which year to start the filter of avoided costs data
end_year: int
Which year to end the filter of avoided costs data
Returns
-------
pd.DataFrame
"""
columns = [
"year",
"month",
*ACC_COMPONENTS_GAS,
]
columns_str = ", ".join(columns)
sql_str = f"""
        SELECT {columns_str}
FROM acc_gas
WHERE year >= {start_year}
AND year <= {end_year}
"""
con = get_db_connection(database_year=database_year)
return pd.read_sql(sql_str, con=con)
| 5,342,203
|
def test_datetime_dtype_conversion(test_df):
"""Test converting dates to datetime dtype."""
correct_dtype = np.dtype("<M8[ns]")
date_column_type = test_df.adni.standard_dates()["Acq Date"].dtype
assert correct_dtype == date_column_type
| 5,342,204
|
def enable_object_storage_access(application_name, instance_id, region,
patterns):
"""
Enable object storage (S3) read-only access for the given instance to
resources matching the given patterns.
"""
log('Enabling object storage (S3) read for instance {} of '
'application {} in region {}',
instance_id, application_name, region)
policy_name = 's3-read'
if patterns:
policy_name = _restrict_policy_for_app(policy_name,
application_name,
patterns)
policy_arn = _get_policy_arn(policy_name)
role_name = _get_role_name(application_name, instance_id, region)
_ensure_policy(policy_name)
_attach_policy(policy_arn, role_name)
if patterns:
_add_app_entity(application_name, 'policy', policy_arn)
| 5,342,205
|
def FindZ(outContoursPolygons, in_raster, tempWs, sessid):
""" Use the point within the polygon to determine the low and high
sides of the polygon"""
outEVT = os.path.join(tempWs,'outEVT' + sessid)
outEVTjoinedLayer = os.path.join(tempWs,'outEVTjoinedLayer')
outPolygPoints = os.path.join(tempWs,'outPolygPoints' + sessid)
arcpy.AddMessage(" FeatureToPoint_management...")
try:
arcpy.FeatureToPoint_management(outContoursPolygons,
outPolygPoints, 'INSIDE')
    except Exception:
if arcpy.Describe(
outContoursPolygons).spatialReference.name == 'Unknown':
arcpy.AddError('This might be caused by data with '+
'Unknown spatial reference.' +
' Define a projection and re-run')
sys.exit()
arcpy.AddMessage(" ExtractValuesToPoints...")
ExtractValuesToPoints(outPolygPoints, in_raster, outEVT,
'NONE', 'ALL')
arcpy.MakeFeatureLayer_management(outContoursPolygons,
outEVTjoinedLayer)
arcpy.AddMessage(" MakeFeatureLayer_management...")
descFlayer = arcpy.Describe(outEVTjoinedLayer)
descOutEVT = arcpy.Describe(outEVT)
arcpy.AddMessage(" AddJoin_management...")
arcpy.AddJoin_management(outEVTjoinedLayer, descFlayer.OIDFieldName,
outEVT, descOutEVT.OIDFieldName, 'KEEP_ALL')
return outEVTjoinedLayer, outEVT, outPolygPoints
| 5,342,206
|
def emr_app(config_name):
""" Application Factories
"""
app = Flask(__name__, instance_relative_config=True)
app.config.from_object(app_config[config_name])
handler = RotatingFileHandler('emr.log')
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
app.wsgi_app = ProxyFix(app.wsgi_app)
login_manager.init_app(app)
db.init_app(app)
migrate.init_app(app, db)
mail.init_app(app)
assets.init_app(app)
# Blueprints
app.register_blueprint(default_blueprint)
app.register_blueprint(dashboard_blueprint)
app.register_blueprint(resources_blueprint)
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
with app.app_context():
db.create_all()
return app
| 5,342,207
|
def _split_datetime_from_line(line):
"""Docker timestamps are in RFC3339 format: 2015-08-03T09:12:43.143757463Z, with everything up to the first space
being the timestamp.
"""
log_line = line
dt = datetime.datetime.utcnow()
pos = line.find(" ")
if pos > 0:
dt = scalyr_util.rfc3339_to_datetime(line[0:pos])
log_line = line[pos + 1 :]
return (dt, log_line)
| 5,342,208
|
def set_signal_winch(handler):
""" return the old signal handler """
global winch_handler
old_handler=winch_handler
winch_handler=handler
return old_handler
| 5,342,209
|
def entry_text_to_file(
args: argparse.Namespace,
entry: Dict) -> Tuple[Optional[Text], Optional[Text]]:
"""Extract the entry content and write it to the proper file.
Return the wrapped HTML"""
filename = safe_filename(entry.get('title', str(uuid.uuid4())))
html_data = create_html(entry)
full_filename = os.path.join(os.path.join(args.store_path, "download"), filename + ".html")
with open(full_filename, "w") as html_file:
html_file.write(html_data)
return full_filename, html_data
| 5,342,210
|
def visdom_loss_handler(modules_dict, model_name):
"""
Attaches plots and metrics to trainer.
This handler creates or connects to an environment on a running Visdom dashboard and creates a line plot that tracks the loss function of a
training loop as a function of the number of iterations. This can be attached to an Ignite Engine, and the training closure must
have 'loss' as one of the keys in its return dict for this plot to be made.
See documentation for Ignite (https://github.com/pytorch/ignite) and Visdom (https://github.com/facebookresearch/visdom) for more information.
"""
tim = Timer()
tim.attach( trainer,
start=Events.STARTED,
step=Events.ITERATION_COMPLETED,
)
vis = visdom.Visdom(env=environment)
def create_plot_window(vis, xlabel, ylabel, title):
return vis.line(X=np.array([1]), Y=np.array([np.nan]), opts=dict(xlabel=xlabel, ylabel=ylabel, title=title))
train_loss_window = create_plot_window(vis, '#Iterations', 'Loss', description)
log_interval = 10
@trainer.on(Events.ITERATION_COMPLETED)
def log_training_loss(engine):
iter = (engine.state.iteration -1)
if iter % log_interval == 0:
logging.info("Epoch[{}] Iteration: {} Time: {} Loss: {:.2f}".format(
engine.state.epoch, iter, str(datetime.timedelta(seconds=int(tim.value()))), engine.state.output
))
vis.line(X=np.array([engine.state.iteration]),
Y=np.array([engine.state.output]),
update='append',
win=train_loss_window)
save_interval = 50
handler = ModelCheckpoint('/tmp/models', model_name, save_interval = save_interval, n_saved=5, create_dir=True, require_empty=False)
trainer.add_event_handler(Events.ITERATION_COMPLETED, handler, modules_dict)
| 5,342,211
|
def MolToQPixmap(mol, size=(300, 300), kekulize=True, wedgeBonds=True, fitImage=False, options=None,
**kwargs):
""" Generates a drawing of a molecule on a Qt QPixmap
"""
if not mol:
raise ValueError('Null molecule provided')
from rdkit.Chem.Draw.qtCanvas import Canvas
canvas = Canvas(size)
if options is None:
options = DrawingOptions()
options.bgColor = None
if fitImage:
options.dotsPerAngstrom = int(min(size) / 10)
options.wedgeDashedBonds = wedgeBonds
if kekulize:
from rdkit import Chem
mol = Chem.Mol(mol.ToBinary())
Chem.Kekulize(mol)
if not mol.GetNumConformers():
from rdkit.Chem import AllChem
AllChem.Compute2DCoords(mol)
drawer = MolDrawing(canvas=canvas, drawingOptions=options)
drawer.AddMol(mol, **kwargs)
canvas.flush()
return canvas.pixmap
| 5,342,212
|
def _unpad(string: str) -> str:
"""Un-pad string."""
return string[: -ord(string[len(string) - 1 :])]
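# Hedged usage sketch: this is the classic PKCS#7-style un-padding, where the last
# character's ordinal says how many padding characters to strip from the end.
padded = "secret" + chr(3) * 3
assert _unpad(padded) == "secret"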
| 5,342,213
|
def copy_dir_to_target(source_directory: Path, destination_directory: Path) -> bool:
"""
Args:
source_directory: a folder to copy
destination_directory: the parent directory to copy source_directory into
Returns: True if copy was successful, False otherwise
"""
if source_directory.exists() and source_directory.is_dir():
print("Found directory at %s" % source_directory.resolve())
else:
print("Unable to find required folder, looked at %s" % source_directory.resolve())
return False
print("Copying to %s" % destination_directory)
shutil.copytree(str(source_directory), str(destination_directory / source_directory.name))
return True
| 5,342,214
|
def vlanlist_to_config(vlan_list, first_line_len=48, other_line_len=44, min_grouping_size=3):
"""Given a List of VLANs, build the IOS-like vlan list of configurations.
Args:
vlan_list (list): Unsorted list of vlan integers.
first_line_len (int, optional): The maximum length of the line of the first element of within the return list. Defaults to 48.
other_line_len (int, optional): The maximum length of the line of all other elements of within the return list. Defaults to 44.
min_grouping_size (int, optional): The minimum consecutive VLANs to aggregate with a hyphen . Defaults to Cisco's minimum grouping size of 3.
Returns:
list: Sorted string list of integers according to IOS-like vlan list rules
Example:
>>> from netutils.vlan import vlanlist_to_config
>>> vlanlist_to_config([1, 2, 3, 5, 6, 1000, 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1016, 1018])
['1-3,5,6,1000,1002,1004,1006,1008,1010,1012,1014', '1016,1018']
>>> vlanlist_to_config([1,3,5,6,100,101,102,103,104,105,107,109], min_grouping_size=2)
['1,3,5-6,100-105,107,109']
>>> vlanlist_to_config([1,3,5,6,100,101,102,103,104,105,107,109], min_grouping_size=1)
['1,3,5,6,100,101,102,103,104,105,107,109']
"""
def build_final_vlan_cfg(vlan_cfg):
if len(vlan_cfg) <= first_line_len:
return [vlan_cfg]
# Split VLAN config if lines are too long
first_line = re.match(f"^.{{0,{first_line_len}}}(?=,)", vlan_cfg)
vlan_cfg_lines = [first_line.group(0)]
        next_lines = re.compile(f"(?<=,).{{0,{other_line_len}}}(?=,|$)")
for line in next_lines.findall(vlan_cfg, first_line.end()):
vlan_cfg_lines.append(line)
return vlan_cfg_lines
# Fail if min_grouping_size is less than 1.
if min_grouping_size < 1:
raise ValueError("Minimum grouping size must be equal to or greater than one.")
# Sort and de-dup VLAN list
vlan_list = sorted(set(vlan_list))
    # If the minimum grouping size is one, no compression is needed; return the config list directly.
if min_grouping_size == 1:
return build_final_vlan_cfg(",".join([str(vlan) for vlan in vlan_list]))
# Group consecutive VLANs
vlan_groups = []
for _, vlan in groupby(enumerate(vlan_list), lambda vlan: vlan[0] - vlan[1]):
vlan_groups.append(list(map(itemgetter(1), vlan)))
# Check for invalid VLAN IDs
if vlan_list[0] < 1 or vlan_list[-1] > 4094:
raise ValueError("Valid VLAN range is 1-4094")
# Create VLAN portion of config
vlan_strings = []
for group in vlan_groups:
group_length = len(group)
group_string = f"{group[0]}"
# Compress based on grouping_size
if group_length >= min_grouping_size:
group_string += f"-{group[-1]}"
# If it does not match grouping_size, and is greater than one
elif group_length != 1:
group_string += f",{group[1]}"
vlan_strings.append(group_string)
return build_final_vlan_cfg(",".join(vlan_strings))
| 5,342,215
|
def check_type(instance=None, classinfo=None):
"""Raise an exception if object is not an instance of classinfo."""
if not isinstance(instance, classinfo):
raise TypeCheckError(
"expected an instance of {0}, got {1}".format(classinfo, instance)
)
| 5,342,216
|
def plot_forecasts(
data: pd.DataFrame,
forecasters: Iterable[str],
plots_dir: str = "./plots",
use_logx: bool = True,
figsize: Tuple[int, int] = (12, 5),
linewidth: float = 2,
):
"""Plot forecasts along with the dataset.
Also plots ``data`` or ``true_probs``, if those columns are available in
``data``.
Args:
data: A ``pd.DataFrame`` containing a set of forecasts as its columns.
Must contain columns ``time`` and each element of ``forecasters``.
forecasters: list of forecasters (columns of ``data``) to be plotted.
plots_dir: directory to save the plots.
use_logx: whether to use the log-scale on the time (x) axis.
figsize: output figure size.
linewidth: line width.
Returns: None
Saves a plot to ``{plots_dir}/forecasters.pdf``.
"""
if "all" in forecasters:
forecasters = [f for f in FORECASTERS_ALL if f in data.columns]
for name in forecasters:
assert name in data.columns, (
f"invalid forecaster name {name}. "
f"available: {data.columns.to_list()}"
)
set_theme()
colors = get_colors()
normal_colors = colors[:3] + colors[4:7] + colors[8:] # remove red, gray
plt.figure(figsize=figsize, facecolor="white")
if "true_probs" in data.columns:
plt.scatter(data.time, data.true_probs, marker=".", alpha=0.7,
color=colors[7], label=r"reality ($r_t$)")
elif "data" in data.columns:
plt.scatter(data.time, data.data, marker=".", alpha=0.7,
color=colors[7], label=r"data ($y_t$)")
for i, name in enumerate(forecasters):
plt.plot(data.time, data[name], linewidth=linewidth, alpha=0.9,
color=normal_colors[i % len(normal_colors)], label=name)
plt.title("Forecasters", fontweight="regular", fontsize="13")
plt.xlabel("Time")
plt.ylabel("Probability/Outcome")
plt.ylim(-0.05, 1.05)
if use_logx:
plt.xscale("log")
plt.xlim(10, len(data))
plt.legend(loc="lower right", bbox_to_anchor=(1.15, 0), frameon=False)
plt.tight_layout()
if plots_dir:
os.makedirs(plots_dir, exist_ok=True)
plt.savefig(os.path.join(plots_dir, "forecasters.pdf"))
| 5,342,217
|
def test_part_1():
"""
Result should be: 40
"""
result = part_1(TEST_INPUT_FILE)
assert result == 40
| 5,342,218
|
async def process_challenge(bot:NoneBot, ctx:Context_T, ch:ParseResult):
"""
处理一条报刀 需要保证challenge['flag']的正确性
"""
bm = BattleMaster(ctx['group_id'])
now = datetime.now() - timedelta(days=ch.get('dayoffset', 0))
clan = _check_clan(bm)
mem = _check_member(bm, ch.uid, ch.alt)
cur_round, cur_boss, cur_hp = bm.get_challenge_progress(1, now)
round_ = ch.round or cur_round
boss = ch.boss or cur_boss
damage = ch.damage if ch.flag != BattleMaster.LAST else (ch.damage or cur_hp)
flag = ch.flag
if (ch.flag == BattleMaster.LAST) and (ch.round or ch.boss) and (not damage):
        raise NotFoundError('补报尾刀请给出伤害值')  # a retroactively reported last hit must include its damage value
msg = ['']
    # If the previous hit was a last hit (boss kill), this one is a carry-over (extra-time) hit
challenges = bm.list_challenge_of_user_of_day(mem['uid'], mem['alt'], now)
if len(challenges) > 0 and challenges[-1]['flag'] == BattleMaster.LAST:
flag = BattleMaster.EXT
msg.append('⚠️已自动标记为补时刀')
if round_ != cur_round or boss != cur_boss:
msg.append('⚠️上报与当前进度不一致')
    else:  # damage sanity check
eps = 30000
if damage > cur_hp + eps:
damage = cur_hp
msg.append(f'⚠️过度虐杀 伤害数值已自动修正为{damage}')
if flag == BattleMaster.NORM:
flag = BattleMaster.LAST
msg.append('⚠️已自动标记为尾刀')
elif flag == BattleMaster.LAST:
if damage < cur_hp - eps:
msg.append('⚠️尾刀伤害不足 请未报刀成员及时上报')
elif damage < cur_hp:
if damage % 1000 == 0:
damage = cur_hp
msg.append(f'⚠️尾刀伤害已自动修正为{damage}')
else:
msg.append('⚠️Boss仍有少量残留血量')
eid = bm.add_challenge(mem['uid'], mem['alt'], round_, boss, damage, flag, now)
aft_round, aft_boss, aft_hp = bm.get_challenge_progress(1, now)
max_hp, score_rate = bm.get_boss_info(aft_round, aft_boss, clan['server'])
msg.append(f"记录编号E{eid}:\n{mem['name']}给予{round_}周目{bm.int2kanji(boss)}王{damage:,d}点伤害\n")
msg.append(_gen_progress_text(clan['name'], aft_round, aft_boss, aft_hp, max_hp, score_rate))
await bot.send(ctx, '\n'.join(msg), at_sender=True)
    # If the boss changed, call the members who subscribed to the new boss
if aft_round != cur_round or aft_boss != cur_boss:
await call_subscribe(bot, ctx, aft_round, aft_boss)
await auto_unlock_boss(bot, ctx, bm)
await auto_unsubscribe(bot, ctx, bm.group, mem['uid'], boss)
| 5,342,219
|
def get_next_url(bundle: dict) -> Optional[str]:
"""
Returns the URL for the next page of a paginated ``bundle``.
>>> bundle = {
... 'link': [
... {'relation': 'self', 'url': 'https://example.com/page/2'},
... {'relation': 'next', 'url': 'https://example.com/page/3'},
... {'relation': 'previous', 'url': 'https://example.com/page/1'},
... ]
... }
>>> get_next_url(bundle)
'https://example.com/page/3'
>>> bundle = {
... 'link': [
... {'relation': 'self', 'url': 'https://example.com/page/1'},
... ]
... }
>>> type(get_next_url(bundle))
<class 'NoneType'>
"""
if 'link' in bundle:
for link in bundle['link']:
if link['relation'] == 'next':
return link['url']
| 5,342,220
|
def convert_sklearn_variance_threshold(operator, device, extra_config):
"""
Converter for `sklearn.feature_selection.VarianceThreshold`.
Args:
operator: An operator wrapping a `sklearn.feature_selection.VarianceThreshold` model
device: String defining the type of device the converted operator should be run on
extra_config: Extra configuration used to select the best conversion strategy
Returns:
A PyTorch model
"""
var = operator.raw_operator.variances_
threshold = operator.raw_operator.threshold
indices = np.array([i for i in range(len(var)) if var[i] > threshold])
return ArrayFeatureExtractor(np.ascontiguousarray(indices), device)
| 5,342,221
|
def test_crop_generator(input_path, batch_size=1, mode="test", num_classes =6, epsilon = 0, resize_params = (224, 224), do_shuffle=True):
"""
Simple data generator that reads all images based on mode, picks up corresponding time series, returns entire list
"""
data_path = os.path.join(input_path, mode)
all_images = glob.glob(os.path.join(data_path, "**/*.jpg"))
print("Found {} files for {}".format(len(all_images), mode))
if do_shuffle:
shuffle(all_images)
curr_idx = 0
while curr_idx < len(all_images):
# create random batches first
#batch_paths = np.random.choice(a= all_images, size = batch_size)
# initialize our batches of images and labels
#print(all_images[curr_idx])
imgs = []
ts = []
labels = []
curr_batch = all_images[curr_idx]
imgs, ts, labels = preprocess_test_img(image_path= curr_batch, class_names = [0,1,2,3,4,5], num_classes = num_classes, epsilon = epsilon, resize_width_and_height=resize_params, mode=mode)
imgs = np.array(imgs)
ts = np.array(ts)
labels = np.array(labels)
curr_idx += batch_size
yield ([imgs, ts], labels, curr_batch)
| 5,342,222
|
def gradient(okay=0.25, warn=0.75, fail=1.0, count=32):
"""
Generate a gradient of *count* steps representing values between 0.0 and
1.0. Until the *okay* value, the gradient is pure green. Until the *warn*
value it gradually fades to orange. As the value approaches *fail*, it
fades to red, and above *fail* it remains red until the value 1.0.
"""
warn_gradient = list(Color('green').gradient(Color('orange'), steps=count))
fail_gradient = list(Color('orange').gradient(Color('red'), steps=count))
for step in range(count):
value = step / count
if value < okay:
yield Color('green')
elif value < warn:
yield warn_gradient[int(count * (value - okay) / (warn - okay))]
elif value < fail:
yield fail_gradient[int(count * (value - warn) / (fail - warn))]
else:
yield Color('red')
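# Hedged usage sketch (assumes the Color class used above, e.g. from colorzero, is
# importable in this module): materialise the gradient once, then index into it with
# a normalised value in [0.0, 1.0] to pick a display colour.
palette = list(gradient(okay=0.25, warn=0.75, fail=1.0, count=32))
load = 0.6  # e.g. 60% utilisation
colour = palette[min(int(load * len(palette)), len(palette) - 1)]
print(colour)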
| 5,342,223
|
def query_merchant_users(bc_app, merchant=None, start_time=None, end_time=None):
"""
query merchant users
:param bc_app: beecloud.entity.BCApp
:param merchant: merchant account, if not passed, only users associated with app will be returned
:param start_time: if passed, only users registered after it will be returned
:param end_time: if passed, only users registered before it will be returned
:return: result contains beecloud.entity.MerchantUser list
"""
req_param = _TmpObject()
if merchant:
req_param.email = merchant
if start_time:
req_param.start_time = start_time
if end_time:
req_param.end_time = end_time
attach_app_sign(req_param, BCReqType.QUERY, bc_app)
url = get_rest_root_url() + 'rest/users?para=' + obj_to_quote_str(req_param)
tmp_resp = http_get(url, bc_app.timeout)
# if err encountered, [0] equals 0
if not tmp_resp[0]:
return tmp_resp[1]
# [1] contains result dict
resp_dict = tmp_resp[1]
bc_result = BCResult()
set_common_attr(resp_dict, bc_result)
if not bc_result.result_code:
user_dict_arr = resp_dict.get('users')
class_name = BCMerchantUser
users = []
if user_dict_arr:
users = [parse_dict_to_obj(user_dict, class_name) for user_dict in user_dict_arr]
bc_result.users = users
return bc_result
| 5,342,224
|
def getUIQM(x):
"""
Function to return UIQM to be called from other programs
x: image
"""
x = x.astype(np.float32)
### UCIQE: https://ieeexplore.ieee.org/abstract/document/7300447
#c1 = 0.4680; c2 = 0.2745; c3 = 0.2576
### UIQM https://ieeexplore.ieee.org/abstract/document/7305804
c1 = 0.0282; c2 = 0.2953; c3 = 3.5753
uicm = _uicm(x)
uism = _uism(x)
uiconm = _uiconm(x, 10)
uiqm = (c1*uicm) + (c2*uism) + (c3*uiconm)
return uiqm
| 5,342,225
|
def cart2spher(x, y, z):
"""Cartesian to Spherical coordinate conversion."""
hxy = np.hypot(x, y)
rho = np.hypot(hxy, z)
#if not rho:
# return np.array([0,0,0])
theta = np.arctan2(hxy, z)
phi = np.arctan2(y, x)
return rho, theta, phi
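# Hedged worked example: theta is the polar angle measured from the +z axis and phi
# the azimuth in the x-y plane, so the +z unit vector gives (1, 0, 0) and a point on
# the +y axis gives (1, pi/2, pi/2).
print(cart2spher(0.0, 0.0, 1.0))  # (1.0, 0.0, 0.0)
print(cart2spher(0.0, 1.0, 0.0))  # (1.0, 1.5707..., 1.5707...)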
| 5,342,226
|
def get_mean_and_stdv(dataset):
"""return means and standard
deviations along 0th axis of tensor"""
means = dataset.mean(0)
stdvs = dataset.std(0)
return means, stdvs
| 5,342,227
|
async def test_host_already_configured(hass, auth_error):
"""Test host already configured."""
entry = MockConfigEntry(domain=mikrotik.DOMAIN, data=DEMO_CONFIG_ENTRY)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
mikrotik.DOMAIN, context={"source": "user"}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=DEMO_USER_INPUT
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
| 5,342,228
|
def create_incident(session: RemedySession, incident_request: dict) -> None:
"""Create Remedy incident and modify status if required"""
return_fields = ["Incident Number", "Request ID"]
incident_data = incident_request.get("values", {})
# logging.info(json.dumps(incident_request, indent=4))
# Create the base incident
_, return_data = session.create_entry(
rest_config.get("remedyCreateForm",
"HPD:IncidentInterface_Create"), incident_request, return_fields
)
values = return_data.get("values", {})
incident_number = values.get("Incident Number")
incident_id = values.get("Request ID")
    if not incident_number:
        raise RemedyException("Failed to create incident")
    logging.info(
        f" +-- Incident {incident_number} created with status Assigned ({incident_id})"
    )
status = incident_data.get("Status", "")
if status in ["In Progress", "Pending"]:
update_incident_status(incident_number, session, status, incident_data)
| 5,342,229
|
def getBuildRequirements():
"""
Returns a list of essential packages needed for a minimalistic
software installation tree (SIT).
Opposed to getMinRequirements() this contains everything needed to
build software packages.
"""
from ToolBOSCore.Settings.ToolBOSConf import getConfigOption
result = getConfigOption( 'SIT_bootstrapFull' )
Any.requireIsListNonEmpty( result )
return result
| 5,342,230
|
def nms_3d(boxes, scores, nms_threshold):
"""
    Wrapper around nms_gpu, which expects data ordered as y1, x1, y2, x2, z1, z2, scores.
    :param boxes: tensor [n, (y1, x1, z1, y2, x2, z2)]
    :param scores: tensor [n]
    :param nms_threshold: float scalar
    :return: keep: indices of the boxes kept after NMS, tensor [m]
"""
    # [n, (y1,x1,z1,y2,x2,z2)] => [n, (y1,x1,y2,x2,z1,z2, scores)]: reorder axes for nms_gpu
box_with_score = torch.cat((boxes[:, [0, 1, 3, 4, 2, 5]], scores.unsqueeze(-1)), -1)
keep = nms_gpu(box_with_score, nms_threshold)
return keep
| 5,342,231
|
def report_install_status(ctx, op_id):
"""
:param ctx: CSM Context object
:param op_id: operational ID
Peeks into the install log to see if the install operation is successful or not
"""
failed_oper = r'Install operation {} aborted'.format(op_id)
output = ctx.send("show install log {} detail".format(op_id))
status, message = match_pattern(ctx.pattern, output)
report_log(ctx, status, message)
if re.search(failed_oper, output):
log_install_errors(ctx, output)
ctx.error("Operation {} failed".format(op_id))
return False
ctx.info("Operation {} finished successfully".format(op_id))
return True
| 5,342,232
|
def spltime(tseconds):
""" This gets the time in hours, mins and seconds """
hours = tseconds // 3600
minutes = int(tseconds / 60) % 60
seconds = tseconds % 60
return hours, minutes, seconds
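# Hedged usage sketch: split an elapsed-seconds value into hour/minute/second parts.
h, m, s = spltime(3725)
print(f"{h}:{m:02d}:{s:02d}")  # 1:02:05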
| 5,342,233
|
def create_test_player_data(username: str):
"""
Creates all data in database for test users to be able to fight in battle.
Creates test user's deck and all data needed to create deck.
:param username: Test player username, must be unique.
:return: None
"""
# Create test User
test_user = User.objects.create_user(f"{username}", f"{username}@company.com")
# Create test UserProfile
test_user_profile1 = UserProfile.objects.create(user=test_user, displayedUsername=f"{username}",
semester=Semester.objects.get(pk=1))
# Create test UserCards (assigning test user 5 cards)
random_cards = random.sample(list(Card.objects.all()), k=5)
test_user_cards = []
for i in range(5):
test_user_card = test_user_profile1.user_cards.create(card=random_cards[i])
test_user_cards.append(test_user_card)
# Create test Deck
test_deck = Deck.objects.create(card1=test_user_cards[0], card2=test_user_cards[1],
card3=test_user_cards[2], card4=test_user_cards[3],
card5=test_user_cards[4])
# Create test UserDeck
test_user_deck = test_user_profile1.user_decks.create(deck_number=1, deck=test_deck)
| 5,342,234
|
def show_many_wrongs_mask(truths,
preds) -> None:
"""
Shows all true/wrong mask pairs.
TODO: Will have to create a limit / range otherwise it will be too big.
:return: None. Just shows image.
"""
assert len(truths) == len(preds) # Make sure same number of images in both
n_images = len(truths)
mask_value = 1 # This should be global-like...
dice_vals = []
fig, axs = plt.subplots(nrows = n_images,
ncols = 3,
sharex = True,
sharey = True)
for row in range(n_images):
truth = truths[row]
predicted = preds[row]
dice = np.sum(predicted[truth==mask_value])*2.0 / (np.sum(predicted) + np.sum(truth))
dice_vals.append(dice)
if row == 0:
axs[row, 0].set_title("True")
axs[row, 1].set_title("Predicted")
axs[row, 2].set_title("Wrong_mask")
wrong_mask = truth - predicted
axs[row, 0].imshow(truth, cmap = "gray")
axs[row, 0].axis('off')
axs[row, 1].imshow(predicted, cmap = "gray")
axs[row, 1].axis('off')
axs[row, 2].imshow(wrong_mask, cmap = "gray")
axs[row, 2].axis('off')
avg_dice = round(np.array(dice_vals).mean(), 2)
the_title = f"True, Predicted Comparison. Avg Dice: {avg_dice}"
fig.suptitle(the_title)
plt.show()
| 5,342,235
|
def _getShortName(filePath, classPath):
"""Returns the shortest reference to a class within a file.
Args:
filePath: file path relative to the library root path (e.g., `Buildings/package.mo`).
classPath: full library path of the class to be shortened (e.g., `Buildings.Class`).
"""
pos = re.search(r'\w', filePath).start()
splFil = filePath[pos:].split(os.path.sep)
splCla = classPath.split(".")
shortSource = None
for i in range(min(len(splFil), len(splCla))):
if splFil[i] != splCla[i]:
# See https://github.com/lbl-srg/BuildingsPy/issues/382 for the rationale
# behind the code below.
idx_start = i
if i > 0:
for k in range(i + 1, len(splFil)):
lookup_path = os.path.sep.join(splFil[:k])
if splCla[i] in [re.sub(r'\.mo', '', el) for el in os.listdir(lookup_path)]:
idx_start = i - 1
break
shortSource = '.'.join(splCla[idx_start:len(splCla)])
# shortSource starts with a space as instance names are
# preceded with a space.
shortSource = ' ' + shortSource
break
return shortSource
| 5,342,236
|
def mine():
"""
This function will try to mine a new block by joining the negotiation process.
If we are the winner address, we will check the negotiation winner of the neighbour nodes, to grant that no node
has a different winner address.
"""
if blockchain is not None:
last_block = blockchain.chain[-1]
# Candidate to the negotiation and execute the negotiation algorithm
winner, negotiation_price = blockchain.proof_of_negotiation()
# If we won the negotiation, then start validating the block
if winner.address != blockchain.node_id:
return jsonify({"winner": winner.url, "won": False}), 200
        # We must receive a reward for winning the negotiation.
blockchain.submit_transaction(
sender_address=MINING_SENDER, recipient_address=blockchain.node_id,
value=MINING_REWARD, signature="", timestamp=time()
)
# Forge the new Block by adding it to the chain
previous_hash = blockchain.hash(last_block)
block = blockchain.create_block(
previous_hash=previous_hash,
validator_address=blockchain.node_id,
negotiation_price=negotiation_price
)
# Broadcast the new chain
print("Sending the new chain in broadcast...")
for node_url in blockchain.nodes:
print(node_url + '/nodes/resolve')
try:
requests.get(node_url + '/nodes/resolve')
except requests.exceptions.RequestException:
print("Node with url '" + node_url + "' isn't connected or doesn't exist anymore.")
print("New chain broadcast completed successfully!")
# Check if the validator of the last block is the same as the neighbour nodes
for node_url in blockchain.nodes:
print(node_url + '/chain')
try:
neighbour_chain = requests.get(node_url + '/chain').json()["chain"]
except requests.exceptions.RequestException:
print("Node with url '" + node_url + "' isn't connected or doesn't exist anymore.")
continue # skip the current iteration if we can't connect with the node
validator_address = neighbour_chain[-1]["validator"]
# If the address of the validator of the last block is different from the winner address, decrease the
# reputation of the neighbour, because the node tried to put false negotiation winner in the last block
if validator_address != winner.address:
blockchain.change_reputation(
node_address=blockchain.nodes[node_url].address,
change_lvl=INVALID_CHAIN_GRAVITY
)
response = {
'message': "New Block Forged",
'block_number': block['block_number'],
'transactions': block['transactions'],
'validator': block['validator'],
'previous_hash': block['previous_hash']
}
return jsonify(response), 200
else:
response = {'message': 'Blockchain hasn\'t been initialized yet!'}
return jsonify(response), 400
| 5,342,237
|
def test_local_optimizer_commutable_circuit_U_example_4(U):
"""Us shouldn't merge because they are operating on different qubits."""
local_optimizer = _optimize.LocalOptimizer(m=10)
backend = DummyEngine(save_commands=True)
eng = MainEngine(backend=backend, engine_list=[local_optimizer])
qb0 = eng.allocate_qubit()
qb1 = eng.allocate_qubit()
U(0.1) | qb0
H | qb0
CNOT | (qb1, qb0)
H | qb0
U(0.2) | qb1
eng.flush()
received_commands = []
# Remove Allocate and Deallocate gates
for cmd in backend.received_commands:
if not (isinstance(cmd.gate, FastForwardingGate) or
isinstance(cmd.gate, ClassicalInstructionGate)):
received_commands.append(cmd)
assert received_commands[0].gate == U(0.1)
assert len(received_commands) == 5
| 5,342,238
|
def test_user_create_token_view(client, user_data):
""" Teste na view que gera um token JWT """
user = get_user_model().objects.create_user(**user_data)
assert user.email == 'admin@email.com'
url = reverse('authentication:obtain_token')
data = {"email": f"{user_data['email']}", "password": f"{user_data['password']}"}
response = client.post(url, data)
assert response.status_code == 200
assert 'token' in response.data
| 5,342,239
|
def getaddrinfo(host, port, family=0, socktype=0, proto=0, flags=0):
"""
Resolve host and port into list of address info entries.
Translate the host/port argument into a sequence of 5-tuples that contain
all the necessary arguments for creating a socket connected to that service.
host is a domain name, a string representation of an IPv4/v6 address or
None. port is a string service name such as 'http', a numeric port number or
None. By passing None as the value of host and port, you can pass NULL to
the underlying C API.
The family, type and proto arguments can be optionally specified in order to
narrow the list of addresses returned. Passing zero as a value for each of
these arguments selects the full range of results.
.. seealso:: :doc:`dns`
"""
return get_hub().resolver.getaddrinfo(host, port, family, socktype, proto, flags)
| 5,342,240
|
def export_to_colmap_points3d_txt(colmap_points3d_filepath: str,
colmap_image_ids: Dict[str, int],
points3d: kapture.Points3d = None,
observations: kapture.Observations = None) -> None:
"""
Exports to colmap points3d text file.
    :param colmap_points3d_filepath: path to the colmap points3d file to be written.
:param colmap_image_ids: correspondences between kapture image id (image path) and colmap image id
:param points3d: kapture points3d to export
:param observations: kapture observations to export
"""
assert isinstance(points3d, kapture.Points3d) or points3d is None
assert isinstance(observations, kapture.Observations) or observations is None
assert isinstance(colmap_image_ids, dict)
points3d_colmap_header = '# 3D point list with one line of data per point:\n' \
'# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as (IMAGE_ID, POINT2D_IDX)\n'
with open(colmap_points3d_filepath, 'w') as fid:
fid.write(points3d_colmap_header)
if points3d:
for i in range(points3d.shape[0]):
point3d = points3d[i]
line = '{} {} {} {} {} {} {} 0'.format(i,
point3d[0], point3d[1], point3d[2],
int(point3d[3]), int(point3d[4]), int(point3d[5]))
if observations is not None and i in observations and len(observations[i]) > 0:
line += ' '
pairs = [(str(colmap_image_ids[name]), str(keypoint_id)) for name, keypoint_id in observations[i]]
line += ' '.join([str(s) for s in list(sum(pairs, ()))])
line += '\n'
fid.write(line)
| 5,342,241
|
def create_test_data():
"""Load unit test data in the dev/local environment. Delete all existing test data as a first step."""
execute_script(db.session, 'test_data/postgres_test_reset.sql')
execute_script(db.session, 'test_data/postgres_create_first.sql')
filenames = os.listdir(os.path.join(os.getcwd(), 'test_data/postgres_data_files'))
sorted_names = sorted(filenames)
for filename in sorted_names:
execute_script(db.session, os.path.join(os.getcwd(), ('test_data/postgres_data_files/' + filename)))
| 5,342,242
|
def atomic_log(using=None):
"""
Decorator that surrounds atomic block, ensures that logged output requests will be stored inside database in case
of DB rollback
"""
if callable(using):
return AtomicLog(DEFAULT_DB_ALIAS)(using)
else:
return AtomicLog(using)
| 5,342,243
|
def _readConfigFile(config_file, verbose):
"""Read configuration file options into a dictionary."""
if not os.path.exists(config_file):
raise RuntimeError("Couldn't open configuration file '%s'." % config_file)
try:
import imp
conf = {}
configmodule = imp.load_source("configuration_module", config_file)
for k, v in vars(configmodule).items():
if k.startswith("__"):
continue
elif isfunction(v):
continue
conf[k] = v
    except Exception as e:
        # if verbose >= 1:
        #     traceback.print_exc()
        exceptioninfo = traceback.format_exception_only(type(e), e)
        exceptiontext = ""
        for einfo in exceptioninfo:
            exceptiontext += einfo + "\n"
        # raise RuntimeError("Failed to read configuration file: " + config_file + "\nDue to " + exceptiontext)
        print("Failed to read configuration file: " + config_file +
              "\nDue to " + exceptiontext, file=sys.stderr)
        raise
return conf
| 5,342,244
|
def test_labware_rows_by_name(
decoy: Decoy,
engine_client: ProtocolEngineClient,
subject: Labware,
) -> None:
"""It should return the labware's wells as dictionary of rows."""
decoy.when(
engine_client.state.labware.get_wells(labware_id="labware-id")
).then_return(["A1", "A2"])
decoy.when(
engine_client.state.labware.get_well_rows(labware_id="labware-id")
).then_return(
{"A": ["A1", "A2"]},
)
assert subject.rows_by_name() == {
"A": [
Well(well_name="A1", engine_client=engine_client, labware=subject),
Well(well_name="A2", engine_client=engine_client, labware=subject),
]
}
| 5,342,245
|
def run_street_queries(es, params_list, queries, formats):
"""Punto de entrada del módulo 'street.py'. Toma una lista de consultas de
calles y las ejecuta, devolviendo los resultados QueryResult.
Args:
es (Elasticsearch): Conexión a Elasticsearch.
params_list (list): Lista de ParametersParseResult.
queries (list): Lista de búsquedas, generadas a partir de
'params_list'.
formats (list): Lista de parámetros de formato de cada búsqueda, en
forma de diccionario.
Returns:
list: Lista de QueryResult, una por cada búsqueda.
"""
searches = []
for query, fmt in zip(queries, formats):
processed_query = query.copy()
if N.FULL_NAME in fmt[N.FIELDS]:
            # The full name includes the province and department names;
            # add those fields to the query so their names can be
            # extracted later.
processed_query['fields'] += (N.STATE, N.DEPT)
searches.append(data.StreetsSearch(processed_query))
data.ElasticsearchSearch.run_searches(es, searches)
for search, fmt in zip(searches, formats):
if N.FULL_NAME in fmt[N.FIELDS]:
            # Add the full name to each hit of the result.
for hit in search.result.hits:
full_name = '{}, {}, {}'.format(
hit[N.NAME], hit[N.DEPT][N.NAME], hit[N.STATE][N.NAME]
)
hit[N.FULL_NAME] = full_name
return [
QueryResult.from_entity_list(search.result.hits,
params.received_values(),
search.result.total,
search.result.offset)
for search, params in zip(searches, params_list)
]
| 5,342,246
|
def draw_text(text, bgcolor, plt_ax, text_plt):
"""
Render the text
:param str text: text to render
    :param str bgcolor: background color used to render the text
:param matplotlib.axes.Axes plt_ax: figure sub plot instance
:param matplotlib.text.Text text_plt: plot of text
:return matplotlib.text.Text: updated plot of text
"""
if text_plt is None:
# render text with color
text_plt = plt_ax.text(0.95, 0.95, text, backgroundcolor=bgcolor,
horizontalalignment='right', verticalalignment='top',
transform=plt_ax.transAxes, fontsize=10)
else:
# update existing text
text_plt.set_text(text)
return text_plt
| 5,342,247
|
def compute_95confidence_intervals(
record, episode, num_episodes, store_accuracies, metrics=["AccuracyNovel",]
):
"""Computes the 95% confidence interval for the novel class accuracy."""
if episode == 0:
store_accuracies = {metric: [] for metric in metrics}
for metric in metrics:
store_accuracies[metric].append(record[metric])
if episode == (num_episodes - 1):
# Compute std and confidence interval of the 'metric' accuracies.
accuracies = np.array(store_accuracies[metric])
stds = np.std(accuracies, 0)
record[metric + "_std"] = stds
record[metric + "_cnf"] = 1.96 * stds / np.sqrt(num_episodes)
return record, store_accuracies
| 5,342,248
|
def _pytype(dtype):
""" return a python type for a numpy object """
if dtype in ("int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64"):
return int
elif dtype in ("float16", "float32", "float64", "float128"):
return float
elif dtype in ("complex64", "complex128", "complex256"):
return complex
else:
raise TypeError("not a recognized dtype: {0}".format(dtype))
| 5,342,249
|
def pca(x, output_dim, dtype, name=None):
"""Computes pca on the dataset using biased covariance.
The pca analyzer computes output_dim orthonormal vectors that capture
directions/axes corresponding to the highest variances in the input vectors of
x. The output vectors are returned as a rank-2 tensor with shape
(input_dim, output_dim), where the 0th dimension are the components of each
output vector, and the 1st dimension are the output vectors representing
orthogonal directions in the input space, sorted in order of decreasing
variances.
The output rank-2 tensor (matrix) serves a useful transform purpose. Formally,
the matrix can be used downstream in the transform step by multiplying it to
the input tensor x. This transform reduces the dimension of input vectors to
output_dim in a way that retains the maximal variance.
NOTE: To properly use PCA, input vector components should be converted to
similar units of measurement such that the vectors represent a Euclidean
space. If no such conversion is available (e.g. one element represents time,
another element distance), the canonical approach is to first apply a
transformation to the input data to normalize numerical variances, i.e.
tft.scale_to_z_score(). Normalization allows PCA to choose output axes that
help decorrelate input axes.
Below are a couple intuitive examples of PCA.
Consider a simple 2-dimensional example:
Input x is a series of vectors [e, e] where e is Gaussian with mean 0,
variance 1. The two components are perfectly correlated, and the resulting
covariance matrix is
[[1 1],
[1 1]].
Applying PCA with output_dim = 1 would discover the first principal component
  [1 / sqrt(2), 1 / sqrt(2)]. When multiplied with the original example, each
vector [e, e] would be mapped to a scalar sqrt(2) * e. The second principal
component would be [-1 / sqrt(2), 1 / sqrt(2)] and would map [e, e] to 0,
which indicates that the second component captures no variance at all. This
agrees with our intuition since we know that the two axes in the input are
perfectly correlated and can be fully explained by a single scalar e.
Consider a 3-dimensional example:
Input x is a series of vectors [a, a, b], where a is a zero-mean, unit
variance Gaussian. b is a zero-mean, variance 4 Gaussian and is independent of
a. The first principal component of the unnormalized vector would be [0, 0, 1]
since b has a much larger variance than any linear combination of the first
two components. This would map [a, a, b] onto b, asserting that the axis with
highest energy is the third component. While this may be the desired
output if a and b correspond to the same units, it is not statistically
  desirable when the units are irreconcilable. In such a case, one should
  first normalize each component to unit variance, i.e. b := b / 2.
The first principal component of a normalized vector would yield
[1 / sqrt(2), 1 / sqrt(2), 0], and would map [a, a, b] to sqrt(2) * a. The
second component would be [0, 0, 1] and map [a, a, b] to b. As can be seen,
the benefit of normalization is that PCA would capture highly correlated
components first and collapse them into a lower dimension.
Args:
x: A rank-2 `Tensor`, 0th dim are rows, 1st dim are indices in row vectors.
output_dim: The PCA output dimension (number of eigenvectors to return).
dtype: numpy dtype of entries in the returned matrix.
name: (Optional) A name for this operation.
Raises:
ValueError: if input is not a rank-2 Tensor.
Returns:
A 2D `Tensor` (matrix) M of shape (input_dim, output_dim).
"""
if not isinstance(x, tf.Tensor):
raise TypeError('Expected a Tensor, but got %r' % x)
x.shape.assert_has_rank(2)
input_dim = x.shape.as_list()[1]
shape = (input_dim, output_dim)
spec = _PCACombinerSpec(output_dim, dtype)
return combine_analyzer(
[x], [dtype], [shape], spec,
name if name is not None else 'pca')[0]
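# Hedged check of the 2-D example in the docstring above, done with plain numpy and
# independent of the tf.Transform analyzer: for perfectly correlated inputs [e, e],
# the leading eigenvector of the biased covariance is ~[1/sqrt(2), 1/sqrt(2)].
import numpy as np

rng = np.random.default_rng(0)
e = rng.normal(size=10_000)
x_np = np.stack([e, e], axis=1)                # shape (n, 2)
cov = np.cov(x_np, rowvar=False, bias=True)    # biased covariance, ~[[1, 1], [1, 1]]
eigvals, eigvecs = np.linalg.eigh(cov)
first_pc = eigvecs[:, np.argmax(eigvals)]      # ~[0.707, 0.707] up to sign
print(np.round(first_pc, 3))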
| 5,342,250
|
def preorder(root: Node):
"""
Pre-order traversal visits root node, left subtree, right subtree.
>>> preorder(make_tree())
[1, 2, 4, 5, 3]
"""
return [root.data] + preorder(root.left) + preorder(root.right) if root else []
| 5,342,251
|
def main(argv=None):
"""Search a Cheshire3 database based on query in argv."""
global argparser, session, server, db
if argv is None:
args = argparser.parse_args()
else:
args = argparser.parse_args(argv)
session = Session()
server = SimpleServer(session, args.serverconfig)
if args.database is None:
try:
dbid = identify_database(session, os.getcwd())
except EnvironmentError as e:
server.log_critical(session, e.message)
return 1
server.log_debug(
session,
"database identifier not specified, discovered: {0}".format(dbid)
)
else:
dbid = args.database
try:
db = server.get_object(session, dbid)
except ObjectDoesNotExistException:
msg = """Cheshire3 database {0} does not exist.
Please provide a different database identifier using the --database option.
""".format(dbid)
server.log_critical(session, msg)
return 2
else:
qFac = db.get_object(session, 'defaultQueryFactory')
query = qFac.get_query(session, args.query, format=args.format)
resultSet = db.search(session, query)
return _format_resultSet(resultSet,
maximumRecords=args.maxRecs,
startRecord=args.startRec)
| 5,342,252
|
def generate_uuid() -> str:
"""
Generate UUIDs to use as `sim.base_models.Node` and `sim.base_models.Item` ids.
"""
return str(uuid.uuid4())
| 5,342,253
|
def new_key_generator():
"""Generator of new keys.
Yields: str
"""
def _rnd_key():
return ''.join(nchoice(key_chars, size=next(key_lengths)))
while True:
key = _rnd_key()
while key in storage:
key = _rnd_key()
yield key
| 5,342,254
|
def _style_mixture(which_styles, num_styles):
"""Returns a 1-D array mapping style indexes to weights."""
if not isinstance(which_styles, dict):
raise ValueError('Style mixture must be a dictionary.')
mixture = np.zeros([num_styles], dtype=np.float32)
for index in which_styles:
mixture[index] = which_styles[index]
return mixture
| 5,342,255
|
def merge_dicts(a, b):
"""combine two dictionaries, assuming components are arrays"""
result = a
for k, v in b.items():
if k not in result:
result[k] = []
result[k].extend(v)
return result
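# Hedged usage sketch: note that merge_dicts extends the lists of its first argument
# in place (the returned dict is `a` itself), so pass a copy if that matters.
a = {"x": [1, 2]}
b = {"x": [3], "y": [4]}
print(merge_dicts(a, b))  # {'x': [1, 2, 3], 'y': [4]}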
| 5,342,256
|
def test_salesforce_origin_aggregate(sdc_builder, sdc_executor, salesforce):
"""Create data using Salesforce client and then check if Salesforce origin
retrieves correct aggregate data using snapshot.
The pipeline looks like:
salesforce_origin >> trash
Args:
sdc_builder (:py:class:`streamsets.testframework.Platform`): Platform instance
sdc_executor (:py:class:`streamsets.sdk.DataCollector`): Data Collector executor instance
salesforce (:py:class:`testframework.environments.SalesforceInstance`): Salesforce environment
"""
pipeline_builder = sdc_builder.get_pipeline_builder()
query = ("SELECT COUNT(Id), "
"MAX(NumberOfEmployees), "
"MIN(Industry), "
"SUM(AnnualRevenue) "
f'FROM Account where Name like \'{STR_15_RANDOM} %\'')
salesforce_origin = pipeline_builder.add_stage('Salesforce', type='origin')
salesforce_origin.set_attributes(soql_query=query,
use_bulk_api=False,
subscribe_for_notifications=False,
disable_query_validation=True)
trash = pipeline_builder.add_stage('Trash')
salesforce_origin >> trash
pipeline = pipeline_builder.build().configure_for_environment(salesforce)
sdc_executor.add_pipeline(pipeline)
account_ids = None
client = salesforce.client
try:
# Using Salesforce client, create rows in Contact.
logger.info('Creating rows using Salesforce client ...')
data_to_insert = [{'Name': f'{STR_15_RANDOM} 1',
'NumberOfEmployees': 1,
'Industry': 'Agriculture',
'AnnualRevenue': 123 },
{'Name': f'{STR_15_RANDOM} 2',
'NumberOfEmployees': 2,
'Industry': 'Finance',
'AnnualRevenue': 456},
{'Name': f'{STR_15_RANDOM} 3',
'NumberOfEmployees': 3,
'Industry': 'Utilities',
'AnnualRevenue': 789}]
account_ids = _get_ids(client.bulk.Account.insert(data_to_insert), 'id')
logger.info('Starting pipeline and snapshot')
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True, timeout_sec=TIMEOUT).snapshot
# There should be a single row with a count field
assert len(snapshot[salesforce_origin].output) == 1
assert snapshot[salesforce_origin].output[0].field['expr0'].value == 3
assert snapshot[salesforce_origin].output[0].field['expr1'].value == 3
assert snapshot[salesforce_origin].output[0].field['expr2'].value == 'Agriculture'
assert snapshot[salesforce_origin].output[0].field['expr3'].value == 1368
finally:
if sdc_executor.get_pipeline_status(pipeline).response.json().get('status') == 'RUNNING':
logger.info('Stopping pipeline')
sdc_executor.stop_pipeline(pipeline)
if account_ids:
logger.info('Deleting records ...')
client.bulk.Account.delete(account_ids)
| 5,342,257
|
def parser_content_labelling_Descriptor(data,i,length,end):
"""\
parser_content_labelling_Descriptor(data,i,length,end) -> dict(parsed descriptor elements).
This descriptor is not parsed at the moment. The dict returned is:
{ "type": "content_labelling", "contents" : unparsed_descriptor_contents }
(Defined in ETSI TS 102 323 specification)
"""
return { "type" : "content_labelling", "contents" : data[i+2:end] }
| 5,342,258
|
def is_norm(modules):
"""Check if is one of the norms."""
if isinstance(modules, (_BatchNorm, )):
return True
return False
| 5,342,259
|
def scheduler(system = system()):
"""Job scheduler for OLCF system."""
if not is_olcf_system(system):
raise RuntimeError('unknown system (' + system + ')')
return _system_params[system].scheduler
| 5,342,260
|
def simple_decoder_fn_inference(output_fn, encoder_state, embeddings,
start_of_sequence_id, end_of_sequence_id,
maximum_length, num_decoder_symbols,
dtype=dtypes.int32, name=None):
""" Simple decoder function for a sequence-to-sequence model used in the
`dynamic_rnn_decoder`.
The `simple_decoder_fn_inference` is a simple inference function for a
sequence-to-sequence model. It should be used when `dynamic_rnn_decoder` is
in the inference mode.
The `simple_decoder_fn_inference` is called with a set of the user arguments
and returns the `decoder_fn`, which can be passed to the
`dynamic_rnn_decoder`, such that
```
dynamic_fn_inference = simple_decoder_fn_inference(...)
outputs_inference, state_inference = dynamic_rnn_decoder(
decoder_fn=dynamic_fn_inference, ...)
```
Further usage can be found in the `kernel_tests/seq2seq_test.py`.
Args:
output_fn: An output function to project your `cell_output` onto class
logits.
An example of an output function;
```
tf.variable_scope("decoder") as varscope
output_fn = lambda x: layers.linear(x, num_decoder_symbols,
scope=varscope)
outputs_train, state_train = seq2seq.dynamic_rnn_decoder(...)
logits_train = output_fn(outputs_train)
varscope.reuse_variables()
logits_inference, state_inference = seq2seq.dynamic_rnn_decoder(
output_fn=output_fn, ...)
```
If `None` is supplied it will act as an identity function, which
might be wanted when using the RNNCell `OutputProjectionWrapper`.
encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`.
embeddings: The embeddings matrix used for the decoder sized
`[num_decoder_symbols, embedding_size]`.
start_of_sequence_id: The start of sequence ID in the decoder embeddings.
end_of_sequence_id: The end of sequence ID in the decoder embeddings.
maximum_length: The maximum allowed of time steps to decode.
num_decoder_symbols: The number of classes to decode at each time step.
dtype: (default: `dtypes.int32`) The default data type to use when
handling integer objects.
name: (default: `None`) NameScope for the decoder function;
defaults to "simple_decoder_fn_inference"
Returns:
A decoder function with the required interface of `dynamic_rnn_decoder`
intended for inference.
"""
with ops.name_scope(name, "simple_decoder_fn_inference",
[output_fn, encoder_state, embeddings,
start_of_sequence_id, end_of_sequence_id,
maximum_length, num_decoder_symbols, dtype]):
start_of_sequence_id = ops.convert_to_tensor(start_of_sequence_id, dtype)
end_of_sequence_id = ops.convert_to_tensor(end_of_sequence_id, dtype)
maximum_length = ops.convert_to_tensor(maximum_length, dtype)
num_decoder_symbols = ops.convert_to_tensor(num_decoder_symbols, dtype)
encoder_info = nest.flatten(encoder_state)[0]
batch_size = encoder_info.get_shape()[0].value
if output_fn is None:
output_fn = lambda x: x
if batch_size is None:
batch_size = array_ops.shape(encoder_info)[0]
def decoder_fn(time, cell_state, cell_input, cell_output, context_state):
""" Decoder function used in the `dynamic_rnn_decoder` with the purpose of
inference.
The main difference between this decoder function and the `decoder_fn` in
`simple_decoder_fn_train` is how `next_cell_input` is calculated. In this
decoder function we calculate the next input by applying an argmax across
the feature dimension of the output from the decoder. This is a
greedy-search approach. (Bahdanau et al., 2014) & (Sutskever et al., 2014)
use beam-search instead.
Args:
time: positive integer constant reflecting the current timestep.
cell_state: state of RNNCell.
cell_input: input provided by `dynamic_rnn_decoder`.
cell_output: output of RNNCell.
context_state: context state provided by `dynamic_rnn_decoder`.
Returns:
A tuple (done, next state, next input, emit output, next context state)
where:
done: A boolean vector to indicate which sentences has reached a
`end_of_sequence_id`. This is used for early stopping by the
`dynamic_rnn_decoder`. When `time>=maximum_length` a boolean vector with
all elements as `true` is returned.
next state: `cell_state`, this decoder function does not modify the
given state.
next input: The embedding from argmax of the `cell_output` is used as
`next_input`.
emit output: If `output_fn is None` the supplied `cell_output` is
returned, else the `output_fn` is used to update the `cell_output`
before calculating `next_input` and returning `cell_output`.
next context state: `context_state`, this decoder function does not
modify the given context state. The context state could be modified when
applying e.g. beam search.
"""
with ops.name_scope(name, "simple_decoder_fn_inference",
[time, cell_state, cell_input, cell_output,
context_state]):
if cell_input is not None:
raise ValueError("Expected cell_input to be None, but saw: %s" %
cell_input)
if cell_output is None:
# invariant that this is time == 0
next_input_id = array_ops.ones([batch_size,], dtype=dtype) * (
start_of_sequence_id)
done = array_ops.zeros([batch_size,], dtype=dtypes.bool)
cell_state = encoder_state
cell_output = array_ops.zeros([num_decoder_symbols],
dtype=dtypes.float32)
else:
cell_output = output_fn(cell_output)
next_input_id = math_ops.cast(
math_ops.argmax(cell_output, 1), dtype=dtype)
done = math_ops.equal(next_input_id, end_of_sequence_id)
next_input = array_ops.gather(embeddings, next_input_id)
# if time > maxlen, return all true vector
done = control_flow_ops.cond(math_ops.greater(time, maximum_length),
lambda: array_ops.ones([batch_size,], dtype=dtypes.bool),
lambda: done)
return (done, cell_state, next_input, cell_output, context_state)
return decoder_fn
| 5,342,261
|
def listdir(path):
"""listdir(path) -> list_of_strings
Return a list containing the names of the entries in the directory.
path: path of directory to list
The list is in arbitrary order. It does not include the special
entries '.' and '..' even if they are present in the directory.
"""
l = File(path).list()
if l is None:
raise OSError(0, 'No such directory', path)
return list(l)
| 5,342,262
|
def get_scripts_folder():
"""
return data folder to use for future processing
"""
return (pathlib.Path(__file__).parent.parent)
| 5,342,263
|
def main(
workdir: Path = typer.Argument(".", help="a directory path for working directory"),
url: Optional[str] = typer.Option(None, help="a download URL"),
directory: Path = typer.Option(None, help="a directory path for test cases"),
no_store: bool = typer.Option(False, help="testcases is shown but not saved"),
format: str = typer.Option("sample-%i.%e", help="custom filename format"),
login: bool = typer.Option(False, help="login into target service"),
cookie: Path = typer.Option(utils.default_cookie_path, help="directory for cookie"),
) -> None:
"""
Here is shortcut for download with `online-judge-tools`.
At first, call `judge conf` for configuration.
Pass `problem` at `contest` you want to test.
Ex) the following leads to download test cases for Problem `C` at `ABC 051`:
```download```
"""
typer.echo("Load configuration...")
if not workdir.exists():
typer.secho(f"Not exists: {str(workdir.resolve())}", fg=typer.colors.BRIGHT_RED)
raise typer.Abort()
try:
_config = JudgeConfig.from_toml(workdir)
except KeyError as e:
typer.secho(str(e), fg=typer.colors.BRIGHT_RED)
raise typer.Abort()
__config = _config.dict()
if url or directory:
# check arguments
if url:
__config["URL"] = url
if directory:
__config["testdir"] = directory.resolve()
try:
config = DownloadJudgeConfig(**__config)
except KeyError as e:
typer.secho(str(e), fg=typer.colors.BRIGHT_RED)
raise typer.Abort()
typer.echo(f"Download {config.URL}")
try:
login_form: Optional[LoginForm] = None
if login:
login_form = CLILoginForm()
testcases = download_tool(
DownloadArgs(
url=config.URL,
login_form=login_form,
cookie=cookie,
)
)
except Exception as e:
typer.secho(str(e), fg=typer.colors.BRIGHT_RED)
raise typer.Abort()
if not no_store:
try:
save_tool(
testcases,
SaveArgs(
format=format,
directory=Path(config.testdir),
),
)
except Exception as e:
typer.secho(str(e), fg=typer.colors.BRIGHT_RED)
raise typer.Abort()
| 5,342,264
|
def preresnet110(**kwargs):
"""Constructs a PreResNet-110 model.
"""
model = PreResNet(Bottleneck, [18, 18, 18], **kwargs)
return model
| 5,342,265
|
def if_action(hass, config):
""" Wraps action method with state based condition. """
value_template = config.get(CONF_VALUE_TEMPLATE)
if value_template is None:
_LOGGER.error("Missing configuration key %s", CONF_VALUE_TEMPLATE)
return False
return lambda: _check_template(hass, value_template)
| 5,342,266
|
def test():
"""Module (003)"""
config = terrascript.Terrascript()
config += terrascript.aws.aws(access_key='ACCESS_KEY_HERE',
secret_key='SECRET_KEY_HERE',
region='us-east-1')
config += terrascript.Module('vpc',
source="terraform-aws-modules/vpc/aws",
version="2.9.0")
assert_equals_json(config, 'test_003.tf.json')
| 5,342,267
|
def get_indy_cli_command_output(output: bytes, match: str,
        return_line_offset: int = 1, remove_ansi_escape_sequences: bool = True,
        multi: bool = False) -> Union[List[str], str]:
    """
    Get the output for a specific indy cli command from STDOUT captured calling
    indy-cli from python.
    :param output: STDOUT (bytes) from a batch call to indy-cli from python.
                   Required.
    :type output: bytes
    :param match: Find the first line in the output that contains this string.
                  Required.
    :type match: str
    :param return_line_offset: Number of lines to skip after the matching line
                               before returning a line. Defaults to 1.
    :type return_line_offset: int
    :param remove_ansi_escape_sequences: Strip ANSI escape sequences from the
                                         returned line(s). Defaults to True.
    :type remove_ansi_escape_sequences: bool
    :param multi: Return a list of all matches instead of only the first match.
                  Defaults to False.
    :type multi: bool
    """
ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
matches = []
lines = iter(output.decode().split("\n"))
for line in lines:
if match in line:
count = return_line_offset
# Skip return_line_offset lines
            while count > 0:
                line = next(lines)
                count -= 1
# Return a single line just after return_line_offset lines have been
# skipped
if remove_ansi_escape_sequences:
line = ansi_escape.sub('', line)
matches.append(line)
# Search for multiple matches?
if multi:
# Continue finding matches
continue
break
if multi:
return matches
else:
try:
return matches[0]
except IndexError:
return None
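
# Hedged usage sketch: `sample_stdout` is a hypothetical indy-cli capture, not real
# output. The line after the one containing "wallet list" is returned with ANSI
# escape sequences stripped.
sample_stdout = b"indy> wallet list\n\x1b[1mmy_wallet\x1b[0m\nindy> exit\n"
wallet_line = get_indy_cli_command_output(sample_stdout, "wallet list")
print(wallet_line)  # -> my_wallet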
| 5,342,268
|
def reports():
"""Returns all reports in the system"""
reports = crud.report.get_reports()
return reports
| 5,342,269
|
def test_basic():
"""
Ensure basic decoding works
"""
assert_equal("test", convert_base64(base64.b64encode(b"test")))
| 5,342,270
|
async def test_adding_and_removing_unsupported_reason(coresys: CoreSys):
"""Test adding and removing unsupported reason."""
coresys.core.state = CoreState.RUNNING
assert UnsupportedReason.NETWORK_MANAGER not in coresys.resolution.unsupported
with patch(
"supervisor.resolution.evaluations.network_manager.EvaluateNetworkManager.evaluate",
return_value=True,
):
await coresys.resolution.evaluate.evaluate_system()
assert UnsupportedReason.NETWORK_MANAGER in coresys.resolution.unsupported
assert not coresys.core.supported
with patch(
"supervisor.resolution.evaluations.network_manager.EvaluateNetworkManager.evaluate",
return_value=False,
):
await coresys.resolution.evaluate.evaluate_system()
assert UnsupportedReason.NETWORK_MANAGER not in coresys.resolution.unsupported
assert coresys.core.supported
| 5,342,271
|
def md_to_rst(s, fname='?'):
"""
    Return the reStructuredText equivalent of a Markdown string.
    If conversion (via the 'pandoc' command line) fails, returns the raw Markdown.
Requires pandoc system utility: http://johnmacfarlane.net/pandoc/
Optional fname arg used only for logging/error message.
"""
try:
args = ['pandoc', '-r', 'markdown', '-w', 'rst']
p = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
(pout, perr) = p.communicate(s)
if p.returncode == 0:
return pout
raise ValueError("pandoc exit %d, stderr: %s" % (p.returncode, perr))
except Exception as e:
print("notice: error converting '%s' MD to RST "
"(probably harmless, likely missing 'pandoc' utility)"
": " % fname, e, file=stderr)
return s
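
# Hedged usage sketch: requires the external `pandoc` binary on PATH; because the
# input is fed to Popen.communicate, pass bytes on Python 3. The fname argument is
# only used for log messages.
rst_text = md_to_rst(b"# Title\n\nSome *emphasised* Markdown.\n", fname="README.md")
print(rst_text)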
| 5,342,272
|
def parse_signature(signature):
"""
Parses one signature
:param signature: stanc3 function signature
    :return: return type, function name and list of function argument types
"""
return_type, rest = signature.split(" ", 1)
function_name, rest = rest.split("(", 1)
args = re.findall(r"(?:[(][^()]+[)][^,()]+)|(?:[^,()]+(?:,*[]])?)", rest)
args = [i.strip() for i in args if i.strip()]
return return_type, function_name, args
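
# Illustrative call on a representative stanc3-style signature (hypothetical input);
# relies only on the `re` module already used above.
ret, name, args = parse_signature("real normal_lpdf(reals, reals, reals)")
print(ret, name, args)  # -> real normal_lpdf ['reals', 'reals', 'reals']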
| 5,342,273
|
def logger(context, name):
"""Get PySpark configured logger
Args:
context: SparkContext
name (str): Name of the logger (category)
Returns:
Logger instance
"""
# TODO: add ccdc version to name
return context._jvm.org.apache.log4j.LogManager.getLogger(name)
| 5,342,274
|
def gspan_to_eden(input, options=dict()):
"""Take a string list in the extended gSpan format and yields NetworkX graphs.
Args:
input: data source, can be a list of strings, a file name or a url
Returns:
NetworkX graph generator
Raises:
Exception: if a graph is empty
"""
header = ''
string_list = []
for line in util.read(input):
if line.strip():
if line[0] in ['g', 't']:
if string_list:
yield gspan_to_networkx(header, string_list)
string_list = []
header = line
else:
string_list += [line]
if string_list:
yield gspan_to_networkx(header, string_list)
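
# Hedged usage sketch: a toy graph in extended gSpan format passed as a list of
# strings. `gspan_to_networkx` and `util.read` are assumed to be available in this
# module, and each yielded object is assumed to be a NetworkX graph as documented.
gspan_lines = [
    "t # 0",
    "v 0 A",
    "v 1 B",
    "e 0 1 x",
]
for graph in gspan_to_eden(gspan_lines):
    print(graph.number_of_nodes(), graph.number_of_edges())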
| 5,342,275
|
def compute_fps(model, shape, epoch=100, device=None):
"""
frames per second
:param shape: 输入数据大小
"""
total_time = 0.0
if device:
model = model.to(device)
for i in range(epoch):
data = torch.randn(shape)
if device:
data = data.to(device)
start = time.time()
outputs = model(data)
end = time.time()
total_time += (end - start)
return total_time / epoch
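
# Hedged usage sketch: times a small convolution on random input. The value returned
# by compute_fps is the mean forward-pass time in seconds, so the frame rate is
# roughly 1 / avg_time.
import torch

model = torch.nn.Conv2d(3, 8, kernel_size=3)
avg_time = compute_fps(model, shape=(1, 3, 224, 224), epoch=10)
print(f"average forward time: {avg_time:.4f} s")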
| 5,342,276
|
def onboarding_ml_app_patterns_post(ml_app_pattern): # noqa: E501
"""Create a new MLApp pattern
# noqa: E501
:param ml_app_pattern: MLApp pattern detail description
:type ml_app_pattern: dict | bytes
:rtype: MLAppPattern
"""
if connexion.request.is_json:
ml_app_pattern = MLAppPattern.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
| 5,342,277
|
def get_stats(ns_profnum, clear=False, **kwargs):
""""
Returns and optionally clears the Polyglot-to-ISY stats
:param ns_profnum: Node Server ID (for future use)
:param clear: optional, zero out stats if True
"""
global SLOCK, STATS
SLOCK.acquire()
st = STATS
if clear:
STATS['ntotal'] = 0
STATS['rtotal'] = 0
STATS['oktotal'] = 0
STATS['ertotal'] = 0
STATS['ettotal'] = 0.0
STATS['ethigh'] = 0.0
STATS['etlow'] = 0.0
SLOCK.release()
#_LOGGER.info('get_stats(): %d %f %d', st['ntotal'], st['ettotal'], st['rtotal'])
return st
| 5,342,278
|
def test_ffmpeg():
""" stream to icecast """
input_url = TestUrl.ro1
password = input("Icecast password: ")
icecast_url = f"icecast://source:{password}@173.249.6.236:8000/babyfoon"
content_type = "-content_type audio/mpeg -f mp3"
bitrate = "-b:a 64K -minrate 64K -maxrate 64K -bufsize 64K"
# play on standard out:
cmd = f'ffmpeg -i {input_url} -f alsa default'
# send input url to icecast:
cmd = f'ffmpeg -i {input_url} {content_type} {bitrate} "{icecast_url}"'
# send recording to icecast:
cmd = f'ffmpeg -f alsa -i hw:0 {content_type} {bitrate} "{icecast_url}"'
with FfmpegProcess(cmd):
while True:
time.sleep(30)
| 5,342,279
|
def read_log_json():
""" Get all log documents/records from MondoDB """
limit = int(demisto.args().get('limit'))
# Point to all the documents
cursor = COLLECTION.find({}, {'_id': False}).limit(limit)
# Create an empty log list
entries = []
# Iterate through those documents
if cursor is not None:
for i in cursor:
# Append log entry to list
entries.append(i)
return_json = {COLLECTION_NAME: entries}
human_readable = tableToMarkdown(f'The log documents/records for collection "{COLLECTION_NAME}"', return_json)
return human_readable, {}, {}
return 'MongoDB - no documents/records - Log collection is empty', {}, {}
| 5,342,280
|
def add_parser(subp, raw):
"""Add a parser to the main subparser. """
tmpp = subp.add_parser('add', help='add an excentury project',
formatter_class=raw,
description=textwrap.dedent(DESC))
tmpp.add_argument('path', type=str,
help='project path')
| 5,342,281
|
def gen_time_plot_w_tracking(trialdata, no_titles=False, plot_font=PLOT_FONT):
"""Generate time series plot of ground truth and tracked force, sEMG, and
ultrasound data.
Args:
trialdata (dataobj.TrialData): object containing data to be plotted
no_titles (bool): whether to omit axis/title labels that are redundant
with eventual use case (e.g., copying to table for publication)
plot_font (str): desired matplotlib font family
"""
register_matplotlib_converters()
sns.set()
num_subplots = 4
fig, axs = plt.subplots(num_subplots)
plot_ind = trialdata.df.index.to_julian_date().to_numpy() - 2457780.5
plot_ind = plot_ind * 24 * 60 * 60
axs[0].plot(trialdata.df['us-csa'], color='#41b6c4')
axs[0].plot(trialdata.df['us-csa-t'], color='#41b6c4', linestyle='dotted')
axs[1].plot(trialdata.df['us-t'], color='#225ea8')
axs[1].plot(trialdata.df['us-t-t'], color='#225ea8', linestyle='dotted')
axs[2].plot(plot_ind, trialdata.df['us-tr'], color='#081d58')
axs[2].plot(plot_ind,
trialdata.df['us-tr-t'],
color='#081d58',
linestyle='dotted')
axs[3].plot(plot_ind, trialdata.df['us-jd-e'], 'r')
axs[3].set_xlabel('time (s)', fontname=plot_font)
axs[3].xaxis.set_label_coords(1.0, -0.15)
if not no_titles:
        tstring = (trialdata.subj + ', ' + str(180 - int(trialdata.ang))
                   + r'$\degree$')
fig.suptitle(tstring, fontname=plot_font)
axs[0].set_ylabel('CSA', fontname=plot_font)
axs[1].set_ylabel('T', fontname=plot_font)
axs[2].set_ylabel('AR', fontname=plot_font)
axs[3].set_ylabel('JD', fontname=plot_font)
axs[0].xaxis.set_visible(False)
axs[1].xaxis.set_visible(False)
axs[2].xaxis.set_visible(False)
for i in range(num_subplots):
for tick in axs[i].get_xticklabels():
tick.set_fontname(plot_font)
for tick in axs[i].get_yticklabels():
tick.set_fontname(plot_font)
plt.show()
| 5,342,282
|
def RecogniseForm(access_token, image, templateSign=None, classifierId=None):
"""
自定义模板文字识别
:param access_token:
:param image:图像数据(string),base64编码,注意大小不超过4M,最短边至少15px,最长边最大4096px,支持jpg/png/bmp格式
:param templateSign:模板ID(string)
:param classifierId:分类器ID(int),这个参数与templateSign至少存在一个,优先使用templateSign,存在templateSign时,使用指定模板;如果没有templateSign而有classifierld,表示使用分类器去判断使用模板
:return:返回识别结果
"""
host = 'https://aip.baidubce.com/rest/2.0/solution/v1/iocr/recognise?access_token=%s' % access_token
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
formdata = {'image': image}
if templateSign is not None:
formdata['templateSign'] = templateSign
if classifierId is not None:
formdata['classifierId'] = classifierId
data = parse.urlencode(formdata).encode('utf8')
req = request.Request(method='POST', url=host, headers=headers, data=data)
response = request.urlopen(req)
if (response.status == 200):
jobj = json.loads(response.read().decode())
datas = jobj['data']
recognise = {}
for obj in datas['ret']:
recognise[obj['word_name']] = obj['word']
return recognise
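
# Hedged usage sketch with placeholder credentials and a hypothetical file name:
# the image must be base64 encoded before being passed in.
import base64

with open("form.jpg", "rb") as fp:
    img_b64 = base64.b64encode(fp.read()).decode("utf8")
fields = RecogniseForm("YOUR_ACCESS_TOKEN", img_b64, templateSign="YOUR_TEMPLATE_ID")
print(fields)  # e.g. {'field_name': 'recognised text', ...}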
| 5,342,283
|
def rectangle(
func: Callable[..., float], a: float, b: float, eps: float = 0.0001, *args, **kwargs
) -> float:
"""
    The rectangle method is the simplest numerical integration method.
    It divides the integration domain into n sample points and sums
    dx * f(x) over those points, where dx = (b - a) / (n - 1).
    Parameters:
    func = input function
    a = lower bound of integration
    b = upper bound of integration
    eps = maximum relative error
>>> rectangle(lambda x: x**2, 0, 2)
2.6992751228325296
>>> rectangle(lambda x: x**1, 0, 2)
2.0198019801980194
"""
try:
n = 100
x = np.linspace(a, b, n)
dx = x[1] - x[0]
L0 = sum(dx * func(i, *args, **kwargs) for i in x)
err = 1
while err > eps:
n += 1
x = np.linspace(a, b, n)
dx = x[1] - x[0]
L1 = sum(dx * func(i, *args, **kwargs) for i in x)
err = np.abs(L1 - L0) / np.abs(L1)
L0 = L1
except Exception:
raise RuntimeError("Integrasi gagal, pastikan fungsi anda benar!")
return L1
| 5,342,284
|
def _rearrange_axis(data: np.ndarray,
axis: int = 0) -> tuple([np.ndarray, tuple]):
"""rearranges the `numpy.ndarray` as a two-dimensional array of size (n,
-1), where n is the number of elements of the dimension defined by `axis`.
Parameters
----------
data : :class:`numpy.ndarray`
An array to be rearranged
axis : :class:`int`, Optional
The axis that all other dimensions are rearranged around it. Defaults to 0.
    Returns
    -------
    tuple (data : :class:`numpy.ndarray`, trailing_shape : :class:`tuple`)
        A tuple whose first element is the reshaped data and whose second
        element is a tuple of all dimensions except the one specified by `axis`.
"""
if not isinstance(data, np.ndarray):
raise TypeError("data must be a numpy.ndarray.")
axis = _check_axis(axis, data.ndim)
if axis != 0:
data = np.moveaxis(data, axis, 0)
trailing_shape = data.shape[1:]
data = data.reshape((data.shape[0], -1))
return data, trailing_shape
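
# Illustrative call, assuming the module's _check_axis helper validates the axis and
# returns it unchanged: axis=1 (length 4) is moved to the front, the rest is flattened.
import numpy as np

arr = np.arange(2 * 4 * 3).reshape(2, 4, 3)
flat, trailing_shape = _rearrange_axis(arr, axis=1)
print(flat.shape, trailing_shape)  # -> (4, 6) (2, 3)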
| 5,342,285
|
def landmarks_json():
"""Send landmark data for map layer as Geojson from database."""
features = []
for landmark in Landmark.query.all():
# get the first image of a landmark, if any
image = ""
if len(landmark.images) > 0:
image = landmark.images[0].imageurl
# get the average rating of a landmark
avg_rating = ""
rating_scores = [r.user_score for r in landmark.ratings]
if len(rating_scores) > 0:
avg_rating = float(sum(rating_scores))/len(rating_scores)
features.append({
"type": "Feature",
"properties": {
"name": landmark.landmark_name,
"description": landmark.landmark_description,
"artist": landmark.landmark_artist,
"display-dimensions": landmark.landmark_display_dimensions,
"location-description": landmark.landmark_location_description,
"medium": landmark.landmark_medium
},
"geometry": {
"coordinates": [
landmark.landmark_lng,
landmark.landmark_lat],
"type": "Point"
},
"id": landmark.landmark_id,
'image': image,
'avg_rating': avg_rating,
})
landmarks_geojson = {
"type": "FeatureCollection",
"features": features,
}
return jsonify(landmarks_geojson)
| 5,342,286
|
def _verify_kronecker_factored_config(model_config):
"""Verifies that a kronecker_factored model_config is properly specified.
Args:
model_config: Model configuration object describing model architecture.
Should be one of the model configs in `tfl.configs`.
Raises:
ValueError: If there are lattice regularizers.
ValueError: If there are per-feature lattice regularizers.
ValueError: If there are unimodality constraints.
ValueError: If there are trust constraints.
ValueError: If there are dominance constraints.
"""
for regularizer_config in model_config.regularizer_configs or []:
if not regularizer_config.name.startswith(_INPUT_CALIB_REGULARIZER_PREFIX):
raise ValueError(
'KroneckerFactoredLattice layer does not currently support '
'lattice regularizers.')
for feature_config in model_config.feature_configs:
for regularizer_config in feature_config.regularizer_configs or []:
if not regularizer_config.name.startswith(
_INPUT_CALIB_REGULARIZER_PREFIX):
raise ValueError(
'KroneckerFactoredLattice layer does not currently support '
'per-feature lattice regularizers.')
# Check that all lattices sizes for all features are the same.
if any(feature_config.lattice_size !=
model_config.feature_configs[0].lattice_size
for feature_config in model_config.feature_configs):
raise ValueError('KroneckerFactoredLattice layer must have the same '
'lattice size for all features.')
# Check that there are only monotonicity and bound constraints.
if any(
feature_config.unimodality != 'none' and feature_config.unimodality != 0
for feature_config in model_config.feature_configs):
raise ValueError(
'KroneckerFactoredLattice layer does not currently support unimodality '
'constraints.')
if any(feature_config.reflects_trust_in is not None
for feature_config in model_config.feature_configs):
raise ValueError(
'KroneckerFactoredLattice layer does not currently support trust '
'constraints.')
if any(feature_config.dominates is not None
for feature_config in model_config.feature_configs):
raise ValueError(
'KroneckerFactoredLattice layer does not currently support dominance '
'constraints.')
| 5,342,287
|
def _tree_cmd(options, user_args):
"""
Return the post_setup hook function for 'openmdao tree'.
Parameters
----------
options : argparse Namespace
Command line options.
user_args : list of str
Args to be passed to the user script.
"""
if options.outfile is None:
out = sys.stdout
else:
out = open(options.outfile, 'w')
if options.attrs or options.vecvars:
filt = _get_tree_filter(options.attrs, options.vecvars)
else:
filt = None
def _tree(prob):
tree(prob, show_colors=options.show_colors, show_sizes=options.show_sizes,
show_approx=options.show_approx, filter=filt, max_depth=options.depth,
rank=options.rank, stream=out)
exit()
# register the hook
if options.vecvars or options.show_sizes or options.show_approx:
funcname = 'final_setup'
else:
funcname = 'setup'
hooks._register_hook(funcname, class_name='Problem', inst_id=options.problem, post=_tree)
_load_and_exec(options.file[0], user_args)
| 5,342,288
|
def new_get_image_collection_gif(
ee_ic,
out_dir,
out_gif,
vis_params,
region,
cmap=None,
proj=None,
fps=10,
mp4=False,
grid_interval=None,
plot_title="",
date_format="YYYY-MM-dd",
fig_size=(10, 10),
dpi_plot=100,
file_format="png",
north_arrow_dict={},
scale_bar_dict={},
verbose=True,
):
"""Download all the images in an image collection and use them to generate a gif/video.
Args:
ee_ic (object): ee.ImageCollection
out_dir (str): The output directory of images and video.
out_gif (str): The name of the gif file.
vis_params (dict): Visualization parameters as a dictionary.
region (list | tuple): Geospatial region of the image to render in format [E,S,W,N].
fps (int, optional): Video frames per second. Defaults to 10.
mp4 (bool, optional): Whether to create mp4 video.
        grid_interval (float | tuple[float]): Float specifying an interval at which to create gridlines, units are decimal degrees. Tuples are interpreted as (x_interval, y_interval), such as (0.1, 0.1). Defaults to None.
plot_title (str): Plot title. Defaults to "".
date_format (str, optional): A pattern, as described at http://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html. Defaults to "YYYY-MM-dd".
fig_size (tuple, optional): Size of the figure.
dpi_plot (int, optional): The resolution in dots per inch of the plot.
file_format (str, optional): Either 'png' or 'jpg'.
north_arrow_dict (dict, optional): Parameters for the north arrow. See https://geemap.org/cartoee/#geemap.cartoee.add_north_arrow. Defaults to {}.
        scale_bar_dict (dict, optional): Parameters for the scale bar. See https://geemap.org/cartoee/#geemap.cartoee.add_scale_bar. Defaults to {}.
verbose (bool, optional): Whether or not to print text when the program is running. Defaults to True.
"""
# from .geemap import png_to_gif
out_dir = os.path.abspath(out_dir)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
out_gif = os.path.join(out_dir, out_gif)
count = int(ee_ic.size().getInfo())
names = ee_ic.aggregate_array("system:index").getInfo()
images = ee_ic.toList(count)
dates = ee_ic.aggregate_array("system:time_start")
dates = dates.map(lambda d: ee.Date(d).format(date_format)).getInfo()
# list of file name
img_list = []
for i, date in enumerate(dates):
image = ee.Image(images.get(i))
name = str(names[i])
# name = name + "." + file_format
name = str(i).zfill(3) + "_" + name + "." + file_format
out_img = os.path.join(out_dir, name)
img_list.append(out_img)
if verbose:
print(f"Downloading {i+1}/{count}: {name} ...")
# Size plot
plt.figure(figsize=fig_size)
# Plot image
ax = get_map(image, region=region, vis_params=vis_params, cmap=cmap, proj=proj)
# Add grid
if grid_interval is not None:
add_gridlines(ax, interval=grid_interval, linestyle=":")
# Add title
if len(plot_title) > 0:
ax.set_title(label=plot_title + " " + date + "\n", fontsize=15)
# Add scale bar
if len(scale_bar_dict) > 0:
add_scale_bar_lite(ax, **scale_bar_dict)
# Add north arrow
if len(north_arrow_dict) > 0:
add_north_arrow(ax, **north_arrow_dict)
# Save plot
plt.savefig(fname=out_img, dpi=dpi_plot)
plt.clf()
plt.close()
out_gif = os.path.abspath(out_gif)
png_to_gif(out_dir, out_gif, fps)
if verbose:
print(f"GIF saved to {out_gif}")
if mp4:
video_filename = out_gif.replace(".gif", ".mp4")
try:
import cv2
except ImportError:
print("Installing opencv-python ...")
subprocess.check_call(["python", "-m", "pip", "install", "opencv-python"])
import cv2
# Video file name
output_video_file_name = os.path.join(out_dir, video_filename)
frame = cv2.imread(img_list[0])
height, width, _ = frame.shape
frame_size = (width, height)
fps_video = fps
# Make mp4
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
# Function
def convert_frames_to_video(
input_list, output_video_file_name, fps_video, frame_size
):
"""Convert frames to video
Args:
input_list (list): Downloaded Image Name List.
output_video_file_name (str): The name of the video file in the image directory.
fps_video (int): Video frames per second.
frame_size (tuple): Frame size.
"""
out = cv2.VideoWriter(output_video_file_name, fourcc, fps_video, frame_size)
num_frames = len(input_list)
for i in range(num_frames):
img_path = input_list[i]
img = cv2.imread(img_path)
out.write(img)
out.release()
cv2.destroyAllWindows()
# Use function
convert_frames_to_video(
input_list=img_list,
output_video_file_name=output_video_file_name,
fps_video=fps_video,
frame_size=frame_size,
)
if verbose:
print(f"MP4 saved to {output_video_file_name}")
| 5,342,289
|
def extractTuples(data):
""" Saca las tuplas (palabra,prediccion),
y las devuelve como dos arrays entradas y
salidas """
inp = []
out = []
for r in data:
for i in range(len(r)):
            for j in range(-CONTEXT_WINDOW, CONTEXT_WINDOW + 1):
                # skip pairing a word with itself or with indices outside the sentence
                if j == 0 or i + j < 0 or i + j >= len(r):
                    continue
inp.append(r[i])
out.append(r[i+j])
return inp,out
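
# Minimal illustration: CONTEXT_WINDOW is assumed to be a module-level constant and
# is set to 1 here so every word is paired with its immediate neighbours.
CONTEXT_WINDOW = 1
inp, out = extractTuples([["the", "quick", "fox"]])
print(list(zip(inp, out)))
# -> [('the', 'quick'), ('quick', 'the'), ('quick', 'fox'), ('fox', 'quick')]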
| 5,342,290
|
def read_iter(fp, contig_q):
"""Returns read objects from contigs until someone passes None as a contig
:param fp: BAM file pointer (pysam.AlignmentFile)
:param contig_q: a queue into which we put contig information
(contig, eof_true) - eof_true is set if this is
the last non-empty contig and we want to pull out
all the trailing unmapped reads right after the contig
:return: a generator
"""
for contig in iter(contig_q.get, None):
logger.debug(contig[0])
for read in fp.fetch(contig[0]):
if read.flag & 0b100100000000: continue # Skip supplementary or secondary alignments
yield read
if contig[1]: # Now want the trailing reads - fp is positioned just before them
for read in fp.fetch(until_eof=contig[1]):
if read.flag & 0b100100000000: continue # Skip supplementary or secondary alignments
yield read
| 5,342,291
|
def _GetKeyKind(key):
"""Return the kind of the given key."""
return key.path().element_list()[-1].type()
| 5,342,292
|
def main(life, pixel_size=3, wait = 50, gen = -1):
"""Run life simulation."""
pygame.init()
pygame.display.set_caption("PyLife")
mat = mirror(scale(life.view_matrix(), pixel_size))# to find dimensions
screen = pygame.display.set_mode(mat.shape)
while 1: # mainloop
for e in pygame.event.get(): # quit program if window closed
if e.type == pygame.QUIT: sys.exit()
if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:
sys.exit()
if gen == 0: # end mainloop if there are no generations left
break
else:
gen -= 1
        # preps array, draws to surface, updates life object/screen
mat = transform(life.view_matrix(), pixel_size)
pygame.surfarray.blit_array(screen, mat)
life.update_matrix()
pygame.display.flip()
pygame.time.wait(wait)
| 5,342,293
|
def load_config() -> dict:
"""
Loads the config.yml file to memory and returns it as dictionary.
:return: Dictionary containing the config.
"""
with open('config.yml', 'r') as ymlfile:
return yaml.load(ymlfile, Loader=yaml.FullLoader)
| 5,342,294
|
def find_column_equivalence(matrix, do_not_care) -> Tuple[List[int], List[Sequence]]:
""" Adapt find_row_equivalence (above) to work on columns instead of rows. """
index, classes = find_row_equivalence(zip(*matrix), do_not_care)
return index, list(zip(*classes))
| 5,342,295
|
def getBaseCount(reads, varPos):
"""
:param reads:
:param varPos:
"""
'''
returns the baseCount for the
'''
baseCount = {'A': 0, 'C': 0, 'G': 0, 'T': 0}
for read in reads:
readPos = 0
mmReadPos = 0
startPos = read.pos
try:
cigarNums = re.split("[MIDNSHP]", read.cigarstring)[:-1]
cigarLetters = re.split("[0-9]+", read.cigarstring)[1:]
except TypeError:
            continue  # for unmapped reads the cigarstring is empty;
                      # to avoid querying unmapped reads every time,
                      # the error is caught and the read is skipped
# raise TypeError("Invalid Cigar String %s" % read.cigarstring)
for i in range(len(cigarLetters)): # parse over single read
if cigarLetters[i] in {"I", "S", "H"}: # Insertion, Soft Clipping and Hard Clipping
readPos = readPos + int(cigarNums[i])
elif cigarLetters[i] in {"D", "N"}: # Deletions and skipped Regions
startPos = startPos + int(cigarNums[i])
elif cigarLetters[i] in {"M"}: # Matches
for j in range(int(cigarNums[i])):
if startPos == varPos:
mmReadPos = readPos
mmReadBase = read.seq[mmReadPos]
try:
baseCount[mmReadBase] += 1 # increase number for the base at the mm pos
except KeyError:
sys.stderr.write("unknown Base %s \n" % mmReadBase)
readPos += 1
startPos += 1
return map(str, [baseCount['A'], baseCount['C'], baseCount['G'], baseCount['T']])
| 5,342,296
|
def RegisterSpecs(func):
"""The decorator to register the specification for each check item object.
The decorator first tests whether it is involved in the outmost call of the
check item object. If so, it then goes through the args, kwargs, and defaults
to populate the specification.
Args:
func: The __init__ function of a check item class.
Returns:
The wrapper function.
"""
def Wrapper(self, *args, **kwargs):
"""The function wrapper to extract argument for CheckItems."""
frame_stack = inspect.stack()
if len(frame_stack) > 1:
# Check if the caller is a method of BaseCheckItem. If so,
# `func` itself is not the outmost call to extract parameters.
frame = frame_stack[1][0]
frame_locals = frame.f_locals
if ('self' in frame_locals and
isinstance(frame_locals['self'], BaseCheckItem)):
return func(self, *args, **kwargs)
# Record the args and kwargs into a dict.
params = {}
# Get the arguments for the function.
# Example:
# def f(a, b=1, c=2, *pos, **named):
# pass
# This returns:
# ArgSpec(args=['a', 'b', 'c'], varargs='pos', keywords='named',
# defaults=(1, 2))
argspec = inspect.getargspec(func)
# If an arg has default, the corresponding index in the `defaults` array is
# N - (num_args_without_default), where N is the index of the arg in
# the `args` array.
# We started N as 1 to count for the `self` argument.
default_idx = (1 - (len(argspec.args) - len(argspec.defaults))
if argspec.defaults else None)
# For class member functions, the first item in args is `self`. Skip it.
for idx, arg_name in enumerate(argspec.args[1:]):
if idx < len(args):
arg_value = args[idx]
elif arg_name in kwargs:
arg_value = kwargs[arg_name]
elif (argspec.defaults and default_idx >= 0
and default_idx < len(argspec.defaults)):
arg_value = argspec.defaults[default_idx]
else:
raise ValueError('Missing argument "%s" for "%s".'
% (arg_name, self.__class__.__name__))
if argspec.defaults:
default_idx += 1
if isinstance(arg_value, check_range.BaseRange):
arg_value = arg_value.RawArgs()
params[arg_name] = arg_value
# Assign the parameters.
self.SetSpecs(params)
# Call the original function.
obj = func(self, *args, **kwargs)
return obj
return Wrapper
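
# Standalone illustration of the argument-to-value mapping performed by the wrapper,
# using a hypothetical toy signature and inspect.getfullargspec (the modern counterpart
# of the getargspec call above); BaseCheckItem and check_range are not needed here.
import inspect


def toy_init(self, a, b=1, c=2):
    pass


argspec = inspect.getfullargspec(toy_init)
args, kwargs = (10,), {"c": 30}
params = {}
# Index into `defaults`; the leading 1 accounts for the skipped `self` argument.
default_idx = 1 - (len(argspec.args) - len(argspec.defaults))
for idx, arg_name in enumerate(argspec.args[1:]):
    if idx < len(args):
        arg_value = args[idx]          # positional argument
    elif arg_name in kwargs:
        arg_value = kwargs[arg_name]   # keyword argument
    else:
        arg_value = argspec.defaults[default_idx]  # declared default
    default_idx += 1
    params[arg_name] = arg_value
print(params)  # -> {'a': 10, 'b': 1, 'c': 30}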
| 5,342,297
|
def get_changes_to_be_committed() -> Set[Path]:
"""After every time `add` is performed, the filepath is added to this text file."""
return {Path(path) for path in path_to.changes_to_be_committed.read_text().split("\n") if path}
| 5,342,298
|
def write_channels_metadata(meta_data_dict, file_name, access_mode="a"):
"""
Write the channel metadata into the given file. If file doesn't exist create it.
If the file exists, the channel indexes given in the meta_data_dict must be in
the existing range.
Parameters
----------
meta_data_dict(dictionary): see dictionary description in the read_metadata function.
file_name: write to this file.
access_mode: file access mode, default is append.
"""
# Open the file for reading and writing. If it doesn't exist, create.
with h5py.File(file_name, access_mode) as f:
try: # If file already exists check the imaris file format version and get number of channels.
imaris_format_version = f.attrs["ImarisVersion"].tobytes().decode("UTF-8")
if imaris_format_version not in file_format_versions:
raise ValueError(
f"Unsupported imaris file format version {imaris_format_version}."
)
dataset_dirname = f.attrs["DataSetDirectoryName"].tobytes().decode("UTF-8")
dataset_info_dirname = (
f.attrs["DataSetInfoDirectoryName"].tobytes().decode("UTF-8")
)
num_channels = len(f[dataset_dirname]["ResolutionLevel 0"]["TimePoint 0"])
except KeyError: # We are dealing with a new file.
num_channels = len(meta_data_dict["channels_information"])
dataset_info_dirname = default_dataset_info_dirname
dataset_dirname = default_dataset_dirname
_ims_set_nullterm_str_attribute(f, "ImarisDataSet", b"ImarisDataSet")
_ims_set_nullterm_str_attribute(f, "ImarisVersion", b"5.5.0")
_ims_set_nullterm_str_attribute(
f, "DataSetInfoDirectoryName", dataset_info_dirname.encode("UTF-8")
)
_ims_set_nullterm_str_attribute(
f, "DataSetDirectoryName", dataset_dirname.encode("UTF-8")
)
f.attrs["NumberOfDataSets"] = np.array([1], dtype=np.uint32)
f.create_group(dataset_info_dirname + "/ImarisDataSet")
_ims_set_nullterm_str_attribute(
f[dataset_info_dirname]["ImarisDataSet"], "Creator", b"SimpleITK"
)
_ims_set_nullterm_str_attribute(
f[dataset_info_dirname]["ImarisDataSet"], "NumberOfImages", b"1"
)
_ims_set_nullterm_str_attribute(
f[dataset_info_dirname]["ImarisDataSet"],
"Version",
str(sitk.Version()).encode("UTF-8"),
)
f.create_group(dataset_info_dirname + "/Imaris")
_ims_set_nullterm_str_attribute(
f[dataset_info_dirname]["Imaris"], "ThumbnailMode", b"thumbnailNone"
)
_ims_set_nullterm_str_attribute(
f[dataset_info_dirname]["Imaris"],
"Version",
str(sitk.Version()).encode("UTF-8"),
)
for i in range(num_channels):
f.create_group(dataset_info_dirname + f"/Channel {i}")
indexes, _ = zip(*meta_data_dict["channels_information"])
if not all([i in range(num_channels) for i in indexes]):
raise ValueError(
f"The index of one or more channels in meta data dictionary is outside the expected range [0, {num_channels-1}]." # noqa: E501
)
# Write the channel information, if it exists in the dictionary.
# When modifying an existing file some of the information
# may not exist, i.e. we are only changing the channel colors.
# Imaris supports two color modes ['BaseColor', 'TableColor'].
for i, channel_information in meta_data_dict["channels_information"]:
channel_str = f"Channel {i}"
if "name" in channel_information:
_ims_set_nullterm_str_attribute(
f[dataset_info_dirname][channel_str],
"Name",
channel_information["name"].encode("UTF-8"),
)
if "description" in channel_information:
_ims_set_nullterm_str_attribute(
f[dataset_info_dirname][channel_str],
"Description",
channel_information["description"].encode("UTF-8"),
)
prev_color_mode = (
f[dataset_info_dirname][channel_str]
.attrs["ColorMode"]
.tobytes()
.decode("UTF-8")
if "ColorMode" in f[dataset_info_dirname][channel_str].attrs
else ""
)
if (
"color" in channel_information or "color_table" in channel_information
) and prev_color_mode == "TableColor":
del f[dataset_info_dirname][channel_str].attrs["ColorTableLength"]
if "ColorTable" not in f[dataset_info_dirname][channel_str].attrs:
del f[dataset_info_dirname][channel_str]["ColorTable"]
if "color" in channel_information:
_ims_set_nullterm_str_attribute(
f[dataset_info_dirname][channel_str], "ColorMode", b"BaseColor"
)
_ims_set_nullterm_str_attribute(
f[dataset_info_dirname][channel_str],
"Color",
" ".join([f"{v:.3f}" for v in channel_information["color"]]).encode(
"UTF-8"
),
)
elif "color_table" in channel_information:
if prev_color_mode == "BaseColor":
del f[dataset_info_dirname][channel_str].attrs["Color"]
                    # Imaris expects the color table information to be either in an attribute
# or in a dataset.
# For some reason, I can't get h5py to write the dataset in the format expected by Imaris.
# String, Fixed length=1, padding=H5T_STR_NULLTERM, cset = H5T_CSET_ASCII
# The padding is always H5T_STR_NULLPAD.
# Tried a workaround similar to that described on SO, creating a custom type but that didn't work:
# https://stackoverflow.com/questions/38267076/how-to-write-a-dataset-of-null-terminated-fixed-length-strings-with-h5py
# tid = h5py.h5t.C_S1.copy()
# tid.set_strpad(h5py.h5t.STR_NULLTERM)
# H5T_C_S1_1 = h5py.Datatype(tid)
#
# The current "solution" is to write the color table information as an
# attribute and if that fails write as dataset so the information isn't lost.
                    # If the color table is large (>64K bytes) then writing
# to attribute will fail as it is larger than the HDF5 limit. We then save it as
# dataset even if imaris will not read it. We can export the file settings which will
# export the color table as a text file. We can then import the color table back directly
# from imaris and save the file.
# Possibly revisit, using low level h5py API as done for the
# attribute writing.
try:
f[dataset_info_dirname][channel_str].attrs[
"ColorTable"
] = np.frombuffer(
(
" ".join(
[f"{v:.3f}" for v in channel_information["color_table"]]
)
+ " "
).encode("UTF-8"),
dtype="S1",
)
except RuntimeError:
f[dataset_info_dirname][channel_str].create_dataset(
"ColorTable",
data=np.frombuffer(
(
" ".join(
[
f"{v:.3f}"
for v in channel_information["color_table"]
]
)
+ " "
).encode("UTF-8"),
dtype="S1",
),
)
_ims_set_nullterm_str_attribute(
f[dataset_info_dirname][channel_str],
"ColorTableLength",
str(int(len(channel_information["color_table"]) / 3)).encode(
"UTF-8"
),
)
_ims_set_nullterm_str_attribute(
f[dataset_info_dirname][channel_str], "ColorMode", b"TableColor"
)
if "range" in channel_information:
_ims_set_nullterm_str_attribute(
f[dataset_info_dirname][channel_str],
"ColorRange",
" ".join([f"{v:.3f}" for v in channel_information["range"]]).encode(
"UTF-8"
),
)
if "gamma" in channel_information:
_ims_set_nullterm_str_attribute(
f[dataset_info_dirname][channel_str],
"GammaCorrection",
f'{channel_information["gamma"]:.3f}'.encode("UTF-8"),
)
if "alpha" in channel_information:
_ims_set_nullterm_str_attribute(
f[dataset_info_dirname][channel_str],
"ColorOpacity",
f'{channel_information["alpha"]:.3f}'.encode("UTF-8"),
)
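
# Hedged usage sketch: a minimal channels_information structure (a list of
# (channel_index, info_dict) pairs) written to a hypothetical file name; the module
# level helpers referenced by write_channels_metadata are assumed to be available.
example_metadata = {
    "channels_information": [
        (0, {"name": "DAPI", "color": [0.0, 0.0, 1.0], "range": [0.0, 255.0], "alpha": 1.0}),
        (1, {"name": "GFP", "color": [0.0, 1.0, 0.0], "gamma": 1.0}),
    ]
}
write_channels_metadata(example_metadata, "example.ims")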
| 5,342,299
|