| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def sequence_plus_one(x_init, iter, dtype=int):
"""
Mathematical sequence: x_n = x_0 + n
:param x_init: initial values of the sequence
:param iter: iteration up to which the sequence should be evaluated
:param dtype: data type to cast to (either int or float)
:return: element at the given iteration and array of the whole sequence
"""
def iter_function(x_seq, i, x_init):
return x_seq[0, :] + i
return sequence(x_init, iter, iter_function, dtype)
| 5,337,000
|
def _is_test_product_type(product_type):
"""Returns whether the given product type is for tests purposes or not."""
return product_type in (
apple_product_type.ui_test_bundle,
apple_product_type.unit_test_bundle,
)
| 5,337,001
|
def legend(labels=[""], handlecolors=[""], handlelength=1, handletextpad=None, loc=None, **kwargs):
"""
Alias function of plt.legend() with most frequently used parameters.
Args:
labels (sequence of strings)
handlecolors (list)
handlelength (None/int/float)
handletextpad (None/int/float)
loc (str):
| Location String Location Code
| -------------------------------
| 'best' 0
| 'upper right' 1
| 'upper left' 2
| 'lower left' 3
| 'lower right' 4
| 'right' 5
| 'center left' 6
| 'center right' 7
| 'lower center' 8
| 'upper center' 9
| 'center' 10
Keyword Args:
title (str)
edgecolor (str)
fancybox (bool):
| True: legendbox with round edges
| False: legendbox with normal edges
"""
legend = plt.legend(labels=labels,
handlelength=handlelength,
handletextpad=handletextpad,
loc=loc, **kwargs)
if handlecolors != [""]:
for ndx, color in enumerate(handlecolors):
legend.legendHandles[ndx].set_color(color)
return
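
# Hedged usage sketch for the legend() alias above; the plotted data and
# colors below are illustrative assumptions, not taken from the original source.
import matplotlib.pyplot as plt

plt.plot([0, 1, 2], [0, 1, 4])
plt.plot([0, 1, 2], [4, 1, 0])
legend(labels=["rising", "falling"],
       handlecolors=["tab:blue", "tab:orange"],
       loc="upper center")
plt.show()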
| 5,337,002
|
def _synced(method, self, args, kwargs):
"""Underlying synchronized wrapper."""
with self._lock:
return method(*args, **kwargs)
| 5,337,003
|
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
return meta('NewBase', bases, {})
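
# Hedged usage sketch for with_metaclass(); the Registry metaclass below is an
# illustrative assumption, not part of the original source.
class Registry(type):
    classes = []
    def __new__(mcls, name, bases, namespace):
        cls = super().__new__(mcls, name, bases, namespace)
        Registry.classes.append(cls)
        return cls

class Plugin(with_metaclass(Registry, object)):
    """Subclasses of Plugin are all created through Registry."""

assert type(Plugin) is Registry        # Plugin picked up the metaclass
assert Plugin in Registry.classes      # and was recorded by it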
| 5,337,004
|
def team(slug):
"""The team page. Shows statuses for all users in the team."""
db = get_session(current_app)
team = db.query(Team).filter_by(slug=slug).first()
if not team:
return page_not_found('Team not found.')
return render_template(
'status/team.html',
team=team,
users=team.users,
teams=db.query(Team).order_by(Team.name).all(),
statuses=team.recent_statuses(
request.args.get('page', 1),
startdate(request),
enddate(request)))
| 5,337,005
|
def mask_layer(layer, mask, mask_value = np.nan):
"""apply a mask to a layer
layer[mask == True] = mask_value
"""
layer[mask] = mask_value
return layer
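
# Hedged usage sketch with illustrative data (not from the original source):
import numpy as np

layer = np.array([[1.0, 2.0], [3.0, 4.0]])
mask = layer > 2.5                       # boolean mask, True where value > 2.5
masked = mask_layer(layer, mask)         # masked cells become np.nan in place
# masked is now [[ 1.,  2.], [nan, nan]]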
| 5,337,006
|
def test_ice_removed():
"""
Testing that all ice is removed
when gems are matched on top.
All gems should match and remove
the ice underneath.
Grid is 2 by 3.
:return:
"""
print("\nBoard 1:\n")
print(b)
# ice should be like this
ice_grid = [[-1] * columns0 for _ in range(rows0)]
expected_ice = ice_grid
swap_locations = [(0, 0), (0, 1)]
b.set_swap_locations(swap_locations)
actual_removals, actual_bonuses = b.find_matches()
b.match_list = actual_removals
b.bonus_list = actual_bonuses
b.remove_gems_add_bonuses()
actual_ice_grid = b.ice_grid.grid
assert expected_ice == actual_ice_grid
| 5,337,007
|
def tag_neutron_resources(resources):
"""Set tags to the provided resources.
:param resources: list of openstacksdk objects to tag.
"""
tags = CONF.neutron_defaults.resource_tags
if not tags:
return
os_net = clients.get_network_client()
for res in resources:
try:
os_net.set_tags(res, tags=tags)
except os_exc.SDKException:
LOG.warning("Failed to tag %s with %s. Ignoring, but this is "
"still unexpected.", res, tags, exc_info=True)
| 5,337,008
|
def test_rbfed_cpfp_transaction(revault_network, bitcoind):
"""We don't have explicit RBF logic, but since we signal for it we should
be able to replace a previous CPFP with a higher-fee, higher-feerate one.
NOTE: we unfortunately can't replace it with one adding new to_be_cpfped transactions
as it would add new unconfirmed inputs. Hopefully this is going to change soon.
"""
rn = revault_network
CSV = 6
rn.deploy(
2,
1,
csv=CSV,
with_watchtowers=False,
bitcoind_rpc_mocks={"estimatesmartfee": {"feerate": 1 * 1_000 / COIN}},
)
man = rn.mans()[0]
vaults = rn.fundmany(list(range(1, 4)))
rn.activate_fresh_vaults(vaults)
# Trigger CPFP for all the unvaults
unvaults = get_unvault_txids(man, vaults)
revault_network.broadcast_unvaults_anyhow(vaults, priority=True)
revault_network.bitcoind_proxy.mocks["estimatesmartfee"] = {
"feerate": 50 * 1_000 / COIN
}
bitcoind.generate_blocks_censor(1, unvaults)
man.wait_for_log(
f"CPFPed transactions with ids '{{.*{unvaults[0]}.*}}'",
)
wait_for(lambda: len(bitcoind.rpc.getrawmempool()) == len(unvaults) + 1)
cpfp_txid = next(
txid for txid in bitcoind.rpc.getrawmempool() if txid not in unvaults
)
cpfp_entry = bitcoind.rpc.getmempoolentry(cpfp_txid)
assert cpfp_entry["fees"]["ancestor"] * COIN / cpfp_entry["ancestorsize"] >= 50
assert len(cpfp_entry["depends"]) == len(unvaults)
for txid in unvaults:
assert txid in cpfp_entry["depends"]
# Now if the feerate spikes we should be able to replace the former CPFP tx
# with an updated one.
revault_network.bitcoind_proxy.mocks["estimatesmartfee"] = {
"feerate": 80 * 1_000 / COIN
}
bitcoind.generate_blocks_censor(1, unvaults)
wait_for(lambda: cpfp_txid not in bitcoind.rpc.getrawmempool())
wait_for(lambda: len(bitcoind.rpc.getrawmempool()) == len(unvaults) + 1)
new_cpfp_txid = next(
txid for txid in bitcoind.rpc.getrawmempool() if txid not in unvaults
)
assert new_cpfp_txid != cpfp_txid
cpfp_entry = bitcoind.rpc.getmempoolentry(new_cpfp_txid)
assert cpfp_entry["fees"]["ancestor"] * COIN / cpfp_entry["ancestorsize"] >= 80
assert len(cpfp_entry["depends"]) == len(unvaults)
for txid in unvaults:
assert txid in cpfp_entry["depends"]
# And if the first Unvault automagically gets confirmed alone and the feerate
# continues to spike, we can replace the CPFP with one paying a higher feerate
# (and fee) but spending only the two remaining ones.
revault_network.bitcoind_proxy.mocks["estimatesmartfee"] = {
"feerate": 110 * 1_000 / COIN
}
bitcoind.generate_blocks_censor(1, unvaults[1:])
man.wait_for_log(f"Unvault transaction at {unvaults[0]}.* is now confirmed")
bitcoind.generate_blocks_censor(1, unvaults[1:])
cpfp_txid = new_cpfp_txid
wait_for(lambda: cpfp_txid not in bitcoind.rpc.getrawmempool())
wait_for(lambda: len(bitcoind.rpc.getrawmempool()) == len(unvaults[1:]) + 1)
new_cpfp_txid = next(
txid for txid in bitcoind.rpc.getrawmempool() if txid not in unvaults[1:]
)
assert new_cpfp_txid != cpfp_txid
cpfp_entry = bitcoind.rpc.getmempoolentry(new_cpfp_txid)
assert len(cpfp_entry["depends"]) == len(unvaults[1:])
for txid in unvaults[1:]:
assert txid in cpfp_entry["depends"]
assert unvaults[0] not in cpfp_entry["depends"]
| 5,337,009
|
def write_batch_list_windows(output_folder, decomb=False):
"""Write batch file which will encode videos when runt
Find videos
Build output name
Rename file if output exists
Encode video
Copy Exif data from old video to new
Remove old video
"""
error = " || goto :error"
content = []
files = glob(os.path.join(output_folder, '*.*'))
with open(os.path.join(output_folder, 'batch-encode.bat'), 'w') as batch:
content.append("@echo off\n")
content.append("rem created by: %s\n\n" % photo_sort.version)
# content.append("rem files: %d\n\n" % len(files)) # Write number of video files
with exiftool.ExifTool() as et:
for input_file in files:
(base, extension) = os.path.splitext(input_file)
if extension in video_extensions:
output_file = base + '.mp4'
rotation = get_rotation(et, input_file)
input_file = os.path.basename(input_file)
if os.path.exists(output_file):
content.append("move %s %s_ %s\n" % (input_file, input_file, error))
input_file += '_'
output_file = os.path.basename(base) + '.mp4'
command = get_encode_command(input_file, output_file, rotation, decomb)
content.append(" ".join(command) + error + "\n")
command = get_exif_command(input_file, output_file)
content.append(" ".join(command) + error + "\n")
content.append("rm %s %s\n\n" % (input_file, error))
content.append(":error\n")
content.append("echo Failed with error #%errorlevel%.\n")
content.append("exit /b %errorlevel%\n")
batch.writelines(content)
| 5,337,010
|
def test_get_sig_diffusion():
"""
"""
fueltype_lookup = {
'solid_fuel': 0,
'gas': 1,
'electricity': 2,
'oil': 3,
'heat_sold': 4,
'biomass': 5,
'hydrogen': 6,
'heat': 7}
technologies = {
'boilerA': read_data.TechnologyData(
fueltype='gas',
eff_by=0.5,
eff_ey=0.5,
year_eff_ey=2015,
eff_achieved=1.0,
diff_method='linear',
market_entry=1990,
tech_max_share=1.0),
'boilerC': read_data.TechnologyData(
fueltype='gas',
eff_by=0.5,
eff_ey=0.5,
year_eff_ey=2015,
eff_achieved=1.0,
diff_method='linear',
market_entry=1990,
tech_max_share=0.999),
'boilerB': read_data.TechnologyData(
fueltype='electricity',
eff_by=0.5,
eff_ey=0.5,
year_eff_ey=2015,
eff_achieved=1.0,
diff_method='linear',
market_entry=1990,
tech_max_share=1.0)}
tech_increased_service = ['boilerA']
regions = ['regA']
sig_param = s_generate_sigmoid.get_l_values(
technologies,
tech_increased_service,
regions)
assert sig_param['regA']['boilerA'] == 1.0
# -----
tech_increased_service = ['boilerC']
sig_param = s_generate_sigmoid.get_l_values(
technologies,
tech_increased_service,
regions)
assert sig_param['regA']['boilerC'] == 0.999
# -----
tech_increased_service = ['boilerC']
sig_param = s_generate_sigmoid.get_l_values(
technologies,
tech_increased_service,
regions=regions)
assert sig_param['regA']['boilerC'] == 0.999
| 5,337,011
|
def generate_twitch_clip(user_id):
"""Generate a Twitch Clip from user's channel.
Returns the URL and new clip object on success."""
user = User.get_user_from_id(user_id)
twitch_id = str(user.twitch_id)
payload_clips = {"broadcaster_id": TEST_ID or twitch_id} # Edit this to test
r_clips = requests.post("https://api.twitch.tv/helix/clips",
data=payload_clips,
headers=create_header(user))
if r_clips.status_code == 202:
# Save the clip's slug; used as `id` in Twitch API
clip_slug = r_clips.json().get("data")[0].get("id")
# Send a request to Get Clips to confirm clip was created.
clip_info = get_clip_info(clip_slug, user)
if clip_info:
# Store the url
url = clip_info.get("url")
# Save clip to DB
new_clip = TwitchClip.save_twitch_clip(clip_slug, user_id)
return (new_clip, url)
# TODO: If this fails, return None.
# Add better error handling.
return None, None
| 5,337,012
|
def purge(dir, pattern):
"""
Delete files whose names match a specific pattern.
Example: purge("/usr/local/nagios/etc/cfgs/hosts", "^i-.*\.cfg$") is equivalent to rm -f i-*.cfg
:param dir: the directory under which to delete files
:param pattern: regex pattern
:return: None
"""
for f in os.listdir(dir):
if re.search(pattern, f):
os.remove(os.path.join(dir, f))
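
# Hedged usage sketch; the temporary directory and file names below are
# illustrative assumptions, not paths from the original source.
import os
import re
import tempfile

tmp = tempfile.mkdtemp()
for fname in ("i-001.cfg", "i-002.cfg", "keep.cfg"):
    open(os.path.join(tmp, fname), "w").close()
purge(tmp, r"^i-.*\.cfg$")               # removes the two i-*.cfg files
print(sorted(os.listdir(tmp)))           # ['keep.cfg']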
| 5,337,013
|
def get_disabled():
"""
Return a list of all disabled services
CLI Example:
.. code-block:: bash
salt '*' service.get_disabled
"""
return _get_svc_list(status="DISABLED")
| 5,337,014
|
def calling_method():
"""
Call the recursive method.
:return: list of all posts within a 2-day delta-time window
"""
list_posts = list()
return create_json_poyload(list_posts)
| 5,337,015
|
def check_notifier(notifiers):
"""Check if the configured notifier really exists."""
notifiers_available = {
"syslog": notify_syslog,
"pushover": notify_pushover,
"mail": notify_mail,
"twilio": notify_twilio
}
notifiers_valid = []
for notifier in notifiers:
try:
notifiers_valid.append(notifiers_available[notifier])
except KeyError:
syslog.syslog(syslog.LOG_ERR,
f"Unknown notifier {notifier} configured")
return notifiers_valid
| 5,337,016
|
def read_json(path):
"""
Read a BayesNet object from the json format. This
format has the ".bn" extension and is completely
unique to pyBN.
Arguments
---------
*path* : a string
The file path
Returns
-------
*bn* : a BayesNet object built from the file
Effects
-------
- Instantiates and sets a new BayesNet object
Notes
-----
This function reads in a libpgm-style format into a bn object
File Format:
{
"V": ["Letter", "Grade", "Intelligence", "SAT", "Difficulty"],
"E": [["Intelligence", "Grade"],
["Difficulty", "Grade"],
["Intelligence", "SAT"],
["Grade", "Letter"]],
"Vdata": {
"Letter": {
"ord": 4,
"numoutcomes": 2,
"vals": ["weak", "strong"],
"parents": ["Grade"],
"children": None,
"cprob": [[.1, .9],[.4, .6],[.99, .01]]
},
...
}
"""
def byteify(input):
if isinstance(input, dict):
return {byteify(key):byteify(value) for key,value in input.iteritems()}
elif isinstance(input, list):
return [byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
bn = BayesNet()
f = open(path,'r')
ftxt = f.read()
success=False
try:
data = byteify(json.loads(ftxt))
bn.V = data['V']
bn.E = data['E']
bn.F = data['F']
success = True
except ValueError:
print("Could not read file - check format")
bn.V = topsort(bn.E)
return bn
| 5,337,017
|
def solve_primal(run_id, problem, mip_solution, solver):
"""Solve primal by fixing integer variables and solving the NLP.
If the search fails and `mip_solution` has a solution pool, then also
try to find a feasible solution starting at the solution pool points.
Parameters
----------
run_id : str
the run_id used for logging
problem : Problem
the mixed integer, (possibly) non convex problem
mip_solution : MipSolution
the linear relaxation solution
solver : Solver
the NLP solver used to solve the problem
"""
starting_point = [v.value for v in mip_solution.variables]
solution = solve_primal_with_starting_point(
run_id, problem, starting_point, solver
)
if solution.status.is_success():
return solution
# Try solutions from mip solution pool, if available
if mip_solution.solution_pool is None:
return solution
for mip_solution_from_pool in mip_solution.solution_pool:
if seconds_left() <= 0:
return solution
starting_point = [
v.value
for v in mip_solution_from_pool.inner.variables
]
solution_from_pool = solve_primal_with_starting_point(
run_id, problem, starting_point, solver
)
if solution_from_pool.status.is_success():
return solution_from_pool
# No solution from pool was feasible, return original infeasible sol
return solution
| 5,337,018
|
def domain_tokenizer(iterator):
"""Tokenizer generator.
Args:
iterator: Input iterator with strings.
Yields:
array of tokens per each value in the input.
"""
for value in iterator:
yield DOMAIN_TOKENIZER_RE.findall(value)
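
# Hedged usage sketch; DOMAIN_TOKENIZER_RE is not shown above, so a plausible
# stand-in regex (split on '.' and '-') is assumed here for illustration only.
import re

DOMAIN_TOKENIZER_RE = re.compile(r"[^.\-]+")

for tokens in domain_tokenizer(["mail.example.com", "my-site.org"]):
    print(tokens)
# ['mail', 'example', 'com']
# ['my', 'site', 'org']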
| 5,337,019
|
def SpliceContinuations(tree):
"""Given a pytree, splice the continuation marker into nodes.
Arguments:
tree: (pytree.Node) The tree to work on. The tree is modified by this
function.
"""
def RecSplicer(node):
"""Inserts a continuation marker into the node."""
if isinstance(node, pytree.Leaf):
if node.prefix.lstrip().startswith('\\\n'):
new_lineno = node.lineno - node.prefix.count('\n')
return pytree.Leaf(
type=format_token.CONTINUATION,
value=node.prefix,
context=('', (new_lineno, 0)))
return None
num_inserted = 0
for index, child in enumerate(node.children[:]):
continuation_node = RecSplicer(child)
if continuation_node:
node.children.insert(index + num_inserted, continuation_node)
num_inserted += 1
RecSplicer(tree)
| 5,337,020
|
def rayleigh(flow_resis, air_dens, sound_spd,
poros, freq=np.arange(100, 10001, 1)):
"""
Returns, through the Rayleigh model, the Material Characteristic Impedance
and the Material Wave Number.
Parameters:
----------
flow_resis : int
Resistivity of the material
air_dens : int | float
The air density
sound_spd : int | float
The speed of the sound
poros : float
Porosity of the material
freq : ndarray
A range of frequencies
NOTE: default range goes from 100 [Hz] to 10 [kHz].
Returns:
-------
zc : int | float | complex
Material Characteristic Impedance
kc : int | float | complex
Material Wave Number
"""
omega = 2 * np.pi * freq
alpha = (1 - (1j * poros * flow_resis) / (air_dens * omega)) ** 0.5
# Material Characteristic Impedance (zc) and the Material Wave Number (kc)
kc = (omega/sound_spd) * alpha
zc = ((air_dens * sound_spd)/poros) * alpha
return zc, kc
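
# Hedged worked example; the material parameters below (flow resistivity
# 25 kPa*s/m2, air density 1.21 kg/m3, c = 343 m/s, porosity 0.95) are
# illustrative assumptions, not values from the original source.
import numpy as np

zc, kc = rayleigh(flow_resis=25000, air_dens=1.21, sound_spd=343,
                  poros=0.95, freq=np.array([500.0, 1000.0]))
print(zc)    # complex characteristic impedance, one value per frequency
print(kc)    # complex wave number, one value per frequency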
| 5,337,021
|
def test_regress_234():
"""Test task_exit checkpointing with fast tasks"""
run_race(0)
| 5,337,022
|
def set_iam_policy(project_id: str, policy: dict, token: str) -> dict:
"""Sets the Cloud IAM access control policy for a ServiceAccount.
Args:
project_id: GCP project ID.
policy: IAM policy.
token: Access token from the Google Authorization Server.
Returns:
A dict containing the response body.
"""
host = "https://cloudresourcemanager.googleapis.com"
url = f"{host}/v1/projects/{project_id}:setIamPolicy"
resp = requests.post(url, json={
"policy": policy,
}, headers={
"Authorization": f"Bearer {token}"
})
try:
resp.raise_for_status()
except requests.exceptions.HTTPError as err:
logging.error(err.response.text)
raise err
return resp.json()
| 5,337,023
|
def feature_set_is_deployed(db: Session, fset_id: int) -> bool:
"""
Returns whether this feature set is deployed or not
:param db: SqlAlchemy Session
:param fset_id: The Feature Set ID in question
:return: True if the feature set is deployed
"""
d = db.query(models.FeatureSetVersion). \
filter((models.FeatureSetVersion.feature_set_id == fset_id) &
(models.FeatureSetVersion.deployed == True)). \
count()
return bool(d)
| 5,337,024
|
def test_remove_iid():
"""Test if entry with iid is successfully removed."""
iid_manager, obj_a = get_iid_manager()
assert iid_manager.remove_iid(0) is None
assert iid_manager.remove_iid(1) == obj_a
| 5,337,025
|
def main(file_loc, cores=16) -> None:
"""main.
Args:
file_loc:
cores:
Returns:
None:
"""
print(file_loc)
files = os.listdir(file_loc)
if cores > 1:
with Pool(processes=cores) as pool:
jobs = [pool.apply_async(func=html_to_txt, args=(f, file_loc)) for f in files if Path(f).suffix == ".html"]
results = [job.get() for job in tqdm(jobs, desc="Translating .html to .txt")]
else:
results = [html_to_txt(f, file_loc) for f in tqdm(files, desc="Translating .html to .txt") if Path(f).suffix == ".html"]
| 5,337,026
|
def get_retry_request(
request: Request,
*,
spider: Spider,
#response: Response,
reason: Union[str, Exception] = 'unspecified',
max_retry_times: Optional[int] = None,
priority_adjust: Optional[int] = None,
logger: Logger = retry_logger,
stats_base_key: str = 'retry',
):
"""
Returns a new :class:`~scrapy.Request` object to retry the specified
request, or ``None`` if retries of the specified request have been
exhausted.
For example, in a :class:`~scrapy.Spider` callback, you could use it as
follows::
def parse(self, response):
if not response.text:
new_request_or_none = get_retry_request(
response.request,
spider=self,
reason='empty',
)
return new_request_or_none
*spider* is the :class:`~scrapy.Spider` instance which is asking for the
retry request. It is used to access the :ref:`settings <topics-settings>`
and :ref:`stats <topics-stats>`, and to provide extra logging context (see
:func:`logging.debug`).
*reason* is a string or an :class:`Exception` object that indicates the
reason why the request needs to be retried. It is used to name retry stats.
*max_retry_times* is a number that determines the maximum number of times
that *request* can be retried. If not specified or ``None``, the number is
read from the :reqmeta:`max_retry_times` meta key of the request. If the
:reqmeta:`max_retry_times` meta key is not defined or ``None``, the number
is read from the :setting:`RETRY_TIMES` setting.
*priority_adjust* is a number that determines how the priority of the new
request changes in relation to *request*. If not specified, the number is
read from the :setting:`RETRY_PRIORITY_ADJUST` setting.
*logger* is the logging.Logger object to be used when logging messages
*stats_base_key* is a string to be used as the base key for the
retry-related job stats
"""
settings = spider.crawler.settings
stats = spider.crawler.stats
retry_times = request.meta.get('retry_times', 0) + 1
if max_retry_times is None:
max_retry_times = request.meta.get('max_retry_times')
if max_retry_times is None:
max_retry_times = settings.getint('RETRY_TIMES')
if retry_times <= max_retry_times:
logger.debug(
"Retrying %(request)s (failed %(retry_times)d times): %(reason)s",
{'request': request, 'retry_times': retry_times, 'reason': reason},
extra={'spider': spider}
)
new_request = request.copy()
new_request.meta['retry_times'] = retry_times
new_request.dont_filter = True
if priority_adjust is None:
priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST')
new_request.priority = request.priority + priority_adjust
if callable(reason):
reason = reason()
if isinstance(reason, Exception):
reason = global_object_name(reason.__class__)
stats.inc_value(f'{stats_base_key}/count')
stats.inc_value(f'{stats_base_key}/reason_count/{reason}')
return new_request
else:
stats.inc_value(f'{stats_base_key}/max_reached')
logger.error(
"Gave up retrying %(request)s (failed %(retry_times)d times): "
"%(reason)s",
{'request': request, 'retry_times': retry_times, 'reason': reason},
extra={'spider': spider},
)
return None
| 5,337,027
|
async def graf(request: Request):
"""
Displays a graph of the measured characteristic.
"""
localtime = time.asctime(time.localtime(time.time()))
print("Graf; Čas:", localtime)
return templates.TemplateResponse("graf.html", {"request": request, "time": localtime})
| 5,337,028
|
def version():
"""Show the version of the software."""
from jsub import version as jsub_version
click.echo('JSUB version: %s' % jsub_version())
| 5,337,029
|
def mapRangeUnclamped(Value, InRangeA, InRangeB, OutRangeA, OutRangeB):
"""Returns Value mapped from one range into another where the Value is
clamped to the Input Range.
(e.g. 0.5 normalized from the range 0->1 to 0->50 would result in 25)"""
return lerp(OutRangeA, OutRangeB, GetRangePct(InRangeA, InRangeB, Value))
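
# Hedged sketch: lerp() and GetRangePct() are not shown above, so minimal
# stand-ins are assumed here just to make the docstring example runnable.
def GetRangePct(in_a, in_b, value):
    return (value - in_a) / (in_b - in_a)    # fraction of the input range

def lerp(out_a, out_b, pct):
    return out_a + (out_b - out_a) * pct     # linear interpolation

print(mapRangeUnclamped(0.5, 0.0, 1.0, 0.0, 50.0))   # 25.0, as in the docstring
print(mapRangeUnclamped(1.5, 0.0, 1.0, 0.0, 50.0))   # 75.0, no clamping applied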
| 5,337,030
|
def folders(request):
"""Handle creating, retrieving, updating, deleting of folders.
"""
if request.method == "GET":
q = bookshelf_models.Folder.objects.filter(owner=request.user)
data = [[e.guid, e.title] for e in q]
if request.method == "POST":
if "create" in request.POST:
newfolder = bookshelf_models.Folder(owner=request.user, title="New Folder")
newfolder.save()
data = [[newfolder.guid, "New Folder"]]
if "update" in request.POST:
guid = request.POST.get("id", "")
folder = bookshelf_models.Folder.objects.get(guid=guid)
folder.title = request.POST.get("newname", "")
folder.save()
data = [[folder.guid, folder.title]]
if "delete" in request.POST:
folderid = request.POST.get("folderid", "")
nbids = request.POST.getlist("nbids")
folder = bookshelf_models.Folder.objects.get(owner=request.user, guid=folderid)
folder.delete()
for nbid in nbids:
nb = notebook_models.Notebook.objects.get(owner=request.user, guid=nbid)
nb.delete()
data = {"response":"ok"}
jsobj = json.dumps(data)
return HttpResponse(jsobj, mimetype='application/json')
| 5,337,031
|
def reponame(url, name=None):
"""
Determine a repo's cloned name from its URL.
"""
if name is not None:
return name
name = os.path.basename(url)
if name.endswith('.git'):
name = name[:-4]
return name
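
# Hedged usage sketch with illustrative URLs (not from the original source):
import os

print(reponame("https://github.com/example/widgets.git"))      # widgets
print(reponame("git@host:tools/cli.git", name="custom-name"))  # custom-name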
| 5,337,032
|
def specified_options(opts: argparse.Namespace, exclude=None) -> Dict:
"""
Cast an argparse Namespace into a dictionary of options.
Remove all options that were not specified (equal to None).
Arguments:
opts: The namespace to cast.
exclude: Names of options to exclude from the result.
Returns:
A dictionary of specified-only options.
"""
exclude = exclude or set()
options = opts.__dict__.items() # noqa: WPS609
return {opt: value for opt, value in options if value is not None and opt not in exclude}
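
# Hedged usage sketch; the argument names below are illustrative assumptions.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--depth", type=int)
parser.add_argument("--output")
parser.add_argument("--verbose", action="store_true")
opts = parser.parse_args(["--depth", "3"])

print(specified_options(opts, exclude={"verbose"}))
# {'depth': 3}  -- 'output' is None and 'verbose' is excluded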
| 5,337,033
|
def test_reordering_sequential(sorted_entries_seq):
"""
Ensures the reordering logic works as expected. This test simply provides
sequential sort order values and try to reorder them.
"""
qs = SortedModel.objects
nodes = sorted_entries_seq
operations = {nodes[5].pk: -1, nodes[2].pk: +3}
expected = _sorted_by_order(
[
(nodes[0].pk, 0),
(nodes[1].pk, 1),
(nodes[2].pk, 2 + 3),
(nodes[3].pk, 3 - 1),
(nodes[4].pk, 4 + 1 - 1),
(nodes[5].pk, 5 - 1 - 1),
]
)
perform_reordering(qs, operations)
actual = _get_sorted_map()
assert actual == expected
| 5,337,034
|
def Sort_list_by_Prism_and_Date(lst):
"""
Argument:
- A list containing the prism name, position of recording, decimal year, position and meteo corrected position for each prism.
Return:
- A list containing lists of prisms sorted by name and date.
"""
#text must be a converted GKA file
outList = [] #[[Name,[Data]],[],[],...]
#Sort by prism name
for k in lst:
index = FindIndexByName(k[0],outList)
if index != None:
outList[index][1].append(k)
else:
outList.append([k[0],[k]])
#Sort by ascending date
for j in outList:
j[1] = SortCrescent(j[1],2)
return outList
| 5,337,035
|
def log(message: str) -> Generator:
"""指定したメッセージ付きで開始と終了のログを出力するコンテキストマネージャを返します。
>>> with log('hello'):
... print('world')
START: hello
world
END: hello
:type message: str
:param message: the message to include in the log output
:rtype: typing.Generator
:return: a context manager
"""
print(f'START: {message}')
try:
yield
finally:
print(f'END: {message}')
| 5,337,036
|
def MostrarRaices(raizBiseccion, raizNewton, raizNewtonModificado, raizSecante, tolerancia):
"""
Prints the roots obtained, formatted according to the two different tolerance values.
If no root was found during a method's search, a message indicating which method did not converge is printed.
"""
if tolerancia == 1e-5:
if raizBiseccion is not None:
print("* Raiz Biseccion = {0} +- {1}".format('{0:.5f}'.format(raizBiseccion), '{0:.5f}'.format(tolerancia)))
else:
print("El Metodo de Biseccion no converge")
if raizNewton is not None:
print("* Raiz Newton = {0} +- {1}".format('{0:.5f}'.format(raizNewton), '{0:.5f}'.format(tolerancia)))
else:
print("El Metodo de Newton Raphson no converge")
if raizNewtonModificado is not None:
print("* Raiz Newton modificado = {0} +- {1}".format('{0:.5f}'.format(raizNewtonModificado),
'{0:.5f}'.format(tolerancia)))
else:
print("El Metodo de Newton Raphson modificado no converge")
if raizSecante is not None:
print("* Raiz Secante = {0} +- {1}".format('{0:.5f}'.format(raizSecante), '{0:.5f}'.format(tolerancia)))
else:
print("El Metodo de la secante no converge")
if tolerancia == 1e-13:
if raizBiseccion is not None:
print(
"* Raiz Biseccion = {0} +- {1}".format('{0:.13f}'.format(raizBiseccion), '{0:.13f}'.format(tolerancia)))
else:
print("El Metodo de Biseccion no converge")
if raizNewton is not None:
print("* Raiz Newton = {0} +- {1}".format('{0:.13f}'.format(raizNewton), '{0:.13f}'.format(tolerancia)))
else:
print("El Metodo de Newton Raphson no converge")
if raizNewtonModificado is not None:
print("* Raiz Newton modificado = {0} +- {1}".format('{0:.13f}'.format(raizNewtonModificado),
'{0:.13f}'.format(tolerancia)))
else:
print("El Metodo de Newton Raphson modificado no converge")
if raizSecante is not None:
print("* Raiz Secante = {0} +- {1}".format('{0:.13f}'.format(raizSecante), '{0:.13f}'.format(tolerancia)))
else:
print("El Metodo de la secante no converge")
| 5,337,037
|
def _convert_to_RVector(value, force_Rvec=True):
"""
Convert a value or list into an R vector of the appropriate type.
Parameters
----------
value : numeric or str, or list of numeric or str
Value to be converted.
force_Rvec : bool, default True
If `value` is not a list, force conversion into an R vector?
False will return an int, float, or str if value is non-list.
True will always return an R vector.
Returns
-------
int, float, str, an rpy2 R vector
A value or R vector of an appropriate data type.
"""
if not isinstance(value, list) and not force_Rvec:
return value
elif not isinstance(value, list) and force_Rvec:
value = [value]
else:
pass
if all(isinstance(x, bool) for x in value):
return ro.BoolVector(value)
elif all(isinstance(x, (int, np.integer)) for x in value):
return ro.IntVector(value)
elif all(isinstance(x, (int, np.integer, float, np.floating)) for x in value):
return ro.FloatVector(value)
else:
return ro.StrVector(value)
| 5,337,038
|
def submit(expression):
"""
Update the plotted function to the new math *expression*.
*expression* is a string using "t" as its independent variable, e.g.
"t ** 3".
"""
ydata = eval(expression)
l.set_ydata(ydata)
ax.relim()
ax.autoscale_view()
plt.draw()
| 5,337,039
|
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
temp = e_x / e_x.sum(axis=0)  # only difference
if np.isnan(temp).any():
return [0.0,1.0,0.0]
else:
return temp
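
# Hedged usage sketch with illustrative scores (not from the original source):
import numpy as np

print(softmax(np.array([1.0, 2.0, 3.0])))
# approximately [0.09003057 0.24472847 0.66524096]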
| 5,337,040
|
def fetch_events_art_history(base_url='https://www.sas.upenn.edu'):
"""
Fetch events from Art History Department
"""
page = requests.get(urljoin(base_url, '/arthistory/events'))
page_soup = BeautifulSoup(page.content, 'html.parser')
range_pages = max([int(n_page.text) for n_page in page_soup.find('div',
attrs={'class': 'pagination pagination-centered'}).find_all('li') if n_page.text.isdigit()])
events = []
for n_page in range(1, range_pages):
page = requests.get(
(urljoin(base_url, '/arthistory/events?&page={}')).format(n_page))
page_soup = BeautifulSoup(page.content, 'html.parser')
all_events = page_soup.find(
'div', attrs={'class': 'item-list'}).find_all('li')
for event in all_events:
event_url = urljoin(base_url, event.find('a')['href'])
title = event.find('h3').text if event.find(
'h3') is not None else ''
# event_type = event.find('strong').text if event.find('strong') is not None else ''
date = event.find('span', attrs={'class': 'date-display-single'})
if date is not None:
date, event_time = date.attrs.get('content').split('T')
if '-' in event_time:
starttime, endtime = event_time.split('-')
try:
starttime, endtime = dateutil.parser.parse(starttime).strftime(
"%I:%M %p"), dateutil.parser.parse(endtime).strftime("%I:%M %p")
except:
pass
else:
starttime, endtime = event_time, ''
else:
date, starttime, endtime = '', '', ''
location = event.find('div', attrs={'class': 'location'})
location = location.text.strip() if location is not None else ''
event_soup = BeautifulSoup(requests.get(
event_url).content, 'html.parser')
description = event_soup.find('div', attrs={'class': 'field-body'})
description = description.text.strip() if description is not None else ''
events.append({
'title': title,
'speaker': '',
'date': date,
'location': location,
'description': description,
'starttime': starttime,
'endtime': endtime,
'url': event_url,
'owner': 'Art History'
})
return events
| 5,337,041
|
def calc_Q(nu=0.0,delta=0.0,lam=1.0,ret_k=False):
"""
Calculate psic Q in the cartesian lab frame.
nu and delta are in degrees, lam is in angstroms
if ret_k == True return tuple -> (Q,ki,kr)
"""
(ki,kr) = calc_kvecs(nu=nu,delta=delta,lam=lam)
Q = kr - ki
if ret_k == True:
return (Q,ki,kr)
else:
return Q
| 5,337,042
|
def build_data(args):
"""
build test data
"""
task_name = args.task_name.lower()
processor = reader.MatchProcessor(data_dir=args.data_dir,
task_name=task_name,
vocab_path=args.vocab_path,
max_seq_len=args.max_seq_len,
do_lower_case=args.do_lower_case)
test_data_generator = processor.data_generator(
batch_size=args.batch_size,
phase='test',
epoch=1,
shuffle=False,
device=args.gpu)
num_test_examples = processor.get_num_examples(phase='test')
test_data = [test_data_generator, num_test_examples]
return processor, test_data
| 5,337,043
|
def _generate_index():
"""Helper function to generate the index page from `main_readme.rst`"""
with codecs.open('main_readme.rst', 'r', encoding='utf-8', errors='ignore') as f:
main_readme = f.read()
index_rst = ''
index_rst += '.. Syncurity SDK documentation master file, created by\n'
index_rst += ' sphinx-quickstart on Fri Oct 27 14:18:07 2017.\n\n'
index_rst += 'Welcome to Syncurity SDK\'s documentation!\n'
index_rst += '=========================================\n\n'
index_rst += '.. toctree::\n'
index_rst += ' :maxdepth: 2\n'
index_rst += ' :caption: Contents:\n\n'
index_rst += main_readme
with open('index.rst', 'w') as f:
f.write(index_rst)
print('Generated documentation index')
| 5,337,044
|
def load(config_path = None, config_files = None,
state_path = "~/.tcf", ignore_ssl = True):
"""Load the TCF Library configuration
This is needed before you can access from your client program any
other module.
:param config_path: list of strings containing UNIX-style
paths (DIR:DIR) to look for config files (conf_*.py) that will
be loaded in alphabetical order. An empty path clears the
current list.
:param config_files: list of extra config files to load
:param str state_path: (optional) path where to store state
:param bool ignore_ssl: (optional) whether to ignore SSL
verification or not (useful for self-signed certs)
"""
if not config_path:
config_path = [ "/etc/tcf:~/.tcf:.tcf" ]
if not config_files:
config_files = []
if config_path != "":
global path
path = commonl.path_expand(config_path)
logger.info("configuration path %s", path)
commonl.config_import(path, re.compile("^conf[-_].*.py$"))
for config_file in config_files:
commonl.config_import_file(config_file, "__main__")
if urls == []:
logger.warning(
"No broker URLs available; please use --url or "
"add to a conf_*.py in any of %s with:\n"
"\n"
" tcfl.config.url_add('https://URL:PORT', ssl_ignore = True)\n"
"\n" % ":".join(config_path))
for _url in urls: # create target broker objects
url = _url[0]
ssl_ignore = ignore_ssl or _url[1]
if len(_url) > 2:
aka = _url[2]
else:
aka = None
ttb_client.rest_init(os.path.expanduser(state_path),
url, ssl_ignore, aka)
| 5,337,045
|
def main():
"""Main entry point."""
instance = generative_model.CreateInstanceFromFlags()
with tempfile.TemporaryDirectory(prefix="deeplearning_clgen_docker_") as d:
ExportInstance(instance, pathlib.Path(d), FLAGS.docker_base_image)
subprocess.check_call(["docker", "build", d])
| 5,337,046
|
def before_example(scenario, outline, steps):
"""Record the "before example" event."""
record_event('example', '{')
record_example_event('before', scenario, outline, steps)
record_event('types', 'before example')
| 5,337,047
|
def format_date(d):
"""Date format used in the report."""
if type(d) == str:
d = dateutil_parse(d)
return d.isoformat()
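
# Hedged usage sketch; assumes dateutil_parse is dateutil.parser.parse, as the
# snippet suggests. The dates below are illustrative.
import datetime
from dateutil.parser import parse as dateutil_parse  # assumed import

print(format_date("2021-03-04 15:30"))           # 2021-03-04T15:30:00
print(format_date(datetime.date(2021, 3, 4)))    # 2021-03-04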
| 5,337,048
|
def pytorch_argmax(op):
"""Implementation of argmax for pytorch."""
def _impl(x, dim):
dim = tuple(sorted(dim))
n = ()
for _s in range(len(x.shape)):
if _s not in dim:
n = n + (_s,)
n = n + dim
x = x.permute(n)
ns = x.shape[0 : -len(dim)] + (-1,)
r = torch.argmax(x.reshape(ns), -1, keepdim=False)
rl = list(r.shape)
for _sd in dim:
rl.insert(_sd, 1)
rf = tuple(rl)
return (torch.reshape(r, rf),)
return _impl, op.inputs[1:]
| 5,337,049
|
def apply_rows(applicators, rows):
"""
Yield rows after applying the applicator functions to them.
Applicators are functions taking (row, value) and returning a new value,
and that value is stored in the yielded row. E.g.
`row[col] = applicator(row, row[col])`. These are useful to, e.g., cast
strings to numeric datatypes, to convert formats stored in a cell,
extract features for machine learning, and so on.
Args:
applicators: a tuple of (cols, applicator) where the applicator
will be applied to each col in cols
rows: an iterable of rows for applicators to be called on
Yields:
Rows with specified column values replaced with the results of
the applicators
"""
for row in rows:
for (cols, function) in applicators:
for col in (cols or []):
value = row.get(col, '')
row[col] = function(row, value)
yield row
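
# Hedged usage sketch with illustrative rows; note that each applicator
# receives (row, value) and its return value is written back into the row.
rows = [{"qty": "3", "price": "2.50"}, {"qty": "5", "price": "1.20"}]
applicators = [
    (("qty",), lambda row, value: int(value)),      # cast quantity to int
    (("price",), lambda row, value: float(value)),  # cast price to float
]
for row in apply_rows(applicators, rows):
    print(row)
# {'qty': 3, 'price': 2.5}
# {'qty': 5, 'price': 1.2}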
| 5,337,050
|
def merge_databases(parent_db, other):
"""Merge ``other`` into ``parent_db``, including updating exchanges.
All databases must be SQLite databases.
``parent_db`` and ``other`` should be the names of databases.
Doesn't return anything."""
from . import databases
from .backends import (
ActivityDataset,
ExchangeDataset,
SQLiteBackend,
sqlite3_lci_db,
)
from .database import Database
assert parent_db in databases
assert other in databases
first = Database(parent_db)
second = Database(other)
if type(first) != SQLiteBackend or type(second) != SQLiteBackend:
raise ValidityError("Both databases must be `SQLiteBackend`")
first_codes = {
obj.code
for obj in ActivityDataset.select().where(ActivityDataset.database == parent_db)
}
second_codes = {
obj.code
for obj in ActivityDataset.select().where(ActivityDataset.database == other)
}
if first_codes.intersection(second_codes):
raise ValidityError("Duplicate codes - can't merge databases")
with sqlite3_lci_db.atomic():
ActivityDataset.update(database=parent_db).where(
ActivityDataset.database == other
).execute()
ExchangeDataset.update(input_database=parent_db).where(
ExchangeDataset.input_database == other
).execute()
ExchangeDataset.update(output_database=parent_db).where(
ExchangeDataset.output_database == other
).execute()
Database(parent_db).process()
del databases[other]
| 5,337,051
|
def get_wine_quality(num_rows=None):
"""
Wine Quality dataset from UCI repository (
https://archive.ics.uci.edu/ml/datasets/Wine+Quality
Using the white wine data set, not the red.
- Dimensions: 4898 rows, 12 columns.
- Task: Regression
:param num_rows:
:return: X,y
"""
filename = 'winequality-white.csv'
if not os.path.isfile(filename):
urlretrieve(get_wine_quality_url, filename)
wine = pd.read_csv(filename, header=0, nrows=num_rows, delimiter=";")
X = wine.iloc[:, :-1].values
y = wine.iloc[:, -1].values
return X, y
| 5,337,052
|
def _check_not_matrix(**kwargs):
"""
If any value in *kwargs* is a `np.matrix`, raise a TypeError with the key
name in its message.
"""
for k, v in kwargs.items():
if isinstance(v, np.matrix):
raise TypeError(f"Argument {k!r} cannot be a np.matrix")
| 5,337,053
|
def parse_eos(eos):
"""Function to interpret input as an EOS"""
if hasattr(eos, 'asq_of_rho_p'):
return eos # already is EOS class
if eos == 'H' or eos == 'h':
return SimpleHydrogen()
try:
return Ideal(float(eos)) # try parsing as a gamma value
except ValueError:
raise ValueError('Cannot parse EOS "{0:}".'.format(eos))
| 5,337,054
|
def get_2D_hse_kpoints(struct_for_path, ibzkpth):
"""
Args:
struct_for_path: Structure from which linemode k-points will
be generated.
ibzkpth: path to the IBZKPT file written by the prep step.
Returns:
the Kpoints file object in the form of a string
ready for execution by MPInterfaces
calibrate objects
"""
# Read IBZKPT from prep step
ibz_lines = open(ibzkpth).readlines()
n_ibz_kpts = int(ibz_lines[1].split()[0])
# Read linemode KPOINTs from the dict (makes sure it is Kpoints
# file with only 20 per atom for the optimized settings
# Kpoints.from_dict(kpoint_dict).write_file('linemode_KPOINTS')
kpath = HighSymmKpath(struct_for_path)
Kpoints.automatic_linemode(20, kpath).write_file('KPOINTS_linemode')
remove_z_kpoints_linemode()
linemode_lines = open('KPOINTS_linemode').readlines()
# put them together
abs_path = []
for i in range(4, len(linemode_lines), 3):
start_kpt = linemode_lines[i].split()
end_kpt = linemode_lines[i+1].split()
increments = [
(float(end_kpt[0]) - float(start_kpt[0])) / 20,
(float(end_kpt[1]) - float(start_kpt[1])) / 20,
(float(end_kpt[2]) - float(start_kpt[2])) / 20
]
abs_path.append(start_kpt[:3] + ['0', start_kpt[4]])
for n in range(1, 20):
abs_path.append(
[str(float(start_kpt[0]) + increments[0] * n),
str(float(start_kpt[1]) + increments[1] * n),
str(float(start_kpt[2]) + increments[2] * n), '0']
)
abs_path.append(end_kpt[:3] + ['0', end_kpt[4]])
n_linemode_kpts = len(abs_path)
# write out the kpoints file and return the object
Kpoints_hse_file = '\n'.join(
['Automatically generated mesh',
'{}'.format(n_ibz_kpts + n_linemode_kpts),
'Reciprocal Lattice',
'{}'.format(str(''.join([line for line in ibz_lines[3:]])))]) + \
'{}'.format(str('\n'.join(
[' '.join(point) for point in abs_path])))
## can be used for test print out
# with open('KPOINTS_HSE', 'w') as kpts:
# kpts.write('Automatically generated mesh\n')
# kpts.write('{}\n'.format(n_ibz_kpts + n_linemode_kpts))
# kpts.write('Reciprocal Lattice\n')
# for line in ibz_lines[3:]:
# kpts.write(line)
# for point in abs_path:
# kpts.write('{}\n'.format(' '.join(point)))
return Kpoints_hse_file
| 5,337,055
|
async def verify(context: Context) -> List[ImageSourceVerifyImageSignatures]:
"""Verifies an image(s)."""
results = []
ctx = get_context_object(context)
try:
results = await _verify(ctx)
except Exception as exception: # pylint: disable=broad-except
if ctx["verbosity"] > 0:
logging.fatal(exception)
if ctx["verbosity"] > LOGGING_DEFAULT:
exc_info = sys.exc_info()
print_exception(*exc_info)
sys.exit(1)
finally:
await ctx["imagesource"].close()
for result in results:
result.close()
| 5,337,056
|
def read_cache(logger: logging.Logger, cache_file: str) -> CachedData:
"""Read file with Py pickle in it."""
if not cache_file:
return CachedData()
if not os.path.exists(cache_file):
logger.warning("Cache file '%s' doesn't exist.", cache_file)
return CachedData()
with open(cache_file, 'rb') as fhandle:
try:
cache = pickle.load(fhandle)
except EOFError:
# Note: occurred with empty file.
cache = CachedData()
logger.debug(
'Cache file is probably empty: %s', traceback.format_exc()
)
logger.debug(cache)
return cache
| 5,337,057
|
def neutralize(word, g, word_to_vec_map):
"""
Removes the bias of "word" by projecting it on the space orthogonal to the bias axis.
This function ensures that gender neutral words are zero in the gender subspace.
Arguments:
word -- string indicating the word to debias
g -- numpy-array of shape (50,), corresponding to the bias axis (such as gender)
word_to_vec_map -- dictionary mapping words to their corresponding vectors.
Returns:
e_debiased -- neutralized word vector representation of the input "word"
"""
# Select word vector representation of "word". Use word_to_vec_map. (≈ 1 line)
e = word_to_vec_map[word]
# Compute e_biascomponent using the formula given above. (≈ 1 line)
e_biascomponent = (np.dot(e,g) / np.square(np.linalg.norm(g))) * g
# Neutralize e by subtracting e_biascomponent from it
# e_debiased should be equal to its orthogonal projection. (≈ 1 line)
e_debiased = e - e_biascomponent
return e_debiased
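
# Hedged worked example with tiny 2-D vectors; the real function expects
# 50-D GloVe embeddings, so these numbers are illustrative only.
import numpy as np

g = np.array([1.0, 0.0])                                  # pretend bias axis
word_to_vec_map = {"receptionist": np.array([0.4, 0.3])}
print(neutralize("receptionist", g, word_to_vec_map))
# [0.  0.3]  -- the component along g has been removed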
| 5,337,058
|
def loadRowCluster(ndPage,algo):
"""
load cluster algo = aglo
"""
xpCluster = f".//Cluster[@algo='{algo}']"
lClusters= ndPage.xpath(xpCluster)
return lClusters
| 5,337,059
|
def _elementwise(f):
""" Enables elementwise operations
The wrapper implements two different modes of argument evaluation
for given p_1,..., p_k that represent the predicted distributions
and and x_1,...,x_m that represent the values to evaluate them on.
"elementwise" (default): Repeat the sequence of p_i until there are m,
i.e., p_1,...,p_k,p_1,p_2,...,p_k,p_1,...,p_m'
where m' is the remainder of dividing m by k.
"batch": x_1, ..., x_m is evaluated on every distribution p_i
resulting in a matrix m columns and k rows.
Parameters
----------
f: The function to decorate
Returns
-------
Decorated function
"""
def wrapper(self, x, *args, **kwargs):
if len(np.array(x).shape) > 1:
x = x.flatten()
# cache index
index_ = self.index
self.index = slice(None)
# disable elementwise mode if x is scalar
elementwise = (self.mode == 'elementwise' and len(np.array(x).shape) != 0)
if elementwise:
evaluations = len(x)
else:
evaluations = len(self.X)
# compose result
result = []
number_of_points = len(self.X)
for index in range(evaluations):
# set evaluation index and point
if elementwise:
self.index = index % number_of_points
at = x[index]
else:
self.index = index
at = x
# evaluate the function at this point
result.append(f(self, at, *args, **kwargs))
# rollback index
self.index = index_
if len(result) > 1:
return np.array(result)
else:
return result[0]
return _forward_meta(wrapper, f)
| 5,337,060
|
def is_valid_task_id(task_id):
"""
Return False if task ID is not valid.
"""
parts = task_id.split('-')
if len(parts) == 5 and [len(i) for i in parts[1:]] == [8, 4, 4, 4]:
tp = RE_TASK_PREFIX.split(parts[0])
return (len(tp) == 5 and
all(i.isdigit() for i in tp[::2]) and
tp[1] in TT and
tp[3] in TG)
return False
| 5,337,061
|
def setlocale(newlocale):
"""Set the locale"""
try:
locale.setlocale(locale.LC_ALL, newlocale)
except AttributeError: # locale is None
pass
# it looks like some stuff isn't initialised yet when this is called the
# first time and its init gets the locale settings from the environment
os.environ["LC_ALL"] = newlocale
| 5,337,062
|
def figure_14_9():
"""Return the unweighted, undirected graph from Figure 14.9 of DSAP.
This is the same graph as in Figure 14.10.
"""
E = (
('A', 'B'), ('A', 'E'), ('A', 'F'), ('B', 'C'), ('B', 'F'),
('C', 'D'), ('C', 'G'), ('D', 'G'), ('D', 'H'), ('E', 'F'),
('E', 'I'), ('F', 'I'), ('G', 'J'), ('G', 'K'), ('G', 'L'),
('H', 'L'), ('I', 'J'), ('I', 'M'), ('I', 'N'), ('J', 'K'),
('K', 'N'), ('K', 'O'), ('L', 'P'), ('M', 'N'),
)
return graph_from_edgelist(E, False)
| 5,337,063
|
def audit_umbrelladns(networks_fwrules):
"""Accepts a list of firewall rules for a client
Checks for rules to allow DNS lookups to Umbrella and
deny all other DNS lookups.
Returns a list of clients and a boolean of whether Umbrella DNS
is configured properly"""
umbrelladns_audit = []
host1 = '208.67.222.222/32'
host2 = '208.67.220.220/32'
for customer in networks_fwrules:
customer_result = {
'organizationId': customer['organizationId'],
'organizationName': customer['organizationName']
}
for network in customer['networks']:
umbrella_allow, dns_deny = 'False', 'False'
if 'l3FirewallRules' in network:
for rule in network['l3FirewallRules']:
destcidr = rule['destCidr'].split(",")
if rule['policy'] == 'allow' \
and rule['protocol'] == 'tcp' \
and rule['destPort'] == '53' \
and (host1 in destcidr and host2 in destcidr):
umbrella_allow = 'True'
if rule['policy'] == 'allow' \
and rule['protocol'] == 'udp' \
and rule['destPort'] == '53' \
and (host1 in destcidr and host2 in destcidr):
umbrella_allow = 'True'
if rule['policy'] == 'deny' \
and rule['protocol'] == 'tcp' \
and rule['destPort'] == '53' \
and rule['destCidr'] == 'Any':
dns_deny = 'True'
if rule['policy'] == 'deny' \
and rule['protocol'] == 'udp' \
and rule['destPort'] == '53' \
and rule['destCidr'] == 'Any':
dns_deny = 'True'
if umbrella_allow == 'True' and dns_deny == 'True':
customer_result['umbrellaDns'] = 'True'
else:
customer_result['umbrellaDns'] = 'False'
umbrelladns_audit.append(customer_result)
return umbrelladns_audit
| 5,337,064
|
def _type_is_valid(policy_type_id):
"""
check that a type is valid
"""
if SDL.get(A1NS, _generate_type_key(policy_type_id)) is None:
raise PolicyTypeNotFound(policy_type_id)
| 5,337,065
|
def test_check_metadata_unique_full_name_values():
""" METADATA.pb: check if fonts field only has unique "full_name" values. """
check = CheckTester(googlefonts_profile,
"com.google.fonts/check/metadata/unique_full_name_values")
# Our reference FamilySans family is good:
font = TEST_FILE("familysans/FamilySans-Regular.ttf")
assert_PASS(check(font),
'with a good family...')
# then duplicate a full_name entry to make it FAIL:
md = check["family_metadata"]
md.fonts[0].full_name = md.fonts[1].full_name
assert_results_contain(check(font, {"family_metadata": md}),
FAIL, 'duplicated',
'with a duplicated full_name entry.')
| 5,337,066
|
def convert_samples_library(c, lib_ids):
"""
Change Library ID in samples_library_tags table
"""
c.execute('alter table samples_library rename to old_samples_library')
c.execute('''CREATE TABLE "samples_library" (
"id" varchar(10) NOT NULL PRIMARY KEY,
"library_name" varchar(100) NOT NULL UNIQUE,
"library_species_id" integer NOT NULL REFERENCES "samples_species" ("id"),
"hidden" bool NOT NULL,
"account_number" varchar(100),
"cell_line_id" integer REFERENCES "samples_cellline" ("id"),
"condition_id" integer REFERENCES "samples_condition" ("id"),
"antibody_id" integer REFERENCES "samples_antibody" ("id"),
"replicate" smallint unsigned NOT NULL,
"experiment_type_id" integer NOT NULL REFERENCES "samples_experimenttype" ("id"),
"library_type_id" integer REFERENCES "samples_librarytype" ("id"),
"creation_date" date,
"made_for" varchar(50) NOT NULL,
"made_by" varchar(50) NOT NULL,
"stopping_point" varchar(25) NOT NULL,
"amplified_from_sample_id" varchar(10),
"undiluted_concentration" decimal,
"successful_pM" decimal,
"ten_nM_dilution" bool NOT NULL,
"avg_lib_size" integer,
"notes" text NOT NULL
);''')
c.execute("""
select library_id, library_name, library_species_id, hidden, account_number, cell_line_id,
condition_id, antibody_id, replicate, experiment_type_id, library_type_id,
creation_date, made_for, made_by, stopping_point, amplified_from_sample_id,
undiluted_concentration, successful_pM, ten_nM_dilution, avg_lib_size, notes
from old_samples_library;""")
new_rows = []
for row in c:
new_rows.append({
'id': row[0],
'library_name': row[1],
'library_species_id': row[2],
'hidden': row[3],
'account_number': row[4],
'cell_line_id': row[5],
'condition_id': row[6],
'antibody_id': row[7],
'replicate': row[8],
'experiment_type_id': row[9],
'library_type_id': row[10],
'creation_date': row[11],
'made_for': row[12],
'made_by': row[13],
'stopping_point': row[14],
'amplified_from_sample_id': row[15],
'undiluted_concentration': row[16],
'successful_pM': row[17],
'ten_nM_dilution': row[18],
'avg_lib_size': row[19],
'notes': row[20],
})
sql = '''insert into samples_library
(id, library_name, library_species_id, hidden, account_number, cell_line_id,
condition_id, antibody_id, replicate, experiment_type_id, library_type_id,
creation_date, made_for, made_by, stopping_point, amplified_from_sample_id,
undiluted_concentration, successful_pM, ten_nM_dilution, avg_lib_size, notes)
values
(:id, :library_name, :library_species_id, :hidden, :account_number, :cell_line_id,
:condition_id, :antibody_id, :replicate, :experiment_type_id, :library_type_id,
:creation_date, :made_for, :made_by, :stopping_point, :amplified_from_sample_id,
:undiluted_concentration, :successful_pM, :ten_nM_dilution, :avg_lib_size, :notes);
'''
c.executemany(sql, new_rows)
c.execute('drop table old_samples_library;')
| 5,337,067
|
def parse_element_container(elem: ET.Element) -> Tuple[Types.FlexElement, ...]:
"""Parse XML element container into FlexElement subclass instances.
"""
tag = elem.tag
if tag == "FxPositions":
# <FxPositions> contains an <FxLots> wrapper per currency.
# Element structure here is:
# <FxPositions><FxLots><FxLot /></FxLots></FxPositions>
# Flatten the nesting to create FxPositions as a tuple of FxLots
fxlots = (parse_element_container(child) for child in elem)
return tuple(itertools.chain.from_iterable(fxlots))
instances = tuple(parse_data_element(child) for child in elem)
return instances
| 5,337,068
|
def _is_smooth_across_dateline(mid_lat, transform, rtransform, eps):
"""
test whether the CRS is smooth over the dateline
idea borrowed from IsAntimeridianProjToWGS84 with minor mods...
"""
left_of_dt_x, left_of_dt_y, _ = rtransform.TransformPoint(180-eps, mid_lat)
right_of_dt_x, right_of_dt_y, _ = rtransform.TransformPoint(-180+eps, mid_lat)
if _dist(right_of_dt_x-left_of_dt_x, right_of_dt_y-left_of_dt_y) > 1:
return False
left_of_dt_lon, left_of_dt_lat, _ = transform.TransformPoint(left_of_dt_x, left_of_dt_y)
right_of_dt_lon, right_of_dt_lat, _ = transform.TransformPoint(right_of_dt_x, right_of_dt_y)
if (_dist(left_of_dt_lon - 180 + eps, left_of_dt_lat - mid_lat) > 2 * eps or
_dist(right_of_dt_lon + 180 - eps, right_of_dt_lat - mid_lat) > 2 * eps):
return False
return True
| 5,337,069
|
def build_frame_csvs_airsim(loader, save_path):
"""
@params: [loader (obj)]
@returns: None
Takes data from loader and compiles it into csv frames for training
TODO: CURRENTLY DOES NOT WORK WITH CARLA
"""
fn_train = os.path.abspath('./datasets/{}'.format(save_path))
unique_poses = np.unique(loader['poses'], axis=0) # there should be one frame per unique pose
for timestep_pose in unique_poses:
t = int(timestep_pose[0])
x_pos = timestep_pose[1]
y_pos = timestep_pose[2]
z_pos = timestep_pose[3]
if len(timestep_pose) > 4:
theta_pos = timestep_pose[4]
frame_obs = []
for obsj in range(len(loader['x'])):
observation = loader['x'][obsj]
if observation[0] != t: continue # ensure timestep matches
object_presence = loader['y'][obsj]
observation = np.append(observation, object_presence)
frame_obs.append(observation)
if len(frame_obs) > 0:
comp_df = pd.DataFrame(frame_obs).iloc[:,1:]
comp_df.to_csv(fn_train+'{}.csv'.format(t), header=False, index=False)
| 5,337,070
|
def reverse_args(func: Func) -> fn:
"""
Creates a function that invokes func with the positional arguments order
reversed.
Examples:
>>> concat = sk.reverse_args(lambda x, y, z: x + y + z)
>>> concat("a", "b", "c")
'cba'
"""
func = to_callable(func)
return fn(lambda *args, **kwargs: func(*args[::-1], **kwargs))
| 5,337,071
|
def ar(x, y, z):
"""Offset arange by z/2."""
return z / 2 + np.arange(x, y, z, dtype='float')
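
# Hedged usage sketch: bin centres for [0, 1) with step 0.25.
import numpy as np

print(ar(0, 1, 0.25))
# [0.125 0.375 0.625 0.875]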
| 5,337,072
|
def cli(profile, region, table, properties, primary):
"""DynamoDB properties removal Tool."""
count = dynamodb.scan_table_and_remove(profile, region, table, properties, primary)
print("{} records updated".format(count))
| 5,337,073
|
def conv_node(command,comm):
"""
This function converts values from one unit to another unit. This function
requires the 'pint' module. For this function to work, the input should be of the from
'!conv(unit_1,unit_2)'.
"""
print "conv_node"
print command
#To get re.search to work, I'll have to clean up the command to just the stuff in the brackets. (This can be improved on)
#This removes the "!conv" part
m = re.search('!conv(.+)',command)
if m:
mod_comm = m.group(1)
#This cleans out the brackets
for x in mod_comm:
if(x == '(' or x ==')'):
mod_comm = mod_comm.replace(x,'')
print mod_comm
get_stuff = re.match(r"([0-9]+)([a-z]+)(,)([a-z]+)",mod_comm, re.I)
print get_stuff
#This part will do stuff using the info inside the brackets using the "pint" library
if get_stuff:
conv_lst = get_stuff.groups()
print conv_lst
ureg = UnitRegistry()
user_in = int(conv_lst[0])*ureg.parse_expression(conv_lst[1])
print user_in
user_out = user_in.to(ureg.parse_expression(conv_lst[3]))
print user_out
print "%s is %s" %(user_in,user_out)
comm.reply(str(user_in)+' is '+str(user_out))
else:
print "Wait, what?! What are you doing here? Dear heavens! Run, run while you can!"
| 5,337,074
|
def dot(inputs, axes, normalize=False, **kwargs):
"""Functional interface to the `Dot` layer.
Args:
inputs: A list of input tensors (at least 2).
axes: Integer or tuple of integers,
axis or axes along which to take the dot product.
normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to True, then the output of the dot product
is the cosine proximity between the two samples.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the dot product of the samples from the inputs.
"""
return Dot(axes=axes, normalize=normalize, **kwargs)(inputs)
| 5,337,075
|
def delete_queue(name, region, opts=None, user=None):
"""
Deletes a queue in the region.
name
Name of the SQS queue to deletes
region
Name of the region to delete the queue from
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt '*' aws_sqs.delete_queue <sqs queue> <region>
"""
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
log.debug("map %s", url_map)
if name in url_map:
delete = {"queue-url": url_map[name]}
rtn = _run_aws("delete-queue", region=region, opts=opts, user=user, **delete)
success = True
err = ""
out = "{} deleted".format(name)
else:
out = ""
err = "Delete failed"
success = False
ret = {
"retcode": 0 if success else 1,
"stdout": out,
"stderr": err,
}
return ret
| 5,337,076
|
def get_all_methods(klass):
"""Get all method members (regular, static, class method).
"""
if not inspect.isclass(klass):
raise ValueError
pairs = list()
for attr, value in inspect.getmembers(
klass, lambda x: inspect.isroutine(x)):
if not (attr.startswith("__") or attr.endswith("__")):
pairs.append((attr, value))
return pairs
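
# Hedged usage sketch with an illustrative class (not from the original source):
class Greeter:
    def hello(self):
        return "hello"
    @staticmethod
    def shout(text):
        return text.upper()
    @classmethod
    def make(cls):
        return cls()

print([name for name, _ in get_all_methods(Greeter)])
# ['hello', 'make', 'shout']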
| 5,337,077
|
def _suffix_directory(key: pathlib.Path):
"""Converts '/folder/.../folder/folder/folder' into 'folder/folder'"""
key = pathlib.Path(key)
shapenet_folder = key.parent.parent
key = key.relative_to(shapenet_folder)
return key
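
# Hedged usage sketch with an illustrative ShapeNet-style path (not from the
# original source):
import pathlib

print(_suffix_directory(pathlib.Path("/data/shapenet/02691156/model_0042")))
# 02691156/model_0042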
| 5,337,078
|
def setDesktop( studyID ):
"""This method sets and returns TRUST_PLOT2D desktop"""
global moduleDesktop, desktop
if studyID not in moduleDesktop:
moduleDesktop[studyID] = DynamicDesktop( sgPyQt )
moduleDesktop[studyID].initialize()
desktop = moduleDesktop[studyID]
return desktop
| 5,337,079
|
def load(file_path: str):
"""Used for loading dataset files that have been downloaded.
Args:
file_path: Path to file to be loaded.
Returns:
x: Data used to train models.
y: Dataset labels.
Example:
>>> data,labels = load("model/mnist.npz")
>>> # Print first dataset example and first label
>>> print(data[0])
>>> print(labels[0])
[0 200 ... 15 0]
5
"""
with np.load(file_path) as data:
return data['x'], \
data['y']
| 5,337,080
|
def xrefchar(*args):
"""
xrefchar(xrtype) -> char
Get character describing the xref type.
@param xrtype: combination of Cross-Reference type flags and a
cref_t of dref_t value (C++: char)
"""
return _ida_xref.xrefchar(*args)
| 5,337,081
|
def problem_5_14_8(scalars, vectors):
"""
>>> u = list2vec([1,1,0,0])
>>> v = list2vec([0,1,1,0])
>>> w = list2vec([0,0,1,1])
>>> x = list2vec([1,0,0,1])
>>> problem_5_14_8([1, -1, 1], [u, v, w]) == x
True
>>> problem_5_14_8([-1, 1, 1], [u, v, x]) == w
True
>>> problem_5_14_8([1, 1, -1], [u, w, x]) == v
True
>>> problem_5_14_8([1, -1, 1], [v, w, x]) == u
True
"""
return lin_comb_sum(scalars, vectors)
| 5,337,082
|
def atomic(fn, self, *args, **kwargs):
"""
Atomic method.
"""
return self._atom(fn, args, kwargs)
| 5,337,083
|
def cal_loss(images):
""" break it down into training steps.
Args:
images: input images.
"""
generated_images = generator(noise, training=False)
real_output = discriminator(images, training=False)
fake_output = discriminator(generated_images, training=False)
gen_loss = generator_loss(fake_output)
disc_loss = discriminator_loss(real_output, fake_output)
print(f'Loss G {gen_loss:.6f} Loss D {disc_loss:.6f}.')
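
# Sketch (added) of the generator_loss / discriminator_loss helpers that
# cal_loss relies on, following the standard DCGAN formulation; the actual
# definitions in this project may differ.
import tensorflow as tf

cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def generator_loss(fake_output):
    # The generator wants fakes to be classified as real (label 1).
    return cross_entropy(tf.ones_like(fake_output), fake_output)

def discriminator_loss(real_output, fake_output):
    # The discriminator wants reals labelled 1 and fakes labelled 0.
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    return real_loss + fake_loss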
| 5,337,084
|
def test_run_helloworld_sync_error(sync_service):
"""Execute the helloworld example with erroneous command."""
# -- Setup ----------------------------------------------------------------
#
# Start a new run for the workflow template.
with sync_service() as api:
workflow_id = create_workflow(
api,
source=BENCHMARK_DIR,
specfile=TEMPLATE_WITH_INVALID_CMD
)
user_id = create_user(api)
with sync_service(user_id=user_id) as api:
group_id = create_group(api, workflow_id)
names = io_file(data=['Alice', 'Bob'], format='plain/text')
file_id = upload_file(api, group_id, names)
args = [
serialize_arg('names', serialize_fh(file_id, 'data/names.txt')),
serialize_arg('sleeptime', 3)
]
run_id = start_run(api, group_id, arguments=args)
# -- Validate the run handle against the expected state -------------------
with sync_service(user_id=user_id) as api:
r = api.runs().get_run(run_id)
serialize.validate_run_handle(r, state=st.STATE_ERROR)
assert len(r['messages']) > 0
| 5,337,085
|
def log_param(key, value):
"""
Log a parameter under the current run, creating a run if necessary.
:param key: Parameter name (string)
:param value: Parameter value (string, but will be string-ified if not)
"""
run_id = _get_or_start_run().info.run_uuid
MlflowClient().log_param(run_id, key, value)
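
# Usage sketch (added): because log_param starts a run when none is active,
# a bare call is enough; the parameter names are placeholders.
log_param("learning_rate", 0.01)
log_param("batch_size", 32)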
| 5,337,086
|
def check_dynamic_fields(conn, concurrently, dataset_filter, excluded_field_names, fields, name,
rebuild_indexes=False, rebuild_view=False):
"""
Check that we have expected indexes and views for the given fields
"""
# If this type has time/space fields, create composite indexes (as they are often searched together)
# We will probably move these into product configuration in the future.
composite_indexes = (
('lat', 'lon', 'time'),
('time', 'lat', 'lon'),
('sat_path', 'sat_row', 'time')
)
all_exclusions = tuple(excluded_field_names)
for composite_names in composite_indexes:
# If all of the fields are available in this product, we'll create a composite index
# for them instead of individual indexes.
if contains_all(fields, *composite_names):
all_are_excluded = set(excluded_field_names) >= set(composite_names)
_check_field_index(
conn,
[fields.get(f) for f in composite_names],
name, dataset_filter,
concurrently=concurrently,
replace_existing=rebuild_indexes,
# If all fields were excluded individually it should be removed.
should_exist=not all_are_excluded,
index_type='gist'
)
all_exclusions += composite_names
# Create indexes for the individual fields.
for field in fields.values():
if not field.postgres_index_type:
continue
_check_field_index(
conn, [field],
name, dataset_filter,
should_exist=field.indexed and (field.name not in all_exclusions),
concurrently=concurrently,
replace_existing=rebuild_indexes,
)
# A view of all fields
_ensure_view(conn, fields, name, rebuild_view, dataset_filter)
| 5,337,087
|
def set_dict_if_set(_dest, _attribute, _value):
"""Set a dict attribute if value is set"""
if _value is not None:
_dest[_attribute] = _value
| 5,337,088
|
def main(mode="output", device_id=None):
"""Run a Midi example
Arguments:
mode - if 'output' run a midi keyboard output example
'input' run a midi event logger input example
'list' list available midi devices
(default 'output')
device_id - midi device number; if None then use the default midi input or
output device for the system
"""
if mode == "input":
input_main(device_id)
elif mode == "output":
output_main(device_id)
elif mode == "list":
print_device_info()
else:
raise ValueError("Unknown mode option '%s'" % mode)
| 5,337,089
|
def default_configuration(log_file=None):
"""Basic all-encompassing configuration used in tests and handlers.
Raises:
        PermissionError (propagated from the `open` call) if the log file is not writable
"""
global _new_handler
reset_configuration()
# We must have a log file available.
if not log_file and not _root_logger.hasHandlers():
_root_logger.addHandler(_default_handler)
return
elif _root_logger.hasHandlers():
return
# Let exception propagate.
with open(log_file, "a"):
pass
formatter = DefaultSingleLineLogFormatter(["process"])
_new_handler = logging.FileHandler(log_file, encoding="utf-8")
_new_handler.setFormatter(formatter)
# Setting an error handler is nice because our daemon doesn't have a stderr
# to trace such things to.
def handle_error(record):
logger.exception("Logging exception handling %r", record)
_new_handler.handleError = handle_error
_root_logger.addHandler(_new_handler)
_root_logger.setLevel(logging.DEBUG)
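
# Usage sketch (added) with a placeholder log path; a PermissionError from
# the open() call propagates if the file is not writable.
import logging

default_configuration("/tmp/service.log")
logging.getLogger(__name__).debug("logging configured")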
| 5,337,090
|
def text_to_string(filename, useEncoding):
"""Read a text file and return a string."""
with open(filename, encoding=useEncoding, errors='ignore') as infile:
return infile.read()
| 5,337,091
|
def forward_propagation(x, paras, bn_paras, decay=0.9):
""" forward propagation function
Paras
------------------------------------
x: input dataset, of shape (input size, number of examples)
W: weight matrix of shape (size of current layer, size of previous layer)
b: bias vector of shape (size of current layer,1)
gamma: scale vector of shape (size of current layer ,1)
beta: offset vector of shape (size of current layer ,1)
decay: the parameter of exponential weight average
moving_mean: decay * moving_mean + (1 - decay) * current_mean
        moving_var: decay * moving_var + (1 - decay) * current_var
Returns
------------------------------------
y: the output of the last Layer(y_predict)
caches: list, every element is a tuple:(W,b,z,A_pre)
"""
    L = len(paras) // 4  # number of layers
caches = []
# calculate from 1 to L-1 layer
for l in range(1, L):
W = paras["W" + str(l)]
b = paras["b" + str(l)]
gamma = paras["gamma" + str(l)]
beta = paras["beta" + str(l)]
# linear forward -> relu forward ->linear forward....
z = linear(x, W, b)
mean, var, sqrt_var, normalized, out = batch_norm(z, gamma, beta)
caches.append((x, W, b, gamma, sqrt_var, normalized, out))
x = relu(out)
bn_paras["moving_mean" + str(l)] = decay * bn_paras["moving_mean" + str(l)] + (1 - decay) * mean
bn_paras["moving_var" + str(l)] = decay * bn_paras["moving_var" + str(l)] + (1 - decay) * var
# calculate Lth layer
W = paras["W" + str(L)]
b = paras["b" + str(L)]
z = linear(x, W, b)
caches.append((x, W, b, None, None, None, None))
y = sigmoid(z)
return y, caches, bn_paras
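
# Sketch (added) of the batch_norm helper used above; the return signature
# (mean, var, sqrt_var, normalized, out) mirrors how forward_propagation
# unpacks it, and epsilon is an assumed small constant.
import numpy as np

def batch_norm(z, gamma, beta, epsilon=1e-8):
    # z has shape (layer size, number of examples); normalize over the batch.
    mean = np.mean(z, axis=1, keepdims=True)
    var = np.var(z, axis=1, keepdims=True)
    sqrt_var = np.sqrt(var + epsilon)
    normalized = (z - mean) / sqrt_var
    out = gamma * normalized + beta
    return mean, var, sqrt_var, normalized, out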
| 5,337,092
|
def sentence_indexes_for_fragment(fragment: Fragment, sentences: list) -> list:
"""Get the start and end indexes in the whole article for the sentences encompassing a fragment."""
start_sentence_index = sentence_index_for_fragment_index(fragment.start, sentences)
end_sentence_index = sentence_index_for_fragment_index(fragment.end, sentences)
    return list(range(start_sentence_index, end_sentence_index + 1))
| 5,337,093
|
def _watchos_extension_impl(ctx):
"""Implementation of watchos_extension."""
top_level_attrs = [
"app_icons",
"strings",
"resources",
]
# Xcode 11 requires this flag to be passed to the linker, but it is not accepted by earlier
# versions.
# TODO(min(Xcode) >= 11): Make this unconditional when the minimum supported Xcode is Xcode 11.
xcode_config = ctx.attr._xcode_config[apple_common.XcodeVersionConfig]
if xcode_support.is_xcode_at_least_version(xcode_config, "11"):
extra_linkopts = ["-e", "_WKExtensionMain"]
# This is required when building with watchOS SDK 6.0 or higher but with a minimum
# deployment version lower than 6.0. See
# https://developer.apple.com/documentation/xcode_release_notes/xcode_11_release_notes.
minimum_os = apple_common.dotted_version(ctx.attr.minimum_os_version)
if minimum_os < apple_common.dotted_version("6.0"):
extra_linkopts.append(
# The linker will search for this library relative to sysroot, which will already
# be the watchOS SDK directory.
#
# This is a force-load (unlike Xcode, which uses a standard `-l`) because we can't
# easily control where it appears in the link order relative to WatchKit.framework
# (where this symbol also lives, in watchOS 6+), so we need to guarantee that the
# linker doesn't skip the static library's implementation of `WKExtensionMain` if
# it already resolved the symbol from the framework.
"-Wl,-force_load,/usr/lib/libWKExtensionMainLegacy.a",
)
else:
extra_linkopts = []
link_result = linking_support.register_linking_action(
ctx,
extra_linkopts = extra_linkopts,
stamp = ctx.attr.stamp,
)
binary_artifact = link_result.binary_provider.binary
debug_outputs_provider = link_result.debug_outputs_provider
actions = ctx.actions
apple_toolchain_info = ctx.attr._toolchain[AppleSupportToolchainInfo]
bin_root_path = ctx.bin_dir.path
bundle_id = ctx.attr.bundle_id
bundle_name, bundle_extension = bundling_support.bundle_full_name_from_rule_ctx(ctx)
executable_name = bundling_support.executable_name(ctx)
entitlements = entitlements_support.entitlements(
entitlements_attr = getattr(ctx.attr, "entitlements", None),
entitlements_file = getattr(ctx.file, "entitlements", None),
)
features = features_support.compute_enabled_features(
requested_features = ctx.features,
unsupported_features = ctx.disabled_features,
)
label = ctx.label
platform_prerequisites = platform_support.platform_prerequisites_from_rule_ctx(ctx)
predeclared_outputs = ctx.outputs
rule_descriptor = rule_support.rule_descriptor(ctx)
archive = outputs.archive(
actions = actions,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
platform_prerequisites = platform_prerequisites,
predeclared_outputs = predeclared_outputs,
)
bundle_verification_targets = [struct(target = ext) for ext in ctx.attr.extensions]
processor_partials = [
partials.apple_bundle_info_partial(
actions = actions,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
executable_name = executable_name,
bundle_id = bundle_id,
entitlements = entitlements,
label_name = label.name,
platform_prerequisites = platform_prerequisites,
predeclared_outputs = predeclared_outputs,
product_type = rule_descriptor.product_type,
),
partials.binary_partial(
actions = actions,
binary_artifact = binary_artifact,
executable_name = executable_name,
label_name = ctx.label.name,
),
partials.bitcode_symbols_partial(
actions = actions,
binary_artifact = binary_artifact,
debug_outputs_provider = debug_outputs_provider,
dependency_targets = ctx.attr.extensions,
label_name = label.name,
platform_prerequisites = platform_prerequisites,
),
partials.clang_rt_dylibs_partial(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
binary_artifact = binary_artifact,
features = features,
label_name = label.name,
platform_prerequisites = platform_prerequisites,
),
partials.debug_symbols_partial(
actions = actions,
bin_root_path = bin_root_path,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
debug_dependencies = ctx.attr.extensions,
debug_outputs_provider = debug_outputs_provider,
dsym_info_plist_template = apple_toolchain_info.dsym_info_plist_template,
executable_name = executable_name,
platform_prerequisites = platform_prerequisites,
rule_label = label,
),
partials.embedded_bundles_partial(
bundle_embedded_bundles = True,
platform_prerequisites = platform_prerequisites,
embeddable_targets = ctx.attr.extensions,
plugins = [archive],
),
# Following guidance of the watchOS 2 migration guide's recommendations for placement of a
# framework, scoping dynamic frameworks only to the watch extension bundles:
# https://developer.apple.com/library/archive/documentation/General/Conceptual/AppleWatch2TransitionGuide/ConfiguretheXcodeProject.html
partials.framework_import_partial(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
label_name = label.name,
platform_prerequisites = platform_prerequisites,
provisioning_profile = getattr(ctx.file, "provisioning_profile", None),
rule_descriptor = rule_descriptor,
targets = ctx.attr.deps,
),
partials.resources_partial(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
bundle_extension = bundle_extension,
bundle_verification_targets = bundle_verification_targets,
bundle_id = bundle_id,
bundle_name = bundle_name,
environment_plist = ctx.file._environment_plist,
executable_name = executable_name,
launch_storyboard = None,
platform_prerequisites = platform_prerequisites,
plist_attrs = ["infoplists"],
rule_attrs = ctx.attr,
rule_descriptor = rule_descriptor,
rule_label = label,
top_level_attrs = top_level_attrs,
),
partials.swift_dylibs_partial(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
binary_artifact = binary_artifact,
label_name = label.name,
dependency_targets = ctx.attr.extensions,
platform_prerequisites = platform_prerequisites,
),
]
if platform_prerequisites.platform.is_device:
processor_partials.append(
partials.provisioning_profile_partial(
actions = actions,
profile_artifact = ctx.file.provisioning_profile,
rule_label = label,
),
)
processor_result = processor.process(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
codesignopts = codesigning_support.codesignopts_from_rule_ctx(ctx),
entitlements = entitlements,
executable_name = executable_name,
ipa_post_processor = ctx.executable.ipa_post_processor,
partials = processor_partials,
platform_prerequisites = platform_prerequisites,
predeclared_outputs = predeclared_outputs,
process_and_sign_template = apple_toolchain_info.process_and_sign_template,
provisioning_profile = getattr(ctx.file, "provisioning_profile", None),
rule_descriptor = rule_descriptor,
rule_label = label,
)
return [
DefaultInfo(
files = processor_result.output_files,
),
OutputGroupInfo(
**outputs.merge_output_groups(
link_result.output_groups,
processor_result.output_groups,
)
),
WatchosExtensionBundleInfo(),
] + processor_result.providers
| 5,337,094
|
def train_and_eval(trial: optuna.Trial, ex_dir: str, seed: [int, None]):
"""
Objective function for the Optuna `Study` to maximize.
.. note::
Optuna expects only the `trial` argument, thus we use `functools.partial` to sneak in custom arguments.
:param trial: Optuna Trial object for hyper-parameter optimization
:param ex_dir: experiment's directory, i.e. the parent directory for all trials in this study
:param seed: seed value for the random number generators, pass `None` for no seeding
:return: objective function value
"""
# Synchronize seeds between Optuna trials
pyrado.set_seed(seed)
# Environment
env_hparams = dict(dt=1/250., max_steps=1500)
env = QQubeSim(**env_hparams)
env = ActNormWrapper(env)
# Policy
policy_hparam = dict(
feats=FeatureStack([identity_feat, sign_feat, abs_feat, squared_feat, bell_feat, MultFeat([4, 5])])
)
policy = LinearPolicy(spec=env.spec, **policy_hparam)
# Algorithm
algo_hparam = dict(
num_sampler_envs=1, # parallelize via optuna n_jobs
max_iter=150,
pop_size=trial.suggest_categorical('pop_size', [100, 150, 200, 250]),
num_rollouts=trial.suggest_categorical('num_rollouts', [4, 6, 8, 10, 12]),
num_is_samples=trial.suggest_categorical('num_is_samples', [50, 100, 150, 200]),
expl_std_init=trial.suggest_uniform('expl_std_init', 0.2, 1.5),
expl_std_min=0.02,
symm_sampling=trial.suggest_categorical('symm_sampling', [True, False]),
)
csv_logger = create_csv_step_logger(osp.join(ex_dir, f'trial_{trial.number}'))
algo = PoWER(osp.join(ex_dir, f'trial_{trial.number}'), env, policy, **algo_hparam, logger=csv_logger)
# Train without saving the results
algo.train(snapshot_mode='latest', seed=seed)
# Evaluate
min_rollouts = 1000
sampler = ParallelSampler(env, policy, num_envs=1, min_rollouts=min_rollouts) # parallelize via optuna n_jobs
ros = sampler.sample()
mean_ret = sum([r.undiscounted_return() for r in ros])/min_rollouts
return mean_ret
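
# Sketch (added) of wiring this objective into an Optuna study through
# functools.partial, as the docstring notes; the directory and trial counts
# are placeholders.
import functools
import optuna

study = optuna.create_study(direction='maximize')
study.optimize(functools.partial(train_and_eval, ex_dir='experiments/power_study', seed=1001),
               n_trials=100, n_jobs=4)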
| 5,337,095
|
def print_good(string, **kwargs):
"""print string in green
"""
okgreen = '\033[92m'
reset = '\033[39m'
print(okgreen + string + reset, **kwargs)
| 5,337,096
|
def alert_query(alert, authz):
"""Construct a search query to find new matching entities and documents
for a particular alert. Update handling is done via a timestamp of the
latest known result."""
# Many users have bookmarked complex queries, otherwise we'd use a
# precise match query.
query = {
'simple_query_string': {
'query': alert.query,
'fields': ['text'],
'default_operator': 'AND',
'minimum_should_match': '90%'
}
}
filter_since = {
'range': {
'created_at': {'gt': alert.notified_at}
}
}
return {
'size': MAX_PAGE,
'query': {
'bool': {
'should': [query],
'filter': [filter_since, authz_query(authz)],
'minimum_should_match': 1
}
}
}
| 5,337,097
|
def iscomment(s):
"""
Define what we call a comment in MontePython chain files
"""
return s.startswith('#')
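
# Example (added): header lines in a MontePython chain file start with '#'.
print(iscomment('# param names'))    # True
print(iscomment('0.1234  0.5678'))   # False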
| 5,337,098
|
def convert_embeddings(srcPath, dstPath):
"""
:param srcPath: path of source embeddings
:param dstPath: path of output
"""
vocab = {}
id = 0
wrongCnt = 0
with open(srcPath, 'r', encoding='utf-8') as fin:
lines = fin.readlines()
wordNums = len(lines)
line = lines[0].strip().split()
vectorDims = len(line) - 1
embeddings = np.zeros((wordNums, vectorDims), dtype=np.float32)
for line in lines:
items = line.strip().split()
if len(items) != vectorDims + 1:
wrongCnt += 1
print(line)
continue
if items[0] in vocab:
wrongCnt += 1
print(line)
continue
vocab[items[0]] = id
embeddings[id] = [float(v) for v in items[1:]]
id += 1
embeddings = embeddings[0: id, ]
with open(dstPath, 'wb') as fout:
pickle.dump([embeddings, vocab], fout)
    print('valid embedding nums : {0}, embeddings shape : {1}, '
          'wrong format embedding nums : {2}, total embedding nums : {3}'.format(
              len(vocab), embeddings.shape, wrongCnt, wordNums))
print('Original embeddings has been converted from {0} to {1}'.format(srcPath, dstPath))
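
# Reading the converted file back (added; the path is a placeholder): the
# pickle holds [embeddings, vocab] in that order.
import pickle

with open('converted_embeddings.pkl', 'rb') as fin:
    embeddings, vocab = pickle.load(fin)
print(embeddings.shape, len(vocab))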
| 5,337,099
|