| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def judge_1d100_with_6_ver(target: int, dice: int):
"""
Judge 1d100 dice result, and return text and color for message.
Result is critical, success, failure or fumble.
Arguments:
target {int} -- target value (ex. skill value)
dice {int} -- dice value
Returns:
message {string}
rgb_color {string}
"""
if dice <= target:
if dice <= 5:
return "クリティカル", yig.config.COLOR_CRITICAL
return "成功", yig.config.COLOR_SUCCESS
if dice >= 96:
return "ファンブル", yig.config.COLOR_FUMBLE
return "失敗", yig.config.COLOR_FAILURE
| 14,000
|
def get_normals(self, indices=None, loc="center"):
"""Return the array of the normals coordinates.
Parameters
----------
self : MeshVTK
a MeshVTK object
indices : list
list of the points to extract (optional)
loc : str
localization of the normals ("center" or "point")
Returns
-------
normals: ndarray
Normals coordinates
"""
# Get surfaces
surf = self.get_surf()
if loc == "center":
normals = surf.cell_normals
elif loc == "point":
if self.node_normals is None:
self.surf.compute_normals(
cell_normals=False, point_normals=True, inplace=True
)
self.node_normals = self.surf["Normals"]
        normals = self.node_normals
    else:
        raise ValueError("loc must be either 'center' or 'point'")
if indices is None:
return normals
else:
return normals[indices, :]
| 14,001
|
def string_to_weld_literal(s):
"""
Converts a string to a UTF-8 encoded Weld literal byte-vector.
Examples
--------
>>> string_to_weld_literal('hello')
'[104c,101c,108c,108c,111c]'
"""
return "[" + ",".join([str(b) + 'c' for b in list(s.encode('utf-8'))]) + "]"
| 14,002
|
def verify_model_licensed(class_name: str, model_path: str):
    """
    Load a licensed model from HDD
    """
    try:
        m = eval(class_name).load(model_path)
        return m
    except Exception:
        print(f"Could not load Annotator class={class_name} located in {model_path}. Try updating spark-nlp-jsl")
| 14,003
|
def examine_api(api):
"""Find all style issues in the given parsed API."""
global failures
failures = {}
for key in sorted(api.keys()):
examine_clazz(api[key])
return failures
| 14,004
|
def enable_logging_app_factory(log_file: Path, level) -> logging.Logger:
"""
Enable logging for the system.
:param level: Logging Level
:param log_file: Log File path
:return:
"""
from logging.handlers import RotatingFileHandler
import sys
logger = logging.getLogger(LOGGER)
formatter = logging.Formatter(LOGGER + ': %(asctime)s %(levelname)7s: %(message)s')
fileHandler = RotatingFileHandler(log_file, mode="a+", maxBytes=5000000, backupCount=5)
fileHandler.setFormatter(formatter)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(formatter)
consoleHandler.setLevel(logging.INFO)
logger.setLevel(level)
logger.addHandler(fileHandler)
logger.addHandler(consoleHandler)
return logger
| 14,005
|
def launch():
""" Initialize the module. """
return BinCounterWorker(BinCounter, PT_STATS_RESPONSE, STATS_RESPONSE)
| 14,006
|
def make_map(source):
"""Creates a Bokeh figure displaying the source data on a map
Args:
source: A GeoJSONDataSource object containing bike data
Returns: A Bokeh figure with a map displaying the data
"""
tile_provider = get_provider(Vendors.STAMEN_TERRAIN_RETINA)
TOOLTIPS = [
('bikes available', '@bikes'),
]
p = figure(x_range=(-8596413.91, -8558195.48), y_range=(4724114.13, 4696902.60),
x_axis_type="mercator", y_axis_type="mercator", width=1200, height=700, tooltips=TOOLTIPS)
p.add_tile(tile_provider)
p.xaxis.visible = False
p.yaxis.visible = False
p.circle(x='x', y='y', size='size', color='color', alpha=0.7, source=source)
color_bar_palette = viridis(256)
color_mapper = LinearColorMapper(palette=color_bar_palette, low=0, high=100)
color_bar = ColorBar(color_mapper=color_mapper, background_fill_alpha=0.7, title='% Full',
title_text_align='left', title_standoff=10)
p.add_layout(color_bar)
label = Label(x=820, y=665, x_units='screen', y_units='screen',
text='Dot size represents total docks in station', render_mode='css',
border_line_color=None, background_fill_color='white', background_fill_alpha=0.7)
p.add_layout(label)
return p
| 14,007
|
def main(count):
"""メイン処理
"""
fd_7seg, speaker = init()
print('app_a1 start')
speaker.play(MELODY_LIST[0])
count_down(fd_7seg, count)
speaker.play(MELODY_LIST[1])
print('app_a1 stop')
| 14,008
|
def noise_graph_update(graph: ig.Graph, noise_csv_dir: str, log: Logger) -> None:
"""Updates attributes noises and noise_source to graph.
"""
noise_csvs = os.listdir(noise_csv_dir)
for csv_file in noise_csvs:
        edge_noises = pd.read_csv(os.path.join(noise_csv_dir, csv_file))
        edge_noises[E.noise_source.name] = edge_noises[E.noise_source.name].replace({np.nan: ''})
        log.info(f'updating {len(edge_noises)} edge noises from {csv_file}')
for edge in edge_noises.itertuples():
graph.es[getattr(edge, E.id_ig.name)][E.noises.value] = getattr(edge, E.noises.name)
graph.es[getattr(edge, E.id_ig.name)][E.noise_source.value] = getattr(edge, E.noise_source.name)
| 14,009
|
def remove(packages):
"""Removes package(s) using apt.
    :param packages: space-separated package name(s) to remove
"""
sudo('apt-get -y remove %s' % packages)
| 14,010
|
def save_all_pages(pages, root='.'):
"""Save picture references in pages on the form:
pages = {
urn1 : [page1, page2, ..., pageN],
urn2: [page1, ..., pageM]},
...
urnK: [page1, ..., pageL]
}
Each page reference is a URL.
"""
# In case urn is an actual URN, works also if urn is passed as sesamid
for urn in pages:
folder_name = urn.split(':')[-1]
folder_ref = os.path.join(root, folder_name)
try:
os.mkdir(folder_ref)
except FileExistsError:
            pass
for p in pages[urn]:
            # extract an unambiguous reference to the image from the URL in the page list, to use as the filename
filename = p.split('/')[6].split(':')[-1] + '.jpg'
path = os.path.join(folder_ref, filename)
get_picture_from_url(p).save(path)
return True
| 14,011
|
def rotate_images(images, rot90_scalars=(0, 1, 2, 3)):
"""Return the input image and its 90, 180, and 270 degree rotations."""
images_rotated = [
images, # 0 degree
tf.image.flip_up_down(tf.image.transpose_image(images)), # 90 degrees
tf.image.flip_left_right(tf.image.flip_up_down(images)), # 180 degrees
tf.image.transpose_image(tf.image.flip_up_down(images)) # 270 degrees
]
results = tf.stack([images_rotated[i] for i in rot90_scalars])
results = tf.reshape(results,
[-1] + images.get_shape().as_list()[1:])
return results
| 14,012
|
def import_archive(
ctx, archives, webpages, extras_mode_existing, extras_mode_new, comment_mode, include_authinfos, migration,
batch_size, import_group, group, test_run
):
"""Import data from an AiiDA archive file.
The archive can be specified by its relative or absolute file path, or its HTTP URL.
"""
# pylint: disable=unused-argument
from aiida.common.progress_reporter import set_progress_bar_tqdm, set_progress_reporter
if AIIDA_LOGGER.level <= logging.REPORT: # pylint: disable=no-member
set_progress_bar_tqdm(leave=(AIIDA_LOGGER.level <= logging.INFO))
else:
set_progress_reporter(None)
all_archives = _gather_imports(archives, webpages)
# Preliminary sanity check
if not all_archives:
echo.echo_critical('no valid exported archives were found')
# Shared import key-word arguments
import_kwargs = {
'import_new_extras': extras_mode_new == 'import',
'merge_extras': ExtrasImportCode[extras_mode_existing].value,
'merge_comments': comment_mode,
'include_authinfos': include_authinfos,
'batch_size': batch_size,
'create_group': import_group,
'group': group,
'test_run': test_run,
}
for archive, web_based in all_archives:
_import_archive_and_migrate(archive, web_based, import_kwargs, migration)
| 14,013
|
from typing import List

def csv(args: List[str]) -> str:
    """create a string of comma-separated values"""
    return ','.join(args)
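# Usage sketch (added example, not part of the original snippet): a quick
# sanity check of csv() on plain Python lists.
assert csv(["alpha", "beta", "gamma"]) == "alpha,beta,gamma"
assert csv([]) == ""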
| 14,014
|
def _update_class(oldclass, newclass):
"""Update a class object."""
# XXX What about __slots__?
olddict = oldclass.__dict__
newdict = newclass.__dict__
# PDF changed to remove use of set as not in Jython 2.2
for name in olddict.keys():
if name not in newdict:
delattr(oldclass, name)
for name in newdict.keys():
if name not in ["__dict__", "__doc__"]:
if name not in olddict:
setattr(oldclass, name, newdict[name])
continue
new = getattr(newclass, name)
old = getattr(oldclass, name, None)
if new == old:
continue
if old is None:
setattr(oldclass, name, new)
continue
if isinstance(new, types.MethodType):
changed = _update_method(old, new)
setattr(oldclass, name, changed)
elif isinstance(new, types.FunctionType):
# __init__ is a function
changed = _update_function(old, new)
setattr(oldclass, name, changed)
else:
# Fallback to just replace the item
setattr(oldclass, name, new)
return oldclass
| 14,015
|
def update_signature():
"""Create and update signature in gmail.
Returns:Draft object, including updated signature.
Load pre-authorized user credentials from the environment.
TODO(developer) - See https://developers.google.com/identity
for guides on implementing OAuth2 for the application.
"""
creds, _ = google.auth.default()
try:
# create gmail api client
service = build('gmail', 'v1', credentials=creds)
primary_alias = None
# pylint: disable=E1101
aliases = service.users().settings().sendAs().list(userId='me')\
.execute()
for alias in aliases.get('sendAs'):
if alias.get('isPrimary'):
primary_alias = alias
break
send_as_configuration = {
'displayName': primary_alias.get('sendAsEmail'),
'signature': 'Automated Signature'
}
# pylint: disable=E1101
result = service.users().settings().sendAs() \
.patch(userId='me', sendAsEmail=primary_alias.get('sendAsEmail'),
body=send_as_configuration).execute()
print(F'Updated signature for: {result.get("displayName")}')
except HttpError as error:
print(F'An error occurred: {error}')
result = None
    return result.get('signature') if result else None
| 14,016
|
def generate_block(constraints, p, rng=None):
"""Generated a balanced set of trials, might be only part of a run."""
if rng is None:
rng = np.random.RandomState()
n_trials = constraints.trials_per_run
# --- Assign trial components
# Assign the target to a side
gen_dist = np.repeat([0, 1], n_trials // 2)
while max_repeat(gen_dist) > constraints.max_dist_repeat:
gen_dist = rng.permutation(gen_dist)
# Assign pulse counts to each trial
count_support = np.arange(p.pulse_count[-1], p.pulse_count_max) + 1
count_pmf = trunc_geom_pmf(count_support, p.pulse_count[1])
expected_count_dist = count_pmf * n_trials
count_error = np.inf
while count_error > constraints.sum_count_error:
pulse_count = flexible_values(p.pulse_count, n_trials, rng,
max=p.pulse_count_max).astype(int)
count_dist = np.bincount(pulse_count, minlength=p.pulse_count_max + 1)
count_error = np.sum(np.abs(count_dist[count_support]
- expected_count_dist))
# Assign initial ITI to each trial
total_iti = np.inf
while not_in_range(total_iti, constraints.iti_range):
wait_iti = flexible_values(p.wait_iti, n_trials, rng)
total_iti = wait_iti.sum()
# Use the first random sample if we're not being precise
# about the overall time of the run (i.e. in psychophys rig)
if not p.keep_on_time:
break
# --- Build the trial_info structure
trial = np.arange(1, n_trials + 1)
trial_info = pd.DataFrame(dict(
trial=trial,
gen_dist=gen_dist,
pulse_count=pulse_count.astype(int),
wait_iti=wait_iti,
))
# --- Assign trial components
# Map from trial to pulse
trial = np.concatenate([
        np.full(c, i, dtype=int) for i, c in enumerate(pulse_count, 1)
])
pulse = np.concatenate([
np.arange(c) + 1 for c in pulse_count
])
n_pulses = pulse_count.sum()
# Assign gaps between pulses
run_duration = np.inf
while not_in_range(run_duration, constraints.run_range):
wait_pre_stim = flexible_values(p.pulse_gap, n_trials, rng)
gap_dur = flexible_values(p.pulse_gap, n_pulses, rng)
run_duration = np.sum([
wait_iti.sum(),
wait_pre_stim.sum(),
gap_dur.sum(),
p.pulse_dur * n_pulses,
])
# Use the first random sample if we're not being precise
# about the overall time of the run (i.e. in psychophys rig)
if not p.keep_on_time:
break
# Assign pulse intensities
max_contrast = np.log10(1 / np.sqrt(p.stim_gratings))
log_contrast = np.zeros(n_pulses)
pulse_dist = np.concatenate([
        np.full(n, i, dtype=int) for n, i in zip(pulse_count, gen_dist)
])
llr_mean = np.inf
llr_sd = np.inf
expected_acc = np.inf
while (not_in_range(llr_mean, constraints.mean_range)
or not_in_range(llr_sd, constraints.sd_range)
or not_in_range(expected_acc, constraints.acc_range)):
for i in [0, 1]:
dist = "norm", p.dist_means[i], p.dist_sds[i]
rows = pulse_dist == i
n = rows.sum()
log_contrast[rows] = flexible_values(dist, n, rng,
max=max_contrast)
pulse_llr = compute_llr(log_contrast, p)
target_llr = np.where(pulse_dist, pulse_llr, -1 * pulse_llr)
llr_mean = target_llr.mean()
llr_sd = target_llr.std()
dv = pd.Series(target_llr).groupby(pd.Series(trial)).sum()
dv_sd = np.sqrt(constraints.sigma ** 2 * pulse_count)
expected_acc = stats.norm(dv, dv_sd).sf(0).mean()
# --- Build the pulse_info structure
pulse_info = pd.DataFrame(dict(
trial=trial,
pulse=pulse,
gap_dur=gap_dur,
log_contrast=log_contrast,
contrast=10 ** log_contrast,
pulse_llr=pulse_llr,
))
# --- Update the trial_info structure
trial_info["wait_pre_stim"] = wait_pre_stim
trial_llr = (pulse_info
.groupby("trial")
.sum()
.loc[:, "pulse_llr"]
.rename("trial_llr"))
trial_info = trial_info.join(trial_llr, on="trial")
# TODO reorder the columns so they are more intuitively organized?
return trial_info, pulse_info
| 14,017
|
def mean_by_weekday(day, val):
"""
Returns a list that contain weekday, mean of beginning and end of presence.
"""
return [day_abbr[day], mean(val['start']), mean(val['end'])]
| 14,018
|
def parse_metrics(match, key):
"""Gets the metrics out of the parsed logger stream"""
elements = match.split(' ')[1:]
elements = filter(lambda x: len(x) > 2, elements)
elements = [float(e) for e in elements]
metrics = dict(zip(['key', 'precision', 'recall', 'f1'], [key] + elements))
return metrics
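# Illustrative call (added). The log-line format shown here is an assumption:
# a label token followed by precision, recall and F1 values.
line = "EVAL 0.912 0.884 0.898"
print(parse_metrics(line, key="validation"))
# -> {'key': 'validation', 'precision': 0.912, 'recall': 0.884, 'f1': 0.898}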
| 14,019
|
def minecraftify(clip: vs.VideoNode, div: float = 64.0, mod: int | None = None) -> vs.VideoNode:
"""
Function that transforms your clip into a Minecraft.
Idea from Meme-Maji's Kobayashi memery (love you varde).
:param clip: Input clip
:param div: How much to divide the clip's resolution with
:param mod: Force the downscaled clip to be MOD# compliant
:return: A Minecraft.
"""
ow, oh = round(clip.width/div), round(clip.height/div)
if mod is not None:
ow, oh = force_mod(ow, mod), force_mod(oh, mod)
i444 = core.resize.Bicubic(clip, format=vs.YUV444PS)
down = Point().scale(i444, ow, oh)
return Point().scale(down, clip.width, clip.height)
| 14,020
|
def keyframeRegionTrackCtx(q=1,e=1,ex=1,ch=1,i1="string",i2="string",i3="string",n="string"):
"""
http://help.autodesk.com/cloudhelp/2019/ENU/Maya-Tech-Docs/CommandsPython/keyframeRegionTrackCtx.html
-----------------------------------------
keyframeRegionTrackCtx is undoable, queryable, and editable.
This command can be used to create a track context for the dope sheet editor.
-----------------------------------------
Return Value:
string Context name
In query mode, return type is based on queried flag.
-----------------------------------------
Flags:
-----------------------------------------
ex : exists [boolean] []
Returns true or false depending upon whether the specified object exists. Other flags are ignored.
-----------------------------------------
ch : history [boolean] []
If this is a tool command, turn the construction history on for the tool in question.
-----------------------------------------
i1 : image1 [string] ['query', 'edit']
First of three possible icons representing the tool associated with the context.
-----------------------------------------
i2 : image2 [string] ['query', 'edit']
Second of three possible icons representing the tool associated with the context.
-----------------------------------------
i3 : image3 [string] ['query', 'edit']
Third of three possible icons representing the tool associated with the context.
-----------------------------------------
n : name [string]
If this is a tool command, name the tool appropriately.
"""
| 14,021
|
def processGOTerm(goTerm):
"""
In an object representing a GO term, replace single-element lists with
their only member.
Returns the modified object as a dictionary.
"""
ret = dict(goTerm) #Input is a defaultdict, might express unexpected behaviour
for key, value in ret.items():
if len(value) == 1:
ret[key] = value[0]
return ret
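# Minimal example (added): a hand-built term dict standing in for the
# defaultdict produced by the actual OBO parser.
term = {"id": ["GO:0008150"], "name": ["biological_process"],
        "is_a": ["GO:0003674", "GO:0005575"]}
print(processGOTerm(term))
# -> {'id': 'GO:0008150', 'name': 'biological_process',
#     'is_a': ['GO:0003674', 'GO:0005575']}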
| 14,022
|
def searchxapian_show(request):
""" zeigt den Inhalt eines Dokumentes """
SORT_BY = { -1: _(u'Relevanz'),
0: _(u'URL'),
1: _(u'Überschrift/Titel'),
2: _(u'Datum der letzten Änderung') }
if request.path.find('index.html') < 0:
my_path = request.path.replace('searchxapian', 'index.html/searchxapian')
else:
my_path = request.path
item_container = get_item_container(my_path, '/searchxapian/')
def get_sort_by_choices():
ret = []
        ret.append((-1, SORT_BY[-1])) # See SORT_BY
ret.append((0, SORT_BY[0]))
ret.append((1, SORT_BY[1]))
ret.append((2, SORT_BY[2]))
return ret
def get_domain_choices():
""" """
ret = []
ret.append( ('', _(u'Alle Seiten')) )
        if item_container is not None:
url = item_container.container.site.url[7:]
ret.append( (url, _(u'Nur Seiten der Domaine <i>') + url + '</i>') )
return ret
class DmsItemForm(forms.Form):
query = forms.CharField(required=False, max_length=60,
widget=forms.TextInput(attrs={'size':60}) )
sort_by = forms.CharField(
widget=forms.Select(choices=
get_sort_by_choices(),
attrs={'size':4, 'style':'width:60%'} ) )
domain = forms.ChoiceField(required=False, choices=get_domain_choices(),
widget=forms.RadioSelect() )
def get_prev_next(query, offset, delta, domain, sort_by, count):
aquery = u'query=%s' % urllib.quote_plus(query)
if domain == '':
site = ''
else:
site = '&domain=' + domain
show_prev = ''
show_next = ''
show_middle = ''
n_start = 0
if count > offset + 10*delta:
show_next_more = True
count = offset + 10*delta
else:
show_next_more = False
if offset > 10*delta:
show_prev_more = True
n_start = offset - 10*delta
else:
show_prev_more = False
n = n_start
while n < count:
if n < offset:
show_prev += show_link(u'./?%s&offset=%i&sort_by=%i%s' % (aquery, n, sort_by, site),
smart_unicode(n), url_class='navLink') + ' '
elif n == offset:
show_middle = '<b>%i</b> ' % n
else:
show_next += show_link(u'./?%s&offset=%i&sort_by=%i%s' % \
(aquery, n, sort_by, site),
smart_unicode(n), url_class='navLink') + ' '
n += delta
if show_next_more:
show_next += show_link(u'./?%s&offset=%i&sort_by=%i%s' % \
(aquery, n, sort_by, site),
' » Weiter', url_class='navLink')
if show_prev_more:
show_prev = show_link(u'./?%s&offset=%i&sort_by=%i%s' % \
(aquery, n_start-delta, sort_by, site),
'Zurück « ', url_class='navLink') + show_prev
if count < delta:
show_middle = ''
return show_prev, show_middle, show_next
def get_search_results(request):
sort_by = -1
offset = 0
delta = 20
count = -1
if show_errors:
data = request.POST.copy()
query = data['query']
domain = data['domain']
else:
data = { 'query': '', 'sort_by': -1,}
query = ''
domain = ''
if params.has_key('offset'):
offset = int(params['offset'])
if params.has_key('sort_by'):
sort_by = int(params['sort_by'])
if params.has_key('domain'):
domain = params['domain']
if params.has_key('query'):
query = params['query']
data = { 'query': query, 'sort_by': sort_by, 'domain': domain}
s = xmlrpclib.Server('http://localhost:3000')
sort_by = int(data['sort_by'])
ascending = sort_by==2
res = s.search(query, offset, delta, domain, sort_by, ascending)
return res, query, offset, delta, domain, sort_by, data
def get_link_list(rs):
results = []
for r in rs:
this_link = show_link(r['url'], r['title']) + u' {%s}' % r['percent']
            # --- See SORT_BY
if sort_by == 0:
this_link += '<br />' + r['url']
elif sort_by == 2:
this_link += ', ' + get_german_date(r['date'])
results.append(this_link)
return results
app_name = 'searchxapian'
my_title = _(u'Suchanfrage stellen')
    if item_container is not None:
my_absolute_url = item_container.get_absolute_url()
else:
my_absolute_url = './'
show_errors = ( request.method == 'POST' )
params = request.GET.copy()
if params!={} or show_errors:
res, query, offset, delta, domain, sort_by, data = get_search_results(request)
query = decode_html(query)
        # --- convert the raw data into a list
count = res['count']
rs = res['results']
results = get_link_list(rs)
if query.find('&') >= 0:
q = query
else:
try:
q = encode_html(query.decode('iso-8859-1'))
except:
q = encode_html(query)
show_prev, show_middle, show_next = \
get_prev_next(q, offset, delta, domain, sort_by, count)
else :
sort_by = -1
query = ''
count = 20
        data = { 'query': '', 'sort_by': sort_by, 'domain': '', }
results = []
show_prev = ''
show_middle = ''
show_next = ''
f = DmsItemForm(data)
    # --- order, headings, help texts
tabs = [
('tab_base',['query',]),
('tab_more', ['sort_by', 'domain', ]) ]
    # --- assemble the form
content = get_tabbed_form(tabs, help_form, app_name , f)
    # --- external search engines
search_engines = get_search_engines()
links = []
for engine in search_engines:
if query.find('&') < 0:
url = engine.url_query % (urllib.quote_plus(encode_html(query.decode('iso-8859-1'))),
SEARCH_DOMAIN)
else:
url = engine.url_query % (urllib.quote_plus(query), SEARCH_DOMAIN)
links.append(show_link(url, engine.name, url_class="navLink"))
t = get_template('utils/info_slot_right_list_simple.html')
c = Context ( { 'header': _(u'Externe Suche'),
'links': links
} )
slot_info_right = t.render(c)
    # --- back to the start page
back_link = show_link(my_absolute_url, _(u'Zur Ausgangsseite ...'),
url_class="navLink")
t = get_template('utils/info_slot_right.html')
c = Context ( { 'header': _(u'Ausgangsseite'),
'info': back_link
} )
slot_info_right += '<br /><br />\n' + t.render(c)
vars = get_item_vars_add(request, item_container, app_name, my_title,
content, show_errors)
vars['next'] = get_site_url(item_container, 'searchxapian/')
vars['path'] = item_container.container.path + 'searchxapian/'
vars['sub_title'] = ''
vars['slot_right_info'] = slot_info_right
vars['action'] = ''
vars['results'] = results
vars['count'] = count
vars['show_prev'] = show_prev
vars['show_middle'] = show_middle
vars['show_next'] = show_next
vars['sort_by'] = SORT_BY[sort_by]
vars['google_search'] = 'google'
vars['no_top_main_navigation'] = True
return render_to_response ( 'app/searchxapian/base.html', vars )
| 14,023
|
def html2plaintext(html, body_id=None, encoding='utf-8'):
""" From an HTML text, convert the HTML to plain text.
If @param body_id is provided then this is the tag where the
body (not necessarily <body>) starts.
"""
## (c) Fry-IT, www.fry-it.com, 2007
## <peter@fry-it.com>
## download here: http://www.peterbe.com/plog/html2plaintext
html = ustr(html)
tree = etree.fromstring(html, parser=etree.HTMLParser())
if body_id is not None:
source = tree.xpath('//*[@id=%s]' % (body_id,))
else:
source = tree.xpath('//body')
if len(source):
tree = source[0]
url_index = []
i = 0
for link in tree.findall('.//a'):
url = link.get('href')
if url:
i += 1
link.tag = 'span'
link.text = '%s [%s]' % (link.text, i)
url_index.append(url)
html = ustr(etree.tostring(tree, encoding=encoding))
    # \r char is converted into &#13;, must remove it
    html = html.replace('&#13;', '')
html = html.replace('<strong>', '*').replace('</strong>', '*')
html = html.replace('<b>', '*').replace('</b>', '*')
html = html.replace('<h3>', '*').replace('</h3>', '*')
html = html.replace('<h2>', '**').replace('</h2>', '**')
html = html.replace('<h1>', '**').replace('</h1>', '**')
html = html.replace('<em>', '/').replace('</em>', '/')
html = html.replace('<tr>', '\n')
html = html.replace('</p>', '\n')
html = re.sub('<br\s*/?>', '\n', html)
html = re.sub('<.*?>', ' ', html)
html = html.replace(' ' * 2, ' ')
# strip all lines
html = '\n'.join([x.strip() for x in html.splitlines()])
html = html.replace('\n' * 2, '\n')
for i, url in enumerate(url_index):
if i == 0:
html += '\n\n'
html += ustr('[%s] %s\n') % (i + 1, url)
return html
| 14,024
|
def _create_regularization_of_grad(param, grad, regularization=None):
""" Create and add backward regularization Operators
Function helper of append_regularization_ops.
"""
# If no gradient or no regularization is specified, then we don't need to do anything
if grad is None or (param.regularizer is None and regularization is None):
return grad
regularization_term = None
if param.regularizer is not None:
# Add variable for regularization term in grad block
regularization_term = param.regularizer(param, grad, grad.block)
elif regularization is not None:
regularization_term = regularization(param, grad, grad.block)
assert regularization_term is not None
new_grad = grad
if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
# FIXME(zcd): If the grad is SELECTED_ROWS, after regularization,
# the grad's type and name will be changed. But the gradient's name
# is used in ParallelExecutor Reduce mode, so I add a flag for
# the new_grad here.
new_grad = grad.block.create_var(
name=grad.name + core.kNewGradSuffix(),
dtype=param.dtype,
shape=param.shape,
lod_level=param.lod_level,
type=core.VarDesc.VarType.LOD_TENSOR)
inputs = {"X": [grad, regularization_term]}
outputs = {"Out": [new_grad]}
if in_dygraph_mode():
core.ops.sum(inputs, {}, outputs)
else:
grad.block.append_op(type='sum', inputs=inputs, outputs=outputs)
return new_grad
| 14,025
|
def MooreSpace(q):
"""
Triangulation of the mod `q` Moore space.
INPUT:
    - ``q`` -- integer, at least 2
This is a simplicial complex with simplices of dimension 0, 1,
and 2, such that its reduced homology is isomorphic to
`\\ZZ/q\\ZZ` in dimension 1, zero otherwise.
If `q=2`, this is the real projective plane. If `q>2`, then
construct it as follows: start with a triangle with vertices
1, 2, 3. We take a `3q`-gon forming a `q`-fold cover of the
triangle, and we form the resulting complex as an
identification space of the `3q`-gon. To triangulate this
identification space, put `q` vertices `A_0`, ..., `A_{q-1}`,
in the interior, each of which is connected to 1, 2, 3 (two
facets each: `[1, 2, A_i]`, `[2, 3, A_i]`). Put `q` more
vertices in the interior: `B_0`, ..., `B_{q-1}`, with facets
`[3, 1, B_i]`, `[3, B_i, A_i]`, `[1, B_i, A_{i+1}]`, `[B_i,
A_i, A_{i+1}]`. Then triangulate the interior polygon with
vertices `A_0`, `A_1`, ..., `A_{q-1}`.
EXAMPLES::
sage: simplicial_complexes.MooreSpace(2)
Minimal triangulation of the real projective plane
sage: simplicial_complexes.MooreSpace(3).homology()[1]
C3
sage: simplicial_complexes.MooreSpace(4).suspension().homology()[2]
C4
sage: simplicial_complexes.MooreSpace(8)
Triangulation of the mod 8 Moore space
"""
if q <= 1:
raise ValueError("the mod q Moore space is only defined if q is at least 2")
if q == 2:
return RealProjectivePlane()
facets = []
for i in range(q):
Ai = "A" + str(i)
Aiplus = "A" + str((i+1) % q)
Bi = "B" + str(i)
facets.append([1, 2, Ai])
facets.append([2, 3, Ai])
facets.append([3, 1, Bi])
facets.append([3, Bi, Ai])
facets.append([1, Bi, Aiplus])
facets.append([Bi, Ai, Aiplus])
for i in range(1, q-1):
Ai = "A" + str(i)
Aiplus = "A" + str((i+1) % q)
facets.append(["A0", Ai, Aiplus])
return UniqueSimplicialComplex(facets,
name='Triangulation of the mod {} Moore space'.format(q))
| 14,026
|
def show(tournament_name, params=[], filter_response=True):
"""Retrieve a single tournament record by `tournament name`"""
utils._validate_query_params(params=params, valid_params=VALID_PARAMS, route_type='tournament')
uri = TOURNAMENT_PREFIX + tournament_name
response = api.get(uri, params)
if filter_response:
response = _filter_tournament_response(response, params)
return response
| 14,027
|
def auto_run_api_pk(**kwargs):
"""run api by pk and config
"""
id = kwargs['id']
env = kwargs['config']
config_name = 'rig_prod' if env == 1 else 'rig_test'
api = models.API.objects.get(id=id)
config = eval(models.Config.objects.get(name=config_name, project=api.project).body)
test_case = eval(api.body)
summary = loader.debug_api(test_case, api.project.id, config=config)
api_request = summary['details'][0]['records'][0]['meta_data']['request']
api_response = summary['details'][0]['records'][0]['meta_data']['response']
    # the API ran successfully, so set its tag to "auto-run succeeded"
if summary['stat']['failures'] == 0 and summary['stat']['errors'] == 0:
models.API.objects.filter(id=id).update(tag=3)
return 'success'
elif summary['stat']['failures'] == 1:
# models.API.objects.filter(id=id).update(tag=2)
return 'fail'
| 14,028
|
def stairway():
"""This function plays Stairway To Heaven.
Msg:
Playing Stairway...
"""
raise Exception("DENIED")
| 14,029
|
def create(arxiv_id: ArXivID,
arxiv_ver: int,
resource_type: str,
resource_id: str,
description: str,
creator: Optional[str]) -> Relation:
"""
Create a new relation for an e-print.
Parameters
----------
arxiv_id: ArXivID
The arXiv ID of the e-print.
arxiv_ver: int
The version of the e-print.
resource_type: str
The type of the corresponding resource.
resource_id: str
An identifier of the resource e.g., DOI.
description: str
A description for the relation.
creator: Optional[str]
Info of the user/app who requested this relation creation.
Returns
-------
Relation
The newly-created relation.
"""
# store it to DB
rel_data = RelationDB(rel_type=RelationType.ADD,
arxiv_id=str(arxiv_id),
arxiv_ver=arxiv_ver,
resource_type=resource_type,
resource_id=resource_id,
description=description,
added_at=datetime.now(UTC),
creator=creator,
supercedes_or_suppresses=None)
try:
db.session.add(rel_data)
db.session.commit()
except Exception as e:
db.session.rollback()
raise StorageError from e
# return the result
return relation_from_DB(rel_data)
| 14,030
|
def cli(ctx):
"""list modify create delete addtovolumeaccessgroup removefromvolumeaccessgroup """
| 14,031
|
def p_contain_resist(D, t, f_y, f_u=None):
"""Pressure containment resistance in accordance with DNVGL-ST-F101.
(press_contain_resis)
Reference:
DNVGL-ST-F101 (2017-12)
sec:5.4.2.2 eq:5.8 p:94 $p_{b}(t)$
"""
if f_u is None:
f_cb = f_y
else:
f_cb = np.minimum(f_y, f_u/1.15)
p_b = (2*t/(D-t) * f_cb * 2/np.sqrt(3))
return p_b
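# Worked example (added); the pipe dimensions and material strengths below are
# illustrative values only, not taken from the standard.
D, t = 0.3239, 0.0159          # outer diameter and wall thickness [m]
f_y, f_u = 450.0e6, 535.0e6    # yield and tensile strength [Pa]
print(p_contain_resist(D, t, f_y, f_u) / 1e6)  # burst resistance p_b in MPa (about 54 MPa)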
| 14,032
|
def test_pvector_field_default_non_optional():
"""
By default ``pvector_field`` is non-optional, i.e. does not allow
``None``.
"""
class Record(PRecord):
value = pvector_field(int)
with pytest.raises(TypeError):
Record(value=None)
| 14,033
|
def revalue(request):
"""其它设备参数修改"""
value = request.GET.get('value')
name = request.GET.get('name')
others = Machines().filter_machines(OtherMachineInfo, pk=request.GET.get('dID'))[0]
if name == 'remark':
others.remark = value
elif name == 'machine_name':
others.machine_name = value
elif name == 'reson_str':
others.reson_str = value
elif name == 'oth_cab_id':
        return '再考虑考虑'  # "let me think about it some more"
others.save()
return JsonResponse({'is_ok': 1})
| 14,034
|
def indieauth_endpoint():
""" IndieAuth token endpoint """
import authl.handlers.indieauth
if 'me' in flask.request.args:
# A ticket request is being made
me_url = flask.request.args['me']
try:
endpoint, _ = authl.handlers.indieauth.find_endpoint(me_url,
rel='ticket_endpoint')
except RuntimeError:
endpoint = None
if not endpoint:
raise http_error.BadRequest("Could not get ticket endpoint")
LOGGER.info("endpoint: %s", endpoint)
send_auth_ticket(me_url, flask.request.url_root, endpoint)
return "Ticket sent", 202
if 'grant_type' in flask.request.form:
# token grant
if flask.request.form['grant_type'] == 'ticket':
# TicketAuth
if 'ticket' not in flask.request.form:
raise http_error.BadRequest("Missing ticket")
ticket = parse_token(flask.request.form['ticket'])
LOGGER.info("Redeeming ticket for %s; scopes=%s", ticket['me'],
ticket['scope'])
scopes = set(ticket.get('scope', '').split())
if 'ticket' not in scopes:
raise http_error.BadRequest("Missing 'ticket' scope")
scopes.remove('ticket')
scope = ' '.join(scopes)
token = get_token(ticket['me'], config.token_lifetime, scope)
response = {
'access_token': token,
'token_type': 'Bearer',
'me': ticket['me'],
'expires_in': config.token_lifetime,
'refresh_token': get_token(ticket['me'],
config.token_lifetime,
ticket['scope'])
}
if scope:
response['scope'] = scope
return json.dumps(response), {'Content-Type': 'application/json'}
raise http_error.BadRequest("Unknown grant type")
if 'action' in flask.request.form:
raise http_error.BadRequest()
if 'Authorization' in flask.request.headers:
# ticket verification
parts = flask.request.headers['Authorization'].split()
if parts[0].lower() == 'bearer':
token = parse_token(parts[1])
return json.dumps(token), {'Content-Type': 'application/json'}
raise http_error.Unauthorized("Invalid authorization header")
raise http_error.BadRequest()
| 14,035
|
def policy_simulation_c(model,var,ages):
""" policy simulation for couples"""
if var == 'd':
return {'hs': lifecycle_c(model,var=var,MA=[0],ST_w=[1,3],ages=ages,calc='sum')['y'][0] +
lifecycle_c(model,var=var,MA=[1],ST_h=[1,3],ages=ages,calc='sum')['y'][0],
'hs_f': lifecycle_c(model,var=var,MA=[0],ST_w=[1,3],ages=ages,calc='sum')['y'][0],
'hs_m': lifecycle_c(model,var=var,MA=[1],ST_h=[1,3],ages=ages,calc='sum')['y'][0],
'base': lifecycle_c(model,var=var,MA=[0,1],ages=ages,calc='sum')['y'][0],
'base_f': lifecycle_c(model,var=var,MA=[0],ages=ages,calc='sum')['y'][0],
'base_m': lifecycle_c(model,var=var,MA=[1],ages=ages,calc='sum')['y'][0],
'ls': lifecycle_c(model,var=var,MA=[0],ST_w=[0,2],ages=ages,calc='sum')['y'][0] +
lifecycle_c(model,var=var,MA=[1],ST_h=[0,2],ages=ages,calc='sum')['y'][0],
'ls_f': lifecycle_c(model,var=var,MA=[0],ST_w=[0,2],ages=ages,calc='sum')['y'][0],
'ls_m': lifecycle_c(model,var=var,MA=[1],ST_h=[0,2],ages=ages,calc='sum')['y'][0]
}
if var == 'probs':
return {'base_f': retirement_probs_c(model,ma=0),
'base_m': retirement_probs_c(model,ma=1)
}
if var == 'GovS':
return lifecycle_c(model,var=var,MA=[0,1],ages=ages,calc='total_sum')['y'][0]
if var == 'RetAge':
return {'hs':
np.mean(np.concatenate((RetAge_C(model,ma=0,ST_w=[1,3]),
RetAge_C(model,ma=1,ST_h=[1,3])))),
'base_m':
np.mean(RetAge_C(model,ma=1)),
'base_f':
np.mean(RetAge_C(model,ma=0)),
'base':
np.mean(np.concatenate((RetAge_C(model,ma=0),
RetAge_C(model,ma=1)))),
'ls':
np.mean(np.concatenate((RetAge_C(model,ma=0,ST_w=[0,2]),
RetAge_C(model,ma=1,ST_h=[0,2]))))
}
| 14,036
|
def emit_obj_db_entry(target, source, env):
"""Emitter for object files. We add each object file
built into a global variable for later use"""
for t in target:
        if t is None:
continue
OBJ_DB.append(t)
return target, source
| 14,037
|
def listtimes(list, c):
    """multiplies the elements in the list by the given scalar value c"""
    ret = []
    for i in range(0, len(list)):
        ret.extend([list[i]] * c)
    return ret
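# Quick check (added example): each element is repeated c times, in order.
assert listtimes([1, 2, 3], 2) == [1, 1, 2, 2, 3, 3]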
| 14,038
|
def main(): # pragma: no cover
"""
Creates an arm using the NLinkArm class and uses its inverse kinematics
to move it to the desired position.
"""
q_now = np.array([0, 0, 0, 0, 0, 0]) # to np.array([0, 0, 0, np.pi*2/3, np.pi*2/3, np.pi*2/3])
goal_pos = [0.015457, 0.057684, 0.269762] # from [0.042224, -0.042224, 0.269772] -> (0, 0, 0)
# arm = NLinkArm(link_lengths, q_now, goal_pos, show_animation)
state = WAIT_FOR_NEW_GOAL
solution_found = False
while True:
old_goal = np.array(goal_pos)
goal_pos = np.array(goal_pos)
x_now = forward_kinematics(q_now)
errors, distance = distance_to_goal(x_now, goal_pos)
print("distance", distance)
# State machine to allow changing of goal before current goal has been reached
if state is WAIT_FOR_NEW_GOAL:
if distance > 0.1 and not solution_found:
q_new, solution_found = inverse_kinematics(q_now, goal_pos)
if not solution_found:
print("Solution could not be found.")
break
elif solution_found:
state = MOVING_TO_GOAL
elif state is MOVING_TO_GOAL:
if distance > 0.1 and all(old_goal == goal_pos):
q_now = q_new # q_now + Kp * \
# ang_diff(q_new, q_now) * dt
else:
print("Done")
# state = WAIT_FOR_NEW_GOAL # TODO: need this?
# solution_found = False
break
# arm.update_joints(q_now) # TODO: plot scatter
print("FINAL Q: ", q_now)
| 14,039
|
def get(request):
"""Given a Request, return a Resource object (with caching).
We need the request because it carries media_type_default.
"""
# XXX This is not thread-safe. It used to be, but then I simplified it
# when I switched to diesel. Now that we have multiple engines, some of
# which are threaded, we need to make this thread-safe again.
# Get a cache Entry object.
# =========================
if request.fs not in __cache__:
entry = Entry()
__cache__[request.fs] = entry
entry = __cache__[request.fs]
# Process the resource.
# =====================
mtime = os.stat(request.fs)[stat.ST_MTIME]
if entry.mtime == mtime: # cache hit
if entry.exc is not None:
raise entry.exc
else: # cache miss
try:
entry.resource = load(request, mtime)
except: # capture any Exception
entry.exc = ( LoadError(traceback.format_exc())
, sys.exc_info()[2]
)
else: # reset any previous Exception
entry.exc = None
entry.mtime = mtime
if entry.exc is not None:
raise entry.exc[0] # TODO Why [0] here, and not above?
# Return
# ======
# The caller must take care to avoid mutating any context dictionary at
# entry.resource.pages[0].
return entry.resource
| 14,040
|
def split_audio_ixs(n_samples, rate=STEP_SIZE_EM, min_coverage=0.75):
"""
Create audio,mel slice indices for the audio clip
Args:
Returns:
"""
assert 0 < min_coverage <= 1
# Compute how many frames separate two partial utterances
samples_per_frame = int((SAMPLING_RATE * WINDOW_STEP_DIARIZATION / 1000))
n_frames = int(np.ceil((n_samples + 1) / samples_per_frame))
frame_step = int(np.round((SAMPLING_RATE / rate) / samples_per_frame))
assert 0 < frame_step, "The rate is too high"
assert frame_step <= H_L, "The rate is too low, it should be %f at least" % \
(SAMPLING_RATE / (samples_per_frame * H_L))
wav_slices, mel_slices = [], []
steps = max(1, n_frames - H_L + frame_step + 1)
for i in range(0, steps, frame_step):
mel_range = np.array([i, i + H_L])
wav_range = mel_range * samples_per_frame
mel_slices.append(slice(*mel_range))
wav_slices.append(slice(*wav_range))
last_wav_range = wav_slices[-1]
coverage = (n_samples - last_wav_range.start) / \
(last_wav_range.stop - last_wav_range.start)
if coverage < min_coverage and len(mel_slices) > 1:
mel_slices = mel_slices[:-1]
wav_slices = wav_slices[:-1]
return wav_slices, mel_slices
| 14,041
|
def new(w: int, h: int, fmt: str, bg: int) -> 'Image':
"""
Creates new image by given size and format
and fills it with bg color
"""
if fmt not in ('RGB', 'RGBA', 'L', 'LA'):
raise ValueError('invalid format')
c = len(fmt)
image = Image()
image.im = _new_image(w, h, c)
lib.image_draw_rect(image.im, 0, 0, w, h, bg)
return image
| 14,042
|
def df_to_s3(df : pd.DataFrame, aws_access_key_id : str, region_name : str, aws_secret_access_key : str, bucket_name : str, upload_name : str) -> None:
"""Storing on the cloud made easy. All that is required is an s3 bucket that is open to the local IP address. For more information about setting up an AWS s3 bucket, see https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-bucket-overview.html
Parameters
----------
df : pd.DataFrame
The `pandas dataframe` to be processed.
aws_access_key_id : str
The access_key provided by AWS.
region_name : str
The region name provided by AWS.
aws_secret_access_key : str
The secret access_key provided by AWS.
bucket_name : str
The name of the bucket that the user has assigned upon creation.
upload_name : str
The name of the directory inside the s3 bucket that the user wishes to assign.
"""
print(f'Uploading dataframe to AWS s3 bucket: {bucket_name}')
s3_client = boto3.client('s3', aws_access_key_id= aws_access_key_id , region_name= region_name, aws_secret_access_key= aws_secret_access_key)
# A temporary file ensures that the data isn't permanently stored locally
with tempfile.NamedTemporaryFile() as temp:
df.to_csv(temp.name + '.csv')
s3_client.upload_file(f'{temp.name}.csv', bucket_name, f'{upload_name}/{temp.name}.csv')
temp.close()
| 14,043
|
def eval_f(f, xs):
"""Takes a function f = f(x) and a list xs of values that should be used as arguments for f.
The function eval_f should apply the function f subsequently to every value x in xs, and
return a list fs of function values. I.e. for an input argument xs=[x0, x1, x2,..., xn] the
function eval_f(f, xs) should return [f(x0), f(x1), f(x2), ..., f(xn)]."""
return [f(x) for x in xs]
# alternatively: return list(map(f, xs))
| 14,044
|
def cie94_loss(x1: Tensor, x2: Tensor, squared: bool = False, **kwargs) -> Tensor:
"""
    Computes the L2-norm over all pixels of the CIE94 Color-Difference for two RGB inputs.
Parameters
----------
x1 : Tensor:
First input.
x2 : Tensor:
Second input (of size matching x1).
squared : bool
Returns the squared L2-norm.
Returns
-------
    ΔE_94_l2 : Tensor
        The L2-norm over all pixels of the CIE94 Color-Difference.
"""
ΔE_94_squared = rgb_cie94_color_difference(x1, x2, squared=True, **kwargs).flatten(1)
ε = kwargs.get('ε', 0)
if squared:
return ΔE_94_squared.sum(1)
return ΔE_94_squared.sum(1).clamp_min(ε).sqrt()
| 14,045
|
def get_current_user_id() -> str:
"""
This functions gets the id of the current user that is signed in to the Azure CLI.
In order to get this information, it looks like there are two different services,
"Microsoft Graph" (developer.microsoft.com/graph) and "Azure AD Graph"
(graph.windows.net), the latter being deprecated
(https://devblogs.microsoft.com/microsoft365dev/microsoft-graph-or-azure-ad-graph/).
I think these services correspond to two different python libraries, msal
(https://docs.microsoft.com/en-us/python/api/overview/azure/active-directory?view=azure-python)
and adal (https://docs.microsoft.com/en-us/python/api/adal/adal?view=azure-python),
but these libraries don't appear to do anything super useful on their own.
The deprecated Azure Graph API seems to correspond to a higher-level library
azure-graphrbac, which does seem to have the functionality we need:
azure.graphrbac.GraphRbacManagementClient.signed_in_user, but is deprecated along
with Azure Graph
(https://github.com/Azure/azure-sdk-for-python/issues/14022#issuecomment-752279618).
The msgraph library that we use here seems to be a not-very-high-level library
for Microsoft Graph (https://github.com/microsoftgraph/msgraph-sdk-python-core).
As a side note, another way to get this information is to use the command line to
call `az ad signed-in-user show`, but that appears to be relying on the deprecated
Azure Graph API as it gives a deprecation warning.
"""
# crucial scopes parameter is needed, see
# https://github.com/microsoftgraph/msgraph-sdk-python-core/issues/106#issuecomment-969281260
with get_credential() as credential:
client = GraphClient(
credential=credential, scopes=["https://graph.microsoft.com"]
)
# https://docs.microsoft.com/en-us/graph/api/user-get?view=graph-rest-1.0&tabs=http
result = client.get("/me")
return result.json()["id"]
| 14,046
|
def scale_labels(subject_labels):
"""Saves two lines of code by wrapping up the fitting and transform methods of the LabelEncoder
Parameters
:param subject_labels: ndarray
Label array to be scaled
:return: ndarray
Scaled label array
"""
encoder = preprocessing.LabelEncoder()
_ = encoder.fit(subject_labels)
return encoder.transform(subject_labels)
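# Usage sketch (added). Assumes `preprocessing` is scikit-learn's module, as in
# the snippet above (i.e. `from sklearn import preprocessing`).
import numpy as np
from sklearn import preprocessing
labels = np.array(["rest", "task", "rest", "task"])
print(scale_labels(labels))  # -> [0 1 0 1]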
| 14,047
|
def _get_basemap(grid_metadata_dict):
"""Creates basemap.
M = number of rows in grid
    N = number of columns in grid
:param grid_metadata_dict: Dictionary created by
`grids.create_equidistant_grid`.
:return: basemap_object: Basemap handle (instance of
`mpl_toolkits.basemap.Basemap`).
:return: basemap_x_matrix_metres: M-by-N numpy array of x-coordinates under
Basemap projection (different than pyproj projection).
:return: basemap_y_matrix_metres: Same but for y-coordinates.
"""
x_matrix_metres, y_matrix_metres = grids.xy_vectors_to_matrices(
x_unique_metres=grid_metadata_dict[grids.X_COORDS_KEY],
y_unique_metres=grid_metadata_dict[grids.Y_COORDS_KEY]
)
projection_object = grid_metadata_dict[grids.PROJECTION_KEY]
latitude_matrix_deg, longitude_matrix_deg = (
projections.project_xy_to_latlng(
x_coords_metres=x_matrix_metres, y_coords_metres=y_matrix_metres,
projection_object=projection_object)
)
standard_latitudes_deg, central_longitude_deg = _get_lcc_params(
projection_object)
basemap_object = Basemap(
projection='lcc', lat_1=standard_latitudes_deg[0],
lat_2=standard_latitudes_deg[1], lon_0=central_longitude_deg,
rsphere=projections.DEFAULT_EARTH_RADIUS_METRES,
ellps=projections.SPHERE_NAME, resolution=RESOLUTION_STRING,
llcrnrx=x_matrix_metres[0, 0], llcrnry=y_matrix_metres[0, 0],
urcrnrx=x_matrix_metres[-1, -1], urcrnry=y_matrix_metres[-1, -1]
)
basemap_x_matrix_metres, basemap_y_matrix_metres = basemap_object(
longitude_matrix_deg, latitude_matrix_deg)
return basemap_object, basemap_x_matrix_metres, basemap_y_matrix_metres
| 14,048
|
def load_holes(observatory: str):
"""Loads a list holes to ``targetdb.hole``."""
targetdb.database.become_admin()
observatory_pk = targetdb.Observatory.get(label=observatory).pk
row_start = 13
row_end = -13
min_cols = 14
holes = []
for row in range(row_start, row_end - 1, -1):
end_col = min_cols + ((row_start - row) if row >= 0 else (row - row_end))
for col in range(1, end_col + 1, 1):
if row == 0:
holeid = f"R0C{col}"
else:
holeid = f"R{row:+}C{col}"
holes.append(
dict(
row=row,
column=col,
holeid=holeid,
observatory_pk=observatory_pk,
)
)
targetdb.Hole.insert(holes).on_conflict(
conflict_target=[targetdb.Hole.holeid, targetdb.Hole.observatory],
action="IGNORE",
).execute(targetdb.database)
| 14,049
|
def test_true_phase_preservation(chunk):
"""Test if dft is (phase-) preserved when signal is at same place but coords range is changed"""
x = np.arange(-15, 15)
y = np.random.rand(len(x))
N1 = np.random.randint(30) + 5
N2 = np.random.randint(30) + 5
l = np.arange(-N1, 0) + np.min(x)
r = np.arange(1, N2 + 1) + np.max(x)
s1 = xr.DataArray(
np.concatenate([np.zeros(N1), y, np.zeros(N2)]),
dims=("x",),
coords={"x": np.concatenate([l, x, r])},
)
if chunk:
s1 = s1.chunk()
S1 = xrft.dft(s1, dim="x", true_phase=True)
assert s1.chunks == S1.chunks
N3 = N1
while N3 == N1:
N3 = np.minimum(np.random.randint(30), N1 + N2)
N4 = N1 + N2 - N3
l = np.arange(-N3, 0) + np.min(x)
r = np.arange(1, N4 + 1) + np.max(x)
s2 = xr.DataArray(
np.concatenate([np.zeros(N3), y, np.zeros(N4)]),
dims=("x",),
coords={"x": np.concatenate([l, x, r])},
)
if chunk:
s2 = s2.chunk()
S2 = xrft.dft(s2, dim="x", true_phase=True)
assert s2.chunks == S2.chunks
xrt.assert_allclose(S1, S2)
| 14,050
|
def unary_math_intr(fn, intrcode):
"""
Implement the math function *fn* using the LLVM intrinsic *intrcode*.
"""
@lower(fn, types.Float)
def float_impl(context, builder, sig, args):
res = call_fp_intrinsic(builder, intrcode, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
unary_math_int_impl(fn, float_impl)
return float_impl
| 14,051
|
def is_is_int(a):
"""Return `True` if `a` is an expression of the form IsInt(b).
>>> x = Real('x')
>>> is_is_int(IsInt(x))
True
>>> is_is_int(x)
False
"""
return is_app_of(a, Kind.IS_INTEGER)
| 14,052
|
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ "testUnits"
, "testNull"
],
"component":
[ "testComponents"
, "testReadMe"
, "testCreateFile"
, "testRewriteFile"
, "testUpdateFile"
, "testDeleteFile"
],
"integration":
[ "testIntegration"
],
"pending":
[ "testPending"
, "testWebDAVFile"
, "testWebDAVFileUrlLib"
]
}
return TestUtils.getTestSuite(TestWebDAVAccess, testdict, select=select)
| 14,053
|
def normalize_channel_wise(tensor: torch.Tensor, mean: torch.Tensor, std: torch.Tensor) -> torch.Tensor:
"""Normalizes given tensor channel-wise
Parameters
----------
tensor: torch.Tensor
Tensor to be normalized
mean: torch.tensor
Mean to be subtracted
std: torch.Tensor
Std to be divided by
Returns
-------
result: torch.Tensor
"""
if len(tensor.size()) != 3:
        raise ValueError("tensor must be 3-dimensional (C x H x W)")
for channel in range(tensor.size(0)):
tensor[channel, :, :] -= mean[channel]
tensor[channel, :, :] /= std[channel]
return tensor
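# Usage sketch (added): per-channel standardisation of a C x H x W tensor.
# Note the function modifies the tensor in place, hence the clone().
import torch
img = torch.rand(3, 4, 4)
mean, std = img.mean(dim=(1, 2)), img.std(dim=(1, 2))
out = normalize_channel_wise(img.clone(), mean, std)
print(out.mean(dim=(1, 2)))  # approximately zero for every channel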
| 14,054
|
def load_images(shot_paths):
"""
images = {
shot1: {
frame_id1: PIL image1,
...
},
...
}
"""
images = list(tqdm(map(load_image, shot_paths), total=len(shot_paths), desc='loading images'))
images = {k: v for k, v in images}
return images
| 14,055
|
def rmSingles(fluxcomponent, targetstring='target'):
"""
Filter out targets in fluxcomponent that have only one ALMA source.
"""
nindiv = len(fluxcomponent)
flagger = numpy.zeros(nindiv)
for icomp in range(nindiv):
target = fluxcomponent[targetstring][icomp]
match = fluxcomponent[targetstring] == target
nmatch = fluxcomponent[targetstring][match].size
if nmatch == 1:
flagger[icomp] = 1
goodflag = flagger == 0
fluxcomponent = fluxcomponent[goodflag]
return fluxcomponent
| 14,056
|
def discover_guids(file_path, keys):
"""
"""
# Now we revise the files
if isinstance(file_path, list):
discovered_files = file_path
else:
discovered_files = [os.path.join(file_path, f) for f in os.listdir(file_path)]
known_guids = set()
for f in discovered_files:
dir_path, fname = os.path.split(f)
if fname.startswith("_"):
continue
with open(f, 'r') as fp:
data = json.load(fp)
results = _recursive_guid(None, data, keys)
known_guids = known_guids.union(results)
return list(known_guids)
| 14,057
|
def gray():
"""Convert image to gray scale."""
form = ImageForm(meta={'csrf': False})
current_app.logger.debug(f"request: {request.form}")
if form.validate():
service_info = cv_services["gray"]
json_format = request.args.get("json", False)
# Image Processing
image = services.convert_to_image(request.files["image"].read())
err, image = services.gray(image)
current_app.logger.debug(f"respond: {image}")
respond = services.convert_to_base64(image)
# Respond
respond = jsonify({"image": str(respond)}), 200
else:
respond = jsonify(message=form.errors), 404
return respond
| 14,058
|
def all_examples(
ctx: typer.Context,
path_out: str = typer.Argument("./tmp", help="Path to example output directory."),
):
"""Generate all examples."""
output_path_string = validate_output_path(path_out)
output_path = Path(output_path_string) / Path("examples")
typer.echo(f"Examples will be saved to {output_path.resolve()}")
save_job_examples(output_path)
save_work_order_examples(output_path)
save_input_data_examples(output_path)
report_finished_task(ctx)
| 14,059
|
def compute_accuracy(outputs, targets, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = targets.size(0)
_, preds = outputs.topk(maxk, 1, True, True)
preds = preds.t()
corrects = preds.eq(targets[None])
result_list = []
for k in topk:
correct_k = corrects[:k].flatten().sum(dtype=torch.float32)
result_list.append(correct_k * (100.0 / batch_size))
return result_list
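# Illustrative call (added): top-1 and top-5 accuracy on random logits.
import torch
logits = torch.randn(8, 10)
targets = torch.randint(0, 10, (8,))
top1, top5 = compute_accuracy(logits, targets, topk=(1, 5))
print(float(top1), float(top5))  # percentages in [0, 100]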
| 14,060
|
def init_logger():
"""Initialize and configure a logger for the application.
"""
# create logger
with open("./function/logging.json", "r", encoding="utf-8") as fd:
logging.config.dictConfig(json.load(fd))
# reduce log level for modules
logging.captureWarnings(True)
requests.packages.urllib3.disable_warnings()
| 14,061
|
def unpack_text_io_wrapper(fp, encoding):
"""
If *fp* is a #io.TextIOWrapper object, this function returns the underlying
binary stream and the encoding of the IO-wrapper object. If *encoding* is not
None and does not match with the encoding specified in the IO-wrapper, a
#RuntimeError is raised.
"""
if isinstance(fp, io.TextIOWrapper):
if fp.writable() and encoding is not None and fp.encoding != encoding:
msg = 'TextIOWrapper.encoding({0!r}) != {1!r}'
raise RuntimeError(msg.format(fp.encoding, encoding))
if encoding is None:
encoding = fp.encoding
fp = fp.buffer
return fp, encoding
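# Usage sketch (added): recover the underlying binary stream and its encoding.
import io
wrapper = io.TextIOWrapper(io.BytesIO(b"hello"), encoding="utf-8")
fp, enc = unpack_text_io_wrapper(wrapper, None)
print(type(fp).__name__, enc)  # BytesIO utf-8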
| 14,062
|
def _reset_graphmode_for_inplaceassign(graph_list, graph_mode):
"""Operator with InplaceAssign should always be composite op"""
for i, g in enumerate(graph_list):
if any((op['name'] == 'InplaceAssign' for op in g['op_desc'])):
graph_mode[i] = 'composite'
| 14,063
|
def main():
"""
Fill in this function.
"""
print("Hello World.")
| 14,064
|
def dummy_blob(size_arr=(9, 9, 9), pixdim=(1, 1, 1), coordvox=None):
"""
Create an image with a non-null voxels at coordinates specified by coordvox.
:param size_arr:
:param pixdim:
:param coordvox: If None: will create a single voxel in the middle of the FOV.
If tuple: (x,y,z): Create single voxel at specified coordinate
If list of tuples: [(x1,y1,z1), (x2,y2,z2)]: Create multiple voxels.
:return: Image object
"""
# nx, ny, nz = size_arr
data = np.zeros(size_arr)
# if not specified, voxel coordinate is set at the middle of the volume
    if coordvox is None:
        coordvox = tuple([round(i / 2) for i in size_arr])
    if isinstance(coordvox, list):
        for icoord in coordvox:
            data[icoord] = 1
    elif isinstance(coordvox, tuple):
        data[coordvox] = 1
    else:
        raise ValueError("Wrong type for coordvox")
# Create image with default orientation LPI
affine = np.eye(4)
affine[0:3, 0:3] = affine[0:3, 0:3] * pixdim
nii = nib.nifti1.Nifti1Image(data, affine)
img = Image(data, hdr=nii.header, dim=nii.header.get_data_shape())
return img
| 14,065
|
def main():
"""
example:
"a big dog fell down the stairs ." vs. "a big dog fallen down the stairs ."
"""
vocab = get_vocab_words()
modifiers = ['over there', 'some time ago', 'this morning', 'at home', 'last night']
names_ = (configs.Dirs.legal_words / 'names.txt').open().read().split()
names = find_counterbalanced_subset(names_, min_size=10, max_size=len(names_))
vbds_vbns_args = [
('arose', 'arisen', ['']),
# optional arguments
('knew', 'known', ['a lot of things', 'she could do it']),
('saw', 'seen', ['a bird', 'a shape', 'something']),
('began', 'begun', ['to work']),
('fell', 'fallen', ['down the stairs']),
('flew', 'flown', ['into the sky', 'away']),
('drove', 'driven', ['out of the garage', 'down the road', 'with one wheel', 'without looking']),
('grew', 'grown', ['quickly',]),
('hid', 'hidden', ['from view', 'behind the bush']),
('rose', 'risen', ['from bed']),
('swore', 'sworn', ['not to do it again']),
('drank', 'drunk', ['some juice', 'the soup', 'your coffee']),
('ate', 'eaten', ['a lot', 'more than me', 'some ice cream']),
('drew', 'drawn', ['a picture', 'a map', 'a round circle']),
('wrote', 'written', ['a story', 'a note', 'into a book', 'with a large pen']),
('sang', 'sung', ['a nice song', 'in the theater', 'with a pretty voice', 'my favorite song']),
('spoke', 'spoken', ['very fast', 'to me', 'about many things', 'without thinking']),
('came', 'come', ['to the store', 'just in time', 'when we needed her', 'too late']),
# transitive
('was', 'been', ['here', 'alone', 'afraid']),
('beat', 'beaten', ['the dough', 'a little boy', 'their pet']),
('became', 'become', ['angry', 'very different', 'someone else']),
('bit', 'bitten', ['her own tongue', 'into the cake', 'off a big chunk']),
('blew', 'blown', ['out the candle', 'away the dirt',]),
('chose', 'chosen', ['the best option', 'the good one', ]),
('did', 'done', ['nothing wrong', 'something bad', 'the best she could ']),
('forgave', 'forgiven', ['her', 'the child', 'him']),
('gave', 'given', ['a book to a student', 'something sweet to the baby', 'money to the man']),
('rode', 'ridden', ['a horse', 'a cart', 'in the front seat', 'away']),
('shook', 'shaken', ['the plate', 'the table', 'the bowl']),
('strode', 'stridden', ['']),
('took', 'taken', ['a paper', 'some food', 'the bell', 'it', 'them']),
('threw', 'thrown', ['the trash out', 'the paper ball', 'some away', 'his ball']),
]
while True:
# random choices
name = random.choice(names)
mod = random.choice(modifiers)
vbd, vbn, args = random.choice(vbds_vbns_args)
arg = random.choice(args)
if (vbd not in vocab or vbn not in vocab) or vbd == vbn:
# print(f'"{verb_base:<22} excluded due to some forms not in vocab')
continue
if arg == '':
continue
# vbd is correct
yield template.format(name, vbn, arg, mod) # bad
yield template.format(name, vbd, arg, mod) # good
# vbn is correct
yield template.format(name, 'had ' + vbd, arg, mod)
yield template.format(name, 'had ' + vbn, arg, mod)
| 14,066
|
def poly_edges(P, T):
"""
Returns the ordered edges from the given polygons
Parameters
----------
P : Tensor
a (N, D,) points set tensor
T : LongTensor
a (M, T,) topology tensor
Returns
-------
tuple
a tuple containing the edges of the given polygons
"""
p = P[torch.cat((T, T[0].unsqueeze(0)), dim=0)]
return tuple(p[1:]-p[:-1])
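# Small sketch (added): a single triangle stored column-wise, i.e. vertex
# indices along dim 0 and polygons along dim 1, matching the indexing above.
import torch
P = torch.tensor([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
T = torch.tensor([[0], [1], [2]])
e01, e12, e20 = poly_edges(P, T)   # the three edge vectors of the triangle
print(e01, e12, e20)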
| 14,067
|
def make_list(v):
"""
If the object is not a list already, it converts it to one
Examples:
[1, 2, 3] -> [1, 2, 3]
[1] -> [1]
1 -> [1]
"""
if not jsoncfg.node_is_array(v):
if jsoncfg.node_is_scalar(v):
location = jsoncfg.node_location(v)
line = location.line
column = location.column
else:
line = v.line
column = v.column
a = jsoncfg.config_classes.ConfigJSONArray(line, column)
a._append(v)
return a
return v
| 14,068
|
def data_gen(V, batch, nbatches, max_words_in_sentence):
"""
Generate random data for a src-tgt copy task.
# 5: # of sentences per batch == batch(2nd arg)
# 4: # of words in each sentence
# 7: size of word dictionary
np.random.randint(low=1, high=7, size=(5, 4)) # 5 by 4 matrix
>>> gen = data_gen(7, 5, 2, 4)
>>> batch0 = next(gen)
    >>> batch0.src.shape  # [5, 4]
    >>> batch0.ntokens  # 15
    >>> batch0.src
tensor([[1, 2, 3, 2],
[1, 2, 1, 4],
[1, 2, 4, 5],
[1, 1, 2, 1],
[1, 2, 5, 5]]) # [5, 4]
>>> batch0.src_mask
tensor([[[1, 1, 1, 1]],
[[1, 1, 1, 1]],
[[1, 1, 1, 1]],
[[1, 1, 1, 1]],
[[1, 1, 1, 1]]], dtype=torch.uint8) # [5, 1, 4]
>>> batch0.trg
tensor([[1, 2, 3],
[1, 2, 1],
[1, 2, 4],
[1, 1, 2],
[1, 2, 5]]) # [5, 3]
>>> batch0.trg_y
tensor([[2, 3, 2],
[2, 1, 4],
[2, 4, 5],
[1, 2, 1],
[2, 5, 5]]) # [5, 3]
>>> batch0.trg_mask
tensor([[[1, 0, 0],
[1, 1, 0],
[1, 1, 1]],
[[1, 0, 0],
[1, 1, 0],
[1, 1, 1]],
[[1, 0, 0],
[1, 1, 0],
[1, 1, 1]],
[[1, 0, 0],
[1, 1, 0],
[1, 1, 1]],
[[1, 0, 0],
[1, 1, 0],
[1, 1, 1]]], dtype=torch.uint8) # [5, 3, 3]
"""
for _ in range(nbatches):
data = torch.from_numpy(np.random.randint(low=1, high=V, size=(batch, max_words_in_sentence)))
        data[:, 0] = 1  # first token of every sentence is the start symbol (id 1)
src = Variable(data, requires_grad=False).type(torch.long)
tgt = Variable(data, requires_grad=False).type(torch.long)
yield BatchP(src, tgt, 0)
| 14,069
|
def score_game(game_core):
"""Запускаем игру 1000 раз, чтобы узнать, как быстро игра угадывает число"""
count_ls = []
    np.random.seed(1)  # fix the RANDOM SEED so the experiment is reproducible
random_array = np.random.randint(1, 101, 1000)
for number in random_array:
count_ls.append(game_core(number))
score = int(np.mean(count_ls))
print(f"Ваш алгоритм угадывает число в среднем за {score} попыток")
return score
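
# A minimal usage sketch: `game_core_binary` below is a hypothetical guesser (not part
# of the original snippet) illustrating the kind of callable `score_game` expects --
# a function that takes the hidden number in [1, 100] and returns its attempt count.
def game_core_binary(number):
    """Guess `number` by halving the search range; return the number of attempts."""
    count, low, high = 0, 1, 100
    while True:
        count += 1
        guess = (low + high) // 2
        if guess == number:
            return count
        if guess < number:
            low = guess + 1
        else:
            high = guess - 1

# score_game(game_core_binary)  # typically reports around 5 attempts on average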
| 14,070
|
def add_plot(
lon, lat, kind=None, props=None, ax=None, break_on_change=False, transform=identity
):
"""Add a plot with different props for different 'kind' values to an existing map
Parameters
----------
lon : sequence of float
lat : sequence of float
kind : sequence of hashable, optional
Controls what props are used. Length must match lon/lat and values
are used to index into the `props` map.
props : dict, optional.
Maps `kind` of first and last point of each segment to plot style.
By default, sorted values from `kind`
are mapped to 'axes.prop_cycle'. `props` for segments between
points with different `kind` value are looked up under `None`.
If `None` is missing, these points are not plotted.
ax : matplotlib axes object, optional
break_on_change : bool, optional
Whether to create a new segment when kind changes. Generally True for fishing plots
and False for vessel plots.
transform : cartopy.crs.Projection, optional
Returns
-------
dict mapping keys to Line2D
Values are suitable for passing to legend.
"""
if ax is None:
ax = plt.gca()
assert len(lon) == len(lat)
if kind is None:
kind = np.ones(len(lon))
else:
kind = np.asarray(kind)
assert len(kind) == len(lon)
if props is None:
props = styles.create_props(np.unique(kind))
handles = {}
for k1, k2 in sorted(props.keys()):
mask = _build_mask(kind, k1, k2, break_on_change)
if mask.sum():
ml_coords = _build_multiline_string_coords(lon, lat, mask, break_on_change)
mls = MultiLineString(ml_coords)
p = props[k1, k2].copy()
if "legend" in p:
key = p.pop("legend")
else:
key = k1 if (k1 == k2) else f"{k1}-{k2}"
ax.add_geometries([mls], crs=transform, **p)
if key:
handles[key] = Line2D(
[0], [0], color=p["edgecolor"], lw=p.get("linewidth", 1)
)
return handles
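
# Usage sketch (hedged; names and values are illustrative, not from the original source).
# Assumes cartopy is available and `lon`, `lat`, `kind` are equal-length sequences:
# import matplotlib.pyplot as plt
# import cartopy.crs as ccrs
# ax = plt.axes(projection=ccrs.PlateCarree())
# handles = add_plot(lon, lat, kind=kind, ax=ax, break_on_change=True)
# ax.legend(handles.values(), handles.keys())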
| 14,071
|
def polyExtrudeFacet(*args, **kwargs):
"""
Extrude faces. Faces can be extruded separately or together, and manipulations can be performed either in world or
object space.
Flags:
- attraction : att (float) [create,query,edit]
This flag specifies the attraction, related to magnet. C: Default is 0.0. The range is [-2.0, 2.0]. Q: When queried,
this flag returns a float.
- caching : cch (bool) [create,edit]
Toggle caching for all attributes so that no recomputation is needed.
- constructionHistory : ch (bool) [create,query]
Turn the construction history on or off (where applicable). If construction history is on then the corresponding node
will be inserted into the history chain for the mesh. If construction history is off then the operation will be
performed directly on the object. Note:If the object already has construction history then this flag is ignored and the
node will always be inserted into the history chain.
- createCurve : cc (bool) []
- divisions : d (int) [create,query,edit]
This flag specifies the number of subdivisions. C: Default is 1 Q: When queried, this flag returns an int.
- frozen : fzn (bool) []
- gain : ga (float) []
- gravity : g (float, float, float) [create,query,edit]
This flag specifies the gravity vector. C: Default is 0.0 -1.0 0.0. Q: When queried, this flag returns a float[3].
- gravityX : gx (float) [create,query,edit]
This flag specifies X for the gravity vector. C: Default is 0.0. Q: When queried, this flag returns a float.
- gravityY : gy (float) [create,query,edit]
This flag specifies Y for the gravity vector. C: Default is -1.0. Q: When queried, this flag returns a float.
- gravityZ : gz (float) [create,query,edit]
This flag specifies Z for the gravity vector. C: Default is 0.0. Q: When queried, this flag returns a float.
- inputCurve : inc (PyNode) [create]
This flag specifies the name of the curve to be used as input for extrusion C: The selected faces will be extruded along
the curve. It will be useful to set a higher value (greater than 4) for the '-d/-divisions' flag, to get good results.
The normal of the surface has to be aligned with the direction of the curve. The extrusion is evenly distributed in the
curve's parameter space, and not on the curve's geometry space
- keepFacesTogether : kft (bool) [create,query,edit]
This flag specifies how to extrude faces. If on, faces are pulled together (connected ones stay connected and only
outside edges form new faces), otherwise they are pulled independently (each edge on selected faces creates a new face
and manipulations are performed on each selected face separately). C: Default is on. Q: When queried, this flag returns
an int.
- keepFacetTogether : xft (bool) []
- localCenter : lc (int) []
- localDirection : ld (float, float, float) [create,query,edit]
This flag specifies the local slant axis (see local rotation). C: Default is 0.0 0.0 1.0. Q: When queried, this flag
returns a float[3].
- localDirectionX : ldx (float) [create,query,edit]
This flag specifies X for the local slant axis. C: Default is 0.0. Q: When queried, this flag returns a float.
- localDirectionY : ldy (float) [create,query,edit]
This flag specifies Y for the local slant axis. C: Default is 0.0. Q: When queried, this flag returns a float.
- localDirectionZ : ldz (float) [create,query,edit]
This flag specifies Z for the local slant axis. C: Default is 1.0. Q: When queried, this flag returns a float.
- localRotate : lr (float, float, float) [create,query,edit]
This flag specifies the local rotations : (slantRot, slantRoll, twistRot). C: Default is 0.0 0.0 0.0. Q: When queried,
this flag returns a float[3]. Local rotation (slantRot, slantRoll, twistRot).
- localRotateX : lrx (float) [create,query,edit]
This flag specifies local rotation X angle (Slant Rot around slantAxis). C: Default is 0.0. The range is [0, 360]. Q:
When queried, this flag returns a float.
- localRotateY : lry (float) [create,query,edit]
This flag specifies local rotation Y angle (Slant Roll of slantAxis). C: Default is 0.0. The range is [0, 180]. Q: When
queried, this flag returns a float.
- localRotateZ : lrz (float) [create,query,edit]
This flag specifies local rotation Z angle (Twist around normal). C: Default is 0.0. The range is [0, 360]. Q: When
queried, this flag returns a float.
- localScale : ls (float, float, float) [create,query,edit]
This flag specifies the local scaling vector. C: Default is 1.0 1.0 1.0. Q: When queried, this flag returns a float[3].
- localScaleX : lsx (float) [create,query,edit]
This flag specifies X for local scaling vector. C: Default is 1.0. Q: When queried, this flag returns a float.
- localScaleY : lsy (float) [create,query,edit]
This flag specifies Y for local scaling vector. C: Default is 1.0. Q: When queried, this flag returns a float.
- localScaleZ : lsz (float) [create,query,edit]
This flag specifies Z for local scaling vector : Flattening. C: Default is 1.0. The range is [0.0, 1.0]. Q: When
queried, this flag returns a float. Dynamic Values
- localTranslate : lt (float, float, float) [create,query,edit]
This flag specifies the local translation vector. C: Default is 0.0 0.0 0.0. Q: When queried, this flag returns a
float[3].
- localTranslateX : ltx (float) [create,query,edit]
This flag specifies the X local translation vector. C: Default is 0.0. Q: When queried, this flag returns a float.
- localTranslateY : lty (float) [create,query,edit]
This flag specifies the Y local translation vector. C: Default is 0.0. Q: When queried, this flag returns a float.
- localTranslateZ : ltz (float) [create,query,edit]
This flag specifies the Z local translation vector. C: Default is 0.0. Q: When queried, this flag returns a float.
- magnX : mx (float) [create,query,edit]
This flag specifies X for the magnet vector. C: Default is 0.0. Q: When queried, this flag returns a float.
- magnY : my (float) [create,query,edit]
This flag specifies Y for the magnet vector. C: Default is 0.0. Q: When queried, this flag returns a float.
- magnZ : mz (float) []
- magnet : m (float, float, float) [create,query,edit]
This flag specifies the magnet vector. C: Default is 0.0 0.0 0.0. Q: When queried, this flag returns a float[3].
- maya2012 : m12 (bool) []
- name : n (unicode) [create]
Give a name to the resulting node.
- nodeState : nds (int) [create,query,edit]
    Defines how to evaluate the node. 0: Normal. 1: PassThrough. 2: Blocking. 3: Internally disabled; will return to Normal state
    when enabled. 4: Internally disabled; will return to PassThrough state when enabled. 5: Internally disabled; will return to
    Blocking state when enabled. Flag can have multiple arguments, passed either as a tuple or a list.
- offset : off (float) [create,query,edit]
This flag specifies the local offset. Each edge of each selected face moves towards the inside of the face by given
distance (in local reference). C: Default is 0.0.
- pivot : pvt (float, float, float) [create,query,edit]
This flag specifies the pivot for scaling and rotation. C: Default is 0.0 0.0 0.0. Q: When queried, this flag returns a
float[3].
- pivotX : pvx (float) [create,query,edit]
This flag specifies the X pivot for scaling and rotation. C: Default is 0.0. Q: When queried, this flag returns a float.
- pivotY : pvy (float) [create,query,edit]
This flag specifies the Y pivot for scaling and rotation. C: Default is 0.0. Q: When queried, this flag returns a float.
- pivotZ : pvz (float) [create,query,edit]
This flag specifies the Z pivot for scaling and rotation. C: Default is 0.0. Q: When queried, this flag returns a float.
Local Values
- random : ran (float) [create,query,edit]
This flag specifies the random value for all parameters. C: Default is 0.0. The range is [-10.0, 10.0]. Q: When queried,
this flag returns a float.
- rotate : ro (float, float, float) [create,query,edit]
This flag specifies the rotation angles around X, Y, Z. C: Default is 0.0 0.0 0.0. Q: When queried, this flag returns a
float[3].
- rotateX : rx (float) [create,query,edit]
This flag specifies the rotation angle around X. C: Default is 0.0. Q: When queried, this flag returns a float.
- rotateY : ry (float) [create,query,edit]
This flag specifies the rotation angle around Y. C: Default is 0.0. Q: When queried, this flag returns a float.
- rotateZ : rz (float) [create,query,edit]
This flag specifies the rotation angle around Z. C: Default is 0.0. Q: When queried, this flag returns a float.
- scale : s (float, float, float) [create,query,edit]
This flag specifies the scaling vector. C: Default is 1.0 1.0 1.0. Q: When queried, this flag returns a float[3].
- scaleX : sx (float) [create,query,edit]
This flag specifies X for scaling vector. C: Default is 1.0. Q: When queried, this flag returns a float.
- scaleY : sy (float) [create,query,edit]
This flag specifies Y for scaling vector. C: Default is 1.0. Q: When queried, this flag returns a float.
- scaleZ : sz (float) [create,query,edit]
This flag specifies Z for scaling vector. C: Default is 1.0. Q: When queried, this flag returns a float.
- smoothingAngle : sma (float) [create,query,edit]
This flag specifies smoothingAngle threshold used to determine whether newly created edges are hard or soft. C: Default
is 30.0. The range is [0, 180]. Q: When queried, this flag returns a float. Global Values
- taper : tp (float) []
- taperCurve_FloatValue : cfv (float) []
- taperCurve_Interp : ci (int) []
- taperCurve_Position : cp (float) []
- thickness : tk (float) []
- translate : t (float, float, float) [create,query,edit]
This flag specifies the translation vector. C: Default is 0.0 0.0 0.0. Q: When queried, this flag returns a float[3].
- translateX : tx (float) [create,query,edit]
This flag specifies the X translation vector. C: Default is 0.0. Q: When queried, this flag returns a float.
- translateY : ty (float) [create,query,edit]
This flag specifies the Y translation vector. C: Default is 0.0. Q: When queried, this flag returns a float.
- translateZ : tz (float) [create,query,edit]
This flag specifies the Z translation vector. C: Default is 0.0. Q: When queried, this flag returns a float.
- twist : twt (float) []
- weight : w (float) [create,query,edit]
This flag specifies the weight, related to gravity. C: Default is 0.0. Q: When queried, this flag returns a float.
- worldSpace : ws (bool) [create,query,edit]
This flag specifies which reference to use. If on: all geometrical values are taken in world reference. If off: all
geometrical values are taken in object reference. C: Default is off. Q: When queried, this flag returns an int.
Common flags
Derived from mel command `maya.cmds.polyExtrudeFacet`
"""
pass
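
# Usage sketch (runs only inside Maya; the face component name is illustrative):
# import pymel.core as pm
# pm.polyExtrudeFacet('pCube1.f[0]', localTranslateZ=0.5, keepFacesTogether=True, divisions=2)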
| 14,072
|
def create_testcase_list_file(testcase_file_paths, input_directory):
"""Store list of fuzzed testcases from fuzzer in a bot specific
testcase list file."""
if not testcase_file_paths:
logs.log_error('No testcases found, skipping list file.')
return
bot_testcases_file_path = utils.get_bot_testcases_file_path(input_directory)
  with open(bot_testcases_file_path, 'w') as bot_testcases_file_handle:
bot_testcases_file_handle.write('\n'.join(testcase_file_paths))
| 14,073
|
def led_flash(e, t):
"""flash the led continously"""
while not e.isSet():
time.sleep(t)
GPIO.output(16, GPIO.LOW)
time.sleep(t)
GPIO.output(16, GPIO.HIGH)
| 14,074
|
def test_lsp_code_action2() -> None:
"""Tests edge case for code actions.
Identified in: https://github.com/pappasam/jedi-language-server/issues/96
"""
with session.LspSession() as ls_session:
ls_session.initialize()
uri = as_uri((REFACTOR_TEST_ROOT / "code_action_test2.py"))
actual = ls_session.text_document_code_action(
{
"textDocument": {"uri": uri},
"range": {
"start": {"line": 2, "character": 6},
"end": {"line": 2, "character": 6},
},
"context": {"diagnostics": []},
}
)
assert_that(actual, is_(None))
| 14,075
|
def can_review_faults(user):
"""
users can review faults if one of the the following applies:
a) No fault review groups exist and they have can_review permissions
b) Fault review groups exist, they are a member of one, and they have
review permissions
"""
can_review = user.has_perm("faults.can_review")
review_groups = [frg.group for frg in FaultReviewGroup.objects.select_related("group")]
if review_groups:
can_review = can_review and len(set(review_groups) & set(user.groups.all())) > 0
return can_review
| 14,076
|
def test_line_insert_start(tempfile_name, get_body):
"""
Test for file.line for insertion at the beginning of the file
:return:
"""
cfg_content = "everything: fantastic"
file_content = os.linesep.join(
["file_roots:", " base:", " - /srv/salt", " - /srv/sugar"]
)
file_modified = os.linesep.join(
[
cfg_content,
"file_roots:",
" base:",
" - /srv/salt",
" - /srv/sugar",
]
)
isfile_mock = MagicMock(
side_effect=lambda x: True if x == tempfile_name else DEFAULT
)
with patch("os.path.isfile", isfile_mock), patch(
"os.stat", MagicMock(return_value=DummyStat())
), patch("salt.utils.files.fopen", mock_open(read_data=file_content)), patch(
"salt.utils.atomicfile.atomic_open", mock_open()
) as atomic_open_mock:
filemod.line(
tempfile_name, content=cfg_content, location="start", mode="insert"
)
handles = atomic_open_mock.filehandles[tempfile_name]
# We should only have opened the file once
open_count = len(handles)
assert open_count == 1, open_count
# We should only have invoked .writelines() once...
writelines_content = handles[0].writelines_calls
writelines_count = len(writelines_content)
assert writelines_count == 1, writelines_count
# ... with the updated content
expected = get_body(file_modified)
assert writelines_content[0] == expected, (writelines_content[0], expected)
| 14,077
|
def create_freshservice_object(obj_type, data):
"""Use the Freshservice v2 API to create an object.
Accepts an object name (string) and a dict of key values.
"""
url = '{}/{}'.format(settings.FRESHSERVICE_ENDPOINT, obj_type)
resp = requests.post(url, auth=FRESHSERVICE_AUTH, json=data)
return resp
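
# Usage sketch (the object type and field names are illustrative; consult the
# Freshservice v2 API docs for the exact payload of each object type):
# resp = create_freshservice_object('tickets', {'subject': 'Printer offline', 'priority': 1})
# resp.raise_for_status()
# ticket = resp.json()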
| 14,078
|
def print_begin(*args, sep=' ', end='\n', file=None, ret_value='') -> str:
"""Print the function name and start."""
print(_prefix('begin'), *args, sep=sep, end=end, file=file, flush=True)
return ret_value
| 14,079
|
def scale_bounding_box(bounding_box,scale):
"""Scales bounding box coords (in dict from {x1,y1,x2,y2}) by x and y given by sclae in dict form {x,y}"""
scaled_bounding_box = {
"x1" : int(round(bounding_box["x1"]*scale["x"]))
,"y1" : int(round(bounding_box["y1"]*scale["y"]))
,"x2" : int(round(bounding_box["x2"]*scale["x"]))
,"y2" : int(round(bounding_box["y2"]*scale["y"]))
}
return scaled_bounding_box
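
# Worked example with illustrative values:
# scale_bounding_box({"x1": 10, "y1": 20, "x2": 30, "y2": 40}, {"x": 2.0, "y": 0.5})
# -> {"x1": 20, "y1": 10, "x2": 60, "y2": 20}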
| 14,080
|
def is_command(obj) -> bool:
"""
Return whether ``obj`` is a click command.
:param obj:
"""
return isinstance(obj, click.Command)
| 14,081
|
def GetContigs(orthologs):
"""get map of contigs to orthologs.
An ortholog can be part of only one contig, but the same ortholog_id can
be part of several contigs.
"""
contigs = {}
for id, oo in orthologs.items():
for o in oo:
if o.contig not in contigs:
contigs[o.contig] = []
contigs[o.contig].append(o)
return contigs
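
# Usage sketch (assumes each ortholog exposes a `.contig` attribute; values illustrative):
# from collections import namedtuple
# Ortholog = namedtuple('Ortholog', 'contig')
# GetContigs({'gene1': [Ortholog('chr1'), Ortholog('chr2')], 'gene2': [Ortholog('chr1')]})
# -> {'chr1': [Ortholog(contig='chr1'), Ortholog(contig='chr1')], 'chr2': [Ortholog(contig='chr2')]}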
| 14,082
|
def _save_training_results(
mltk_model:MltkModel,
keras_model:KerasModel,
training_history,
logger: logging.Logger,
show:bool = False
) -> TrainingResults:
"""Save the training history as .json and .png"""
results = TrainingResults(mltk_model, keras_model, training_history)
metric, best_val = results.get_best_metric()
logger.info(f'\n\n*** Best training {metric} = {best_val:.3f}\n\n')
try:
history_json_path = f'{mltk_model.log_dir}/train/training-history.json'
logger.debug(f'Generating {history_json_path}')
with open(history_json_path, 'w') as f:
json.dump(results.asdict(), f, indent=2)
except Exception as e:
logger.warning(f'Error while saving training results to {history_json_path}, err: {e}')
# See https://github.com/keras-team/keras/blob/master/keras/losses.py
supported_metrics = {}
supported_metrics['accuracy'] = 'Accuracy'
supported_metrics['loss'] = 'Loss'
supported_metrics['mse'] = 'Mean Square Error'
supported_metrics['mae'] = 'Mean Absolute Error'
supported_metrics['mape'] = 'Mean Absolute Percentage Error'
    supported_metrics['msle'] = 'Mean Square Logarithmic Error'
    supported_metrics['bce'] = 'Binary Cross-entropy'
supported_metrics['cce'] = 'Categorical Cross-entropy'
found_metrics = []
history = results.history
for metric in history:
if not metric in supported_metrics:
continue
if not f'val_{metric}' in history:
continue
found_metrics.append(dict(
name=metric,
train=history[metric],
validation=history[f'val_{metric}'],
))
fig, _ = plt.subplots(figsize=(6, 6), clear=True)
fig.suptitle(f'{mltk_model.name} Training History')
# %% Plot training and validation metrics
for i, metric in enumerate(found_metrics):
plt.subplot(len(found_metrics), 1, i + 1)
plt.plot(metric['train'])
plt.plot(metric['validation'])
plt.title(f'{supported_metrics[metric["name"]]}')
plt.ylabel(supported_metrics[metric['name']])
plt.xlabel('Epoch')
        plt.legend(['Train', 'Validation'], loc='upper left')
plt.subplots_adjust(hspace=.5)
training_results_path = f'{mltk_model.log_dir}/train/training-history.png'
logger.debug(f'Generating {training_results_path}')
plt.savefig(training_results_path)
if show:
plt.show(block=False)
else:
fig.clear()
plt.close(fig)
return results
| 14,083
|
def creating_windows(alpha):
"""
This function draws the last lines for the program to fill the box
"""
#First Window
alpha.setpos(25,25)
alpha.pencolor("purple")
alpha.fillcolor("purple")
alpha.begin_fill()
alpha.pendown()
for x in range(4): #making square
alpha.forward(20)
alpha.left(90)
alpha.end_fill()
alpha.penup()
#Second Window
alpha.setpos(-45, 25)
alpha.pendown()
alpha.begin_fill()
for y in range(4): #making square
alpha.forward(20)
alpha.left(90)
alpha.end_fill()
alpha.penup()
| 14,084
|
def client(client, inp):
"""Client handler for module.
Args:
client: instance of PoetClient class
inp: command string sent from server
"""
# your code goes here
pass
| 14,085
|
def set_backwards_pass(op, backwards):
"""
Returns new operation which behaves like `op` in the forward pass but
like `backwards` in the backwards pass.
"""
return backwards + tf.stop_gradient(op - backwards)
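
# Example (a sketch assuming TensorFlow 2.x eager mode): a straight-through estimator
# for rounding -- the forward value is tf.round(x), while gradients flow as if the op
# were the identity.
# x = tf.Variable([0.2, 0.7, 1.4])
# with tf.GradientTape() as tape:
#     y = set_backwards_pass(tf.round(x), x)   # forward value: [0., 1., 1.]
#     loss = tf.reduce_sum(y)
# tape.gradient(loss, x)                       # -> [1., 1., 1.]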
| 14,086
|
def remove_hydrogens(list_of_lines):
"""
Removes hydrogen from the pdb file.
To add back the hydrogens, run the reduce program on the file.
"""
return (line for line in list_of_lines if line['element']!=" H")
| 14,087
|
def crash_random_instance(org: str, space: str, appname: str, configuration: Configuration, count: int = 1):
"""
Crash one or more random application instances.
:param org: String; Cloud Foundry organization containing the application.
:param space: String; Cloud Foundry space containing the application.
:param appname: String; Application in Cloud Foundry which is to be targeted.
:param count: int; Number of instances to kill.
:param configuration: Configuration; Configuration details, see `README.md`.
:return: A JSON Object representing the application which was targeted.
"""
return run_ctk(
lambda app: app.crash_random_instance(count=count),
configuration, org, space, appname,
"Crashing {} random app instance(s)...".format(count)
)
| 14,088
|
def test_lildhtm():
"""Test lildhtm.pft database lilacs"""
config.load(join('fixtures','lilacs.ini'))
current_path = join(os.getcwd(),'fixtures')
collection = pyisis.files.IsisCollection('fixtures',[current_path],config=config)
database = collection['lilacs']
expr = ''.join(open(join('fixtures','lildhtm.pft')).readlines())
result_mx = open(join('fixtures','mx-lildhtm')).read()
session = Session(config)
result_mxpy = format(session, expr, database, 1, 3226, config, stdout=0)
#open('mxpyd.txt','w').write(result_mxpy)
assert result_mxpy == result_mx, "Failed for lildhtm.pft"
| 14,089
|
def spc_dict_from_spc_info(spc_info: dict, resonance: bool = True) -> dict:
"""
Generate a species dictionary from species info.
Args:
        spc_info (dict): Species info containing the label and geometry info of each species.
        resonance (bool): Whether to generate resonance structures for the species in the dictionary.
Returns:
dict: The species dictionary generated from the spc_info.
"""
spc_dict = {}
for label, spc in spc_info.items():
species = species_from_spc_info(spc)
if not species:
continue
if resonance:
species.generate_resonance_structures()
spc_dict[label] = species
return spc_dict
| 14,090
|
def test_pos_invalid_input_4():
"""
    Tests the get_part_of_speech method of NLPFrame when a non-existent column is specified
"""
initial_df = nlp.NLPFrame({'text_col' : ['Today is a beautiful Monday and I would love getting a coffee. However, startbucks is closed.','It has been an amazing day today!']}, index = [0,1], column = 'non_existing')
try:
initial_df.get_part_of_speech(column = 'non_existing')
assert False
except ValueError:
pass
| 14,091
|
def label_smooth_loss(log_prob, label, confidence=0.9):
"""
:param log_prob: log probability
    :param label: class indices as a LongTensor of shape (N,); the smoothed one-hot target is built internally
:param confidence: we replace one (in the one hot) with confidence. 0 <= confidence <= 1.
:return:
"""
N = log_prob.size(0)
C = log_prob.size(1)
smoothed_label = torch.full(size=(N, C), fill_value=(1-confidence) / (C - 1)).to(log_prob)
smoothed_label.scatter_(dim=1, index=torch.unsqueeze(label, dim=1), value=confidence)
loss = - torch.sum(log_prob * smoothed_label) / N
return loss
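
# Minimal usage sketch (illustrative shapes): log-probabilities for a batch of
# 2 samples over 3 classes, with integer class labels.
# import torch.nn.functional as F
# logits = torch.randn(2, 3)
# log_prob = F.log_softmax(logits, dim=1)
# label = torch.tensor([0, 2])              # class indices, not one-hot
# loss = label_smooth_loss(log_prob, label, confidence=0.9)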
| 14,092
|
def update(self, using=None, **kwargs):
"""
Updates specified attributes on the current instance.
"""
assert self.pk, "Cannot update an instance that has not yet been created."
using = using or router.db_for_write(self.__class__, instance=self)
for field in self._meta.fields:
if getattr(field, 'auto_now', False) and field.name not in kwargs:
kwargs[field.name] = field.pre_save(self, False)
affected = self.__class__._base_manager.using(using).filter(pk=self.pk).update(**kwargs)
    for k, v in kwargs.items():
if isinstance(v, ExpressionNode):
v = resolve_expression_node(self, v)
setattr(self, k, v)
if affected == 1:
signals.post_save.send(sender=self.__class__, instance=self, created=False)
return True
elif affected == 0:
return False
elif affected < 0:
raise ValueError("Somehow we have updated a negative amount of rows, you seem to have a problem with your db backend.")
else:
raise ValueError("Somehow we have updated multiple rows, and you are now royally fucked.")
| 14,093
|
def differential_privacy_with_risk( dfg_freq, dfg_time, delta, precision, aggregate_type=AggregateType.SUM):
"""
This method adds the differential privacy to the DFG of both time and frequencies.
* It calculates the epsilon using the guessing advantage technique.
* It adds laplace noise to the DFGs.
* It calculates the distance resulted from the noise
"""
    accuracy = 1
    # calculate epsilon
    epsilon_freq, senstivity_freq = calculate_epsilon_freq(dfg_freq, delta)
    epsilon_time, senstivity_time = calculate_epsilon_time(dfg_time, delta, precision, aggregate_type)
    # adding laplace noise to DFG freq
    dfg_freq_new = add_laplace_noise_freq(dfg_freq, epsilon_freq)
    # adding laplace noise to DFG time
    dfg_time, dfg_time_new = add_laplace_noise_time(aggregate_type, dfg_time, epsilon_time)
    # calculate earth mover's distance
    emd_freq = earth_mover_dist(dfg_freq, dfg_freq_new)
    emd_time = earth_mover_dist(dfg_time, dfg_time_new)
    # calculate the APE, MAPE, and SMAPE
    MAPE_freq, SMAPE_freq, APE_dist_freq, SMAPE_dist_freq = error_calculation(dfg_freq, dfg_freq_new)
    MAPE_time, SMAPE_time, APE_dist_time, SMAPE_dist_time = error_calculation(dfg_time, dfg_time_new)
    # return dfg_freq_new, dfg_time_new, epsilon_freq, epsilon_time, emd_freq, emd_time, percent_freq, percent_time, percent_freq_dist, percent_time_dist
    return dfg_freq_new, dfg_time_new, epsilon_freq, epsilon_time, MAPE_freq, SMAPE_freq, APE_dist_freq, MAPE_time, SMAPE_time, APE_dist_time, SMAPE_dist_freq, SMAPE_dist_time
| 14,094
|
def get_all_values(string: str) -> Iterable[int]:
"""Return all kinds of candidates, with ordering: Dec, Hex, Oct, Bin."""
if string.startswith('0x'):
return filter(bool, [parse_hex(string[2:])]) # type: ignore[list-item]
if string.startswith('0o'):
return filter(bool, [parse_oct(string[2:])]) # type: ignore[list-item]
if string.startswith('0b'):
return filter(bool, [parse_bin(string[2:])]) # type: ignore[list-item]
# try each base when no prefix
return Counter(filter(bool, map(lambda f: f(string), # type: ignore[arg-type,return-value]
[parse_dec, parse_hex, parse_oct, parse_bin])))
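
# Usage sketch (assumes the parse_* helpers return the parsed int or None):
# list(get_all_values('0x1f'))  ->  [31]
# list(get_all_values('10'))    ->  [10, 16, 8, 2]   (decimal, hex, octal, binary)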
| 14,095
|
def svn_auth_get_simple_provider(*args):
"""svn_auth_get_simple_provider(apr_pool_t pool)"""
return _core.svn_auth_get_simple_provider(*args)
| 14,096
|
def preferred_language():
""" It just returns first language from acceptable
"""
return acceptable_languages()[0]
| 14,097
|
def get_frequencies(trial = 1):
"""
get frequency lists
"""
if trial =="run_fast_publish":
lb_targ, ub_targ, obs_hz = 340, 350, 10
elif trial == 1:
lb_targ, ub_targ, obs_hz = 210, 560, int(320 / 2)
elif trial == 2:
lb_targ, ub_targ, obs_hz = 340, 640, 280
elif trial == 3:
lb_targ, ub_targ, obs_hz = 340, 350, 20#40
elif trial == 4:
lb_targ, ub_targ, obs_hz = 60, 350, 40
elif trial == 5:
lb_targ, ub_targ, obs_hz = 50, 200, 40
if trial == 6:
lb_targ, ub_targ, obs_hz = 130, 530, 130
if trial == 7:
lb_targ, ub_targ, obs_hz = 500, 900, 250
obs_list = list( range( lb_targ - obs_hz, lb_targ))
obs_list += list( range( ub_targ, ub_targ + obs_hz))
resp_list = list( range( lb_targ, ub_targ))
return obs_list, resp_list
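
# Worked example: trial 3 uses lb_targ=340, ub_targ=350, obs_hz=20, so
# obs, resp = get_frequencies(trial=3)
# obs covers 320-339 and 350-369; resp covers 340-349.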
| 14,098
|
def load_csv(file_path: str = None, clean: bool = True) -> pd.DataFrame:
"""Load the dataset CSV file.
Args:
        file_path (str, optional): Path to CSV file. Can be omitted to load the default dataset. Defaults to None.
        clean (bool, optional): Whether to run the internal cleaning step on the loaded data. Defaults to True.
Returns:
pd.DataFrame: A Pandas dataframe representing the dataset.
"""
if file_path is None:
file_path = get_path()
if not path.exists(file_path):
_generate_minimized(file_path)
df = pd.DataFrame(pd.read_csv(file_path))
if "datetime" in df.columns:
df.index = pd.to_datetime(df.pop("datetime"))
if clean:
df = _clean_data(df)
return df
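
# Usage sketch (the file name is illustrative; omitting the path loads the default dataset):
# df = load_csv()                             # default dataset, cleaned
# df_raw = load_csv("data.csv", clean=False)  # custom CSV, skip the cleaning step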
| 14,099
|