content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def new_vk_group(update: Update, context: CallbackContext):
    """Add VK groups posted by the user when the chat is in VK-config mode.

    Each line of the incoming message is matched against
    ``vk_client.VK_GROUP_REGEX``; matching links are verified with a probe
    request to the VK API, persisted via ``models.get_or_create`` and
    attached to the chat. A summary of added / rejected links is sent back
    to the user (in Russian, HTML-formatted).
    """
    with models.session_scope() as session:
        chat: models.Chat = session.query(models.Chat).get(update.effective_chat.id)
        if chat.state != models.ChatState.VK_CONFIG:
            # Ignore messages unless the chat is explicitly in VK-config mode.
            return
        invalid_links = []
        valid_groups = []
        for line in update.message.text.splitlines():
            match = vk_client.VK_GROUP_REGEX.match(line)
            if not match:
                invalid_links.append(line)
                continue
            short_name = match.group(1)
            log.debug(f'Got group {short_name}')
            try:
                # Probe the VK API so we only persist groups that resolve.
                response = vk_client.vk.groups.getById(group_id=short_name, fields='id',
                                                       version=vk_client.VK_VER)
            except (vk_api.ApiError, vk_api.ApiHttpError):
                # Fix: was an f-string with no placeholders and an unused
                # `as e` binding; the traceback is captured via exc_info.
                log.debug('...but test request failed', exc_info=True)
                invalid_links.append(line)
                continue
            # NOTE(review): assumes a successful getById always returns a
            # non-empty list — confirm against the VK API version in use.
            raw_group = response[0]
            log.debug(f'Got VK response: {raw_group}')
            group = models.get_or_create(
                session,
                models.VkGroup,
                url_name=raw_group['screen_name'],
                name=raw_group['name']
            )
            valid_groups.append(group)
        chat.vk_groups.extend(valid_groups)
        msg = 'Для остановки нажмите /abort\n'
        if valid_groups:
            msg += '<b>Добавлено:</b>\n'
            for entry in valid_groups:
                msg += str(entry) + '\n'
        if invalid_links:
            msg += '<b>Не добавлено:</b>\n'
            for entry in invalid_links:
                msg += entry + '\n'
        update.message.reply_text(msg, parse_mode=ParseMode.HTML)
def to_short_site_cname(user, site):
    """
    Display name for a subscription source, truncated via ``cut_to_short``.

    A per-user custom name (looked up by ``get_user_site_cname``) takes
    priority over the site's own ``cname``; either way the result is cut
    to at most 20 units.

    NOTE(review): the original docstring said "at most 10 Chinese
    characters" while the code truncates at 20 — confirm the intended limit.
    """
    if isinstance(site, dict):
        site_id, site_cname = site['id'], site['cname']
    else:
        site_id, site_cname = site.id, site.cname
    if user:
        custom_name = get_user_site_cname(user.oauth_id, site_id)
        if custom_name:
            return cut_to_short(custom_name, 20)
    return cut_to_short(site_cname, 20)
def load_roed_data(full_output=False):
    """Load master table with all labels from the Roederer+ star/abundance FITS files.

    Parameters
    ----------
    full_output : bool
        If True, also return the raw star dataframe and the abundance frame.

    Returns
    -------
    mtab : the master table with stellar-parameter and [X/Fe] columns added
    (mtab, df1, abunds) when ``full_output`` is True.
    """
    mtab = load_master_table()
    df1 = table.Table.read("roed14_stars.fits").to_pandas()

    def renamer(x):
        """ match master table Star with Name """
        x = x.strip()
        if x.startswith("BD") or x.startswith("CD"):
            return x.replace(" ", "_")
        return x.replace(" ", "")

    # Fix: on Python 3, map() returns a lazy iterator — materialize it
    # before using it as an index (or assigning it to a column below).
    df1.index = [renamer(name) for name in df1["Name"]]
    star_labels = ["Teff", "logg", "Vt", "__M_H_"]  # Should include some other columns eventually
    # to test how individual stars might affect things
    star_labels += ["SN3950", "SN4550", "SN5200", "SN6750", "Cl"]
    for label in star_labels:
        mtab.add_column(mtab.Column(df1.loc[mtab["Star"]][label], label))
    mtab.rename_column("__M_H_", "[M/H]")

    # Set up abundances
    df2 = table.Table.read("roed14_abunds.fits").to_pandas()
    df2["Name"] = [renamer(name) for name in df2["Name"]]
    # Fix: chained assignment (df2.loc[:, col][mask] = ...) writes to a
    # temporary copy on modern pandas; use one boolean .loc assignment.
    # Rows flagged "<" are upper limits and are blanked out.
    upper_limit = df2["l_log_e_"] == "<"
    df2.loc[upper_limit, "log_e_"] = np.nan
    df2.loc[upper_limit, "__X_Fe_"] = np.nan
    all_ions = np.unique(df2["Ion"])
    groups = df2.groupby("Ion")
    _abunds = []
    for ion in all_ions:
        tdf = groups.get_group(ion)
        xfe = pd.Series(tdf["__X_Fe_"], name=ion.strip(), copy=True)
        xfe.index = tdf["Name"]
        _abunds.append(xfe)
    abunds = pd.DataFrame(_abunds).transpose()
    for ion in all_ions:
        ion = ion.strip()
        if "(" in ion and ")" in ion:
            # Species written like "Eu (Eu II)" -> element tag in parentheses.
            newname = ion.split("(")[1][:-1]
            assert len(newname) == 2, newname
            newname = "[{}/Fe]".format(newname)
        elif "Fe " in ion:
            newname = "[{}/H]".format(ion)
        else:
            newname = "[{}/Fe]".format(ion)
        mtab.add_column(mtab.Column(abunds.loc[mtab["Star"]][ion], name=newname, copy=True))
    if full_output:
        return mtab, df1, abunds
    return mtab
def calculate_distance(p1, p2):
    """
    Calculate the Euclidean distance between two points.

    :param p1: tuple (x, y) point1
    :param p2: tuple (x, y) point2
    :return: distance between the two points (float)
    """
    x1, y1 = p1
    x2, y2 = p2
    # math.hypot is numerically safer than sqrt(dx**2 + dy**2): it avoids
    # intermediate overflow/underflow for very large/small coordinates.
    return math.hypot(x2 - x1, y2 - y1)
def get_multi_user_timeline(data: dict) -> None:
    """
    Build and render (via streamlit) an HTML activity timeline for friends.

    :param data: the full output of get_user_timeline, {uid: {table: df}}

    Relies on module-level names: ``table_name``/``date_str`` (parallel lists
    mapping table name -> its date column), ``qid2title`` and ``convert``
    helpers, ``Counter``, ``pd`` and ``st`` (streamlit).
    """
    # uid -> display name, taken from each user's 'users' table.
    uid2name = {u: data[u]['users']["display_name"][0] for u in data}
    # table name -> name of its timestamp column.
    table_date = {t: d for t, d in zip(table_name, date_str)}
    qid = []    # question ids whose titles must still be fetched
    qid2t = {}  # question id -> title
    raw = []    # flat event rows: [timestamp, table name, uid, [payload]]
    for uid, user_data in data.items():
        for tname, table in user_data.items():
            if tname == 'users':
                continue
            for _, i in table.iterrows():
                raw_single = [i[table_date[tname]], tname, uid]
                # NOTE(review): for any table other than the four below,
                # raw_single keeps length 3 and the 4-way unpack later fails
                # — confirm only these tables can occur.
                if tname == 'badges':
                    raw_single += [[i['name']]]
                elif tname == 'comments':
                    raw_single += [[i['post_id']]]
                    qid.append(i['post_id'])
                elif tname == 'posts_answers':
                    raw_single += [[i['parent_id']]]
                    qid.append(i['parent_id'])
                elif tname == 'posts_questions':
                    raw_single += [[i['id']]]
                    qid2t[i['id']] = i['title']
                raw.append(raw_single)
    if len(qid) > 0:
        # Batch-resolve the collected question ids to titles.
        qid2t.update(qid2title(qid))
    # Sort events newest-first.
    raw = sorted(raw)[::-1]
    # Merge consecutive events from the same user and table into one card.
    stack = []
    for info in raw:
        if len(stack) == 0:
            stack.append(info)
            continue
        tm1, tname1, uid1, info1 = info
        tm0, tname0, uid0, info0 = stack[-1]
        # NOTE(review): with the descending sort tm1 - tm0 is <= 0, so the
        # 2-day threshold can never trigger a split here; confirm whether
        # tm0 - tm1 was intended.
        if tm1 - tm0 >= pd.Timedelta(days=2) or tname1 != tname0 or uid1 != uid0:
            stack.append(info)
            continue
        # merge: keep the older timestamp, concatenate payload lists
        stack[-1] = [tm1, tname0, uid0, info0 + info1]
    # build html — one bootstrap "card" per merged event; the placeholder
    # tokens ICON/USERNAME/TEXT/TIME are substituted below.
    template = """
<div class="card border-light">
<div class="card-body">
<h5 class="card-title"><i class="fa ICON" aria-hidden="true"></i> USERNAME</h5>
<p class="card-text">TEXT</p>
<p class="card-text text-right"><small class="text-muted">TIME</small></p>
</div>
</div>
<div class="container py-1"></div>"""
    content = ''
    for info in stack:
        tm, tname, uid, info = info
        username = f'<a href="https://stackoverflow.com/users/{uid}/" target="_blank">{uid2name[uid]}</a>'
        if tname == 'badges':
            icon = 'fa-check-square-o'
            info = 'Your friend got ' + ', '.join([f'{v}x {k} badge{"s" if v > 1 else ""}' for k, v in Counter(info).items()]) + ', congratulations!'
        elif tname == 'comments':
            icon = 'fa-commenting-o'
            info = 'Your friend posted ' + ', '.join([f'{v} comment{"s" if v > 1 else ""} in <a href="https://stackoverflow.com/questions/{k}" target="_blank">{convert(qid2t.get(k, "this question"))}</a>' for k, v in Counter(info).items()]) + '.'
        elif tname == 'posts_answers':
            icon = 'fa-align-left'
            info = 'Your friend posted ' + ', '.join([f'{v} answer{"s" if v > 1 else ""} in <a href="https://stackoverflow.com/questions/{k}" target="_blank">{convert(qid2t.get(k, "this question"))}</a>' for k, v in Counter(info).items()]) + '.'
        elif tname == 'posts_questions':
            icon = 'fa-question-circle-o'
            info = f'Your friend posted {len(info)} question{"s" if len(info) > 1 else ""}: ' + ', '.join([f'<a href="https://stackoverflow.com/questions/{k}" target="_blank">{convert(qid2t[k])}</a>' for k in info]) + '.'
        content += template.replace('ICON', icon).replace('USERNAME', username).replace(
            'TIME', tm.strftime("%Y/%m/%d")).replace('TEXT', info)
    # Render the assembled cards inside a bootstrap modal via streamlit.
    st.components.v1.html("""
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css" integrity="sha384-wvfXpqpZZVQGK6TAh5PVlGOfQNHSoD2xbE+QkPxCAFlNEevoEH3Sl0sibVcOQVnN" crossorigin="anonymous">
<script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN" crossorigin="anonymous"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js" integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl" crossorigin="anonymous"></script>
<style>
.card{border-radius: 4px;background: #fff;box-shadow: 0 6px 10px rgba(0,0,0,.08), 0 0 6px rgba(0,0,0,.05);transition: .3s transform cubic-bezier(.155,1.105,.295,1.12),.3s box-shadow,.3s -webkit-transform cubic-bezier(.155,1.105,.295,1.12);padding: 14px 80px 18px 36px;cursor: pointer;}
.card:hover{transform: scale(1.05);box-shadow: 0 10px 20px rgba(0,0,0,.12), 0 4px 8px rgba(0,0,0,.06);}
</style>
<div class="text-center py-5"><button type="button" class="btn btn-primary" data-toggle="modal" data-target=".bd-example-modal-lg">Launch App!</button></div>
<div class="modal fade bd-example-modal-lg" tabindex="-1" role="dialog" aria-labelledby="myLargeModalLabel" aria-hidden="true">
<div class="modal-dialog modal-lg">
<div class="modal-content">
<div class="mx-5 py-5">
<div class="card border-light">
<div class="card-body">
<h3 class="card-text">Social Overflow (friends' page)</h3>
</div>
</div>
<div class="container py-1"></div>
CONTENT
<div class="text-center py-1"><small class="text-muted">No more result</small></div>
</div>
</div>
</div>
</div>
""".replace('CONTENT', content), height=900, scrolling=True)
def statistics(request, network):
    """Render the pool-wide statistics page for the given network."""
    # Basic pool numbers for the last day.
    days = 1
    current_height, all_blocks, pool_blocks, pool_blocks_percent, bbp_mined = \
        get_basic_statistics(network, days)
    miners_count = get_miner_count(network, days)
    graph_days = 7
    top_miners = get_top_miners(network)
    # Per-day solution (share) and block counts over the graph window.
    share_stats = list(get_solution_statistics(network, days=graph_days))
    block_stats = list(get_block_statistics(network, days=graph_days))
    # Join share and block statistics day-by-day. (Renamed from
    # `statistics`, which shadowed this view function's own name.)
    joined_stats = [
        [share[0], share[1], block_stats[idx][1]]
        for idx, share in enumerate(share_stats)
    ]
    # Forecast today's blocks from yesterday + today's rate so far.
    blocks_two_days = joined_stats[-2][2] + joined_stats[-1][2]
    blocks_per_hour = blocks_two_days / (24 + timezone.now().hour)
    forecast_blocks = int(round(blocks_per_hour * 24))
    last_blocks = Block.objects.filter(network=network, pool_block=True).values('height', 'inserted_at').order_by('-height')[0:50]
    return render(request, 'purepool/statistics.html', {
        'network': network,
        'statistics': joined_stats,
        'top_miners': top_miners,
        'last_blocks': last_blocks,
        'forecast_blocks': forecast_blocks,
        'days': days,
        'current_height': current_height,
        'all_blocks': all_blocks,
        'pool_blocks': pool_blocks,
        'pool_blocks_percent': pool_blocks_percent,
        'bbp_mined': bbp_mined,
        'miners_count': miners_count,
    })
def sim_log(dlevel, env, caller, action, affected):
    """
    Print a simulation debug line if the message level passes the filter.

    Parameters
    ----------
    dlevel= int -- debug level
    env= simpy.Environment
    caller= string -- name of the sim component acting
    action= string
    affected= any -- whatever component being acted on/with e.g., packet
    """
    # Guard clause: suppress messages below the global threshold.
    if DEBUG_LEVEL > dlevel:
        return
    level_tag = debug_level__string_map[dlevel]
    print("{} t: {:.2f}] {} {}\n\t{}".format(level_tag, env.now, caller, action, affected))
def klSigmode(self):
    """Toggle the signal display between 'deal' and 'dealOpen' modes."""
    opening = self.mode == 'deal'
    # Push the matching signal set to the canvas, then flip the mode flag.
    self.canvas.updateSig(self.signalsOpen if opening else self.signals)
    self.mode = 'dealOpen' if opening else 'deal'
def save_sig(sig, conf):
    """
    Save the signature into the database path under the user-defined folder.

    The target filename is randomized (``random_string``) with a ``.pasta``
    extension inside the configured ``user_defined`` directory.
    """
    user_db_dir = conf["config"]["penne_folders"]["user_defined"].format(HOME)
    filename = "{}/{}.pasta".format(user_db_dir, random_string())
    log.info("saving signature to user defined database under: {}".format(filename))
    payload = "pastadb:{}".format(sig).encode()
    with open(filename, "wb") as file_:
        file_.write(payload)
def package_load_instructions(inst_distributions):
    """Load instructions, displayed in the package notes.

    Builds one markdown section per 'zip' or 'csv' distribution; returns
    '' when no section applies, otherwise the sections prefixed by a
    markdown horizontal rule.
    """
    sections = []
    for dist in inst_distributions:
        if dist.type == 'zip':
            sections.append(dedent(
                """
                # Loading the ZIP Package
                Zip packages are compressed, so large resources may load faster.
                import metapack as mp
                pkg = mp.open_package('{url}')
                """.format(url=dist.package_url.inner)))
        elif dist.type == 'csv':
            sections.append(dedent(
                """
                # Loading the CSV Package
                CSV packages load resources individually, so small resources may load faster.
                import metapack as mp
                pkg = mp.open_package('{url}')
                """.format(url=dist.package_url.inner)))
    per_package_inst = ''.join(sections)
    if not per_package_inst:
        return ''
    return '\n---\n' + per_package_inst
def specialize_transform(graph, args):
    """Specialize on provided non-None args.

    Each parameter paired with a non-None value in ``args`` is replaced by
    that value as a Constant; parameters that are specialized on are
    removed from the cloned graph's parameter list.

    Returns the specialized clone (the input graph is not mutated).
    """
    mng = graph.manager
    # Fix: 'sp' was written as an f-string with no placeholders.
    graph = transformable_clone(graph, relation='sp')
    mng.add_graph(graph)
    for p, arg in zip(graph.parameters, args):
        if arg is not None:
            mng.replace(p, Constant(arg))
    # Keep only the parameters that were not bound to a constant.
    new_parameters = [p for p, arg in zip(graph.parameters, args)
                      if arg is None]
    mng.set_parameters(graph, new_parameters)
    return graph
def Multiplication(k):
    """
    Generate a function that performs a polynomial multiplication and return coefficients up to degree k

    Parameters
    ----------
    k : int
        Highest coefficient degree to keep (positive).

    Returns
    -------
    callable
        ``mul_function(x1, x2)`` taking two coefficient sequences and
        returning their convolution truncated at degree k, computed via
        ``LogTensor`` wrappers for numerical stability.
    """
    assert isinstance(k, int) and k > 0
    def isum(factors):
        # Fold an iterator of LogTensors into their sum, seeding the
        # accumulator with the first element (iadd = in-place add).
        init = next(factors)
        return reduce(operator.iadd, factors, init)
    def mul_function(x1, x2):
        # prepare indices for convolution
        l1, l2 = len(x1), len(x2)
        # Number of output coefficients: full convolution length l1+l2-1,
        # truncated at k+1.
        M = min(k + 1, l1 + l2 - 1)
        indices = [[] for _ in range(M)]
        for (i, j) in itertools.product(range(l1), range(l2)):
            if i + j >= M:
                continue
            # (i, j) contributes to output coefficient of degree i + j.
            indices[i + j].append((i, j))
        # wrap with log-tensors for stability
        X1 = [LogTensor(x1[i]) for i in range(l1)]
        X2 = [LogTensor(x2[i]) for i in range(l2)]
        # perform convolution
        coeff = []
        for c in range(M):
            # Sum all degree-c products, then convert back to a torch tensor.
            coeff.append(isum(X1[i] * X2[j] for (i, j) in indices[c]).torch())
        return coeff
    return mul_function
def safe_divide(a, b):
    """
    Element-wise division that maps divide-by-zero (and 0/0) results to 0.

    Avoid divide by zero
    http://stackoverflow.com/questions/26248654/numpy-return-0-with-divide-by-zero

    Fixes over the original:
    - negative/0 produced -inf, which np.nan_to_num turned into a huge
      negative float instead of 0; now -inf is also mapped to 0.
    - scalar (0-d) inputs no longer crash on boolean indexing.
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        c = np.true_divide(a, b)
    # Map nan (0/0), +inf and -inf (x/0) all to 0 in a single pass.
    return np.nan_to_num(c, nan=0.0, posinf=0.0, neginf=0.0)
def report_and_plot():
    """
    Scenario: do some reporting, or fine-tuning of analysis --> finding the right parameters
    :return:
    """
    # Build the default toolkit/analyzer for the module-level `context`.
    analyzer = get_default_tk(context)
    # Load previously saved analysis results into the analyzer.
    analyzer.load_analysis()
    # Report on frame 89 — NOTE(review): magic frame index; confirm why 89.
    analyzer.frames[89].report()
def plot_lc(data=None, model=None, bands=None, zp=25., zpsys='ab', pulls=True,
            xfigsize=None, yfigsize=None, figtext=None, model_label=None,
            errors=None, ncol=2, figtextsize=1., show_model_params=True,
            tighten_ylim=False, fname=None, **kwargs):
    """Plot light curve data or model light curves.

    Parameters
    ----------
    data : astropy `~astropy.table.Table` or similar
        Table of photometric data points.
    model : `~sncosmo.Model` or list thereof, optional
        If given, model light curve is plotted. If a string, the corresponding
        model is fetched from the registry. If a list or tuple of
        `~sncosmo.Model`, multiple models are plotted.
    model_label : str or list, optional
        If given, model(s) will be labeled in a legend in the upper left
        subplot. Must be same length as model.
    errors : dict, optional
        Uncertainty on model parameters. If given, along with exactly one
        model, uncertainty will be displayed with model parameters at the top
        of the figure.
    bands : list, optional
        List of Bandpasses, or names thereof, to plot.
    zp : float, optional
        Zeropoint to normalize the flux. Default is 25.
    zpsys : str, optional
        Zeropoint system for ``zp``. Default is ``'ab'``.
    pulls : bool, optional
        If True (and if model and data are given), plot pulls. Default is
        ``True``.
    figtext : str, optional
        Text to add to top of figure. If a list of strings, each item is
        placed in a separate "column". Use newline separators for multiple
        lines.
    ncol : int, optional
        Number of columns of axes. Default is 2.
    xfigsize, yfigsize : float, optional
        figure size in inches in x or y. Specify one or the other, not both.
        Default is to set axes panel size to 3.0 x 2.25 inches.
    figtextsize : float, optional
        Space to reserve at top of figure for figtext (if not None).
        Default is 1 inch.
    show_model_params : bool, optional
        If there is exactly one model plotted, the parameters of the model
        are added to ``figtext`` by default (as two additional columns) so
        that they are printed at the top of the figure. Set this to False to
        disable this behavior.
    tighten_ylim : bool, optional
        If true, tighten the y limits so that the model is visible (if any
        models are plotted).
    fname : str, optional
        Filename to pass to savefig. If `None` (default), figure is returned.
    kwargs : optional
        Any additional keyword args are passed to `~matplotlib.pyplot.savefig`.
        Popular options include ``dpi``, ``format``, ``transparent``. See
        matplotlib docs for full list.

    Returns
    -------
    fig : matplotlib `~matplotlib.figure.Figure`
        Only returned if `fname` is `None`. Display to screen with
        ``plt.show()`` or save with ``fig.savefig(filename)``. When creating
        many figures, be sure to close with ``plt.close(fig)``.

    Examples
    --------
    >>> import sncosmo
    >>> import matplotlib.pyplot as plt

    Load some example data:

    >>> data = sncosmo.load_example_data()

    Plot the data, displaying to the screen:

    >>> fig = plot_lc(data)
    >>> plt.show()  # doctest: +SKIP

    Plot a model along with the data:

    >>> model = sncosmo.Model('salt2')  # doctest: +SKIP
    >>> model.set(z=0.5, c=0.2, t0=55100., x0=1.547e-5)  # doctest: +SKIP
    >>> sncosmo.plot_lc(data, model=model)  # doctest: +SKIP

    .. image:: /pyplots/plotlc_example.png

    Plot just the model, for selected bands:

    >>> sncosmo.plot_lc(model=model,  # doctest: +SKIP
    ...                 bands=['sdssg', 'sdssr'])  # doctest: +SKIP

    Plot figures on a multipage pdf:

    >>> from matplotlib.backends.backend_pdf import PdfPages  # doctest: +SKIP
    >>> pp = PdfPages('output.pdf')  # doctest: +SKIP
    ...
    >>> # Do the following as many times as you like:
    >>> sncosmo.plot_lc(data, fname=pp, format='pdf')  # doctest: +SKIP
    ...
    >>> # Don't forget to close at the end:
    >>> pp.close()  # doctest: +SKIP
    """
    if data is None and model is None:
        raise ValueError('must specify at least one of: data, model')
    if data is None and bands is None:
        raise ValueError('must specify bands to plot for model(s)')

    # Get the model(s).
    if model is None:
        models = []
    elif isinstance(model, (tuple, list)):
        models = model
    else:
        models = [model]
    if not all([isinstance(m, Model) for m in models]):
        raise TypeError('model(s) must be Model instance(s)')

    # Get the model labels.
    if model_label is None:
        model_labels = [None] * len(models)
    elif isinstance(model_label, str):  # Py3 fix: was `basestring`.
        model_labels = [model_label]
    else:
        model_labels = model_label
    if len(model_labels) != len(models):
        raise ValueError('if given, length of model_label must match '
                         'that of model')

    # Standardize and normalize data.
    if data is not None:
        data = standardize_data(data)
        data = normalize_data(data, zp=zp, zpsys=zpsys)

    # Bands to plot.
    if data is None:
        bands = set(bands)
    elif bands is None:
        bands = set(data['band'])
    else:
        bands = set(data['band']) & set(bands)

    # Build figtext (including model parameters, if there is exactly 1 model).
    if errors is None:
        errors = {}
    if figtext is None:
        figtext = []
    elif isinstance(figtext, str):  # Py3 fix: was `basestring`.
        figtext = [figtext]
    if len(models) == 1 and show_model_params:
        model = models[0]
        lines = []
        for i in range(len(model.param_names)):
            name = model.param_names[i]
            lname = model.param_names_latex[i]
            v = format_value(model.parameters[i], errors.get(name), latex=True)
            lines.append('${0} = {1}$'.format(lname, v))
        # Split lines into two columns.
        n = len(model.param_names) - len(model.param_names) // 2
        figtext.append('\n'.join(lines[:n]))
        figtext.append('\n'.join(lines[n:]))
    if len(figtext) == 0:
        figtextsize = 0.

    # Calculate layout of figure (columns, rows, figure size). We have to
    # calculate these explicitly because plt.tight_layout() doesn't space the
    # subplots as we'd like them when only some of them have xlabels/xticks.
    wspace = 0.6  # All in inches.
    hspace = 0.3
    lspace = 1.0
    bspace = 0.7
    trspace = 0.2
    nrow = (len(bands) - 1) // ncol + 1
    if xfigsize is None and yfigsize is None:
        hpanel = 2.25
        wpanel = 3.
    elif xfigsize is None:
        hpanel = (yfigsize - figtextsize - bspace - trspace -
                  hspace * (nrow - 1)) / nrow
        wpanel = hpanel * 3. / 2.25
    elif yfigsize is None:
        wpanel = (xfigsize - lspace - trspace - wspace * (ncol - 1)) / ncol
        hpanel = wpanel * 2.25 / 3.
    else:
        raise ValueError('cannot specify both xfigsize and yfigsize')
    figsize = (lspace + wpanel * ncol + wspace * (ncol - 1) + trspace,
               bspace + hpanel * nrow + hspace * (nrow - 1) + trspace +
               figtextsize)

    # Create the figure and axes.
    fig, axes = plt.subplots(nrow, ncol, figsize=figsize, squeeze=False)
    fig.subplots_adjust(left=lspace / figsize[0],
                        bottom=bspace / figsize[1],
                        right=1. - trspace / figsize[0],
                        top=1. - (figtextsize + trspace) / figsize[1],
                        wspace=wspace / wpanel,
                        hspace=hspace / hpanel)

    # Write figtext at the top of the figure.
    for i, coltext in enumerate(figtext):
        if coltext is not None:
            xpos = (trspace / figsize[0] +
                    (1. - 2.*trspace/figsize[0]) * (i/len(figtext)))
            ypos = 1. - trspace / figsize[1]
            fig.text(xpos, ypos, coltext, va="top", ha="left",
                     multialignment="left")

    # If there is exactly one model, offset the time axis by the model's t0.
    if len(models) == 1 and data is not None:
        toff = models[0].parameters[1]
    else:
        toff = 0.

    # Global min and max of time axis.
    tmin, tmax = [], []
    if data is not None:
        tmin.append(np.min(data['time']) - 10.)
        tmax.append(np.max(data['time']) + 10.)
    for model in models:
        tmin.append(model.mintime())
        tmax.append(model.maxtime())
    tmin = min(tmin)
    tmax = max(tmax)
    tgrid = np.linspace(tmin, tmax, int(tmax - tmin) + 1)

    # Loop over bands, sorted by effective wavelength.
    bands = list(bands)
    waves = [get_bandpass(b).wave_eff for b in bands]
    waves_and_bands = sorted(zip(waves, bands))
    for axnum in range(ncol * nrow):  # Py3 fix: was `xrange`.
        row = axnum // ncol
        col = axnum % ncol
        ax = axes[row, col]

        if axnum >= len(waves_and_bands):
            ax.set_visible(False)
            ax.set_frame_on(False)
            continue

        wave, band = waves_and_bands[axnum]

        bandname_coords = (0.92, 0.92)
        bandname_ha = 'right'
        color = _cmap((_cmap_wavelims[1] - wave) /
                      (_cmap_wavelims[1] - _cmap_wavelims[0]))

        # Plot data if there are any.
        if data is not None:
            mask = data['band'] == band
            time = data['time'][mask]
            flux = data['flux'][mask]
            fluxerr = data['fluxerr'][mask]
            ax.errorbar(time - toff, flux, fluxerr, ls='None',
                        color=color, marker='.', markersize=3.)

        # Plot model(s) if there are any.
        lines = []
        labels = []
        mflux_ranges = []
        for i, model in enumerate(models):
            if model.bandoverlap(band):
                mflux = model.bandflux(band, tgrid, zp=zp, zpsys=zpsys)
                mflux_ranges.append((mflux.min(), mflux.max()))
                l, = ax.plot(tgrid - toff, mflux,
                             ls=_model_ls[i % len(_model_ls)],
                             marker='None', color=color)
                lines.append(l)
            else:
                # Add a dummy line so the legend displays all models in the
                # first panel.
                lines.append(plt.Line2D([0, 1], [0, 1],
                                        ls=_model_ls[i % len(_model_ls)],
                                        marker='None', color=color))
            labels.append(model_labels[i])

        # Add a legend, if this is the first axes and there are two
        # or more models to distinguish between.
        if row == 0 and col == 0 and model_label is not None:
            ax.legend(lines, labels, loc='upper right',
                      fontsize='small', frameon=True)
            bandname_coords = (0.08, 0.92)  # Move bandname to upper left.
            bandname_ha = 'left'

        # Band name in corner.
        ax.text(bandname_coords[0], bandname_coords[1], band,
                color='k', ha=bandname_ha, va='top', transform=ax.transAxes)

        ax.axhline(y=0., ls='--', c='k')  # horizontal line at flux = 0.
        ax.set_xlim((tmin-toff, tmax-toff))

        # If we plotted any models, narrow axes limits so that the model
        # is visible.
        if tighten_ylim and len(mflux_ranges) > 0:
            mfluxmin = min([r[0] for r in mflux_ranges])
            mfluxmax = max([r[1] for r in mflux_ranges])
            ymin, ymax = ax.get_ylim()
            ymax = min(ymax, 4. * mfluxmax)
            ymin = max(ymin, mfluxmin - (ymax - mfluxmax))
            ax.set_ylim(ymin, ymax)

        if col == 0:
            ax.set_ylabel('flux ($ZP_{{{0}}} = {1}$)'
                          .format(get_magsystem(zpsys).name.upper(), zp))

        show_pulls = (pulls and
                      data is not None and
                      len(models) == 1 and models[0].bandoverlap(band))

        # Steal part of the axes and plot pulls.
        if show_pulls:
            divider = make_axes_locatable(ax)
            axpulls = divider.append_axes('bottom', size='30%', pad=0.15,
                                          sharex=ax)
            mflux = models[0].bandflux(band, time, zp=zp, zpsys=zpsys)
            fluxpulls = (flux - mflux) / fluxerr
            axpulls.axhspan(ymin=-1., ymax=1., color='0.95')
            axpulls.axhline(y=0., color=color)
            axpulls.plot(time - toff, fluxpulls, marker='.',
                         markersize=5., color=color, ls='None')

            # Ensure y range is centered at 0.
            ymin, ymax = axpulls.get_ylim()
            absymax = max(abs(ymin), abs(ymax))
            axpulls.set_ylim((-absymax, absymax))

            # Set x limits to global values.
            axpulls.set_xlim((tmin-toff, tmax-toff))

            # Set small number of y ticks so tick labels don't overlap.
            axpulls.yaxis.set_major_locator(MaxNLocator(5))

            # Label the y axis and make sure ylabels align between axes.
            if col == 0:
                axpulls.set_ylabel('pull')
                axpulls.yaxis.set_label_coords(-0.75 * lspace / wpanel, 0.5)
                ax.yaxis.set_label_coords(-0.75 * lspace / wpanel, 0.5)

            # Set top axis ticks invisible.
            for l in ax.get_xticklabels():
                l.set_visible(False)

            # Set ax to axpulls in order to adjust plots.
            bottomax = axpulls
        else:
            bottomax = ax

        # If this axes is one of the last `ncol`, set x label.
        # Otherwise don't show tick labels.
        if (len(bands) - axnum - 1) < ncol:
            if toff == 0.:
                bottomax.set_xlabel('time')
            else:
                bottomax.set_xlabel('time - {0:.2f}'.format(toff))
        else:
            for l in bottomax.get_xticklabels():
                l.set_visible(False)

    if fname is None:
        return fig
    plt.savefig(fname, **kwargs)
    plt.close()
def read_image(filepath, gray=False):
    """
    Read an image from disk with OpenCV.

    :param filepath: path to the image file
    :param gray: if True, return a grayscale image; otherwise RGB
    :return: the image as a numpy array (grayscale or RGB)
    """
    # cv2.imread returns BGR; convert to the requested color space.
    bgr = cv2.imread(filepath)
    conversion = cv2.COLOR_BGR2GRAY if gray else cv2.COLOR_BGR2RGB
    return cv2.cvtColor(bgr, conversion)
def mixin_hub_pull_parser(parser):
    """Add the arguments for hub pull to the parser
    :param parser: the parser configure
    """
    def hub_uri(uri: str) -> str:
        # Validate-only argparse `type`: parse_hub_uri raises on malformed
        # input, which argparse turns into an error message; the original
        # string is returned unchanged on success. (Keep this function's
        # name — argparse shows it in "invalid hub_uri value" errors.)
        from ...hubble.helper import parse_hub_uri
        parse_hub_uri(uri)
        return uri
    parser.add_argument(
        'uri',
        type=hub_uri,
        help='The URI of the executor to pull (e.g., jinahub[+docker]://NAME)',
    )
    # Also attach the shared hub-pull options (--force, --install, ...).
    mixin_hub_pull_options_parser(parser)
def status(ctx, branch, repo):
    """Return the status of the given branch of each project in each CI."""
    ctx.obj = Config()
    if ctx.invoked_subcommand is None:
        # No explicit subcommand: query every supported CI in turn.
        # (Replaces five copy-pasted ctx.invoke calls and a trailing
        # no-op `pass`.)
        for ci_command in (travis, circle, appveyor, buddy, drone):
            ctx.invoke(ci_command, branch=branch, repo=repo)
def load_config() -> Tuple[List, List]:
    """Get configuration from config file.

    Returns repo_paths and bare_repo_dicts. When the config file does not
    exist, both are empty lists.
    """
    if config_file.exists():
        with open(config_file, "r") as ymlfile:
            # safe_load avoids arbitrary-object construction from YAML
            # (yaml.Loader can instantiate Python objects); fall back to {}
            # when the file is empty, since yaml then returns None.
            # NOTE(review): if the config legitimately relies on custom YAML
            # tags, revert to a FullLoader — confirm against real configs.
            config = yaml.safe_load(ymlfile) or {}
        repo_paths = flatten_list(
            [expand_path(i) for i in config.get('repo_paths', [])]
        )
        bare_repo_dicts: List[Dict] = config.get('bare_repos', [])
        bare_repo: Dict[str, str]
        for bare_repo in bare_repo_dicts:
            # Normalize '~' / env vars in the stored paths.
            bare_repo['git_dir'] = expand_path(bare_repo['git_dir'])[0]
            bare_repo['work_tree'] = expand_path(bare_repo['work_tree'])[0]
    else:
        repo_paths = []
        bare_repo_dicts = []
    return repo_paths, bare_repo_dicts
def deindented_source(src):
    """De-indent source if all lines indented.

    This is necessary before parsing with ast.parse to avoid "unexpected
    indent" syntax errors if the function is not module-scope in its
    original implementation (e.g., staticmethods encapsulated in classes).

    Parameters
    ----------
    src : str
        input source

    Returns
    -------
    str :
        de-indented source; the first character of at least one line is
        non-whitespace, and all other lines are deindented by the same
        amount
    """
    lines = src.splitlines()
    n_chars = float("inf")
    for line in lines:
        len_line = len(line)
        idx = 0
        # we're Python 3, so we assume you're not mixing tabs and spaces
        while idx < n_chars and idx < len_line and line[idx] in (" ", "\t"):
            idx += 1
        # Only lines with non-whitespace content constrain the indent.
        if len_line > idx:
            n_chars = min(idx, n_chars)
    # Fix: if every line is empty/whitespace-only, n_chars stays infinite
    # and the original slice `line[inf:]` raised TypeError. Treat that as
    # "nothing to deindent".
    if n_chars == float("inf"):
        n_chars = 0
    lines = [line[n_chars:] for line in lines]
    return "\n".join(lines)
def test_read_csv_file_handle(all_parsers, io_class, encoding):
    """
    Test whether read_csv does not close user-provided file handles.
    GH 36980
    """
    parser = all_parsers
    content = "a,b\n1,2"
    payload = content.encode("utf-8") if io_class == BytesIO else content
    handle = io_class(payload)

    result = parser.read_csv(handle, encoding=encoding)
    tm.assert_frame_equal(result, DataFrame({"a": [1], "b": [2]}))
    # The user-provided handle must stay open after parsing.
    assert not handle.closed
def test_parquet_conversion(datadir_mgr):
    """Test parquet-to-TSV conversion."""
    # Run inside a scratch dir populated with the TSV fixture.
    with datadir_mgr.in_tmp_dir(
        inpathlist=[TSV_TEST_FILE],
        save_outputs=False,
    ):
        args = ["-q", "-e", SUBCOMMAND, "-w", TSV_TEST_FILE]
        print(f"azulejo {' '.join(args)}")
        try:
            azulejo(
                args,
                _out=sys.stdout,
            )
        except sh.ErrorReturnCode as errors:
            print(errors)
            # NOTE(review): first failure is only printed, then the command
            # is retried once before failing the test — confirm the retry
            # is intentional (flaky subprocess?) rather than leftover code.
            try:
                azulejo(["-q", "-e", SUBCOMMAND, "-w", TSV_TEST_FILE])
            except sh.ErrorReturnCode as errors:
                print(errors)
                pytest.fail("Parquet-to-TSV conversion failed")
        # Conversion must have produced the expected TSV output file.
        assert Path(TSV_OUTPUT_FILE).exists()
def home():
    """Render the home page with a student-search form."""
    form = SearchForm()
    search_results = None
    if form.validate_on_submit():
        search_term = form.username.data
        cur = conn.cursor()
        # Fix: parameterized query instead of f-string interpolation, which
        # allowed SQL injection through the submitted username.
        # NOTE(review): '%s' is the paramstyle for psycopg2/MySQLdb; if
        # `conn` is sqlite3, the placeholder must be '?' — confirm driver.
        cur.execute("SELECT * FROM student WHERE name = %s;", (search_term,))
        search_results = cur.fetchall()
        cur.close()
    return render_template(
        "home.html", form=form, search_results=search_results)
def capacity():
    """
    Returns the raw capacity of the filesystem
    Returns:
        filesystem capacity (int)
    """
    # Thin wrapper over the module-level `hdfs` client's capacity() call.
    return hdfs.capacity()
async def deploy(current_user: User = Depends(auth.get_current_user)):
    """ This function is used to deploy the model of the currently trained chatbot """
    # Delegates to mongo_processor, scoped to the authenticated user's bot;
    # the processor's status string is returned as the API message.
    response = mongo_processor.deploy_model(bot=current_user.get_bot(), user=current_user.get_user())
    return {"message": response}
def test_logic():
    """ Test logic on known input """

    def build_model():
        # Fresh 20-in / 1-out sigmoid classifier, compiled with plain SGD,
        # identical to the model used in every scenario below.
        m = tf.keras.Sequential([tf.keras.layers.Input(shape=(20,)),
                                 tf.keras.layers.Dense(10, activation=tf.nn.relu),
                                 tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)])
        m.compile(optimizer=tf.keras.optimizers.SGD(), loss='binary_crossentropy')
        return m

    # Shared random input / labels.
    input_2d = np.random.normal(size=(1000, 20))
    labels = np.random.randint(low=0, high=2, size=1000)

    # -------- TRIANGULAR --------
    model = build_model()
    clr = CyclicLearningRateCallback()
    half_cycle = list(np.linspace(0.001, 0.006, 2000))
    expected_lr_values = half_cycle + half_cycle[::-1]  # Triangle
    model.fit(input_2d, labels, batch_size=10, epochs=5, callbacks=[clr], verbose=0)
    assert all(math.isclose(a, b, rel_tol=0.001) for a, b in zip(clr.history['lr'], expected_lr_values))

    # -------- TRIANGULAR2 --------
    model = build_model()
    clr = CyclicLearningRateCallback(scale_scheme='triangular2')
    model.fit(input_2d, labels, batch_size=10, epochs=5, callbacks=[clr], verbose=0)

    # -------- EXPONENT RANGE --------
    model = build_model()
    clr = CyclicLearningRateCallback(scale_scheme='exp_range')
    model.fit(input_2d, labels, batch_size=10, epochs=5, callbacks=[clr], verbose=0)

    # -------- CUSTOM SCALING --------
    model = build_model()
    scale_fn = lambda x: 0.5 * (1 + np.sin(x * np.pi / 2.))
    clr = CyclicLearningRateCallback(scale_mode='cycle', scale_fn=scale_fn)
    model.fit(input_2d, labels, batch_size=10, epochs=5, callbacks=[clr], verbose=0)
def find_rocks(img, rgb_thresh=(100, 100, 60)):
    """Find rock pixels in the given image frame.

    A pixel counts as "rock" when red and green EXCEED their thresholds
    while blue is BELOW its threshold (i.e. a yellowish color).
    Returns a binary mask (same height/width and dtype as one channel of
    ``img``) with 1 at rock pixels.
    """
    red, green, blue = img[:, :, 0], img[:, :, 1], img[:, :, 2]
    is_rock = (red > rgb_thresh[0]) \
        & (green > rgb_thresh[1]) \
        & (blue < rgb_thresh[2])
    # Start from all-zero and mark matching pixels with 1.
    rock_mask = np.zeros_like(img[:, :, 0])
    rock_mask[is_rock] = 1
    return rock_mask
def assert_same_structure_up_to(shallow_nest, deep_nest):
    """Assert that ``deep_nest`` matches the structure of ``shallow_nest``.

    Structures are compared only down to the depth of ``shallow_nest``;
    any sub-nest of ``deep_nest`` that is deeper than the corresponding
    sub-nest of ``shallow_nest`` is treated as a single leaf.

    Examples:
    .. code-block:: python

        assert_same_structure_up_to(([2], None), ([1], [1, 2, 3]))
        # success
        assert_same_structure_up_to(([2], []), ([1], [1, 2, 3]))
        # failure

    Args:
        shallow_nest (nest): a shallow nested structure.
        deep_nest (nest): a nested structure checked against ``shallow_nest``.
    """
    try:
        # The mapped function is irrelevant; only the structure walk matters.
        cnest.map_structure_up_to(shallow_nest, lambda _: None, deep_nest)
    except Exception as e:
        message = (
            "assert_same_structure_up_to() fails for {} and {}. Error message: "
            "'{}'".format(shallow_nest, deep_nest, str(e)))
        logging.error(message)
        raise e
def test_ba_validation_right_size_valid_number_8_digits_and_second_digit_different_6_7_9():
    """Check that a known-good 8-digit number passes BA validation."""
    number_under_test = '74694200'
    assert ba.start(number_under_test) == True
def maybe_start_with_home_prefix(p: Path) -> Path:
    """Rewrite *p* relative to ``~`` when it lives under the home directory.

    If *p* is located inside the user's home directory, return an
    equivalent path rooted at ``~``; otherwise return *p* untouched.
    """
    try:
        relative = p.relative_to(Path.home())
    except ValueError:
        # Not under the home directory (or a relative path) -- leave as-is.
        return p
    return Path("~", relative)
def parse_nrc_lexicon():
    """Extract National Resource Council Canada emotion lexicon from http://saifmohammad.com/WebPages/lexicons.html
    Returns:
        {str: [str]} A defaultdict of emotion to list of associated words
    """
    lexicon = defaultdict(list)
    with open(NRC_LEXICON) as handle:
        next(handle)  # skip the first line (header)
        for row in handle:
            word, emotion, flag = row.split()
            # flag is '1' when the word is associated with the emotion
            if flag == '1':
                lexicon[emotion].append(word)
    return lexicon
def rouge_2_fscore(predictions, labels, **unused_kwargs):
    """ROUGE-2 F1 score computation between labels and predictions.
    This is an approximate ROUGE scoring method since we do not glue word pieces
    or decode the ids and tokenize the output.
    Args:
      predictions: tensor, model predictions
      labels: tensor, gold output.
    Returns:
      rouge2_fscore: approx rouge-2 f1 score.
    """
    # Greedy decode: take the highest-scoring class id at every position.
    decoded = tf.to_int32(tf.argmax(predictions, axis=-1))
    # Drop the two trailing singleton dims: [batch_size, input_length].
    decoded = tf.squeeze(decoded, axis=[-1, -2])
    gold = tf.squeeze(labels, axis=[-1, -2])
    # rouge_n runs as a py_func since it operates on plain numpy arrays.
    score = tf.py_func(rouge_n, (decoded, gold), tf.float32)
    return score, tf.constant(1.0)
def clear_log():
    """Truncate the log file, leaving only a session-start timestamp.

    Only active when the module-level MODE is "debug"; otherwise a no-op.
    """
    if MODE != "debug":
        return ()
    header = f"start {str(datetime.today())} \n\n"
    with open("temp/log.txt", "w", encoding="latin-1") as log_file:
        log_file.write(header)
    return ()
def enhancedFeatureExtractorDigit(datum):
    """
    Your feature extraction playground.
    You should return a util.Counter() of features
    for this datum (datum is of type samples.Datum).
    ## DESCRIBE YOUR ENHANCED FEATURES HERE...
    ##
    """
    # Start from the basic binary-pixel feature set; add any additional
    # features below (currently returns the basic set unchanged).
    features = basicFeatureExtractorDigit(datum)
    "*** YOUR CODE HERE ***"
    return features
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via input() and return the answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
        It must be "yes" (the default), "no" or None (meaning
        an answer is required of the user).

    The "answer" return value is True for "yes" or False for "no".

    Raises:
        ValueError: if ``default`` is not "yes", "no" or None.
    """
    valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        # Bug fix: the original did "invalid default answer:" % default on a
        # string with no placeholder, which raised TypeError, not ValueError.
        raise ValueError("invalid default answer: %r" % default)
    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        # (dead `break` statements after each `return` were removed)
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
def wipe_cache():
    """Remove the JIT compile cache directory.

    if path is absolute, just remove the directory (and exit the process).
    if the path is relative, recursively look from current directory down
    looking for matching paths. This can take a long time looking for
    :return: None (terminates the process via exit(0) for absolute paths)
    """
    # Resolve '~' in the configured cache path.
    cache_name = os.path.expanduser(CONFIG.get('jit', 'COMPILE_PATH'))
    if os.path.isabs(cache_name):
        if os.path.exists(cache_name):
            # NOTE(review): shutil.rmtree() returns None, so `result` is
            # always falsy and never actually shown in the message.
            result = shutil.rmtree(cache_name)
            print("removed cache directory {} {}".format(
                cache_name, result if result else ""))
        # Absolute path handled -- nothing left to search for.
        exit(0)
    # Relative path: consume leading './' components and walk up for each
    # leading '../' before searching; what remains is the directory name
    # to look for beneath the (possibly changed) working directory.
    splitted = cache_name.split(os.sep)
    while splitted:
        first = splitted[0]
        if first == '.':
            splitted.pop(0)
        elif first == '..':
            os.chdir('../')
            splitted.pop(0)
        else:
            cache_name = os.sep.join(splitted)
            break
    # Breadth-first search of everything below the working directory.
    wipe_queue = collections.deque([os.path.abspath(p) for p in os.listdir(os.getcwd())])
    print("vhdl_vhdl_ctree looking for relative cache directories named {}, checking directories under this one".format(
        cache_name))
    while wipe_queue:
        directory = wipe_queue.popleft()
        if not os.path.isdir(directory):
            continue
        if os.path.split(directory)[-1] == cache_name:
            # Basename matches the cache name: delete the whole subtree.
            shutil.rmtree(directory)
        else:
            #print("{} ".format(directory))
            for sub_item in os.listdir(directory):
                wipe_queue.append(os.path.join(directory, sub_item))
    print()
def tested_function(x):
"""
Testovana funkce
Da se sem napsat vselijaka cunarna
"""
freq = 1
damp_fac = 0.1
val = np.sin(freq * x)
damp = np.exp(-1 * damp_fac * abs(x))
return val * damp | 33,936 |
def populatePlaylists():
    """Populate the playlist tables with dummy data.

    Creates three users and one playlist per user in a single session,
    committing everything at the end.
    """
    Base.metadata.bind = engine
    DBSession = sessionmaker(bind=engine)
    session = DBSession()
    user1 = User(email="kaan@kaan.ca", password="1234")
    user2 = User(email="ryan@ryan.ca", password="1234")
    user3 = User(email="paul@paul.ca", password="1234")
    session.add(user1)
    session.add(user2)
    session.add(user3)
    # Bug fix: playlists 2 and 3 were created with user_id=user1.id (a
    # copy-paste error).  Each playlist now references its own owner; the
    # `user=` relationship keeps the foreign key consistent on flush.
    p1 = Playlist(name="Kaan's list", user_id=user1.id, user=user1)
    p2 = Playlist(name="Ryan's list", user_id=user2.id, user=user2)
    p3 = Playlist(name="Paul's list", user_id=user3.id, user=user3)
    session.add(p1)
    session.add(p2)
    session.add(p3)
    session.commit()
def parse_nkpts(xml_file):
    """
    Extract the number of kpoints used in the given xml file
    """
    from masci_tools.util.xml.xml_getters import get_nkpts
    tree, schema = _load_xml_file(xml_file)
    num_kpts = get_nkpts(tree, schema)
    echo.echo_info(f'Number of k-points: {num_kpts}')
def parse_line(line):
    """
    Parses a (non-comment) line of a GFF3 file. The attribute field is parsed into a dict.
    :param line: line to parse as string
    :return: dict with for each column (key) the corresponding value
    """
    fields = line.strip().split('\t')
    if len(fields) != len(COLUMNS):
        raise Exception('Incorrect number of columns in line.', fields, COLUMNS)
    record = {}
    for column, raw_value in zip(COLUMNS, fields):
        if column == 'attributes':
            # attribute column gets expanded into its own dict
            record[column] = parse_attributes(raw_value)
        elif column in ('start', 'stop'):
            # coordinates are integers
            record[column] = int(raw_value)
        else:
            record[column] = raw_value
    return record
def get_time_string(place: str = "Europe/Moscow"):
    """
    Fetch the current time for *place* from worldtimeapi.org as a string.
    Parameters
    ----------
    place : str
        Location, i.e. 'Europe/Moscow'.
    Returns
    -------
    string
        Time in format '%Y-%m-%d %H:%M:%S'
    Examples
    --------
    >>> get_time_string()
    2021-08-16 16:03:34
    """
    endpoint = "http://worldtimeapi.org/api/timezone/" + place
    payload = requests.get(endpoint).json()
    moment = datetime.fromisoformat(payload["datetime"])
    return moment.strftime("%Y-%m-%d %H:%M:%S")
def new_worker_qthread(
    Worker: Type[WorkerProtocol],
    *args,
    _start_thread: bool = False,
    _connect: Dict[str, Callable] = None,
    **kwargs,
):
    """This is a convenience function to start a worker in a Qthread.
    In most cases, the @thread_worker decorator is sufficient and preferable.
    But this allows the user to completely customize the Worker object.
    However, they must then maintain control over the thread and clean up
    appropriately.
    It follows the pattern described here:
    https://www.qt.io/blog/2010/06/17/youre-doing-it-wrong
    and
    https://doc.qt.io/qt-5/qthread.html#details
    see also:
    https://mayaposch.wordpress.com/2011/11/01/how-to-really-truly-use-qthreads-the-full-explanation/
    A QThread object is not a thread!  It should be thought of as a class to
    *manage* a thread, not as the actual code or object that runs in that
    thread.  Worker objects (QObject subclasses) do the actual work and are
    moved to the QThread; inter-thread signals automatically use a queued
    connection.
    Parameters
    ----------
    Worker : QObject
        QObject type that implements a `work()` method.  The Worker should
        also emit a finished signal when the work is done.
    _start_thread : bool
        If True, thread will be started immediately, otherwise, thread must
        be manually started with thread.start().
    _connect : dict, optional
        Optional dictionary of {signal: function} to connect to the new
        worker.  For instance: _connect = {'incremented': myfunc} results in
        worker.incremented.connect(myfunc).
    *args
        will be passed to the Worker class on instantiation.
    **kwargs
        will be passed to the Worker class on instantiation.
    Returns
    -------
    worker : WorkerBase
        The created worker.
    thread : QThread
        The thread on which the worker is running.
    Examples
    --------
    Create some QObject that has a long-running work method:
    .. code-block:: python
        class Worker(QObject):
            finished = Signal()
            increment = Signal(int)
            def __init__(self, argument):
                super().__init__()
                self.argument = argument
            @Slot()
            def work(self):
                # some long running task...
                import time
                for i in range(10):
                    time.sleep(1)
                    self.increment.emit(i)
                self.finished.emit()
        worker, thread = new_worker_qthread(
            Worker,
            'argument',
            _start_thread=True,
            _connect={'increment': print},
        )
    """
    if _connect and not isinstance(_connect, dict):
        raise TypeError("_connect parameter must be a dict")
    thread = QThread()
    worker = Worker(*args, **kwargs)
    worker.moveToThread(thread)
    # Lifecycle wiring: run work() when the thread starts and tear both
    # objects down once the worker reports that it is finished.
    thread.started.connect(worker.work)
    worker.finished.connect(thread.quit)
    worker.finished.connect(worker.deleteLater)
    thread.finished.connect(thread.deleteLater)
    if _connect:
        # Idiom fix: use a plain loop instead of a throwaway list
        # comprehension executed only for its side effects.
        for signal_name, slot in _connect.items():
            getattr(worker, signal_name).connect(slot)
    if _start_thread:
        thread.start()  # sometimes need to connect stuff before starting
    return worker, thread
def contains(filename, value=None, fnvalue=None):
    """ If a string is contained within a yaml (and is not a comment or key), return where we found it """
    for entry in ALL_STRINGS.get(filename, ()):
        # entry[0] is the string text, entry[1] its location
        matched = (value and value in entry[0]) or \
                  (fnvalue and fnmatch.fnmatch(entry[0], fnvalue))
        if matched:
            return entry[1].strip()
def plot_with_overview(
    ds,
    tn,
    forcing_vars=None,
    domain_var="q",
    overview_window_width=4,
):
    """
    Produce a forcing plot with timestep `tn` highlighted together with
    overview plots of domain data variable `domain_var`. The width over the
    overview plot is set with `overview_window_width` (in degrees)
    """
    # Bug fix: mutable default argument replaced by a None sentinel.
    if forcing_vars is None:
        forcing_vars = ["dqdt_adv", "dtdt_adv"]
    ds_forcing = ds
    ds_domain = domain_load.load_data(
        root_data_path="data", name=ds_forcing.domain_name
    )
    ds_traj = trajectory_load.load_data(
        root_data_path="data", name=ds_forcing.trajectory_name
    )
    figwidth = 12
    subplot_height = 3
    traj_color = "red"
    # (the original computed N_vars twice; once is enough)
    N_vars = len(forcing_vars)
    figsize = (figwidth, 4 + subplot_height * N_vars)
    fig = plt.figure(figsize=figsize)
    gs = GridSpec(2 + N_vars, 2)
    ds_forc_tn = ds_forcing.isel(time=tn)
    lat0, lon0 = ds_forc_tn.lat, ds_forc_tn.lon
    # Window centred on the trajectory position at timestep tn.
    domain_window = dict(
        lat=slice(lat0 - overview_window_width / 2, lat0 + overview_window_width / 2),
        lon=slice(lon0 - overview_window_width / 2, lon0 + overview_window_width / 2),
        time=ds_forc_tn.time,
    )
    # NOTE(review): the trailing ["q"] after selecting `domain_var` and
    # summing looks suspicious -- confirm it is intended.
    da_domain = ds_domain[domain_var].sel(**domain_window).sum(dim="level")["q"]
    ax_domain = _add_overview_axes(fig=fig, gs=gs[:2, 0])
    da_domain.plot(ax=ax_domain)
    ax_satellite = _add_overview_axes(fig=fig, gs=gs[0:2, 1])
    ax_satellite.set_extent(ax_domain.get_extent())
    traj_plot_kwargs = dict(
        ds=ds_traj,
        add_ref="eurec4a_circle",
        color=traj_color,
        reference_time=ds_forc_tn.time,
    )
    trajectory_plot.main(ax=ax_domain, **traj_plot_kwargs)
    trajectory_plot.main(ax=ax_satellite, **traj_plot_kwargs)
    ax = None
    for n, v in enumerate(forcing_vars):
        ax = fig.add_subplot(gs[n + 2, :], sharex=ax)
        ds_forcing[v].plot(ax=ax, y="level")
        # .item() doesn't return a np.datetime64 object sadly, so we have to
        # make our own...
        t0 = np.datetime64(ds_forc_tn[v].time.item(), "ns")
        ax.axvline(x=t0, color="black", linestyle="--", alpha=0.5)
    fig.tight_layout()
    title = f"{ds.name} {ds.trajectory_type} trajectory\n{ds.domain_name} domain\n"
    if hasattr(ds, "velocity_method"):
        # Bug fix: the second string was missing its f-prefix, so the literal
        # text "{ds.velocity_method_kwargs_height}m height" was rendered.
        title += (
            f"{ds.velocity_method} velocity method using "
            f"{ds.velocity_method_kwargs_height}m height\n"
        )
    plt.suptitle(title, y=1.01)
    return fig
def output_all_in_folder_to_csv(folder_name: str, output_folder: str, db_connection: db.engine.base.Connection):
    """
    Run every SQL script found in a folder and write its results to csv.

    # TODO incorporate the file output regex so folders are only outputted
    # if they match, then match all folders to regex and run this function
    # rather than init_from_script function

    :param folder_name: directory containing the SQL script files
    :param output_folder: directory that receives the csv output
    :param db_connection: open database connection used to run the scripts
    :return: None
    """
    for entry in os.listdir(folder_name):
        script_path = join(folder_name, entry)
        output_sql_statements_to_csv(script_path, output_folder, db_connection)
    logger.info("All files output to csv")
def log_result(repository):
    """Catch results given by subprocesses.

    Logs receipt of the repository and appends it to the module-level
    RESULTS accumulator.
    """
    logger.info('Received %s from worker.', repository.name)
    RESULTS.append(repository)
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
    """
    Build the TensorFlow loss and optimizer operations.
    :param nn_last_layer: TF Tensor of the last layer in the neural network
    :param correct_label: TF Placeholder for the correct label image
    :param learning_rate: TF Placeholder for the learning rate
    :param num_classes: Number of classes to classify
    :return: Tuple of (logits, train_op, cross_entropy_loss)
    """
    # Flatten the network output and the labels to (pixels, classes).
    logits = tf.reshape(nn_last_layer, (-1, num_classes))
    labels = tf.reshape(correct_label, (-1, num_classes))
    # Cross-entropy loss plus the collected L2 regularization terms.
    cross_entropy_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
    cross_entropy_loss += tf.reduce_sum(tf.losses.get_regularization_losses())
    # Training operation: Adam minimizing the combined loss.
    optimizer = tf.train.AdamOptimizer(learning_rate)
    train_op = optimizer.minimize(cross_entropy_loss)
    return logits, train_op, cross_entropy_loss
def addbatchinfo_32PCAs(fam, individuals_annotation, evec_file, eval_file, new_evec_file, new_eval_file):
    """Annotate an EIGENSTRAT evec file (32 PCs) with family IDs and batches.

    Reads family IDs from the PLINK *fam* file and batch labels from
    *individuals_annotation*, then rewrites *evec_file* as a tab-separated
    table ``FID IID PC1..PC32 batch`` into *new_evec_file*.  The eigenvalue
    file is copied unchanged to *new_eval_file*.  On an I/O error the error
    is printed and the process exits with status 1 (original behaviour).

    Fixes over the original:
    - Python-2-only syntax (``file()``, ``except E, e``, ``print e``)
      rewritten in a form valid on both Python 2 and 3.
    - The 33 hand-written string concatenations replaced by one join.
    - ``os.system("cp ...")`` (broken for paths with spaces) replaced by
      ``shutil.copyfile``.

    :param fam: PLINK .fam file (columns: FID IID ...)
    :param individuals_annotation: annotation file with a header row;
        column 2 is the individual ID, column 7 the batch label
    :param evec_file: input evec file (header line contains 'indivID')
    :param eval_file: input eigenvalue file
    :param new_evec_file: output path for the annotated evec table
    :param new_eval_file: output path for the copied eigenvalue file
    """
    def _read_fam_ids(path):
        # individual ID -> family ID
        mapping = {}
        with open(path) as handle:
            for line in handle:
                fields = re.split(r"\s+", line.rstrip('\n'))
                if len(fields) >= 2:
                    mapping[fields[1]] = fields[0]
        return mapping

    def _read_batches(path):
        # individual ID -> batch label (first line is a header)
        mapping = {}
        with open(path) as handle:
            next(handle, None)
            for line in handle:
                fields = re.split(r"\s+", line.rstrip('\n'))
                if len(fields) >= 7:
                    mapping[fields[1]] = fields[6]
        return mapping

    try:
        id2fid = _read_fam_ids(fam)
        id2batch = _read_batches(individuals_annotation)
        with open(evec_file) as src, open(new_evec_file, "w") as dst:
            header = next(src).rstrip('\n')
            dst.write("FID\tIID" + header.replace("indivID", "") + "\tbatch\n")
            for line in src:
                line = line.rstrip('\n')
                if not line.strip():
                    # The original stopped at the first blank line; skipping
                    # blanks is more robust and otherwise equivalent.
                    continue
                fields = re.split(r"\s+", line)
                indiv_id = fields[0]
                # Row layout: FID, IID, the 32 PCs (evec columns 1..32), batch.
                row = [id2fid[indiv_id]] + fields[0:33] + [id2batch[indiv_id]]
                dst.write("\t".join(row) + "\n")
        import shutil
        shutil.copyfile(eval_file, new_eval_file)
    except IOError as e:
        print(e)
        sys.exit(1)
def expanded_indexer(key, ndim):
    """Expand an ndarray indexing key into a tuple of length *ndim*.

    The first ``Ellipsis`` in the key is replaced by however many full
    slices are needed to reach *ndim* dimensions; any further ``Ellipsis``
    items become single full slices.  The result is then right-padded with
    full slices up to *ndim* entries.

    Raises:
        IndexError: if the key addresses more than *ndim* dimensions.
    """
    if not isinstance(key, tuple):
        # numpy treats a non-tuple key like a length-1 tuple
        key = (key,)
    expanded = []
    seen_ellipsis = False
    for item in key:
        if item is not Ellipsis:
            expanded.append(item)
        elif seen_ellipsis:
            # only the first Ellipsis expands; later ones act as ':'
            expanded.append(slice(None))
        else:
            expanded.extend([slice(None)] * (ndim + 1 - len(key)))
            seen_ellipsis = True
    if len(expanded) > ndim:
        raise IndexError("too many indices")
    expanded.extend([slice(None)] * (ndim - len(expanded)))
    return tuple(expanded)
def __put_buttons_in_buttonframe(choices):
    """Put the buttons in the buttons frame.

    Creates one Tk button per entry in *choices*, packs them left-to-right,
    records each widget's text in the module-global __widgetTexts map,
    remembers the first button for initial focus, and binds the standard
    selection events (plus Escape for the cancel button, when present).
    """
    global __widgetTexts, __firstWidget, buttonsFrame
    __firstWidget = None
    __widgetTexts = {}
    i = 0
    for buttonText in choices:
        tempButton = tk.Button(buttonsFrame, takefocus=1, text=buttonText)
        _bindArrows(tempButton)
        tempButton.pack(expand=tk.YES, side=tk.LEFT, padx='1m', pady='1m', ipadx='2m', ipady='1m')
        # remember the text associated with this widget
        __widgetTexts[tempButton] = buttonText
        # remember the first widget, so we can put the focus there
        if i == 0:
            __firstWidget = tempButton
            i = 1
        # for the commandButton, bind activation events to the activation event handler
        commandButton = tempButton
        handler = __buttonEvent
        for selectionEvent in STANDARD_SELECTION_EVENTS:
            commandButton.bind('<%s>' % selectionEvent, handler)
        # Escape triggers cancel whenever a cancel choice is offered.
        if CANCEL_TEXT in choices:
            commandButton.bind('<Escape>', __cancelButtonEvent)
def downsample(
    data, sampling_freq=None, target=None, target_type="samples", method="mean"
):
    """Downsample pandas data to a new target frequency or number of samples
    using averaging.

    Args:
        data: (pd.DataFrame, pd.Series) data to downsample
        sampling_freq: (float) Sampling frequency of data in hertz
        target: (float) downsampling target
        target_type: type of target can be [samples,seconds,hz]
        method: (str) type of downsample method ['mean','median'],
                default: mean

    Returns:
        out: (pd.DataFrame, pd.Series) downsampled data

    Raises:
        ValueError: for non-pandas input, an unknown method, or an unknown
            target_type.
    """
    if not isinstance(data, (pd.DataFrame, pd.Series)):
        raise ValueError("Data must by a pandas DataFrame or Series instance.")
    # Idiom fix: the original used bitwise `|` on boolean comparisons.
    if method not in ("mean", "median"):
        raise ValueError("Metric must be either 'mean' or 'median' ")
    if target_type == "samples":
        n_samples = target
    elif target_type == "seconds":
        n_samples = target * sampling_freq
    elif target_type == "hz":
        n_samples = sampling_freq / target
    else:
        raise ValueError('Make sure target_type is "samples", "seconds", ' ' or "hz".')
    # Assign each row to a bucket of n_samples consecutive rows.
    idx = np.sort(np.repeat(np.arange(1, data.shape[0] / n_samples, 1), n_samples))
    if data.shape[0] > len(idx):
        # Remaining rows form a final (possibly shorter) bucket.  Bug fix:
        # when n_samples >= len(data), idx is empty and idx[-1] crashed;
        # everything then belongs to a single bucket.
        next_bucket = (idx[-1] + 1) if len(idx) else 1
        idx = np.concatenate([idx, np.repeat(next_bucket, data.shape[0] - len(idx))])
    if method == "mean":
        return data.groupby(idx).mean().reset_index(drop=True)
    return data.groupby(idx).median().reset_index(drop=True)
def available_commands(mod, ending="_command"):
    """Return the command names exposed by *mod*.

    A command is any attribute of *mod* whose name ends with *ending*;
    the suffix is stripped from the returned names.
    """
    return [name.split(ending)[0]
            for name in mod.__dict__
            if name.endswith(ending)]
def retrieve_browse(browse_location, config):
    """ Retrieve browse image and get the local path to it.
    If location is a URL perform download.

    Raises IngestionException when the download fails, the download target
    already exists locally, the resolved path escapes the storage
    directory, or filename validation fails.
    """
    # if file_name is a URL download browse first and store it locally
    validate = URLValidator()
    try:
        validate(browse_location)
        # Location validated as a URL -> download into the storage dir.
        input_filename = abspath(get_storage_path(
            basename(browse_location), config=config))
        logger.info("URL given, downloading browse image from '%s' to '%s'.",
                    browse_location, input_filename)
        if not exists(input_filename):
            start = time()
            try:
                # timeout in seconds
                setdefaulttimeout(120)
                remote_browse = urlopen(browse_location)
                with open(input_filename, "wb") as local_browse:
                    local_browse.write(remote_browse.read())
            except HTTPError, error:
                raise IngestionException("HTTP error downloading '%s': %s"
                                         % (browse_location, error.code))
            except URLError, error:
                raise IngestionException("URL error downloading '%s': %s"
                                         % (browse_location, error.reason))
            logger.info(
                "Retrieved %s %dB in %.3fs", browse_location,
                getsize(input_filename), time() - start,
            )
        else:
            # Never overwrite a previously downloaded file.
            raise IngestionException("File to download already exists locally "
                                     "as '%s'" % input_filename)
    except ValidationError:
        # Not a URL -> treat the location as a local filename.
        input_filename = abspath(get_storage_path(browse_location,
                                                  config=config))
        logger.info("Filename given, using local browse image '%s'.",
                    input_filename)
    # check that the input filename is valid -> somewhere under the storage dir
    storage_path = get_storage_path()
    if commonprefix((input_filename, storage_path)) != storage_path:
        raise IngestionException("Input path '%s' points to an invalid "
                                 "location." % browse_location)
    try:
        models.FileNameValidator(input_filename)
    except ValidationError, error:
        raise IngestionException("%s" % str(error), "ValidationError")
    return input_filename
def add_suffix(input_dict, suffix):
    """Return a copy of *input_dict* with *suffix* appended to every key."""
    return {key + suffix: value for key, value in input_dict.items()}
def ModelPrediction(
    df_train,
    forecast_length: int,
    transformation_dict: dict,
    model_str: str,
    parameter_dict: dict,
    frequency: str = 'infer',
    prediction_interval: float = 0.9,
    no_negatives: bool = False,
    constraint: float = None,
    future_regressor_train=None,
    future_regressor_forecast=None,
    holiday_country: str = 'US',
    startTimeStamps=None,
    grouping_ids=None,
    random_seed: int = 2020,
    verbose: int = 0,
    n_jobs: int = None,
):
    """Feed parameters into modeling pipeline

    Args:
        df_train (pandas.DataFrame): numeric training dataset of DatetimeIndex and series as cols
        forecast_length (int): number of periods to forecast
        transformation_dict (dict): a dictionary of outlier, fillNA, and transformation methods to be used
        model_str (str): a string to be direct to the appropriate model, used in ModelMonster
        parameter_dict (dict): model parameters passed through to ModelMonster
        frequency (str): str representing frequency alias of time series
        prediction_interval (float): width of errors (note: rarely do the intervals accurately match the % asked for...)
        no_negatives (bool): whether to force all forecasts to be > 0
        constraint (float): when not None, use this value * data st dev above max or below min for constraining forecast values.
        future_regressor_train (pd.Series): with datetime index, of known in advance data, section matching train data
        future_regressor_forecast (pd.Series): with datetime index, of known in advance data, section matching test data
        holiday_country (str): passed through to holiday package, used by a few models as 0/1 regressor.
        startTimeStamps (pd.Series): index (series_ids), columns (Datetime of First start of series)
        grouping_ids: grouping/hierarchy ids forwarded to the transformer
        random_seed (int): seed forwarded to the model for reproducibility
        verbose (int): verbosity level
        n_jobs (int): number of processes

    Returns:
        PredictionObject (autots.PredictionObject): Prediction from AutoTS model object
    """
    # Bug fix: mutable default arguments ([]) are shared between calls; use
    # None sentinels instead (behavior for callers is unchanged).
    if future_regressor_train is None:
        future_regressor_train = []
    if future_regressor_forecast is None:
        future_regressor_forecast = []
    transformationStartTime = datetime.datetime.now()
    from autots.tools.transform import GeneralTransformer
    try:
        coerce_integer = transformation_dict['coerce_integer']
        grouping = transformation_dict['grouping']
        if grouping == 'user' and grouping_ids is None:
            # user grouping without ids cannot work; fall back to kmeans5
            grouping = 'kmeans5'
            transformation_dict['grouping'] = 'kmeans5'
        reconciliation = transformation_dict['reconciliation']
    except Exception:
        # older transformation dicts may lack the grouping keys entirely
        coerce_integer = False
        grouping = None
        grouping_ids = None
        reconciliation = None
    transformer_object = GeneralTransformer(
        outlier_method=transformation_dict['outlier_method'],
        outlier_threshold=transformation_dict['outlier_threshold'],
        outlier_position=transformation_dict['outlier_position'],
        fillna=transformation_dict['fillna'],
        transformation=transformation_dict['transformation'],
        detrend=transformation_dict['detrend'],
        second_transformation=transformation_dict['second_transformation'],
        transformation_param=transformation_dict['transformation_param'],
        third_transformation=transformation_dict['third_transformation'],
        transformation_param2=transformation_dict['transformation_param2'],
        fourth_transformation=transformation_dict['fourth_transformation'],
        discretization=transformation_dict['discretization'],
        n_bins=transformation_dict['n_bins'],
        grouping=grouping,
        grouping_ids=grouping_ids,
        reconciliation=reconciliation,
        coerce_integer=coerce_integer,
    ).fit(df_train)
    df_train_transformed = transformer_object.transform(df_train)
    # slice the context, ie shorten the amount of data available.
    if transformation_dict['context_slicer'] not in [None, 'None']:
        from autots.tools.transform import simple_context_slicer
        df_train_transformed = simple_context_slicer(
            df_train_transformed,
            method=transformation_dict['context_slicer'],
            forecast_length=forecast_length,
        )
    # make sure regressor has same length. This could be a problem if wrong size regressor is passed.
    if len(future_regressor_train) > 0:
        future_regressor_train = future_regressor_train.tail(
            df_train_transformed.shape[0]
        )
    transformation_runtime = datetime.datetime.now() - transformationStartTime
    # from autots.evaluator.auto_model import ModelMonster
    model = ModelMonster(
        model_str,
        parameters=parameter_dict,
        frequency=frequency,
        prediction_interval=prediction_interval,
        holiday_country=holiday_country,
        random_seed=random_seed,
        verbose=verbose,
        forecast_length=forecast_length,
        n_jobs=n_jobs,
    )
    model = model.fit(df_train_transformed, future_regressor=future_regressor_train)
    df_forecast = model.predict(
        forecast_length=forecast_length, future_regressor=future_regressor_forecast
    )
    if df_forecast.forecast.isnull().all(axis=0).astype(int).sum() > 0:
        raise ValueError(
            "Model {} returned NaN for one or more series".format(model_str)
        )
    transformationStartTime = datetime.datetime.now()
    # Inverse the transformations
    df_forecast.forecast = pd.DataFrame(
        transformer_object.inverse_transform(df_forecast.forecast)
    )  # , index = df_forecast.forecast_index, columns = df_forecast.forecast_columns)
    df_forecast.lower_forecast = pd.DataFrame(
        transformer_object.inverse_transform(df_forecast.lower_forecast)
    )  # , index = df_forecast.forecast_index, columns = df_forecast.forecast_columns)
    df_forecast.upper_forecast = pd.DataFrame(
        transformer_object.inverse_transform(df_forecast.upper_forecast)
    )  # , index = df_forecast.forecast_index, columns = df_forecast.forecast_columns)
    df_forecast.transformation_parameters = transformation_dict
    # Remove negatives if desired
    # There's df.where(df_forecast.forecast > 0, 0) or df.clip(lower = 0), not sure which faster
    if no_negatives:
        df_forecast.lower_forecast = df_forecast.lower_forecast.clip(lower=0)
        df_forecast.forecast = df_forecast.forecast.clip(lower=0)
        df_forecast.upper_forecast = df_forecast.upper_forecast.clip(lower=0)
    if constraint is not None:
        if verbose > 2:
            print("Using constraint.")
        constraint = float(constraint)
        # clamp forecasts to within `constraint` standard deviations of the
        # observed training min/max, per series
        train_std = df_train.std(axis=0)
        train_min = df_train.min(axis=0) - (constraint * train_std)
        train_max = df_train.max(axis=0) + (constraint * train_std)
        df_forecast.forecast = df_forecast.forecast.clip(lower=train_min, axis=1)
        df_forecast.forecast = df_forecast.forecast.clip(upper=train_max, axis=1)
    transformation_runtime = transformation_runtime + (
        datetime.datetime.now() - transformationStartTime
    )
    df_forecast.transformation_runtime = transformation_runtime
    return df_forecast
def new_config(logger, arg):
    """
    Method that turns the pycom to an access point for the user to connect and update the configurations.
    The device automatically reboots and applies modifications upon successful configuration.
    Takes an extra dummy argument required by the threading library.
    :param logger: status logger
    :type logger: LoggerFactory
    :param arg: dummy argument required by the threading API (unused)
    """
    config = Configuration.Configuration(logger)  #TODO: fix
    # Only one of this thread is allowed to run at a time
    if not wifi_lock.locked():
        with wifi_lock:
            logger.info("New configuration setup started")
            # Config uses LED colours to indicate the state of the connection - lock is necessary to disable error pings
            led_lock.acquire(1)
            # Device id makes the SSID unique per board.
            unique_id = ubinascii.hexlify(machine.unique_id()).decode()
            # set pycom up as access point
            wlan = network.WLAN(mode=WLAN.AP, ssid=config.get_config("device_name")+ unique_id)
            # Connect to PmSensor using password set by the user
            wlan.init(mode=WLAN.AP, ssid=config.get_config("device_name")+ unique_id, auth=(WLAN.WPA2, config.get_config("password")), channel=1,
                      antenna=WLAN.INT_ANT)
            # Load HTML via entering 192,168.4.10 to browser
            wlan.ifconfig(id=1, config=('192.168.4.10', '255.255.255.0', '192.168.4.1', '192.168.4.1'))
            logger.info('Access point turned on as {}'.format(config.get_config("device_name") + unique_id))
            logger.info('Configuration website can be accessed at 192.168.4.10')
            address = socket.getaddrinfo('0.0.0.0', 80)[0][-1]  # Accept stations from all addresses
            sct = socket.socket()  # Create socket for communication
            sct.settimeout(int(float(config.get_config("config_timeout")) * 60))  # session times out after x seconds
            gc.collect()  # frees up unused memory if there was a previous connection
            sct.bind(address)  # Bind address to socket
            sct.listen(1)  # Allow one station to connect to socket
            pycom.rgbled(0x000055)  # Blue LED - waiting for connection
            # Serve the configuration page and wait for the user's settings.
            get_new_config(sct, logger)
            wlan.deinit()  # turn off wifi
            gc.collect()
            # Reboot so the new configuration takes effect.
            logger.info('rebooting...')
            machine.reset()
def make_year_key(year):
    """A key generator for sorting years.

    Maps None to (LATEST_YEAR, 12), a 4-digit year to (year, 12), and a
    6-digit YYYYMM value to (year, month).

    Raises:
        ValueError: for any other input length.
    """
    if year is None:
        return (LATEST_YEAR, 12)
    text = str(year)
    if len(text) == 4:
        return (int(text), 12)
    if len(text) == 6:
        return (int(text[:4]), int(text[4:]))
    raise ValueError('invalid year %s' % year)
def test_example(default_rules_collection):
    """example.yml is expected to have 16 match errors inside."""
    # NOTE(review): the docstring previously said 15 while the assertion
    # checks 16; aligned the docstring with the assertion.
    result = Runner(default_rules_collection, 'examples/example.yml', [], [], []).run()
    assert len(result) == 16
def test_field_name(test_page):
    """Fields should report their intended class name, not 'Performer'."""
    cases = [
        ('Button', lambda: test_page.button[0]),
        ('Input', lambda: test_page.input_area.input[0]),
        ('Link', lambda: test_page.link[0]),
    ]
    for expected_name, access in cases:
        with pytest.raises(TypeError) as excinfo:
            access()
        assert expected_name in str(excinfo.value)
def set_stereo_from_geometry(gra, geo, geo_idx_dct=None):
    """ set graph stereo from a geometry
    (coordinate distances need not match connectivity -- what matters is the
    relative positions at stereo sites)
    """
    gra = without_stereo_parities(gra)
    if geo_idx_dct is None:
        # default: map atom keys to geometry rows in sorted-key order
        geo_idx_dct = {atm_key: idx
                       for idx, atm_key in enumerate(sorted(atom_keys(gra)))}
    # Iterate to self-consistency: newly assigned parities can expose
    # additional stereogenic sites.
    stereo_atm_keys = set()
    stereo_bnd_keys = set()
    prev_gra = None
    while prev_gra != gra:
        prev_gra = gra
        stereo_atm_keys.update(stereogenic_atom_keys(gra))
        stereo_bnd_keys.update(stereogenic_bond_keys(gra))
        gra = _set_atom_stereo_from_geometry(gra, stereo_atm_keys, geo, geo_idx_dct)
        gra = _set_bond_stereo_from_geometry(gra, stereo_bnd_keys, geo, geo_idx_dct)
    return gra
def multiply_MPOs(op0, op1):
    """Multiply two MPOs (composition along physical dimension).

    Both operators must act on the same number of lattice sites and share
    the same physical quantum numbers. The product's virtual bond quantum
    numbers are the flattened combinations of the inputs' bond quantum
    numbers, so bond dimensions multiply.
    """
    # number of lattice sites must agree
    assert op0.nsites == op1.nsites
    L = op0.nsites
    # physical quantum numbers must agree
    assert np.array_equal(op0.qd, op1.qd)
    # initialize with dummy tensors and bond quantum numbers
    op = MPO(op0.qd, (L+1)*[[0]])
    # combine virtual bond quantum numbers
    for i in range(L + 1):
        op.qD[i] = qnumber_flatten([op0.qD[i], op1.qD[i]])
    for i in range(L):
        # multiply physical dimensions and reorder dimensions:
        # contract axis 1 of op0.A[i] with axis 0 of op1.A[i], then permute
        # to (phys, phys, left0, left1, right0, right1) so that the two
        # virtual bonds on each side are adjacent and can be merged below
        op.A[i] = np.tensordot(op0.A[i], op1.A[i], (1, 0)).transpose((0, 3, 1, 4, 2, 5))
        # merge virtual bonds
        s = op.A[i].shape
        assert len(s) == 6
        op.A[i] = op.A[i].reshape((s[0], s[1], s[2]*s[3], s[4]*s[5]))
        # consistency check
        assert is_qsparse(op.A[i], [op.qd, -op.qd, op.qD[i], -op.qD[i+1]]), \
            'sparsity pattern of MPO tensor does not match quantum numbers'
    return op
def build_list_of_dicts(val):
    """
    Converts a value that can be presented as a list of dict.
    In case top level item is not a list, it is wrapped with a list
    Valid values examples:
    - Valid dict: {"k": "v", "k2","v2"}
    - List of dict: [{"k": "v"}, {"k2","v2"}]
    - JSON decodable string: '{"k": "v"}', or '[{"k": "v"}]'
    - List of JSON decodable strings: ['{"k": "v"}', '{"k2","v2"}']
    Invalid values examples:
    - ["not", "a", "dict"]
    - [123, None],
    - [["another", "list"]]
    :param val: Input value
    :type val: Union[list, dict, str]
    :return: Converted(or original) list of dict
    :raises: ValueError in case value cannot be converted to a list of dict
    """
    if val is None:
        return []
    if isinstance(val, str):
        # use OrderedDict to preserve order
        val = json.loads(val, object_pairs_hook=OrderedDict)
    if isinstance(val, dict):
        val = [val]
    # Previously a non-list top-level value (e.g. the string "123", which
    # JSON-decodes to an int) leaked a TypeError out of enumerate(); raise
    # the documented ValueError instead.
    if not isinstance(val, list):
        raise ValueError("Expected a list of dicts")
    for index, item in enumerate(val):
        if isinstance(item, str):
            # use OrderedDict to preserve order
            val[index] = json.loads(item, object_pairs_hook=OrderedDict)
        if not isinstance(val[index], dict):
            raise ValueError("Expected a list of dicts")
    return val
def add_borders_to_DataArray_U_points(da_u, da_v):
    """
    A routine that adds a column to the "right" of the 'u' point
    DataArray da_u so that every tracer point in the tile
    will have a 'u' point to the "west" and "east"
    After appending the border the length of da_u in x
    will be +1 (one new column)
    This routine is pretty general.  Any tiles can be in the da_u and
    da_v DataArrays but if the tiles to the "right" of the da_u tiles
    are not available then the new rows will be filled with nans.
    Parameters
    ----------
    da_u : DataArray
        The `DataArray` object that has tiles of a u-point variable
        Tiles of the must be in their original llc layout.
    da_v : DataArray
        The `DataArray` object that has tiles of the v-point variable that
        corresponds with da_u. (e.g., VVEL corresponds with UVEL)
        Tiles of the must be in their original llc layout.
    Returns
    -------
    da_u_new: DataArray
        a new `DataArray` object that has the appended values of 'u' along
        its right edge.  The lon_u and lat_u coordinates are lost but all
        other coordinates remain.  A 'padded' attribute is set to True.
    """
    #%%
    # the i_g dimension will be incremented by one.
    i_g = np.arange(1, len(da_u.i_g)+2)
    # the j dimension is unchanged.
    j = da_u['j'].values
    llcN = len(j)
    # the k dimension, if it exists, is unchanged.
    if 'k' in da_u.dims:
        nk = len(da_u.k)
        k = da_u['k'].values
    else:
        nk = 0
    # the time dimension, if it exists, is unchanged
    if 'time' in da_u.dims:
        time = da_u['time'].values
    #%%
    #print "\n>>> ADDING BORDERS TO U POINT TILES\n"
    # tiles whose tile to the right are rotated 90 degrees counter clockwise
    # to add borders from tiles 4, 5, or 6 we need to use the da_v fields
    rot_tiles = {4, 5, 6}
    # the new arrays will be one longer in the j direction, +1 column
    pad_j = 1 # add one to the second dimension (x)
    pad_i = 0 # we do not pad in first dimension (y)
    # set the number of processed tiles counter to zero
    num_proc_tiles = 0
    # find the number of non-tile dimensions
    if 'tile' in da_u.dims:
        num_dims = da_u.ndim - 1
    else:
        num_dims = da_u.ndim
    # loop through all tiles in da_u
    for tile_index in da_u.tile.values:
        # find out which tile is to the right of this da_u tile
        right_tile_index, top_tile_index, corner_tile_index = \
            get_llc_tile_border_mapping(tile_index)
        # if 'tile' exists as a dimension, select and copy the proper da_u tile
        if 'tile' in da_u.dims:
            ref_arr = deepcopy(da_u.sel(tile=tile_index))
        else:
            # otherwise we have a single da_u tile so make a copy of it
            ref_arr = deepcopy(da_u)
        # the append_border flag will be true if we have a tile to the right.
        append_border = False
        #print '\ncurrent da_u tile ', tile_index
        #print 'right tile index ', right_tile_index
        # check to see if there even is a tile to the right of da_u tile_index
        # tiles 10 and 13 don't have one!
        if right_tile_index > 0:
            #print 'there is a tile to the right of da_u tile ', tile_index
            # determine whether the tile to the right is rotated relative
            # to da_u tile_index.  if so we'll need da_v!
            if tile_index in rot_tiles:
                #print 'append with da_v tile ', right_tile_index
                if right_tile_index in da_v.tile.values:
                    #print 'we have da_v tile ', right_tile_index
                    # see if we have multiple da_v tiles.
                    if len(da_v.tile) > 1:
                        # pull out the one we need.
                        right_arr = da_v.sel(tile=right_tile_index)
                        append_border = True
                        #print 'appending from da_v tile ', right_tile_index
                    # there is only one da_v tile
                    elif da_v.tile == right_tile_index:
                        # it is the one we need.
                        right_arr = da_v
                        append_border = True
                        #print 'appending from da_v tile ', right_tile_index
                    # something may have gone wrong.
                    else:
                        print('something is wrong with the da_v tile')
                # if we do not have the da_v tile, then we can't append!
                else:
                    print('we do not have da_v tile ', right_tile_index)
            # the values to append to the top come from another da_u tile
            else:
                #print 'append with da_u tile ', right_tile_index
                # see if we have the required da_u tile
                if right_tile_index in da_u.tile.values:
                    #print 'we have da_u tile ', right_tile_index
                    # see if we have multiple da_u tiles
                    if len(da_u.tile) > 1:
                        # pull out the one we need.
                        right_arr = da_u.sel(tile=right_tile_index)
                        append_border = True
                        #print 'appending from da_u tile ', right_tile_index
                    # if we only have one tile then something is wrong because
                    # the tile to the right of this da_u tile cannot be itself
                    else:
                        print('tile to the right cannot be tile_index')
                # we do not have the required da_u tile.
                else:
                    print('we do not have da_u tile ', right_tile_index)
        # there is no tile to the right
        #else:
        #    print 'there is no tile to the right of da_u tile ', tile_index
        # if we have found a tile to the right we can do the appending
        if append_border:
            new_arr=append_border_to_tile(ref_arr, tile_index,
                                          'u', llcN,
                                          right = right_arr)
        # if not then we will append an array of nans
        else:
            if num_dims == 2:
                pad = ((0, pad_i), (0, pad_j))
            elif num_dims == 3:
                pad = ((0, 0), (0, pad_i), (0, pad_j))
            elif num_dims == 4:
                pad = ((0, 0), (0, 0), (0, pad_i), (0, pad_j))
            new_arr = np.pad(ref_arr, pad_width = pad, mode='constant',
                             constant_values = np.nan)
        # create a new DataArray
        # NOTE(review): the 3-D case without 'k' assumes a 'time' dimension
        # is present (otherwise `time` below is unbound) -- confirm callers
        # only pass (time, j, i_g) or (k, j, i_g) shaped 3-D arrays.
        if num_dims == 2:
            new_coords = [('j', j), ('i_g', i_g)]
        elif num_dims == 3 and nk > 0:
            new_coords = [('k', k), ('j', j), ('i_g', i_g)]
        elif num_dims == 3 and nk == 0:
            new_coords = [('time', time),('j', j), ('i_g', i_g)]
        elif num_dims == 4:
            new_coords = [('time', time), ('k', k), ('j', j), ('i_g',i_g)]
        tmp_DA = xr.DataArray(new_arr, name = da_u.name, coords=new_coords)
        # give the new DataArray the same attributes as da_u
        tmp_DA.attrs = da_u.attrs
        # give the new DataArray a tile coordinate
        tmp_DA.coords['tile'] = tile_index
        # increment the number of processed tiles counter by one
        num_proc_tiles += 1
        # set da_u_new equal to tmp_DA if this is the first processed tile
        if num_proc_tiles == 1:
            da_u_new = tmp_DA
        # otherwise, concatentate tmp_DA with da_u_new along the 'tile' dim
        else:
            da_u_new = xr.concat([da_u_new, tmp_DA],'tile')
        # reset tmp_DA (drop the reference before the next iteration)
        tmp_DA = []
    # copy every coordinate whose name contains 'tim' (time, time_bnds, ...)
    # from da_u to da_u_new.
    for idx, var in enumerate(da_u.coords):
        if 'tim' in var:
            da_u_new[var] = da_u[var]
    da_u_new.attrs['padded'] = True
    #%%
    return da_u_new
#%% | 33,962 |
def arg(prevs, newarg):
    """Return a list of the previous argument(s) with *newarg* appended.

    *prevs* is wrapped in a list first when it is not one already; the
    input list is never mutated.
    """
    base = prevs if isinstance(prevs, list) else [prevs]
    return base + [newarg]
def print_data(uniprot_ids):
    """
    For each protein possessing the N-glycosylation motif, prints given access ID
    followed by a list of locations in the protein string where the motif is found.

    Args:
        uniprot_ids (list): list of UniProt Protein Database access IDs.

    Returns:
        None
    """
    for accession in uniprot_ids:
        # a list comprehension (not map(str, ...)) so that an empty result
        # stays an empty list and the ID is skipped entirely
        positions = [str(pos) for pos in get_locations(accession)]
        if positions:
            print(accession)
            print(" ".join(positions))
def quote_plus(s, safe='', encoding=None, errors=None):
    """Quote the query fragment of a URL; replacing ' ' with '+'"""
    if ' ' not in s:
        return quote(s, safe, encoding, errors)
    # keep spaces unquoted during the main pass, then turn them into '+'
    quoted = quote(s, safe + ' ', encoding, errors)
    return quoted.replace(' ', '+')
def mod(x, y):
    """Return the remainder of ``x`` divided by ``y`` (the ``%`` operator)."""
    remainder = x % y
    return remainder
def _lower_batch_matmul(op: relay.Call, inputs: List[te.Tensor]) -> te.Tensor:
    """Lower a batch_matmul using cuBLAS."""
    lhs, rhs = inputs[0], inputs[1]
    # forward the transpose flags and output dtype from the relay op
    return cublas.batch_matmul(
        lhs,
        rhs,
        transa=op.attrs["transpose_a"],
        transb=op.attrs["transpose_b"],
        dtype=op.checked_type.dtype,
    )
def cross_entropy_loss(inputs, labels, rescale_loss=1):
    """ cross entropy loss with a mask """
    ce = mx.gluon.loss.SoftmaxCrossEntropyLoss(weight=rescale_loss)
    raw_loss = ce(inputs, labels)
    # zero out masked positions before averaging
    mask = S.var('mask')
    masked_loss = raw_loss * S.reshape(mask, shape=(-1,))
    return S.make_loss(masked_loss.mean())
def as_wrapping_formatters(objs, fields, field_labels, formatters, no_wrap=None, no_wrap_fields=[]):
    """Entry point for building the "best guess" word-wrapping formatters.

    A best-guess formatter guesses the best column widths for the table cell
    data by collecting stats on it (min/max/average width per column) and
    deriving desired and minimum widths from those stats.

    If the no_wrap parameter or the global no_wrap flag is set, or any of the
    given formatters is already a word-wrapping formatter, no wrapping
    formatters are built and the original formatters are returned unchanged.

    :param objs:
    :param field_labels:
    :param formatters:
    :param no_wrap:
    :param no_wrap_fields:
    :return: the original formatters when no wrapping is required, otherwise
             best-guess word-wrapping formatters that embed the originals
    """
    resolved_no_wrap = is_nowrap_set(no_wrap)

    if not needs_wrapping_formatters(formatters, resolved_no_wrap):
        return formatters

    format_spec = build_best_guess_formatters_using_average_widths(
        objs, fields, field_labels, formatters, no_wrap_fields)
    return build_wrapping_formatters(objs, fields, field_labels, format_spec)
def classification_metrics(n_classes: int = 2):
    """Set up one classification-metric ModuleDict each for train and val."""
    logger.info(f"Setting up metrics for: {n_classes}")

    def _make_metrics():
        # fresh metric instances per phase so train/val state never mixes
        return torch.nn.ModuleDict(
            {
                "accuracy": Accuracy(),
                "recall": Recall(),
                "precision": Precision(),
                "F1": F1(),
            }
        )

    return _make_metrics(), _make_metrics()
def ping(device,
         address,
         ttl=None,
         timeout=None,
         tos=None,
         dscp=None,
         size=None,
         count=None,
         source=None,
         rapid=False,
         do_not_fragment=False,
         validate=False,
         vrf=None,
         command=None,
         output=None):
    """ execute ping and parse ping result and return structure data

        Args:
            device ('obj'): Device object
            address ('str'): Address value
            tos ('int'): type of service value
            dscp (`str`): DSCP value
            size ('str'): data bytes expected
            ttl ('int'): Not supported
            timeout ('int'): timeout interval
            count ('int'): repeat count
            source ('str'): source address or interface, default: None
            rapid ('bool'): Not supported
            do_not_fragment ('bool'): enable do not fragment bit in IP header, default: False
            validate (`bool`): validate reply data, default: False
            vrf ('str'): VRF name
            command (`str`): ping command. This will ignore all other arguments
            output (`str`): ping command output. no parser call involved

        Returns:
            dict: parsed ping output; an empty dict when the parser output
            is empty or the ping command raises an error

        Raises:
            None
    """
    # NOTE(review): `ttl` and `rapid` are documented above as "Not supported"
    # yet are still forwarded to the parser below -- confirm intent.
    try:
        obj = Ping(device=device)
        return obj.parse(addr=address,
                         vrf=vrf,
                         tos=tos,
                         dscp=dscp,
                         size=size,
                         ttl=ttl,
                         timeout=timeout,
                         count=count,
                         source=source,
                         rapid=rapid,
                         do_not_fragment=do_not_fragment,
                         validate=validate,
                         command=command,
                         output=output)
    except SchemaEmptyParserError:
        log.info('parsed_output was empty')
        return {}
    except Exception as e:
        log.warning(e)
        return {}
def log_prov_es(job, prov_es_info, prov_es_file):
    """Log PROV-ES document. Create temp PROV-ES document to populate
    attributes that only the worker has access to (e.g. PID).

    Args:
        job: HySDS job dict; reads ``job_id``, ``type``, optionally
            ``username``, and the ``job_info`` sub-dict (``execute_node``,
            ``pid``, ``cmd_start``, ``cmd_end``, ``job_url``).
        prov_es_info: existing PROV-ES document (dict), mutated in place
            with the worker-side software agent / process step attributes.
        prov_es_file: path of the JSON file to write the merged document to.
    """
    # create PROV-ES doc to generate attributes that only verdi know
    ps_id = "hysds:%s" % get_uuid(job["job_id"])
    bundle_id = "hysds:%s" % get_uuid("bundle-%s" % job["job_id"])
    doc = ProvEsDocument()

    # get bundle
    # bndl = doc.bundle(bundle_id)
    bndl = None

    # create software agent
    sa_label = "hysds:pge_wrapper/%s/%d/%s" % (
        job["job_info"]["execute_node"],
        job["job_info"]["pid"],
        datetime.utcnow().isoformat(),
    )
    sa_id = "hysds:%s" % get_uuid(sa_label)
    doc.softwareAgent(
        sa_id,
        str(job["job_info"]["pid"]),
        job["job_info"]["execute_node"],
        role=job.get("username", None),
        label=sa_label,
        bundle=bndl,
    )

    # create processStep
    doc.processStep(
        ps_id,
        job["job_info"]["cmd_start"],
        job["job_info"]["cmd_end"],
        [],
        sa_id,
        None,
        [],
        [],
        bundle=bndl,
        prov_type="hysds:%s" % job["type"],
    )

    # get json
    pd = json.loads(doc.serialize())

    # update software agent and process step; the document may either carry a
    # single bundle or be unbundled -- both layouts are merged the same way
    if "bundle" in prov_es_info:
        if len(prov_es_info["bundle"]) == 1:
            bundle_id_orig = list(prov_es_info["bundle"].keys())[0]

            # update software agent
            prov_es_info["bundle"][bundle_id_orig].setdefault("agent", {}).update(
                pd["bundle"][bundle_id]["agent"]
            )

            # update wasAssociatedWith
            prov_es_info["bundle"][bundle_id_orig].setdefault(
                "wasAssociatedWith", {}
            ).update(pd["bundle"][bundle_id]["wasAssociatedWith"])

            # update activity
            if "activity" in prov_es_info["bundle"][bundle_id_orig]:
                if len(prov_es_info["bundle"][bundle_id_orig]["activity"]) == 1:
                    # exactly one existing process step: enrich it in place
                    # with the worker-side timing and job metadata
                    ps_id_orig = list(
                        prov_es_info["bundle"][bundle_id_orig]["activity"].keys()
                    )[0]
                    prov_es_info["bundle"][bundle_id_orig]["activity"][ps_id_orig][
                        "prov:startTime"
                    ] = pd["bundle"][bundle_id]["activity"][ps_id]["prov:startTime"]
                    prov_es_info["bundle"][bundle_id_orig]["activity"][ps_id_orig][
                        "prov:endTime"
                    ] = pd["bundle"][bundle_id]["activity"][ps_id]["prov:endTime"]
                    prov_es_info["bundle"][bundle_id_orig]["activity"][ps_id_orig][
                        "hysds:job_id"
                    ] = job["job_id"]
                    prov_es_info["bundle"][bundle_id_orig]["activity"][ps_id_orig][
                        "hysds:job_type"
                    ] = job["type"]
                    prov_es_info["bundle"][bundle_id_orig]["activity"][ps_id_orig][
                        "hysds:job_url"
                    ] = job["job_info"]["job_url"]
                    prov_es_info["bundle"][bundle_id_orig]["activity"][ps_id_orig][
                        "hysds:mozart_url"
                    ] = app.conf.MOZART_URL
                    if (
                        "prov:type"
                        not in prov_es_info["bundle"][bundle_id_orig]["activity"][
                            ps_id_orig
                        ]
                    ):
                        prov_es_info["bundle"][bundle_id_orig]["activity"][ps_id_orig][
                            "prov:type"
                        ] = pd["bundle"][bundle_id]["activity"][ps_id]["prov:type"]

                    # update wasAssociatedWith activity ids so they point at
                    # the pre-existing process step, not the temp one
                    for waw_id in prov_es_info["bundle"][bundle_id_orig][
                        "wasAssociatedWith"
                    ]:
                        if (
                            prov_es_info["bundle"][bundle_id_orig]["wasAssociatedWith"][
                                waw_id
                            ]["prov:activity"]
                            == ps_id
                        ):
                            prov_es_info["bundle"][bundle_id_orig]["wasAssociatedWith"][
                                waw_id
                            ]["prov:activity"] = ps_id_orig
                else:
                    prov_es_info["bundle"][bundle_id_orig]["activity"].update(
                        pd["bundle"][bundle_id]["activity"]
                    )
            else:
                prov_es_info["bundle"][bundle_id_orig]["activity"] = pd["bundle"][
                    bundle_id
                ]["activity"]
    else:
        # update software agent
        prov_es_info.setdefault("agent", {}).update(pd["agent"])

        # update wasAssociatedWith
        prov_es_info.setdefault("wasAssociatedWith", {}).update(pd["wasAssociatedWith"])

        # update process step
        if "activity" in prov_es_info:
            if len(prov_es_info["activity"]) == 1:
                # exactly one existing process step: enrich it in place
                ps_id_orig = list(prov_es_info["activity"].keys())[0]
                prov_es_info["activity"][ps_id_orig]["prov:startTime"] = pd["activity"][
                    ps_id
                ]["prov:startTime"]
                prov_es_info["activity"][ps_id_orig]["prov:endTime"] = pd["activity"][
                    ps_id
                ]["prov:endTime"]
                prov_es_info["activity"][ps_id_orig]["hysds:job_id"] = job["job_id"]
                prov_es_info["activity"][ps_id_orig]["hysds:job_type"] = job["type"]
                prov_es_info["activity"][ps_id_orig]["hysds:job_url"] = job["job_info"][
                    "job_url"
                ]
                prov_es_info["activity"][ps_id_orig][
                    "hysds:mozart_url"
                ] = app.conf.MOZART_URL
                if "prov:type" not in prov_es_info["activity"][ps_id_orig]:
                    prov_es_info["activity"][ps_id_orig]["prov:type"] = pd["activity"][
                        ps_id
                    ]["prov:type"]

                # update wasAssociatedWith activity ids
                for waw_id in prov_es_info["wasAssociatedWith"]:
                    if (
                        prov_es_info["wasAssociatedWith"][waw_id]["prov:activity"]
                        == ps_id
                    ):
                        prov_es_info["wasAssociatedWith"][waw_id][
                            "prov:activity"
                        ] = ps_id_orig
            else:
                prov_es_info["activity"].update(pd["activity"])
        else:
            prov_es_info["activity"] = pd["activity"]

    # write prov
    with open(prov_es_file, "w") as f:
        json.dump(prov_es_info, f, indent=2)
def createHub(target, genomes, opts):
    """Main method that organizes the creation of the meta-comparative hub.

    Creates the hub directory with hub.txt/genomes.txt, schedules child jobs
    to write per-genome sequence data and trackDb files, and links the input
    HAL files into the hub directory.
    """
    # Create the necessary hub files
    if not os.path.isdir(opts.hubDir):
        os.makedirs(opts.hubDir)
    writeHubFile(os.path.join(opts.hubDir, 'hub.txt'),
                 hubName="_vs_".join(opts.labels))
    writeGenomesFile(os.path.join(opts.hubDir, 'genomes.txt'), opts.hals[0], genomes)
    for genome in genomes:
        target.addChildFn(writeSequenceData, (genome, opts.hals[0], opts.hubDir))
    relativeHalPaths = linkHals(opts.hubDir, opts.hals)
    # Liftover all genomes
    # NOTE(review): this loop is currently a no-op -- the liftover child jobs
    # below are commented out, so nothing is scheduled here. Confirm whether
    # this is intentionally disabled or unfinished.
    for genome1 in genomes:
        for genome2 in genomes:
            pass
            # target.addChildFn(liftoverEntireGenome, (opts.hal1, genome1, genome2))
            # target.addChildFn(liftoverEntireGenome, (opts.hal2, genome1, genome2))
    # Create trackDbs
    for genome in genomes:
        target.addChildFn(createTrackDb, (genome, genomes, relativeHalPaths, opts.labels, opts.hubDir))
    # Create the bed files that display differential coverage
def find_host(connection, sd_name):
    """
    Check if we can perform a transfer using the local host and return a host
    instance. Return None if we cannot use this host.

    Using the local host for an image transfer allows optimizing the
    connection via a unix socket, which speeds up the transfer significantly
    and minimizes network bandwidth.

    Using the local host is only possible when all of the following hold:
    - The local host is an oVirt host
    - The host is Up
    - The host is in the same DC as the storage domain

    Consider this setup:

        laptop1

        dc1
            host1 (down)
            host2 (up)
            sd1
                disk1

        dc2
            host3 (up)
            sd2
                disk2

    - Running on laptop1 or host1: the local host cannot be used.
    - Running on host2: host2 can transfer disk1.
    - Running on host3: host3 can transfer disk2.

    Arguments:
        connection (ovirtsdk4.Connection): Connection to ovirt engine
        sd_name (str): Storage domain name

    Returns:
        ovirtsdk4.types.Host
    """
    # Read this host's hardware id; absence means we are not on an oVirt host.
    try:
        with open("/etc/vdsm/vdsm.id") as f:
            hw_id = f.readline().strip()
    except FileNotFoundError:
        log.debug("Not running on oVirt host, using any host")
        return None
    except OSError as e:
        # Unexpected error when running on ovirt host. Since choosing a host
        # is an optimization, log and continue.
        log.warning("Cannot read /etc/vdsm/vdsm.id, using any host: %s", e)
        return None

    log.debug("Found host hardware id: %s", hw_id)

    # Locate the data center owning the storage domain.
    system_service = connection.system_service()
    matching_dcs = system_service.data_centers_service().list(
        search='storage.name=%s' % sd_name,
        case_sensitive=True,
    )
    if not matching_dcs:
        raise RuntimeError(
            "Storage domain {} is not attached to a DC"
            .format(sd_name))

    data_center = matching_dcs[0]
    log.debug("Found data center: %s", data_center.name)

    # Require that this host is Up and belongs to that data center.
    up_hosts = system_service.hosts_service().list(
        search="hw_id={} and datacenter={} and status=Up".format(
            hw_id, data_center.name),
        case_sensitive=True,
    )
    if not up_hosts:
        log.debug(
            "Cannot use host with hardware id %s, host is not up, or does "
            "not belong to data center %s",
            hw_id, data_center.name)
        return None

    local_host = up_hosts[0]
    log.debug("Using host id %s", local_host.id)
    return local_host
def _subSquare(vectors, var, full=False):
    """
    given a series of vectors, this function calculates:
    (variances,vectors)=numpy.linalg.eigh(vectors.H*vectors)

    it's a seperate function because if there are less vectors
    than dimensions the process can be accelerated, it just takes some dancing

    it is based on this:

    >>> vectors=Matrix(helpers.ascomplex(numpy.random.randn(
    ...     numpy.random.randint(1,10),numpy.random.randint(1,10),2
    ... )))
    >>> cov = vectors.H*vectors
    >>> Xcov = vectors*vectors.H
    >>> (Xval,Xvec) = numpy.linalg.eigh(Xcov)
    >>> vec = Xvec.H*vectors
    >>> assert vec.H*vec == cov
    """
    vectors = Matrix(vectors)
    shape = vectors.shape

    # degenerate (empty) input: no eigenvalues, zero-row eigenvector matrix
    if not all(shape):
        val = numpy.zeros([0])
        vec = numpy.zeros([0, shape[1]])
        return (val, vec)

    eig = numpy.linalg.eigh

    if shape[0] >= shape[1] or full or not vectors.any() or (var < 0).any():
        # at least as many vectors as dimensions (or forced): eigendecompose
        # the full weighted covariance directly
        scaled = Matrix(var[:, None]*numpy.array(vectors))
        cov = vectors.H*scaled
        (val, vec) = eig(cov)
        vec = vec.H
    elif not var.any():
        # all variances are zero: eigenvectors still come from the
        # unweighted covariance, eigenvalues are all zero
        cov = vectors.H*vectors
        (_,vec) = eig(cov)
        vec = vec.H
        val = numpy.zeros(vec.shape[0])
    else:
        # fewer vectors than dimensions: work in the smaller vector space.
        # NOTE: the deprecated scipy.sqrt alias (removed from the scipy
        # namespace in recent releases) was replaced with numpy.sqrt,
        # which is the function it aliased.
        scaled = Matrix(numpy.sqrt(var)[:, None]*numpy.array(vectors))
        Xcov = scaled*scaled.H
        #Xcov = var[:,None]*numpy.array(vectors)*vectors.H
        (_, Xvec) = eig(Xcov)
        Xscaled = (Xvec.H*scaled)
        val = helpers.mag2(Xscaled)
        vec = numpy.array(Xscaled)/numpy.sqrt(val[:, numpy.newaxis])

    return (val, vec)
def to_pickle(data):
    """
    This prepares data on arbitrary form to be pickled. It handles any nested
    structure and returns data on a form that is safe to pickle (including
    having converted any database models to their internal representation).
    We also convert any Saver*-type objects back to their normal
    representations, they are not pickle-safe.

    Note: this is Python 2 code (``basestring``/``long``).
    """
    def process_item(item):
        "Recursive processor and identification of data"
        dtype = type(item)
        # NOTE(review): this is an identity/equality test on the *type*
        # object; in CPython 2 type('x') is `str`, not `basestring`, so
        # strings do NOT match this branch and fall through to pack_dbobj
        # below -- confirm whether isinstance() was intended.
        if dtype in (basestring, int, long, float, bool):
            return item
        elif dtype == tuple:
            return tuple(process_item(val) for val in item)
        elif dtype in (list, _SaverList):
            return [process_item(val) for val in item]
        elif dtype in (dict, _SaverDict):
            return dict((process_item(key), process_item(val)) for key, val in item.items())
        elif dtype in (set, _SaverSet):
            return set(process_item(val) for val in item)
        # NOTE(review): '__item__' is not a standard attribute; this looks
        # like a typo for '__iter__' (the comment below talks about
        # conserving the *iterable* class) -- confirm before changing.
        elif hasattr(item, '__item__'):
            # we try to conserve the iterable class, if not convert to list
            try:
                return item.__class__([process_item(val) for val in item])
            except (AttributeError, TypeError):
                return [process_item(val) for val in item]
        return pack_dbobj(item)
    return process_item(data)
def remove_source_identifier_from_subscription(SubscriptionName=None, SourceIdentifier=None):
    """
    Removes a source identifier from an existing event notification subscription.

    See also: AWS API Documentation

    Example::

        response = client.remove_source_identifier_from_subscription(
            SubscriptionName='string',
            SourceIdentifier='string'
        )

    :type SubscriptionName: string
    :param SubscriptionName: [REQUIRED] The name of the event notification
        subscription you want to remove a source identifier from.

    :type SourceIdentifier: string
    :param SourceIdentifier: [REQUIRED] The source identifier to be removed
        from the subscription, such as the DB instance identifier for a DB
        instance or the name of a security group.

    :rtype: dict
    :return: A dict with a single ``EventSubscription`` key describing the
        updated subscription: ``CustomerAwsId``, ``CustSubscriptionId``,
        ``SnsTopicArn``, ``Status`` (one of creating | modifying | deleting |
        active | no-permission | topic-not-exist),
        ``SubscriptionCreationTime``, ``SourceType``, ``SourceIdsList``,
        ``EventCategoriesList``, ``Enabled`` and ``EventSubscriptionArn``.

    Exceptions:
        - Neptune.Client.exceptions.SubscriptionNotFoundFault
        - Neptune.Client.exceptions.SourceNotFoundFault
    """
    # Auto-generated API stub: the real request is dispatched by the botocore
    # client at runtime; this placeholder only documents the call signature
    # and response shape.
    pass
def spot_silver_benchmark_sge() -> pd.DataFrame:
    """
    Shanghai Gold Exchange - Shanghai silver benchmark price, historical data
    https://www.sge.com.cn/sjzx/mrhq
    :return: history of daily morning/evening benchmark prices
    :rtype: pandas.DataFrame
    """
    url = "https://www.sge.com.cn/graph/DayilyShsilverJzj"
    data_json = requests.post(url, data={}).json()
    # evening ("wp") series forms the base frame
    result_df = pd.DataFrame(data_json["wp"])
    result_df.columns = ["交易时间", "晚盘价"]
    result_df["交易时间"] = pd.to_datetime(result_df["交易时间"], unit="ms").dt.date
    # morning ("zp") series is aligned positionally and merged in
    morning_df = pd.DataFrame(data_json["zp"])
    morning_df.columns = ["交易时间", "早盘价"]
    morning_df["交易时间"] = pd.to_datetime(morning_df["交易时间"], unit="ms").dt.date
    result_df["早盘价"] = morning_df["早盘价"]
    return result_df
def requires_all_permissions(permission, login_url=None, raise_exception=False):
    """
    Decorator for views that defines what permissions are required, and also
    adds the required permissions as a ``permissions`` attribute on the view
    function.

    The attribute can then be used (e.g. by the sidebar template) to decide
    whether to render the menu item that links to that view.
    """
    def decorator(function):
        # normalize a single permission string into a tuple
        perms = (permission,) if isinstance(permission, str) else permission
        function.permissions = perms

        @wraps(function)
        @permission_required(permission, login_url, raise_exception)
        def wrap(request, *args, **kwargs):
            return function(request, *args, **kwargs)

        return wrap
    return decorator
def hasTable(cur, table):
    """Return True if the SQLite database behind *cur* contains *table*.

    Args:
        cur: an open sqlite3 cursor.
        table (str): name of the table to look for.

    Returns:
        bool: whether a table with that exact name exists.
    """
    # The original version matched the literal string 'table_name' instead of
    # the parameter, and compared the table name against row *tuples*, so it
    # always returned False. Use a bound parameter (also avoids injection).
    cur.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name=?",
        (table,),
    )
    return cur.fetchone() is not None
def _check_wkt_load(x):
    """Check if an object is a loaded polygon or not. If not, load it.

    Non-string inputs are returned unchanged; strings that fail to parse as
    WKT trigger a warning and are returned as-is.
    """
    if not isinstance(x, str):
        return x
    try:
        return loads(x)
    except WKTReadingError:
        warn('{} is not a WKT-formatted string.'.format(x))
        return x
def _yielddefer(function, *args, **kwargs):
    """
    Called if a function decorated with :func:`yieldefer` is invoked.

    Calls *function*; if the result is already a Deferred it is returned
    directly, if it is a plain value it is wrapped in a fired Deferred, and
    if it is a generator the generator is driven to completion, chaining
    each yielded value/Deferred into the next ``send``/``throw`` call.

    Note: the generator detection checks for a ``next`` method, i.e. the
    Python 2 generator protocol.
    """
    try:
        retval = function(*args, **kwargs)
    except:
        # wrap the synchronous exception in a failed Deferred
        return defer.fail()
    if isinstance(retval, defer.Deferred):
        return retval
    if not (hasattr(retval, '__iter__') and
            hasattr(retval, 'next') and
            hasattr(retval, 'send') and
            hasattr(retval, 'throw')):
        # plain (non-generator) return value
        return defer.succeed(retval)
    iterator = retval
    def maybe_deferred(val):
        # We don't want exceptions to become twisted failures
        # because exceptions thrown by the generator methods
        # indicate exceptions thrown by the code _between_ the
        # yield statements or it indicates the end of the
        # iteration.
        if isinstance(val, defer.Deferred):
            return val
        else:
            return defer.succeed(val)
    def success(value):
        # resume the generator with the result of the previous Deferred
        try:
            d = maybe_deferred(iterator.send(value))
            d.addCallbacks(success, fail)
            return d
        except StopIteration:
            return None
        except defer._DefGen_Return as e:
            # defer.returnValue() was used inside the generator
            return e.value
    def fail(failure):
        # push the failure back into the generator so user code can catch it
        try:
            d = maybe_deferred(failure.throwExceptionIntoGenerator(iterator))
            #d = iterator.throw(failure.value)
            d.addCallbacks(success, fail)
            return d
        except StopIteration:
            return None
        except defer._DefGen_Return as e:
            return e.value
    # prime the generator with its first iteration
    try:
        d = maybe_deferred(iterator.next())
        d.addCallbacks(success, fail)
    except StopIteration:
        d = defer.succeed(None)
    except defer._DefGen_Return as e:
        d = defer.succeed(e.value)
    except:
        d = defer.fail()
    return d
def dimension_parameters(time_series, nr_steps=100, literature_value=None,
                         plot=False, r_minmin=None, r_maxmax=None,
                         shortness_weight=0.5, literature_weight=1.):
    """ Estimates parameters r_min and r_max for calculation of correlation
    dimension using the algorithm by Grassberger and Procaccia and uses them
    to calculate it.

    This experimental function performs a simple grid search on r_min and r_max
    in the intervall given by r_minmin, r_maxmax and nr_steps. The performance
    of the parameters is measured by a combination of NRMSE, a penalty for small
    intervalls relative to given r_minmin and r_maxmax and a quadratic penalty
    for the difference from the literature value if given.

    For calculating the dimension of a high number of similar time_series in a
    row it is advisable to use this function only once to get the parameters
    and then use the function dimension with them in the subsequent computations.

    Might fail for short time_series or unreasonable choices of parameters.
    It is recommended to use the plot option to double check the plausibility
    of the results.

    Args:
        time_series (np.ndarray): time series to calculate dimension of, shape (T, d)
        nr_steps (int): number of steps in grid search
        literature_value (float): known/expected dimension; deviations from it
            are penalized quadratically (weighted by literature_weight)
        plot (boolean): deprecated, plotting was removed
        r_minmin (float): minimum radius in grid search (default: derived
            from the data extent)
        r_maxmax (float): maximum radius in grid search (default: largest
            coordinate-wise extent of the data)
        shortness_weight (float): weight of the penalty for small r intervals
        literature_weight (float): weight of the literature-value penalty
    Returns:
        tuple: 3-element tuple containing:

        - **best_r_min** (*float*): Estimation for r_min
        - **best_r_max** (*float*): Estimation for r_max
        - **dimension** (*float*): Estimation for dimension using
          the parameters best_r_min and best_r_max
    """
    if r_maxmax is None:
        expansion = []
        for d in range(time_series.shape[1]):
            expansion.append(np.max(time_series[:, d] - np.min(time_series[:, d])))
        r_maxmax = np.max(expansion)
    if r_minmin is None:
        r_minmin = 0.001 * r_maxmax

    literature_cost = 0

    nr_points = float(time_series.shape[0])
    radii = np.logspace(np.log10(r_minmin), np.log10(r_maxmax), nr_steps)

    tree = scipy.spatial.cKDTree(time_series)
    N_r = np.array(tree.count_neighbors(tree, radii), dtype=float) / nr_points
    N_r = np.vstack((radii, N_r))

    loss = None
    for start_index in range(nr_steps - 1):
        for end_index in range(start_index + 1, nr_steps):
            # print(str(start_index)+', '+ str(end_index))
            current_N_r = N_r[:, start_index:end_index]
            current_r_min = radii[start_index]
            current_r_max = radii[end_index]

            # linear fit based on loglog scale, to get slope/dimension:
            slope, intercept = np.polyfit(np.log(current_N_r[0]),
                                          np.log(current_N_r[1]), deg=1)[0:2]
            dimension = slope
            estimated_line = intercept + slope * np.log(current_N_r[0])

            error = rmse(np.log(current_N_r[1]), estimated_line,
                         normalization="historic")

            shortness_cost = nr_steps / (end_index - start_index) ** 3

            if literature_value is not None:
                # quadratic penalty for deviating from the literature value.
                # (The previous np.sqrt(literature_value - dimension) produced
                # NaN whenever the estimate exceeded the literature value,
                # which made the loss NaN and silently froze the grid search.)
                literature_cost = (literature_value - dimension) ** 2

            new_loss = error + shortness_weight * shortness_cost + literature_weight * literature_cost * 5.

            if loss is None:
                loss = new_loss
                best_r_min = current_r_min
                best_r_max = current_r_max
                best_slope = slope
                best_intercept = intercept
            elif new_loss < loss:
                loss = new_loss
                best_r_min = current_r_min
                best_r_max = current_r_max
                best_slope = slope
                best_intercept = intercept

    dimension = best_slope

    # ###plotting
    # if plot:
    #
    #     plt.loglog(N_r[0], N_r[1], 'x', basex=10., basey=10.,label='data')
    #     plt.loglog(N_r[0], best_intercept + best_slope*N_r[1],
    #                label='fit: r_min ='+str(round(best_r_min,3))+', r_max = '+
    #                str(round(best_r_max,3)))
    #     plt.axvline(x=best_r_min)
    #     plt.axvline(x=best_r_max)
    #     plt.title('loglog plot of the N_r(radius), slope/dim = ' + str(dimension))
    #     plt.legend()
    #     plt.show()
    if plot:
        warn_string = "Plotting was removed in the entirety of the rescomp package.\n" \
                      "The 'plot' paramter will be removed in future releases as well."
        warnings.warn(warn_string, UserWarning)

    return best_r_min, best_r_max, dimension
def ensure_port_cleanup(
        bound_addresses, maxtries=30, sleeptime=2):  # pragma: no cover
    """
    Arrange for any open ports to be closed at interpreter exit.

    Works by connecting to each bound address until it gives connection
    refused. Servers should call like::

        ensure_port_cleanup([80, 443])
    """
    retry_opts = {'maxtries': maxtries, 'sleeptime': sleeptime}
    atexit.register(_cleanup_ports, bound_addresses, **retry_opts)
def GetAllCmdOutput(args, cwd=None, quiet=False):
  """Open a subprocess to execute a program and return its output.

  Args:
    args: A string or a sequence of program arguments. The program to execute is
      the string or the first item in the args sequence.
    cwd: If not None, the subprocess's current directory will be changed to
      |cwd| before it's executed.
    quiet: If True, skip the debug logging of the command and its output.

  Returns:
    A (stdout, stderr) tuple with the command's captured output as bytes.
    (The original docstring claimed only stdout was returned; both streams
    are captured and returned.)
  """
  if not quiet:
    # Let the logging framework do the formatting (lazy %-style args).
    logging.debug('%s %s', ' '.join(args), cwd or '')
  # subprocess.DEVNULL avoids manually opening os.devnull just for stdin.
  p = subprocess.Popen(args=args,
                       cwd=cwd,
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE,
                       stdin=subprocess.DEVNULL)
  stdout, stderr = p.communicate()
  if not quiet:
    logging.debug(' > stdout=[%s], stderr=[%s]', stdout, stderr)
  return stdout, stderr
def seir_model_with_soc_dist(init_vals, params, t):
    """
    SEIR infection model with social distancing.

    rho = social distancing factor.
    Integrates the ODE system with forward Euler on the (uniform) grid ``t``
    and returns an array of shape ``(len(t), 4)`` with columns S, E, I, R.
    """
    alpha, beta, gamma, rho = params
    dt = t[1] - t[0]
    trajectory = [list(init_vals)]
    for _ in range(1, len(t)):
        s, e, i, r = trajectory[-1]
        new_exposures = rho * beta * s * i
        trajectory.append([
            s - new_exposures * dt,
            e + (new_exposures - alpha * e) * dt,
            i + (alpha * e - gamma * i) * dt,
            r + (gamma * i) * dt,
        ])
    return np.array(trajectory, dtype=float)
def greeq(data, transmit=None, receive=None, opt=None, **kwopt):
    """Fit a non-linear relaxometry model to multi-echo Gradient-Echo data.
    Parameters
    ----------
    data : sequence[GradientEchoMulti]
        Observed GRE data.
    transmit : sequence[PrecomputedFieldMap], optional
        Map(s) of the transmit field (b1+). If a single map is provided,
        it is used to correct all contrasts. If multiple maps are
        provided, there should be one for each contrast.
    receive : sequence[PrecomputedFieldMap], optional
        Map(s) of the receive field (b1-). If a single map is provided,
        it is used to correct all contrasts. If multiple maps are
        provided, there should be one for each contrast.
        If no receive map is provided, the output `pd` map will have
        a remaining b1- bias field.
    opt : GREEQOptions or dict, optional
        Algorithm options.
        {'preproc': {'register': True}, # Co-register contrasts
         'optim': {'nb_levels': 1, # Number of pyramid levels
                   'max_iter_rls': 10, # Max reweighting iterations
                   'max_iter_gn': 5, # Max Gauss-Newton iterations
                   'max_iter_cg': 32, # Max Conjugate-Gradient iterations
                   'tolerance_rls': 1e-05, # Tolerance for early stopping (RLS)
                   'tolerance_gn': 1e-05, # Tolerance for early stopping (GN)
                   'tolerance_cg': 1e-03}, # Tolerance for early stopping (CG)
         'backend': {'dtype': torch.float32, # Data type
                     'device': 'cpu'}, # Device
         'penalty': {'norm': 'jtv', # Type of penalty: {'tkh', 'tv', 'jtv', None}
                     'factor': {'r1': 10, # Penalty factor per (log) map
                                'pd': 10,
                                'r2s': 2,
                                'mt': 2}},
         'verbose': 1}
    Returns
    -------
    pd : ParameterMap
        Proton density
    r1 : ParameterMap
        Longitudinal relaxation rate
    r2s : ParameterMap
        Apparent transverse relaxation rate
    mt : ParameterMap, optional
        Magnetisation transfer saturation
        Only returned if MT-weighted data is provided.
    """
    opt = GREEQOptions().update(opt, **kwopt)
    dtype = opt.backend.dtype
    device = opt.backend.device
    backend = dict(dtype=dtype, device=device)
    # --- estimate noise / register / initialize maps ---
    data, transmit, receive, maps = preproc(data, transmit, receive, opt)
    vx = spatial.voxel_size(maps.affine)
    has_mt = hasattr(maps, 'mt')
    # --- prepare penalty factor ---
    lam = opt.penalty.factor
    if isinstance(lam, dict):
        lam = [lam.get('pd', 0), lam.get('r1', 0),
               lam.get('r2s', 0), lam.get('mt', 0)]
    lam = core.utils.make_vector(lam, 4, **backend)  # PD, R1, R2*, MT
    # --- initialize weights (RLS) ---
    if str(opt.penalty.norm).lower() == 'none' or all(lam == 0):
        opt.penalty.norm = ''
    opt.penalty.norm = opt.penalty.norm.lower()
    mean_shape = maps[0].shape
    rls = None
    sumrls = 0
    if opt.penalty.norm in ('tv', 'jtv'):
        rls_shape = mean_shape
        if opt.penalty.norm == 'tv':
            # TV keeps one weight map per parameter map; JTV shares a single one.
            rls_shape = (len(maps),) + rls_shape
        rls = torch.ones(rls_shape, **backend)
        sumrls = 0.5 * core.py.prod(rls_shape)
    if opt.penalty.norm:
        print(f'With {opt.penalty.norm.upper()} penalty:')
        print(f' - PD: {lam[0]:.3g}')
        print(f' - R1: {lam[1]:.3g}')
        print(f' - R2*: {lam[2]:.3g}')
        if has_mt:
            print(f' - MT: {lam[3]:.3g}')
    else:
        print('Without penalty')
    if opt.penalty.norm not in ('tv', 'jtv'):
        # no reweighting -> do more gauss-newton updates instead
        opt.optim.max_iter_gn *= opt.optim.max_iter_rls
        opt.optim.max_iter_rls = 1
    printer = CritPrinter(max_levels=opt.optim.nb_levels,
                          max_rls=opt.optim.max_iter_rls,
                          max_gn=opt.optim.max_iter_gn,
                          penalty=opt.penalty.norm,
                          verbose=opt.verbose)
    printer.print_head()
    shape0 = shape = maps.shape[1:]
    aff0 = aff = maps.affine
    vx0 = vx = spatial.voxel_size(aff0)
    vol0 = vx0.prod()
    vol = vx.prod() / vol0
    # --- multi-resolution pyramid: iterate coarse-to-fine ---
    for level in range(opt.optim.nb_levels, 0, -1):
        printer.level = level
        if opt.optim.nb_levels > 1:
            aff, shape = _get_level(level, aff0, shape0)
            vx = spatial.voxel_size(aff)
            vol = vx.prod() / vol0
            maps, rls = _resize(maps, rls, aff, shape)
            if opt.penalty.norm in ('tv', 'jtv'):
                sumrls = 0.5 * vol * rls.reciprocal().sum(dtype=torch.double)
        # --- compute derivatives ---
        nb_prm = len(maps)
        # Symmetric Hessian: diagonal + upper triangle, stored flat.
        nb_hes = nb_prm * (nb_prm + 1) // 2
        grad = torch.empty((nb_prm,) + shape, **backend)
        hess = torch.empty((nb_hes,) + shape, **backend)
        ll_rls = []
        ll_max = core.constants.ninf
        max_iter_rls = max(opt.optim.max_iter_rls // level, 1)
        for n_iter_rls in range(max_iter_rls):
            # --- Reweighted least-squares loop ---
            printer.rls = n_iter_rls
            multi_rls = rls if opt.penalty.norm == 'tv' else [rls] * len(maps)
            # --- Gauss Newton loop ---
            ll_gn = []
            for n_iter_gn in range(opt.optim.max_iter_gn):
                printer.gn = n_iter_gn
                crit = 0
                grad.zero_()
                hess.zero_()
                # --- loop over contrasts ---
                for contrast, b1m, b1p in zip(data, receive, transmit):
                    # compute gradient
                    crit1, g1, h1 = _nonlin_gradient(contrast, maps, b1m, b1p, opt)
                    # increment
                    if hasattr(maps, 'mt') and not contrast.mt:
                        # we optimize for mt but this particular contrast
                        # has no information about mt so g1/h1 are smaller
                        # than grad/hess.
                        grad[:-1] += g1
                        # Select the flat-Hessian components that do not
                        # involve the (last) MT parameter.
                        hind = list(range(nb_prm-1))
                        cnt = nb_prm
                        for i in range(nb_prm):
                            for j in range(i+1, nb_prm):
                                if i != nb_prm-1 and j != nb_prm-1:
                                    hind.append(cnt)
                                cnt += 1
                        hess[hind] += h1
                        crit += crit1
                    else:
                        grad += g1
                        hess += h1
                        crit += crit1
                    del g1, h1
                # --- penalty ---
                reg = 0.
                if opt.penalty.norm:
                    # `pmap` instead of `map` to avoid shadowing the builtin.
                    for i, (pmap, weight, l) in enumerate(zip(maps, multi_rls, lam)):
                        if not l:
                            continue
                        reg1, g1 = _nonlin_reg(pmap.fdata(**backend), vx, weight, l * vol)
                        reg += reg1
                        grad[i] += g1
                        del g1
                # --- gauss-newton ---
                if not hess.isfinite().all():
                    print('WARNING: NaNs in hess')
                if not grad.isfinite().all():
                    # BUG FIX: this branch used to report "NaNs in hess" too.
                    print('WARNING: NaNs in grad')
                if opt.penalty.norm:
                    hess = hessian_sym_loaddiag(hess, 1e-5, 1e-8)
                    deltas = _nonlin_solve(hess, grad, multi_rls, lam * vol, vx, opt)
                else:
                    hess = hessian_sym_loaddiag(hess, 1e-3, 1e-4)
                    deltas = hessian_sym_solve(hess, grad)
                if not deltas.isfinite().all():
                    print('WARNING: NaNs in delta')
                for pmap, delta in zip(maps, deltas):
                    pmap.volume -= delta
                    if pmap.min is not None or pmap.max is not None:
                        pmap.volume.clamp_(pmap.min, pmap.max)
                del deltas
                # --- Compute gain ---
                ll = crit + reg + sumrls
                ll_max = max(ll_max, ll)
                ll_prev = ll_gn[-1] if ll_gn else ll_max
                gain = (ll_prev - ll) / (ll_max - ll_prev)
                ll_gn.append(ll)
                printer.print_crit(crit, reg, sumrls, gain)
                if gain < opt.optim.tolerance_gn:
                    print('GN converged: ', ll_prev.item(), '->', ll.item())
                    break
            # --- Update RLS weights ---
            if opt.penalty.norm in ('tv', 'jtv'):
                del multi_rls
                rls = _nonlin_rls(maps, lam, opt.penalty.norm)
                sumrls = (0.5 * vol) * rls.sum(dtype=torch.double)
                eps = core.constants.eps(rls.dtype)
                rls = rls.clamp_min_(eps).reciprocal_()
            # --- Compute gain ---
            # (we are late by one full RLS iteration when computing the
            # gain but we save some computations)
            ll = ll_gn[-1]
            ll_prev = ll_rls[-1][-1] if ll_rls else ll_max
            ll_rls.append(ll_gn)
            gain = (ll_prev - ll) / (ll_max - ll_prev)
            if abs(gain) < opt.optim.tolerance_rls:
                print(f'RLS converged ({gain:7.2g})')
                break
    del grad
    if opt.uncertainty:
        # Uncertainty estimates derived from the final (approximate) Hessian.
        multi_rls = rls if opt.penalty.norm == 'tv' else [rls] * len(maps)
        uncertainty = _nonlin_uncertainty(hess, multi_rls, lam * vol, vx, opt)
        maps.pd.uncertainty = uncertainty[0]
        maps.r1.uncertainty = uncertainty[1]
        maps.r2s.uncertainty = uncertainty[2]
        if hasattr(maps, 'mt'):
            maps.mt.uncertainty = uncertainty[3]
    # --- Prepare output ---
    return postproc(maps)
def _add_aliases_to_namespace(namespace, *exprs):
    """
    Given a sequence of sympy expressions,
    find all aliases in each expression and add them to the namespace.

    Parameters
    ----------
    namespace : dict
        Mapping of name -> alias callable; mutated in place.
    *exprs : sympy expressions
        Expressions scanned (recursively, through ``expr.args``) for
        aliased functions.

    Returns
    -------
    dict
        The same (mutated) namespace, for convenience.
    """
    for expr in exprs:
        if hasattr(expr, 'alias') and isinstance(expr, sympy.FunctionClass):
            # BUG FIX: dict.has_key() was removed in Python 3; use `in`.
            if str(expr) in namespace:
                if namespace[str(expr)] != expr.alias:
                    warnings.warn('two aliases with the same name were found')
            namespace[str(expr)] = expr.alias
        if hasattr(expr, 'func'):
            if isinstance(expr.func, sympy.FunctionClass) and hasattr(expr.func, 'alias'):
                if expr.func.__name__ in namespace:
                    if namespace[expr.func.__name__] != expr.func.alias:
                        warnings.warn('two aliases with the same name were found')
                namespace[expr.func.__name__] = expr.func.alias
        if hasattr(expr, 'args'):
            try:
                _add_aliases_to_namespace(namespace, *expr.args)
            except TypeError:
                # expr.args may not be unpackable; skip such nodes.
                pass
    return namespace
def rotICA(V, kmax=6, learnrate=.0001, iterations=10000):
    """ICA rotation (using basicICA) with default parameters and
    normalization of outputs.

    :Example:
    >>> Vica, W = rotICA(V, kmax=6, learnrate=.0001, iterations=10000)
    """
    components = V[:, :kmax].T
    W, changes_s = basicICA(components, learnrate, iterations)
    Vica = W.dot(components).T
    for col in range(kmax):
        # Fix the sign so the largest-magnitude entry is positive,
        # then normalize the column to unit length.
        peak = np.abs(Vica[:, col]).argmax()
        Vica[:, col] = np.sign(Vica[peak, col]) * Vica[:, col] \
            / np.linalg.norm(Vica[:, col])
    return Vica, W
def getipbyhost(hostname):
    """Resolve a hostname and return its IP address as a string."""
    ip_address = socket.gethostbyname(hostname)
    return ip_address
def reduce_mem_usage(df) -> pd.DataFrame:
    """Shrink a DataFrame's memory usage by downcasting numeric columns.

    Each integer/float column is cast to the narrowest NumPy type whose
    range strictly contains the column's min and max values.

    Arguments:
        df {DataFrame} -- target DataFrame (columns are replaced in place)

    Returns:
        [DataFrame] -- the DataFrame after memory reduction
    """
    numerics = [
        'int8', 'int16', 'int32', 'int64', 'float16', 'float32', 'float64'
    ]
    start_mem = df.memory_usage(deep=True).sum() / 1024**2
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type not in numerics:
            continue
        c_min = df[col].min()
        c_max = df[col].max()
        if str(col_type)[:3] == 'int':
            # Try candidates from narrowest to widest and stop at the first
            # that fits. BUG FIX: without the `break`, every later (wider)
            # candidate also matched, so the column always ended up as
            # int64/float64 and no memory was ever saved.
            for np_int_type in (np.int8, np.int16, np.int32, np.int64):
                if c_min > np.iinfo(np_int_type).min and c_max < np.iinfo(
                        np_int_type).max:
                    df[col] = df[col].astype(np_int_type)
                    break
        else:
            # NOTE(review): float16 keeps only ~3 significant digits; this
            # may lose precision -- verify for precision-sensitive columns.
            for np_float_type in (np.float16, np.float32, np.float64):
                if c_min > np.finfo(np_float_type).min and c_max < np.finfo(
                        np_float_type).max:
                    df[col] = df[col].astype(np_float_type)
                    break
    end_mem = df.memory_usage(deep=True).sum() / 1024**2
    if (start_mem - end_mem) > 0:
        print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(
            end_mem, 100 * (start_mem - end_mem) / start_mem))
    return df
def poke_jenkins_hook(ui, repo, node, **kwargs):
    """Filter out the incoming heads and start a Jenkins job for them.

    Reads its settings from the ``[poke_jenkins]`` hgrc section
    (``jenkins_base_url`` and ``repo_url`` are mandatory) and, for each
    incoming childless head whose branch matches ``branch_regex``, triggers
    every job in ``jobs`` with TAG/NODE_ID/REPO_URL/BRANCH build parameters.

    NOTE(review): uses Python-2-only constructs (``xrange``, ``urllib2``,
    ``urlparse``, ``str.encode('base64')``); this hook cannot run unmodified
    under Python 3.

    :param ui: Mercurial ui object
    :param repo: Mercurial repository object
    :param node: Mercurial node object (eg commit)
    """
    jenkins_base_url = ui.config('poke_jenkins', 'jenkins_base_url', default=None, untrusted=False)
    if not jenkins_base_url:
        raise util.Abort(
            'You have to specify the parameter jenkins_base_url '
            'in the section poke_jenkins.'
        )
    timeout = int(ui.config('poke_jenkins', 'timeout', default=10, untrusted=False))
    repo_url = ui.config('poke_jenkins', 'repo_url', default=None, untrusted=False)
    if not repo_url:
        raise util.Abort(
            'You have to specify the parameter repo_url '
            'in the section poke_jenkins.'
        )
    jobs = ui.configlist('poke_jenkins', 'jobs', default=[], untrusted=False)
    tag = ui.config('poke_jenkins', 'tag', default='', untrusted=False)
    username = ui.config('poke_jenkins', 'username', default='', untrusted=False)
    password = ui.config('poke_jenkins', 'password', default='', untrusted=False)
    branch_regex = ui.config('poke_jenkins', 'branch_regex', default=None, untrusted=False)
    if branch_regex:
        branch_regex = re.compile(branch_regex)
    branches = {}
    # Collect the incoming heads that don't have any children.
    # Walks from the first incoming revision (`node`) up to tip; for each
    # branch the last childless changeset seen wins.
    for rev in xrange(repo[node].rev(), len(repo)):
        ctx = repo[rev]
        branch = ctx.branch()
        if not any(ctx.children()):
            branches[branch] = ctx.hex()
    if username and password:
        # HTTP Basic auth header built by hand from "username:password"
        # (base64 codec inserts newlines, hence the replace).
        headers = {
            'Authorization':
            'Basic {0}'.format('{0}:{1}'.format(username, password).encode('base64').replace('\n', ''))
        }
    else:
        headers = {}
    # For every head start a Jenkins job.
    for branch, rev in sorted(branches.items()):
        if branch_regex is None or branch_regex.match(branch):
            for job in jobs:
                base = urlparse.urljoin(jenkins_base_url, BUILD_URL.format(job=job))
                args = urlencode([('TAG', tag), ('NODE_ID', rev), ('REPO_URL', repo_url), ('BRANCH', branch)])
                url = '?'.join([base, args])
                # Empty-string body makes urllib2 issue a POST request.
                request = urllib2.Request(url, '', headers)
                with closing(urllib2.urlopen(request, timeout=timeout)) as f:
                    ui.write('Starting the job {job} for the branch: {branch}, revision: {rev}\n'.format(
                        job=job, branch=branch, rev=rev))
                    f.read()
def unquote_specific_tokens(tokens: List[str], tokens_to_unquote: List[str]) -> None:
    """
    Unquote specific tokens in a list, in place.

    :param tokens: token list being edited
    :param tokens_to_unquote: the tokens, which if present in tokens, to unquote
    """
    for position, raw_token in enumerate(tokens):
        stripped = strip_quotes(raw_token)
        if stripped not in tokens_to_unquote:
            continue
        tokens[position] = stripped
def cfg_from_file(filename):
    """
    Load a config file and merge it into the default options.

    Parameters
    ----------
    filename : str
        Path to a YAML configuration file.
    """
    import yaml
    with open(filename, 'r') as f:
        # safe_load only constructs plain Python objects; yaml.load without
        # an explicit Loader is unsafe on untrusted files and is rejected
        # outright by PyYAML >= 6.
        yaml_cfg = edict(yaml.safe_load(f))
    _merge_a_into_b(yaml_cfg, root)
def findparam(
    parameters: _TYPE_FINDITER_PARAMETERS,
    selector: _TYPE_FINDITER_SELECTOR
) -> typing.Iterator[_T_PARAM]:
    """
    Return an iterator yielding those parameters (of type
    :class:`inspect.Parameter` or :class:`~forge.FParameter`) that are
    matched by the selector.

    :paramref:`~forge.findparam.selector` is interpreted by type:

    - str: a parameter matches if its :attr:`name` equals the selector
    - Iterable[str]: a parameter matches if its :attr:`name` is contained
    - callable: a parameter matches if the callable (which receives the
      parameter) returns a truthy value

    :param parameters: an iterable of :class:`inspect.Parameter` or
        :class:`~forge.FParameter`
    :param selector: an identifier which is used to determine whether a
        parameter matches.
    :returns: an iterator yielding the matching parameters
    """
    if isinstance(selector, str):
        def matches(param, target=selector):
            return param.name == target
    elif isinstance(selector, typing.Iterable):
        # Materialize once so generators can be tested repeatedly.
        names = list(selector)

        def matches(param):
            return param.name in names
    else:
        matches = selector
    return filter(matches, parameters)
def test_extract_position_null_case(null_input):
    """extract_position returns None for a None log input."""
    # None comparison uses identity (`is`), per PEP 8 / E711: `== None`
    # can be hijacked by a custom __eq__ and reads as a value comparison.
    assert e4.extract_position(null_input) is None
def reset_syspath():
    """
    Return a function to remove given path from sys.path.

    This is to use at the end (after all assertions) of tests which
    use ``setup_project.setup_project`` to add the base directory to
    sys.path, avoiding clashes with the next tests doing the same.
    """
    def reset_func(path):
        # Removing only the first occurrence mirrors how the path was
        # appended; a missing path is a silent no-op.
        try:
            sys.path.remove(path)
        except ValueError:
            pass
    return reset_func
def free_display():
    """Stop the virtual display, if one is currently up."""
    from .. import config as display_config
    display_config.stop_display()
def FSA(profile_exp, profile_sm, diffsys, time, Xlim=[], n=[400, 500], w=None, f=None, alpha=0.3, name=''):
    """
    Forward Simulation Analysis
    Extract diffusion coefficients based on a diffusion profile.
    Please do not close any plot window during the FSA process.
    This is the final step of FSA.
    Parameters
    ----------
    profile_exp : DiffProfile
        Experimental diffusion profile, used for comparison with simulation
        results.
    profile_sm : DiffProfile
        Diffusion profile after data smooth on experimental profile.
    diffsys : DiffSystem
        Diffusion coefficients
    time : float
        Diffusion time in seconds
    Xlim : list (float), optional
        Passed to 'pydiffusion.Dtools.SF', 'pydiffusion.utils.step'.
        Indicates the left and right concentration limits for calculation.
        Default value = [profile.X[0], profile.X[-1]].
    n : list. optional
        Passed to 'pydiffusion.utils.automesh'.
        Meshing number range, default = [400, 500].
    w : list, optional
        Weights of each phase to calculate error.
        Passed to 'pydiffusion.utils.error_profile'.
    f : function of Meshing
        Keyword argument of automesh()
    alpha : float
        Keyword argument of automesh()
    name : str, optional
        Name the output DiffProfile
    Returns
    -------
    profile_sim : DiffProfile
        Simulated diffusion profile after FSA.
    diffsys_sim : DiffSystem
        Calculated diffusion efficients by FSA.
    Examples
    --------
    After datasmooth() and Dmodel(), FSA can be performed to calculate accurate diffusion coefficients:
    >>> ds = datasmooth(exp)
    >>> dsys = Dmodel(ds, time)
    >>> fsa = FSA(exp, ds, dsys, time)
    """
    # NOTE(review): Xlim=[] and n=[400, 500] are mutable default arguments;
    # they appear to be read-only here, but confirm no callee mutates them.
    # Create step profile on meshed grids
    dism = automesh(profile=profile_sm, diffsys=diffsys, n=n, f=f, alpha=alpha)
    # Matano plane position of the smoothed profile, used to centre the
    # initial step profile.
    matano = matanocalc(profile_sm, Xlim)
    if Xlim == [] and profile_sm.X[-1] < profile_sm.X[0]:
        # Descending profile: take the concentration limits from the
        # diffusion system in reversed order.
        profile_init = step(dism, matano, diffsys, [diffsys.Xr[-1, 1], diffsys.Xr[0, 0]])
    else:
        profile_init = step(dism, matano, diffsys, Xlim)
    # Determine the stop criteria of forward simulations
    # (default: twice the smoothing error; user may override interactively)
    error_sm = error_profile(profile_sm, profile_exp)
    ipt = input('Default error = %.6f\nInput the stop criteria of error: [%.6f]\n'
                % (error_sm, error_sm*2))
    error_stop = error_sm*2 if ipt == '' else float(ipt)
    # If there is no Xspl info in diffsys, use Phase Mode
    # else: ask if use Phase or Point Mode
    if diffsys.Xspl is not None:
        ipt = input('Use Phase Mode? [n]\n(The shape of diffusivity curve does not change)\n')
        pp = False if 'y' in ipt or 'Y' in ipt else True
    else:
        pp = False
    if name == '':
        name = profile_exp.name+'_FSA'
    # Diffusion coefficients used for forward simulations
    diffsys_sim = DiffSystem(diffsys.Xr, diffsys.Dfunc, Xspl=diffsys.Xspl, name=name)
    # Plot FSA status
    fig = plt.figure('FSA', figsize=(16, 6))
    ax1, ax2 = fig.add_subplot(121), fig.add_subplot(122)
    profileplot(profile_exp, ax1, ls='none', marker='o', c='b', fillstyle='none')
    profileplot(profile_sm, ax1, ls='-', c='g', lw=1)
    SFplot(profile_sm, time, Xlim, ax2, ls='none', c='b', marker='.')
    DCplot(diffsys_sim, ax2, ls='-', c='r', lw=2)
    plt.draw()
    plt.tight_layout()
    plt.pause(0.1)
    n_sim = 0
    # Simulate / compare / adjust loop; exits only when the user answers
    # yes to the 'Satisfied with FSA?' prompt below.
    while True:
        # Simulation
        n_sim += 1
        profile_sim = mphSim(profile_init, diffsys_sim, time, name=name)
        error_sim = error_profile(profile_sim, profile_exp, w)
        print('Simulation %i, error = %f(%f)' % (n_sim, error_sim, error_stop))
        # Plot simulation results
        ax1.cla()
        ax2.cla()
        profileplot(profile_exp, ax1, ls='none', marker='o', c='b', fillstyle='none')
        profileplot(profile_sm, ax1, ls='-', c='g', lw=1)
        profileplot(profile_sim, ax1, ls='-', c='r', lw=2)
        SFplot(profile_sm, time, Xlim, ax2, ls='none', c='b', marker='.')
        DCplot(diffsys_sim, ax2, ls='-', c='r', lw=2)
        plt.draw()
        plt.tight_layout()
        # DC adjust
        Dfunc_adjust = [0] * diffsys_sim.Np
        # If error > stop criteria, continue simulation by auto DC adjustment
        if error_sim > error_stop:
            for ph in range(diffsys_sim.Np):
                try:
                    Dfunc_adjust[ph] = Dadjust(profile_sm, profile_sim, diffsys_sim, ph, pp)
                except (ValueError, TypeError) as error:
                    # Close interactive mode before propagating the error.
                    ita_finish()
                    raise error
            diffsys_sim.Dfunc = Dfunc_adjust
        # If error < stop criteria or simulate too many times
        # (interactive pause after convergence or 10 simulations)
        if error_sim <= error_stop or n_sim > 9:
            ita_start()
            # Ask if exit
            ipt = ask_input('Satisfied with FSA? [n]')
            if 'y' in ipt or 'Y' in ipt:
                ita_finish()
                break
            # If use Point Mode
            if diffsys_sim.Xspl is not None:
                ipt = ask_input('Use Point Mode (y) or Phase Mode (n)? [y]')
                pp = False if 'n' in ipt or 'N' in ipt else True
                if pp:
                    for ph in range(diffsys_sim.Np):
                        try:
                            Dfunc_adjust[ph] = Dadjust(profile_sm, profile_sim, diffsys_sim, ph, pp)
                        except (ValueError, TypeError) as error:
                            ita_finish()
                            raise error
                    diffsys_sim.Dfunc = Dfunc_adjust
                    DCplot(diffsys_sim, ax2, ls='-', c='m', lw=2)
                    plt.draw()
                    plt.pause(0.1)
                    ita_finish()
                    continue
            # Phase Mode, ask if use manual input for each phase
            pp = False
            ipt = input('Phase Mode\nManually input for each phase? [n]')
            manual = True if 'y' in ipt or 'Y' in ipt else False
            for ph in range(diffsys_sim.Np):
                if manual:
                    ipt = input('Input deltaD for phase # %i:\n(DC = DC * 10^deltaD, default deltaD = auto)\n' % (ph+1))
                    deltaD = float(ipt) if ipt != '' else None
                else:
                    deltaD = None
                try:
                    Dfunc_adjust[ph] = Dadjust(profile_sm, profile_sim, diffsys_sim, ph, pp, deltaD)
                except (ValueError, TypeError) as error:
                    ita_finish()
                    raise error
            # Apply the adjustment to diffsys_sim
            diffsys_sim.Dfunc = Dfunc_adjust
            DCplot(diffsys_sim, ax2, ls='-', c='m', lw=2)
            plt.draw()
            plt.pause(0.1)
            ita_finish()
    return profile_sim, diffsys_sim
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.