def test_empty_x_field():
"""
    Expect the same response as if the x-fields header is not used
"""
r = _get_with_additional_headers({X_FIELDS: ''})
assert r['total']['value'] == NUMBER_OF_ADS
assert len(r) == 7
assert len(r['hits']) == 10
    assert len(r['hits'][0]) == 35
def test_md025_bad_front_matter_title_top_level_atx():
"""
    Test to make sure we get the expected behavior after scanning a bad file from the
    test/resources/rules/md025 directory that has a front-matter title followed by a
    top-level Atx heading.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--set",
"extensions.front-matter.enabled=$!True",
"scan",
"test/resources/rules/md025/bad_front_matter_title_top_level_atx.md",
]
expected_return_code = 1
expected_output = (
"test/resources/rules/md025/bad_front_matter_title_top_level_atx.md:7:1: "
+ "MD025: Multiple top level headings in the same document (single-title,single-h1)"
)
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
    )
def main():
"""Go Main Go"""
for i in range(1, 15):
        runner(i)
def getCaffemodelFromSolverstate(solverstate):
""" Parse the filename of the caffemodel file from the solverstate file.
"""
from backend.caffe.path_loader import PathLoader
proto = PathLoader().importProto()
try:
state = proto.SolverState()
with open(solverstate, 'rb') as f:
state.ParseFromString(f.read())
return state.learned_net
except Exception as e:
        print(str(e))
def get_L_from_D(line):
"""
Assume line contains one or more <Dn>
    Return a list of all n (as strings)
"""
    import re
    a = []
    for m in re.finditer(r'<D([0-9]+)>', line):
a.append(m.group(1))
    return a
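# A quick sanity check for get_L_from_D; the input strings here are made up
# for illustration. Note the matches come back as strings, not ints.
assert get_L_from_D('foo <D3> bar <D17>') == ['3', '17']
assert get_L_from_D('no markers here') == []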
def test_base_url_error():
"""Test the base URL setting on init."""
url = utils.random_lower_string()
with pytest.raises(ValidationError):
        _ = DriPostal(url)
def per_pixel_mean_stddev(dataset, image_size):
"""
Compute the mean of each pixel over the entire dataset.
"""
    # NOTE: replace "3" with the number of channels
initial_state = tf.constant(0., dtype=tf.float32, shape=[image_size, image_size, 3])
dataset = dataset.map(lambda x: resize(x, image_size))
count = dataset.reduce(0, lambda x, _: x+1)
pixel_sum = dataset.reduce(initial_state, lambda x, y: tf.add(x, y))
    pixel_mean = tf.divide(pixel_sum, tf.cast(count, tf.float32))
    return pixel_mean, count
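# A minimal usage sketch for per_pixel_mean_stddev, assuming TensorFlow 2.x in
# eager mode. The resize() helper the function calls is not shown in the
# source, so a plausible stand-in is defined here.
import tensorflow as tf
def resize(image, image_size):
    # Stand-in: bilinear-resize a [h, w, c] image to [image_size, image_size, c]
    return tf.image.resize(image, [image_size, image_size])
images = tf.data.Dataset.from_tensor_slices(tf.random.uniform([4, 8, 8, 3]))
pixel_mean, count = per_pixel_mean_stddev(images, image_size=8)
print(pixel_mean.shape, int(count))  # (8, 8, 3) 4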
def get_indexes_from_list(lst, find, exact=True):
"""
    Helper function that searches for an element in a list and
    returns a list of indexes where the element matches.
E.g.
get_indexes_from_list([1,2,3,1,5,1], 1) returns [0,3,5]
get_indexes_from_list(['apple','banana','orange','lemon'], 'orange') -> returns [2]
get_indexes_from_list(['apple','banana','lemon',['orange', 'peach']], 'orange') -> returns []
get_indexes_from_list(['apple','banana','lemon',['orange', 'peach']], ['orange'], False) -> returns [3]
Parameters
----------
lst: list
The list to look in
find: any
the element to find, can be a list
exact: bool
        If False, indexes are returned when find is contained in the list item;
        otherwise only when find equals the list item.
Returns
-------
list of int
"""
    if exact:
        return [index for index, value in enumerate(lst) if value == find]
    elif isinstance(find, list):
        return [index for index, value in enumerate(lst) if set(find).intersection(set(value))]
    else:
        return [index for index, value in enumerate(lst) if find in value]
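# Runnable versions of two of the docstring examples above:
assert get_indexes_from_list([1, 2, 3, 1, 5, 1], 1) == [0, 3, 5]
assert get_indexes_from_list(['apple', 'banana', 'lemon', ['orange', 'peach']],
                             ['orange'], False) == [3]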
def simplify_polygon(polygon, tolerance=0.01):
"""Remove doubles coords from a polygon."""
assert isinstance(polygon, Polygon) or isinstance(polygon, MultiPolygon)
# Get the coordinates
coords = []
if isinstance(polygon, Polygon):
coords = polygon.exterior.coords
elif isinstance(polygon, MultiPolygon):
for geom in polygon.geoms:
coords += geom.exterior.coords
else:
return None
# remove the doubled coordinates
newCoords = []
v0 = Vector2D(float('inf'), float('inf'))
for coord in coords:
v = Vector2D(coord[0], coord[1])
if (v0 - v).norm() > tolerance:
v0 = v
newCoords += [[coord[0], coord[1]]]
    return Polygon(newCoords)
def plot_wordcloud(text):
"""
Draw wordcloud with matplotlib
:param text:
:return:
"""
wordcloud = WordCloud(max_font_size=50, max_words=100,
background_color="white",
relative_scaling=1.0,
stopwords=stop_words_es).generate(
text)
    # bilinear interpolation makes the displayed image appear smoother
    plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
    plt.show()
def test_get_all_namespace_when_no_descriptors():
"""test_get_all_namespace_when_no_descriptors."""
descriptors = dict()
res = get_all_namespaces(descriptors)
    assert len(res) == 0
def candplot(canddatalist, snrs=None, outname=''):
""" Takes output of search_thresh (CandData objects) to make
candidate plots.
Expects pipeline state, candidate location, image, and
phased, dedispersed data (cut out in time, dual-pol).
snrs is array for an (optional) SNR histogram plot.
Written by Bridget Andersen and modified by Casey for rfpipe.
"""
if not isinstance(canddatalist, list):
logger.debug('Wrapping solo CandData object')
canddatalist = [canddatalist]
logger.info('Making {0} candidate plots.'.format(len(canddatalist)))
for i in range(len(canddatalist)):
canddata = canddatalist[i]
st = canddata.state
candloc = canddata.loc
im = canddata.image
data = canddata.data
scan = st.metadata.scan
segment, candint, dmind, dtind, beamnum = candloc
# calc source location
# imstd = util.madtostd(im) # outlier resistant
imstd = im.std() # consistent with rfgpu
snrim = im.max()/imstd
l1, m1 = st.pixtolm(np.where(im == im.max()))
logger.info('Plotting candloc {0} with SNR {1:.1f} and image/data shapes: {2}/{3}'
.format(str(candloc), snrim, str(im.shape), str(data.shape)))
# either standard radec or otf phasecenter radec
pc = st.get_pc(segment)
pt_ra, pt_dec = st.get_radec(pc=pc)
src_ra, src_dec = source_location(pt_ra, pt_dec, l1, m1, format='hourstr')
logger.info('Peak (RA, Dec): ({0}, {1})'.format(src_ra, src_dec))
# convert l1 and m1 from radians to arcminutes
l1arcm = np.degrees(l1)*60
m1arcm = np.degrees(m1)*60
# build overall plot
fig = plt.Figure(figsize=(12.75, 8))
# add metadata in subfigure
ax = fig.add_subplot(2, 3, 1, facecolor='white')
# calculate the overall dispersion delay: dd
f1 = st.metadata.freq_orig[0]
f2 = st.metadata.freq_orig[-1]
dd = 4.15*st.dmarr[dmind]*(f1**(-2)-f2**(-2))
# add annotating info
# set spacing and location of the annotating information
start = 1.1
space = 0.07
left = 0.0
ax.text(left, start, st.fileroot, fontname='sans-serif',
transform=ax.transAxes, fontsize='small')
ax.text(left, start-space, 'Peak (arcmin): ('
+ str(np.round(l1arcm, 3)) + ', '
+ str(np.round(m1arcm, 3)) + ')',
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
# split the RA and Dec and display in a nice format
ax.text(left, start-2*space, 'Peak (RA, Dec): (' + src_ra + ', ' + src_dec + ')',
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
ax.text(left, start-3*space, 'Source: ' + str(st.metadata.source),
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
ax.text(left, start-4*space, 'scan: ' + str(scan),
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
ax.text(left, start-5*space, 'segment: ' + str(segment),
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
ax.text(left, start-6*space, 'integration: ' + str(candint),
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
ax.text(left, start-7*space, 'DM = ' + str(st.dmarr[dmind])
+ ' (index ' + str(dmind) + ')',
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
ax.text(left, start-8*space, 'dt = '
+ str(np.round(st.inttime*st.dtarr[dtind], 3)*1e3)
+ ' ms' + ' (index ' + str(dtind) + ')',
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
ax.text(left, start-9*space, 'disp delay = ' + str(np.round(dd, 1))
+ ' ms',
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
defstr = 'SNR (im'
snrstr = str(np.round(snrim, 1))
if canddata.snrk is not None:
defstr += '/k): '
snrstr += '/' + str(np.round(canddata.snrk, 1))
else:
defstr += '): '
ax.text(left, start-10*space, defstr+snrstr,
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
if canddata.cluster is not None:
label, size = canddata.cluster, canddata.clustersize
ax.text(left, start-11*space, 'Cluster label: {0}'.format(str(label)),
fontname='sans-serif',
transform=ax.transAxes, fontsize='small')
ax.text(left, start-12*space, 'Cluster size: {0}'.format(size),
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
# set the plot invisible so that it doesn't interfere with annotations
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.spines['bottom'].set_color('white')
ax.spines['top'].set_color('white')
ax.spines['right'].set_color('white')
ax.spines['left'].set_color('white')
# plot full dynamic spectra
left, width = 0.75, 0.2*2./3.
bottom, height = 0.2, 0.7
# three rectangles for each panel of the spectrum (RR, RR+LL, LL)
rect_dynsp1 = [left, bottom, width/3., height]
rect_dynsp2 = [left+width/3., bottom, width/3., height]
rect_dynsp3 = [left+2.*width/3., bottom, width/3., height]
rect_lc1 = [left, bottom-0.1, width/3., 0.1]
rect_lc2 = [left+width/3., bottom-0.1, width/3., 0.1]
rect_lc3 = [left+2.*width/3., bottom-0.1, width/3., 0.1]
rect_sp = [left+width, bottom, 0.1*2./3., height]
ax_dynsp1 = fig.add_axes(rect_dynsp1)
# sharey so that axes line up
ax_dynsp2 = fig.add_axes(rect_dynsp2, sharey=ax_dynsp1)
ax_dynsp3 = fig.add_axes(rect_dynsp3, sharey=ax_dynsp1)
# hide RR+LL and LL dynamic spectra y labels to avoid overlap
[label.set_visible(False) for label in ax_dynsp2.get_yticklabels()]
[label.set_visible(False) for label in ax_dynsp3.get_yticklabels()]
ax_sp = fig.add_axes(rect_sp, sharey=ax_dynsp3)
[label.set_visible(False) for label in ax_sp.get_yticklabels()]
ax_lc1 = fig.add_axes(rect_lc1)
ax_lc2 = fig.add_axes(rect_lc2, sharey=ax_lc1)
ax_lc3 = fig.add_axes(rect_lc3, sharey=ax_lc1)
[label.set_visible(False) for label in ax_lc2.get_yticklabels()]
[label.set_visible(False) for label in ax_lc3.get_yticklabels()]
# now actually plot the data
spectra = np.swapaxes(data.real, 0, 1)
dd1 = spectra[..., 0]
dd2 = spectra[..., 0] + spectra[..., 1]
dd3 = spectra[..., 1]
colormap = 'viridis'
logger.debug('{0}'.format(dd1.shape))
logger.debug('{0}'.format(dd2.shape))
logger.debug('{0}'.format(dd3.shape))
_ = ax_dynsp1.imshow(dd1, origin='lower', interpolation='nearest',
aspect='auto', cmap=plt.get_cmap(colormap))
_ = ax_dynsp2.imshow(dd2, origin='lower', interpolation='nearest',
aspect='auto', cmap=plt.get_cmap(colormap))
_ = ax_dynsp3.imshow(dd3, origin='lower', interpolation='nearest',
aspect='auto', cmap=plt.get_cmap(colormap))
ax_dynsp1.set_yticks(list(range(0, len(st.freq), 30)))
ax_dynsp1.set_yticklabels(st.freq[::30].round(3))
ax_dynsp1.set_ylabel('Freq (GHz)')
ax_dynsp1.set_xlabel('RR')
ax_dynsp1.xaxis.set_label_position('top')
ax_dynsp2.set_xlabel('RR+LL')
ax_dynsp2.xaxis.set_label_position('top')
ax_dynsp3.set_xlabel('LL')
ax_dynsp3.xaxis.set_label_position('top')
        # hide x labels so that they don't interfere with the lc plots
[label.set_visible(False) for label in ax_dynsp1.get_xticklabels()]
# This one y label was getting in the way
ax_dynsp1.get_yticklabels()[0].set_visible(False)
# plot stokes I spectrum of the candidate pulse (assume middle bin)
# select stokes I middle bin
spectrum = spectra[:, canddata.integration_rel].mean(axis=1)
ax_sp.plot(spectrum, list(range(len(spectrum))), 'k.')
# plot 0 Jy dotted line
ax_sp.plot(np.zeros(len(spectrum)), list(range(len(spectrum))), 'r:')
xmin, xmax = ax_sp.get_xlim()
ax_sp.set_xticks(np.linspace(xmin, xmax, 3).round(2))
ax_sp.set_xlabel('Flux (Jy)')
# plot mean flux values for each time bin
lc1 = dd1.mean(axis=0)
lc2 = dd2.mean(axis=0)
lc3 = dd3.mean(axis=0)
lenlc = len(data)
ax_lc1.plot(list(range(0, lenlc)), list(lc1)[:lenlc], 'k.')
ax_lc2.plot(list(range(0, lenlc)), list(lc2)[:lenlc], 'k.')
ax_lc3.plot(list(range(0, lenlc)), list(lc3)[:lenlc], 'k.')
# plot 0 Jy dotted line for each plot
ax_lc1.plot(list(range(0, lenlc)), list(np.zeros(lenlc)), 'r:')
ax_lc2.plot(list(range(0, lenlc)), list(np.zeros(lenlc)), 'r:')
ax_lc3.plot(list(range(0, lenlc)), list(np.zeros(lenlc)), 'r:')
ax_lc2.set_xlabel('Integration (rel)')
ax_lc1.set_ylabel('Flux (Jy)')
ax_lc1.set_xticks([0, 0.5*lenlc, lenlc])
# only show the '0' label for one of the plots to avoid messy overlap
ax_lc1.set_xticklabels(['0', str(lenlc//2), str(lenlc)])
ax_lc2.set_xticks([0, 0.5*lenlc, lenlc])
ax_lc2.set_xticklabels(['', str(lenlc//2), str(lenlc)])
ax_lc3.set_xticks([0, 0.5*lenlc, lenlc])
ax_lc3.set_xticklabels(['', str(lenlc//2), str(lenlc)])
ymin, ymax = ax_lc1.get_ylim()
ax_lc1.set_yticks(np.linspace(ymin, ymax, 3).round(2))
# adjust the x tick marks to line up with the lc plots
ax_dynsp1.set_xticks([0, 0.5*lenlc, lenlc])
ax_dynsp2.set_xticks([0, 0.5*lenlc, lenlc])
ax_dynsp3.set_xticks([0, 0.5*lenlc, lenlc])
# plot second set of dynamic spectra
left, width = 0.45, 0.1333
bottom, height = 0.1, 0.4
rect_dynsp1 = [left, bottom, width/3., height]
rect_dynsp2 = [left+width/3., bottom, width/3., height]
rect_dynsp3 = [left+2.*width/3., bottom, width/3., height]
rect_sp = [left+width, bottom, 0.1*2./3., height]
ax_dynsp1 = fig.add_axes(rect_dynsp1)
ax_dynsp2 = fig.add_axes(rect_dynsp2, sharey=ax_dynsp1)
ax_dynsp3 = fig.add_axes(rect_dynsp3, sharey=ax_dynsp1)
# hide RR+LL and LL dynamic spectra y labels
[label.set_visible(False) for label in ax_dynsp2.get_yticklabels()]
[label.set_visible(False) for label in ax_dynsp3.get_yticklabels()]
ax_sp = fig.add_axes(rect_sp, sharey=ax_dynsp3)
[label.set_visible(False) for label in ax_sp.get_yticklabels()]
# calculate the channels to average together for SNR=2
n = int((2.*(len(spectra))**0.5/snrim)**2)
if n == 0: # if n==0 then don't average
dd1avg = dd1
dd3avg = dd3
else:
            # otherwise, pad the data with zeros so that its length is cleanly
            # divisible by n (makes it easier to average over)
dd1zerotemp = np.concatenate((np.zeros((n-len(spectra) % n,
len(spectra[0])),
dtype=dd1.dtype), dd1), axis=0)
dd3zerotemp = np.concatenate((np.zeros((n-len(spectra) % n,
len(spectra[0])),
dtype=dd3.dtype), dd3), axis=0)
# make masked arrays so appended zeros do not affect average
zeros = np.zeros((len(dd1), len(dd1[0])))
ones = np.ones((n-len(spectra) % n, len(dd1[0])))
masktemp = np.concatenate((ones, zeros), axis=0)
dd1zero = np.ma.masked_array(dd1zerotemp, mask=masktemp)
dd3zero = np.ma.masked_array(dd3zerotemp, mask=masktemp)
# average together the data
dd1avg = np.array([], dtype=dd1.dtype)
for i in range(len(spectra[0])):
temp = dd1zero[:, i].reshape(-1, n)
tempavg = np.reshape(np.mean(temp, axis=1), (len(temp), 1))
# repeats the mean values to create more pixels
# (easier to properly crop when it is finally displayed)
temprep = np.repeat(tempavg, n, axis=0)
if i == 0:
dd1avg = temprep
else:
dd1avg = np.concatenate((dd1avg, temprep), axis=1)
dd3avg = np.array([], dtype=dd3.dtype)
for i in range(len(spectra[0])):
temp = dd3zero[:, i].reshape(-1, n)
tempavg = np.reshape(np.mean(temp, axis=1), (len(temp), 1))
temprep = np.repeat(tempavg, n, axis=0)
if i == 0:
dd3avg = temprep
else:
dd3avg = np.concatenate((dd3avg, temprep), axis=1)
dd2avg = dd1avg + dd3avg # add together to get averaged RR+LL spectrum
colormap = 'viridis'
# if n==0 then don't crop the spectra because no zeroes were appended
if n == 0:
dd1avgcrop = dd1avg
dd2avgcrop = dd2avg
dd3avgcrop = dd3avg
else: # otherwise, crop off the appended zeroes
dd1avgcrop = dd1avg[len(ones):len(dd1avg), :]
dd2avgcrop = dd2avg[len(ones):len(dd2avg), :]
dd3avgcrop = dd3avg[len(ones):len(dd3avg), :]
logger.debug('{0}'.format(dd1avgcrop.shape))
logger.debug('{0}'.format(dd2avgcrop.shape))
logger.debug('{0}'.format(dd3avgcrop.shape))
_ = ax_dynsp1.imshow(dd1avgcrop, origin='lower',
interpolation='nearest', aspect='auto',
cmap=plt.get_cmap(colormap))
_ = ax_dynsp2.imshow(dd2avgcrop, origin='lower',
interpolation='nearest', aspect='auto',
cmap=plt.get_cmap(colormap))
_ = ax_dynsp3.imshow(dd3avgcrop, origin='lower',
interpolation='nearest', aspect='auto',
cmap=plt.get_cmap(colormap))
spw_reffreq = np.sort(st.metadata.spw_reffreq)
# TODO: need to find best chan for label even for overlapping spw
spw_chans = [np.abs(reffreq/1e9-st.freq).argmin() for reffreq in spw_reffreq]
ax_dynsp1.set_yticks(spw_chans)
ax_dynsp1.set_yticklabels((spw_reffreq/1e9).round(3))
ax_dynsp1.set_ylabel('Freq of SPW (GHz)')
ax_dynsp1.set_xlabel('RR')
ax_dynsp1.xaxis.set_label_position('top')
ax_dynsp2.set_xlabel('Integration (rel)')
ax2 = ax_dynsp2.twiny()
ax2.set_xlabel('RR+LL')
[label.set_visible(False) for label in ax2.get_xticklabels()]
ax_dynsp3.set_xlabel('LL')
ax_dynsp3.xaxis.set_label_position('top')
# plot stokes I spectrum of the candidate pulse from middle integration
ax_sp.plot(dd2avgcrop[:, canddata.integration_rel]/2.,
list(range(len(dd2avgcrop))), 'k.')
ax_sp.plot(np.zeros(len(dd2avgcrop)), list(range(len(dd2avgcrop))),
'r:')
xmin, xmax = ax_sp.get_xlim()
ax_sp.set_xticks(np.linspace(xmin, xmax, 3).round(2))
ax_sp.get_xticklabels()[0].set_visible(False)
ax_sp.set_xlabel('Flux (Jy)')
# readjust the x tick marks on the dynamic spectra
ax_dynsp1.set_xticks([0, 0.5*lenlc, lenlc])
ax_dynsp1.set_xticklabels(['0', str(lenlc//2), str(lenlc)])
ax_dynsp2.set_xticks([0, 0.5*lenlc, lenlc])
ax_dynsp2.set_xticklabels(['', str(lenlc//2), str(lenlc)])
ax_dynsp3.set_xticks([0, 0.5*lenlc, lenlc])
ax_dynsp3.set_xticklabels(['', str(lenlc//2), str(lenlc)])
# plot the image and zoomed cutout
ax = fig.add_subplot(2, 3, 4)
fov = np.degrees(1./st.uvres)*60.
_ = ax.imshow(im.transpose(), aspect='equal', origin='upper',
interpolation='nearest',
extent=[fov/2, -fov/2, -fov/2, fov/2],
cmap=plt.get_cmap('viridis'), vmin=0,
vmax=0.5*im.max())
ax.set_xlabel('RA Offset (arcmin)')
ax.set_ylabel('Dec Offset (arcmin)')
# to set scale when we plot the triangles that label the location
ax.autoscale(False)
# add markers on the axes at measured position of the candidate
ax.scatter(x=[l1arcm], y=[-fov/2], c='#ffff00', s=60, marker='^',
clip_on=False)
ax.scatter(x=[fov/2], y=[m1arcm], c='#ffff00', s=60, marker='>',
clip_on=False)
# makes it so the axis does not intersect the location triangles
ax.set_frame_on(False)
# add a zoomed cutout image of the candidate (set width at 5*beam)
sbeam = np.mean(st.beamsize_deg)*60
# figure out the location to center the zoomed image on
xratio = len(im[0])/fov # pix/arcmin
yratio = len(im)/fov # pix/arcmin
mult = 5 # sets how many times the synthesized beam the zoomed FOV is
xmin = max(0, int(len(im[0])//2-(m1arcm+sbeam*mult)*xratio))
xmax = int(len(im[0])//2-(m1arcm-sbeam*mult)*xratio)
ymin = max(0, int(len(im)//2-(l1arcm+sbeam*mult)*yratio))
ymax = int(len(im)//2-(l1arcm-sbeam*mult)*yratio)
left, width = 0.231, 0.15
bottom, height = 0.465, 0.15
rect_imcrop = [left, bottom, width, height]
ax_imcrop = fig.add_axes(rect_imcrop)
logger.debug('{0}'.format(im.transpose()[xmin:xmax, ymin:ymax].shape))
logger.debug('{0} {1} {2} {3}'.format(xmin, xmax, ymin, ymax))
_ = ax_imcrop.imshow(im.transpose()[xmin:xmax,ymin:ymax], aspect=1,
origin='upper', interpolation='nearest',
extent=[-1, 1, -1, 1],
cmap=plt.get_cmap('viridis'), vmin=0,
vmax=0.5*im.max())
# setup the axes
ax_imcrop.set_ylabel('Dec (arcmin)')
ax_imcrop.set_xlabel('RA (arcmin)')
ax_imcrop.xaxis.set_label_position('top')
ax_imcrop.xaxis.tick_top()
xlabels = [str(np.round(l1arcm+sbeam*mult/2, 1)), '',
str(np.round(l1arcm, 1)), '',
str(np.round(l1arcm-sbeam*mult/2, 1))]
ylabels = [str(np.round(m1arcm-sbeam*mult/2, 1)), '',
str(np.round(m1arcm, 1)), '',
str(np.round(m1arcm+sbeam*mult/2, 1))]
ax_imcrop.set_xticklabels(xlabels)
ax_imcrop.set_yticklabels(ylabels)
# change axis label loc of inset to avoid the full picture
ax_imcrop.get_yticklabels()[0].set_verticalalignment('bottom')
# create SNR versus N histogram for the whole observation
# (properties for each candidate in the observation given by prop)
if snrs is not None:
left, width = 0.45, 0.2
bottom, height = 0.6, 0.3
rect_snr = [left, bottom, width, height]
ax_snr = fig.add_axes(rect_snr)
pos_snrs = snrs[snrs >= 0]
neg_snrs = snrs[snrs < 0]
if not len(neg_snrs): # if working with subset and only pos snrs
neg_snrs = pos_snrs
nonegs = True
else:
nonegs = False
            # determine the min and max values of the x axis
if min(pos_snrs) < min(np.abs(neg_snrs)):
minval = min(pos_snrs)
else:
minval = min(np.abs(neg_snrs))
if max(pos_snrs) > max(np.abs(neg_snrs)):
maxval = max(pos_snrs)
else:
maxval = max(np.abs(neg_snrs))
# positive SNR bins are in blue
# absolute values of negative SNR bins are taken and plotted as
# red x's on top of positive blue bins for compactness
n, b, patches = ax_snr.hist(pos_snrs, 50, (minval, maxval),
facecolor='blue', zorder=1)
vals, bin_edges = np.histogram(np.abs(neg_snrs), 50,
(minval, maxval))
bins = np.array([(bin_edges[i]+bin_edges[i+1])/2.
for i in range(len(vals))])
vals = np.array(vals)
if not nonegs:
ax_snr.scatter(bins[vals > 0], vals[vals > 0], marker='x',
c='orangered', alpha=1.0, zorder=2)
ax_snr.set_xlabel('SNR')
ax_snr.set_xlim(left=minval-0.2)
ax_snr.set_xlim(right=maxval+0.2)
ax_snr.set_ylabel('N')
ax_snr.set_yscale('log')
# draw vertical line where the candidate SNR is
ax_snr.axvline(x=snrim, linewidth=1, color='y', alpha=0.7)
if not outname:
outname = os.path.join(st.prefs.workdir,
'cands_{0}.png'
.format(canddata.candid))
try:
canvas = FigureCanvasAgg(fig)
canvas.print_figure(outname)
logger.info('Wrote candidate plot to {0}'.format(outname))
except ValueError:
            logger.warning('Could not write figure to {0}'.format(outname))
def looks_like_PEM(text):
"""
Guess whether text looks like a PEM encoding.
"""
i = text.find("-----BEGIN ")
    return i >= 0 and text.find("\n-----END ", i) > i
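# Example usage with a minimal PEM-shaped string (the base64 body here is a
# placeholder, which is fine since only the BEGIN/END armour is inspected):
pem = "-----BEGIN CERTIFICATE-----\nMIIB...\n-----END CERTIFICATE-----\n"
assert looks_like_PEM(pem)
assert not looks_like_PEM("not a PEM blob")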
def find_duplicates(list_to_check):
"""
    This finds duplicates in a list of values of any type and then returns the values
    that are duplicated. Since Counter only works with hashable types, i.e. it can't
    work with lists, un-hashable items in list_to_check are converted to tuples
    before counting.
    :param list_to_check: A list of values with potential duplicates within it
    :type list_to_check: list
    :return: The values that were duplicates
    :rtype: list
"""
    from collections import Counter
    try:
counted_list = Counter(list_to_check)
except TypeError:
counted_list = Counter([tuple(x) for x in list_to_check])
    return [key for key in counted_list if counted_list[key] > 1]
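# Example usage: hashable items are counted directly, while un-hashable items
# (e.g. inner lists) hit the tuple fallback above, so their duplicates are
# reported as tuples.
assert sorted(find_duplicates([1, 2, 3, 1, 2])) == [1, 2]
assert find_duplicates([[1, 2], [3], [1, 2]]) == [(1, 2)]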
def test_failure(database):
""" Test failure for PrimaryPlaceofPerformanceZIP+4 should not be provided for any format of
PrimaryPlaceOfPerformanceCode other than XX#####. """
det_award_1 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="00FORGN",
place_of_performance_zip4a="1234")
det_award_2 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="00*****",
place_of_performance_zip4a="4312")
det_award_3 = DetachedAwardFinancialAssistanceFactory(place_of_performance_code="ny**987",
place_of_performance_zip4a="4312")
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3])
    assert errors == 3
def none_to_default(field: Any, default: Any) -> Any:
"""Convert None values into default values.
:param field: the original value that may be None.
:param default: the new, default, value.
    :return: the default value if field is None, the original field value
        otherwise.
:rtype: any
"""
    return default if field is None else field
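# Example usage: only None is replaced; other falsy values are kept, which is
# why the `is None` test matters.
assert none_to_default(None, 42) == 42
assert none_to_default(0, 42) == 0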
def quandl_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
cache,
show_progress,
output_dir):
"""Build a zipline data bundle from the Quandl WIKI dataset.
"""
api_key = environ.get('QUANDL_API_KEY')
metadata = fetch_symbol_metadata_frame(
api_key,
cache=cache,
show_progress=show_progress,
)
symbol_map = metadata.symbol
# data we will collect in `gen_symbol_data`
splits = []
dividends = []
asset_db_writer.write(metadata)
daily_bar_writer.write(
gen_symbol_data(
api_key,
cache,
symbol_map,
calendar,
splits,
dividends,
environ.get('QUANDL_DOWNLOAD_ATTEMPTS', 5),
),
show_progress=show_progress,
)
adjustment_writer.write(
splits=pd.concat(splits, ignore_index=True),
dividends=pd.concat(dividends, ignore_index=True),
    )
import contextlib
import sys
@contextlib.contextmanager
def _maybe_disable_mpi(mpi_disabled):
"""A context that can temporarily remove the mpi4py import.
Useful for testing whether non-MPI algorithms work as intended when
mpi4py isn't installed.
Args:
        mpi_disabled (bool): If True, then this context temporarily removes
            the mpi4py import from `sys.modules`
"""
if mpi_disabled and "mpi4py" in sys.modules:
temp = sys.modules["mpi4py"]
try:
sys.modules["mpi4py"] = None
yield
finally:
sys.modules["mpi4py"] = temp
else:
        yield
def test_ap_wps_authenticator_mismatch_m5(dev, apdev):
"""WPS and Authenticator attribute mismatch in M5"""
addr,bssid,hapd = wps_start_ext(apdev[0], dev[0])
wps_ext_eap_identity_req(dev[0], hapd, bssid)
wps_ext_eap_identity_resp(hapd, dev[0], addr)
wps_ext_eap_wsc(dev[0], hapd, bssid, "EAP-WSC/Start")
wps_ext_eap_wsc(hapd, dev[0], addr, "M1")
wps_ext_eap_wsc(dev[0], hapd, bssid, "M2")
wps_ext_eap_wsc(hapd, dev[0], addr, "M3")
wps_ext_eap_wsc(dev[0], hapd, bssid, "M4")
logger.debug("M5")
    wps_auth_corrupt_to_ap(dev[0], hapd, addr, "msg=9")
def search():
"""Search downloaded pages.""" | 26,319 |
def test_convert_new_line_tags():
"""Tests converting HTML links into JSON."""
json = html_to_draftjs("<br><br/>", strict=True)
assert json == {
"entityMap": {},
"blocks": [
{
"key": "",
"text": "\n\n",
"type": "unstyled",
"depth": 0,
"inlineStyleRanges": [],
"entityRanges": [],
"data": {},
}
],
    }
def fetch_pauli2018(data_dir=None, url=None, resume=True, verbose=1):
"""
Downloads files for Pauli et al., 2018 subcortical parcellation
Parameters
----------
data_dir : str, optional
Path to use as data directory. If not specified, will check for
environmental variable 'NNT_DATA'; if that is not set, will use
`~/nnt-data` instead. Default: None
url : str, optional
URL from which to download data. Default: None
resume : bool, optional
Whether to attempt to resume partial download, if possible. Default:
True
verbose : int, optional
Modifies verbosity of download, where higher numbers mean more updates.
Default: 1
Returns
-------
filenames : :class:`sklearn.utils.Bunch`
        Dictionary-like object with keys ['probabilistic', 'deterministic',
        'info'], where corresponding values are filepaths to downloaded atlas files.
References
----------
Pauli, W. M., Nili, A. N., & Tyszka, J. M. (2018). A high-resolution
probabilistic in vivo atlas of human subcortical brain nuclei. Scientific
Data, 5, 180063.
Notes
-----
License: CC-BY Attribution 4.0 International
"""
dataset_name = 'atl-pauli2018'
keys = ['probabilistic', 'deterministic', 'info']
data_dir = _get_data_dir(data_dir=data_dir)
info = _get_dataset_info(dataset_name)
# format the query how _fetch_files() wants things and then download data
files = [
(i['name'], i['url'], dict(md5sum=i['md5'], move=i['name']))
for i in info
]
data = _fetch_files(data_dir, files=files, resume=resume, verbose=verbose)
    return Bunch(**dict(zip(keys, data)))
def test_list_installed_packages():
"""
Test the main function of ggd list
"""
## Normal Run
args = Namespace(command='list', pattern=None, prefix=None, reset=False)
temp_stdout = StringIO()
with redirect_stdout(temp_stdout):
list_installed_pkgs.list_installed_packages((), args)
output = temp_stdout.getvalue().strip()
assert "hg19-gaps-ucsc-v1" in output
assert "Name" in output and "Pkg-Version" in output and "Pkg-Build" in output and "Channel" in output and "Environment-Variables" in output
assert "To use the environment variables run `source activate base" in output
assert "You can see the available ggd data package environment variables by running `ggd show-env" in output
## Pattern set to exact package name
args = Namespace(command='list', pattern="hg19-gaps-ucsc-v1", prefix=None, reset=False)
temp_stdout = StringIO()
with redirect_stdout(temp_stdout):
list_installed_pkgs.list_installed_packages((), args)
output = temp_stdout.getvalue().strip()
assert "hg19-gaps-ucsc-v1" in output
assert "Name" in output and "Pkg-Version" in output and "Pkg-Build" in output and "Channel" in output and "Environment-Variables" in output
assert "To use the environment variables run `source activate base" in output
assert "You can see the available ggd data package environment variables by running `ggd show-env" in output
## Pattern set to beginning of package name
args = Namespace(command='list', pattern="hg19", prefix=None, reset=False)
temp_stdout = StringIO()
with redirect_stdout(temp_stdout):
list_installed_pkgs.list_installed_packages((), args)
output = temp_stdout.getvalue().strip()
assert "hg19-gaps-ucsc-v1" in output
assert "Name" in output and "Pkg-Version" in output and "Pkg-Build" in output and "Channel" in output and "Environment-Variables" in output
assert "To use the environment variables run `source activate base" in output
assert "You can see the available ggd data package environment variables by running `ggd show-env" in output
## Pattern set to middle of package name
args = Namespace(command='list', pattern="gaps", prefix=None, reset=False)
temp_stdout = StringIO()
with redirect_stdout(temp_stdout):
list_installed_pkgs.list_installed_packages((), args)
output = temp_stdout.getvalue().strip()
assert "hg19-gaps-ucsc-v1" in output
assert "Name" in output and "Pkg-Version" in output and "Pkg-Build" in output and "Channel" in output and "Environment-Variables" in output
assert "To use the environment variables run `source activate base" in output
assert "You can see the available ggd data package environment variables by running `ggd show-env" in output
## Pattern does not match an installed package
args = Namespace(command='list', pattern="BADPATTERN", prefix=None, reset=False)
with pytest.raises(SystemExit) as pytest_wrapped_e:
list_installed_pkgs.list_installed_packages((), args)
assert "SystemExit" in str(pytest_wrapped_e.exconly()) ## test that SystemExit was raised by sys.exit()
assert pytest_wrapped_e.match("'{p}' did not match any installed data packages".format(p="BADPATTERN"))
## Package in set prefix (Not conda_root)
p = os.path.join(utils.conda_root(), "envs", "temp_env") ## From test_get_environment_variables()
args = Namespace(command='list', pattern=None, prefix=p, reset=False)
temp_stdout = StringIO()
with redirect_stdout(temp_stdout):
list_installed_pkgs.list_installed_packages((), args)
output = temp_stdout.getvalue().strip()
assert "hg19-pfam-domains-ucsc-v1" in output
assert "Name" in output and "Pkg-Version" in output and "Pkg-Build" in output and "Channel" in output and "Environment-Variables" in output
assert "The environment variables are only available when you are using the '{}' conda environment".format(p) in output
## Package in set prefix (Not conda_root) and using the prefix name rather than the prefix path
args = Namespace(command='list', pattern=None, prefix="temp_env", reset=False)
temp_stdout = StringIO()
with redirect_stdout(temp_stdout):
list_installed_pkgs.list_installed_packages((), args)
output = temp_stdout.getvalue().strip()
assert "hg19-pfam-domains-ucsc-v1" in output
assert "Name" in output and "Pkg-Version" in output and "Pkg-Build" in output and "Channel" in output and "Environment-Variables" in output
assert "The environment variables are only available when you are using the '{}' conda environment".format(p) in output
## Remove "hg19-pfam-domains-ucsc-v1" from temp_env conda metadata but not ggd
metadata_path = "share/ggd_info/channeldata.json"
full_path = os.path.join(utils.get_conda_prefix_path("temp_env"), metadata_path)
## Check that the package is still displayed, but a warning is provided about it is missing from conda metadata
sp.check_output(["conda", "uninstall", "hg19-pfam-domains-ucsc-v1", "-p", utils.get_conda_prefix_path("temp_env")])
with redirect_stdout(temp_stdout):
list_installed_pkgs.list_installed_packages((), args)
output = temp_stdout.getvalue().strip()
assert "hg19-pfam-domains-ucsc-v1" in output
assert "[WARNING: Present in GGD but missing from Conda]" in str(output)
assert ("NOTE: Packages with the '[WARNING: Present in GGD but missing from Conda]' messages represent packages where the ggd"
" package(s) are installed, but the package metadata has been removed from conda storage. This"
" happens when one of the following happen: \n 1) The package represents an ID specific meta-"
"recipe intsalled by GGD. \n 2) When the recipe is built locally using 'ggd check-recipe' and"
" has not been uninstalled. (Commonly for private data packages).\n Or \n 3) The package is"
" uninstalled using conda rather then ggd. The package is still available for use and is in"
" the same state as before the 'conda uninstall'. To fix the problem on conda's side, uninstall"
" the package with 'ggd uninstall' and re-install with 'ggd install'.\n"
)
## Remove temp env created in test_get_environment_variables()
sp.check_output(["conda", "env", "remove", "--name", "temp_env"])
try:
shutil.rmtree(p)
except Exception:
pass
assert os.path.exists(p) == False
## Test basic reset works
args = Namespace(command='list', pattern=None, prefix=None, reset=True)
with pytest.raises(SystemExit) as pytest_wrapped_e:
list_installed_pkgs.list_installed_packages((), args)
assert "SystemExit" in str(pytest_wrapped_e.exconly()) ## test that SystemExit was raised by sys.exit()
    assert pytest_wrapped_e.match("0")
def filled_in_kinefold_form(filename, list_of_sub_objects):
"""
    Creates a kinefold submission form for simulating and processing
    devices on hyak.
    :param filename: The name of the csv file.
    :type filename: str
    :param list_of_sub_objects: List of submission objects to be created
    :type list_of_sub_objects: list
"""
if '.csv' not in filename:
filename += '.csv'
with open(filename, 'wb') as f:
writer = csv.writer(f)
headers = ['sequence', 'name', 'window start',
'window stop', 'numberofsimulations',
'Polymerization Rate (nt/s)',
'Folding time after elongation (s)',
'1 renaturation 2 contrans',
'psudoknots 0 no 1 yes',
'entanglements 0 no 1 yes',
'helix minimum free energy (Leave Blank IF Unsure)']
forcedlist = ['forced start', 'forced stop', 'forced size']
for i in range(3):
headers.extend(forcedlist)
headers.append('posrefpart')
for i in range(5):
headers.append('part' + str(i+1))
headers.append('part start')
headers.append('part stop')
# writing headers
writer.writerow(headers)
        if list_of_sub_objects is not None:
            for sub_object in list_of_sub_objects:
                linetowrite = sub_object.generate_csv_line()
                writer.writerow(linetowrite)
def lambda_to_ent(la):
""" entanglement from a schmidt coeff lambda
ent = - [la * log(la) + (1 - la) * log(1 - la)]
where la (lambda) is the Schmidt coefficient
"""
    return - np.nan_to_num((1-la)*np.log(1-la) + la*np.log(la))
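import numpy as np
# Worked example: la = 0.5 (a maximally entangled two-level Schmidt
# coefficient) gives the maximum entropy log(2) in nats, while la = 1 gives 0
# because np.nan_to_num absorbs the 0*log(0) limit (at the cost of a harmless
# RuntimeWarning from log(0)).
assert np.isclose(lambda_to_ent(0.5), np.log(2))
assert lambda_to_ent(1.0) == 0.0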
def parse_states(
value_field: str,
selected_values: selected_values_type,
selected_fields: selected_fields_type,
*,
field_cleaners: Dict[str, Callable[[pd.DataFrame, str], pd.DataFrame]] = None,
) -> parsed_data_type:
"""
Outputs CSVs of state data after parsing a large CSV of U.S. county-level census data for selected states.
Args:
value_field (str): Field that will be used to filter data by.
        selected_values (selected_values_type): A list of dictionaries relating to the states selected for data
            extraction. Each dict has key-value pairs for the full name of the state and its two-letter abbreviation.
selected_fields (selected_fields_type): A list of dictionaries that represent the fields that will be selected from
the U.S. Census CSV, and how the field will be represented in the final CSV.
        field_cleaners (Dict[str, Callable[[pd.DataFrame, str], pd.DataFrame]]): (Optional) mapping of field name to a
            function that cleans that field
Returns:
parsed_data_type - A list of dictionaries with parsed data
"""
# read
df = pd.read_csv(PATH_USA_POP, encoding="ISO-8859-1")
# filter - remove statewide population counts
df = df[df["COUNTY"] != 0]
# filter - include only selected values
selected_values_names = [x["name"] for x in selected_values]
df = df[df[value_field].isin(selected_values_names)]
# option - clean value field
if field_cleaners:
for field in field_cleaners.keys():
cleaner_func = field_cleaners[field]
df = cleaner_func(df, field)
# rename field lookuptable
rename_schema = {}
for field in selected_fields:
input_name = field["input_name"]
output_name = field["output_name"]
rename_schema[input_name] = output_name
# group by
by_state = df.groupby(value_field)
payload = []
for name, group in by_state:
logging.info(f"Processing: {name}")
# get selected state dict for processing instructions
selected_state = list(filter(lambda x: x["name"] == name, selected_values))[0]
# generate FIPS code
# Temporarily disabling SettingWithCopy warning
pd.reset_option("mode.chained_assignment")
with pd.option_context("mode.chained_assignment", None):
group["STATE"] = group["STATE"].astype(str).str.zfill(2)
group["COUNTY"] = group["COUNTY"].astype(str).str.zfill(3)
group["FIPS"] = group["STATE"] + group["COUNTY"]
# truncate cols in df
selected_fields_input = [x["input_name"] for x in selected_fields]
group = group[selected_fields_input]
# rename
group = group.rename(columns=rename_schema)
# option - special processor (special funcs for doing extra stuff to df)
special_processors = selected_state.get("special_processors")
if special_processors:
for processor in special_processors:
group = processor(group)
# produce csv
abbrv = selected_state["abbrv"]
payload.append({"name": abbrv, "data": group})
    return payload
def check_updates():
"""
invoke --upgrade-dist
return output lines as array
"""
    # check that permissions are sufficient
    if not os.access(os.path.join(CONFIG["rpmdir"], 'Packages'), os.R_OK):
        die("ERROR", "rpm Packages not accessible: %s" % os.path.join(CONFIG["rpmdir"], 'Packages'))
if not os.access(CONFIG["cache"], os.R_OK | os.W_OK | os.X_OK):
die("ERROR", "Cache dir not accessible: %s" % CONFIG["cache"])
update_indexes()
# check security updates
(ret, stdout) = poldek(["--cmd", "ls -S --qf 'pkg %{N}\n'"])
if ret < 0:
die("ERROR", "Could not run poldek: Killed by " + str(-ret) + " signal.")
if ret > 0:
die("ERROR", "Could not run poldek: Poldek exited with " + str(ret) + ".")
pkgs_security = []
for line in stdout:
line = line.rstrip()
split = line.split()
if len(split) != 2 or split[0] != "pkg":
die("ERROR", "unexpected line: " + line)
if (CONFIG["verbose"]):
print >> sys.stderr, "stdout: %s" % line
pkgs_security.append(split[1])
# check plain updates
(ret, stdout) = poldek(["--cmd", "ls -u --qf 'pkg %{N}\n'"])
if ret < 0:
die("ERROR", "Could not run poldek: Killed by " + str(-ret) + " signal.")
if ret > 0:
die("ERROR", "Could not run poldek: Poldek exited with " + str(ret) + ".")
pkgs_update = []
for line in stdout:
line = line.rstrip()
split = line.split()
if len(split) != 2 or split[0] != "pkg":
die("ERROR", "unexpected line: " + line)
if (CONFIG["verbose"]):
print >> sys.stderr, "stdout: %s" % line
pkgs_update.append(split[1])
status_line = []
status_code = "UNKNOWN"
# security updates mark always critical
if len(pkgs_security):
status_line.append("%d security updates (%s)" % (len(pkgs_security), ", ".join(pkgs_security)))
status_code = "CRITICAL"
n_pkgs_update = len(pkgs_update)
if n_pkgs_update == 0:
# security updates always present in normal update list too
status_code = "OK"
status_line.append("No updates")
else:
if n_pkgs_update > CONFIG["errorLevel"]:
status_code = "CRITICAL"
elif n_pkgs_update > CONFIG["warningLevel"]:
if not status_code == "CRITICAL":
status_code = "WARNING"
else:
if not status_code == "CRITICAL":
status_code = "OK"
status_line.append("%d updates pending" % n_pkgs_update)
    die(status_code, "; ".join(status_line))
def resp():
""" Requests response fixture"""
resp = Response()
resp.status_code = 409
    yield resp
def get_reference_models(content_model):
"""Yields (model, lookups) tuples, where model
is a model that can contain references to content_model.
"""
extension = get_extension()
    yield from _get_reference_models(content_model, extension.reference_models)
def is_diagonal_segment(vector_start, vector_end):
"""Diagonal as defined by a slope of 1 or -1"""
    return slope(vector_start, vector_end) in (1, -1)
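# The slope() helper referenced above is not shown in the source; a plausible
# stand-in (rise over run for two (x, y) points) for illustration:
def slope(vector_start, vector_end):
    return (vector_end[1] - vector_start[1]) / (vector_end[0] - vector_start[0])
# Example usage (with the stand-in above):
assert is_diagonal_segment((0, 0), (3, 3))       # slope 1
assert is_diagonal_segment((0, 3), (3, 0))       # slope -1
assert not is_diagonal_segment((0, 0), (4, 2))   # slope 0.5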
def evaluate_statistic_methods_on_file(f, xc, mas, classifier_obj, tagger):
"""
:type xc: RGCorpus
:type mas: MultAlignScorer
"""
xc.heur_align()
# Start by adding the manual alignments...
mas.add_corpus('gold', INTENT_ALN_MANUAL, f, xc)
EVAL_LOG.info("")
xc.giza_align_t_g(aligner=ALIGNER_FASTALIGN, use_heur=False)
mas.add_corpus('fast_align', INTENT_ALN_GIZA, f, xc)
xc.remove_alignments(INTENT_ALN_GIZA)
xc.giza_align_t_g(aligner=ALIGNER_FASTALIGN, use_heur=True)
mas.add_corpus('fast_align_heur', INTENT_ALN_GIZA, f, xc)
xc.remove_alignments(INTENT_ALN_GIZA)
xc.giza_align_t_g(use_heur=False, resume=False)
mas.add_corpus('statistic', INTENT_ALN_GIZA, f, xc)
xc.remove_alignments(INTENT_ALN_GIZA)
xc.giza_align_t_g(use_heur=True, resume=False)
mas.add_corpus('statistic_heur', INTENT_ALN_GIZA, f, xc)
xc.remove_alignments(INTENT_ALN_GIZA)
xc.giza_align_t_g(use_heur=False, resume=True)
mas.add_corpus('statistic+', INTENT_ALN_GIZA, f, xc)
xc.remove_alignments(INTENT_ALN_GIZA)
xc.giza_align_t_g(use_heur=True, resume=True)
mas.add_corpus('statistic+_heur', INTENT_ALN_GIZA, f, xc)
    xc.remove_alignments(INTENT_ALN_GIZA)
def properties_table(segment_props, columns=None, exclude_columns=None):
"""
Construct a `~astropy.table.Table` of properties from a list of
`SegmentProperties` objects.
If ``columns`` or ``exclude_columns`` are not input, then the
`~astropy.table.Table` will include all scalar-valued properties.
Multi-dimensional properties, e.g.
`~photutils.SegmentProperties.data_cutout`, can be included in the
``columns`` input.
Parameters
----------
segment_props : `SegmentProperties` or list of `SegmentProperties`
A `SegmentProperties` object or list of `SegmentProperties`
objects, one for each source segment.
columns : str or list of str, optional
Names of columns, in order, to include in the output
`~astropy.table.Table`. The allowed column names are any of the
attributes of `SegmentProperties`.
exclude_columns : str or list of str, optional
Names of columns to exclude from the default properties list in
the output `~astropy.table.Table`. The default properties are
those with scalar values.
Returns
-------
table : `~astropy.table.Table`
A table of properties of the segmented sources, one row per
source segment.
See Also
--------
:class:`photutils.detection.detect_sources`, segment_properties
Examples
--------
>>> import numpy as np
>>> from photutils import segment_properties, properties_table
>>> image = np.arange(16.).reshape(4, 4)
>>> print(image)
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]
[ 8. 9. 10. 11.]
[ 12. 13. 14. 15.]]
>>> segm_image = np.array([[1, 1, 0, 0],
... [1, 0, 0, 2],
... [0, 0, 2, 2],
... [0, 2, 2, 0]])
>>> segm_props = segment_properties(image, segm_image)
>>> columns = ['id', 'xcentroid', 'ycentroid', 'segment_sum']
>>> t = properties_table(segm_props, columns=columns)
>>> print(t)
id xcentroid ycentroid segment_sum
pix pix
--- ------------- ------------- -----------
1 0.2 0.8 5.0
2 2.09090909091 2.36363636364 55.0
"""
if isinstance(segment_props, list) and len(segment_props) == 0:
raise ValueError('segment_props is an empty list')
segment_props = np.atleast_1d(segment_props)
props_table = Table()
# all scalar-valued properties
columns_all = ['id', 'xcentroid', 'ycentroid', 'ra_icrs_centroid',
'dec_icrs_centroid', 'segment_sum',
'segment_sum_err', 'background_sum', 'background_mean',
'background_atcentroid', 'xmin', 'xmax', 'ymin', 'ymax',
'min_value', 'max_value', 'minval_xpos', 'minval_ypos',
'maxval_xpos', 'maxval_ypos', 'area', 'equivalent_radius',
'perimeter', 'semimajor_axis_sigma',
'semiminor_axis_sigma', 'eccentricity', 'orientation',
'ellipticity', 'elongation', 'covar_sigx2',
'covar_sigxy', 'covar_sigy2', 'cxx', 'cxy', 'cyy']
table_columns = None
if exclude_columns is not None:
table_columns = [s for s in columns_all if s not in exclude_columns]
if columns is not None:
table_columns = np.atleast_1d(columns)
if table_columns is None:
table_columns = columns_all
# it's *much* faster to calculate world coordinates using the
# complete list of (x, y) instead of from the individual (x, y).
# The assumption here is that the wcs is the same for each
# element of segment_props.
if ('ra_icrs_centroid' in table_columns or
'dec_icrs_centroid' in table_columns):
xcentroid = [props.xcentroid.value for props in segment_props]
ycentroid = [props.ycentroid.value for props in segment_props]
if segment_props[0]._wcs is not None:
skycoord = pixel_to_skycoord(
xcentroid, ycentroid, segment_props[0]._wcs, origin=1).icrs
ra = skycoord.ra.degree * u.deg
dec = skycoord.dec.degree * u.deg
else:
nprops = len(segment_props)
ra, dec = [None] * nprops, [None] * nprops
for column in table_columns:
if column == 'ra_icrs_centroid':
props_table[column] = ra
elif column == 'dec_icrs_centroid':
props_table[column] = dec
else:
values = [getattr(props, column) for props in segment_props]
if isinstance(values[0], u.Quantity):
# turn list of Quantities into a Quantity array
values = u.Quantity(values)
props_table[column] = values
    return props_table
def create_mutation(model, app):
"""Create Class-Mutation."""
app_name_lower = app.name.lower()
type_name = f"{ app_name_lower }{ app.model.name }Type"
mutation_name = f"{ app_name_lower }{ app.model.name }Mutation"
form_name = f"{ app_name_lower }{ app.model.name }Form"
api_uri = f"{ app_name_lower }_{ app.model.one }_editor"
model_uri = f"{ app_name_lower }.{ app.model.one }"
# Setup Form Configurations
meta_form = dict()
meta_form["model"] = model
meta_form["fields"] = "__all__"
# Setup Type Configurations
meta_type = dict()
meta_type["model"] = model
meta_type["interfaces"] = (graphene.relay.Node,)
# Create ModelForm
create_class_form = type(
form_name,
(ModelForm,),
create_class_meta(meta_form),
)
# Create ModelType
create_class_type = type(
type_name,
(DjangoObjectType,),
create_class_meta(meta_type),
)
# Create Real Mutation
@classmethod
def mutate_and_get_payload(cls, root, info, **kwargs):
user_base = get_role(info.context.user)
user_base.model = model_uri
user_base.form = create_class_form
instance = None
ids = None
if "id" in kwargs:
kwargs["id"] = from_global_id(kwargs["id"])[1]
ids = [kwargs["id"]]
del kwargs["id"]
if "ids" in kwargs:
kwargs["ids"] = [from_global_id(xid)[1] for xid in kwargs["ids"]]
ids = kwargs["ids"]
del kwargs["ids"]
# Do => <UPDATE>
if ids and not kwargs.get("del"):
"""
.##..##..#####...#####....####...######..######.
.##..##..##..##..##..##..##..##....##....##.....
.##..##..#####...##..##..######....##....####...
.##..##..##......##..##..##..##....##....##.....
..####...##......#####...##..##....##....######.
"""
user = get_access(user_base, app.perm.update)
user.crud = "update"
user.is_allowed = check_request(user, kwargs)
user.update = update_many
if info.context.user.is_superuser:
kwargs = get_related_superuser(model, kwargs)
else:
kwargs = get_related(user, kwargs)
instance = ModelGraphQL.update(
user,
model,
info,
ids,
kwargs,
)
# Do => <DELETE>
elif ids and kwargs.get("del"):
"""
.#####...######..##......######..######..######.
.##..##..##......##......##........##....##.....
.##..##..####....##......####......##....####...
.##..##..##......##......##........##....##.....
.#####...######..######..######....##....######.
"""
user = get_access(user_base, app.perm.delete)
user.crud = "delete"
user.is_allowed = check_request(user, kwargs)
objects = ModelGraphQL.delete(
user,
model,
info,
ids,
)
if objects:
objects.delete()
# Do => <CREATE>
else:
"""
..####...#####...######...####...######..######.
.##..##..##..##..##......##..##....##....##.....
.##......#####...####....######....##....####...
.##..##..##..##..##......##..##....##....##.....
..####...##..##..######..##..##....##....######.
"""
user = get_access(user_base, app.perm.create)
user.crud = "create"
user.is_allowed = check_request(user, kwargs)
user.create = model.objects.create
if info.context.user.is_superuser:
kwargs = get_related_superuser(model, kwargs)
else:
kwargs = get_related(user, kwargs)
instance = ModelGraphQL.create(
user,
model,
info,
kwargs,
)
dict_out = {app.model.one: instance}
return class_mutation(**dict_out)
# Create Description
model_description = description(app, model, model_uri)
# Setup Mutation
setup_mutation = create_class_meta(
{"form_class": create_class_form, "description": model_description}
)
setup_mutation[app.model.one] = graphene.Field(create_class_type)
setup_mutation["mutate_and_get_payload"] = mutate_and_get_payload
setup_mutation["Input"] = type(
"Input",
(object,),
{
"ids": graphene.List(
graphene.ID, description="List of IDs to UPDATE or DELETE."
),
"del": graphene.Boolean(description="Use (del: true) to DELETE."),
},
)
class_mutation = type(
mutation_name,
(DjangoModelFormMutation,),
setup_mutation,
)
# Return: class Mutation(graphene.ObjectType)
return type(
"Mutation",
(graphene.ObjectType,),
{api_uri: class_mutation.Field()},
    )
def read_players_info():
"""Get players info - [player 1 name, player 1 sign]"""
first_player_name = input("Player one name: ")
second_player_name = input("Player two name: ")
first_player_sign = read_first_player_sign(first_player_name)
second_player_sign = "O" if first_player_sign == "X" else "X"
return ([first_player_name, first_player_sign],
            [second_player_name, second_player_sign])
def z_gate_circuits_deterministic(final_measure=True):
"""Z-gate test circuits with deterministic counts."""
circuits = []
qr = QuantumRegister(1)
if final_measure:
cr = ClassicalRegister(1)
regs = (qr, cr)
else:
regs = (qr, )
# Z alone
circuit = QuantumCircuit(*regs)
circuit.z(qr)
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# HZH = X
circuit = QuantumCircuit(*regs)
circuit.h(qr)
circuit.barrier(qr)
circuit.z(qr)
circuit.barrier(qr)
circuit.h(qr)
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
# HZZH = I
circuit = QuantumCircuit(*regs)
circuit.h(qr)
circuit.barrier(qr)
circuit.z(qr)
circuit.barrier(qr)
circuit.z(qr)
circuit.barrier(qr)
circuit.h(qr)
if final_measure:
circuit.barrier(qr)
circuit.measure(qr, cr)
circuits.append(circuit)
    return circuits
def symbol_sum(variables):
"""
``` python
a = symbols('a0:100')
%timeit Add(*a)
# >>> 10000 loops, best of 3: 34.1 µs per loop
b = symbols('b0:1000')
%timeit Add(*b)
# >>> 1000 loops, best of 3: 343 µs per loop
c = symbols('c0:3000')
%timeit Add(*c)
# >>> 1 loops, best of 3: 1.03 ms per loop
```
See the `github thread <https://github.com/sympy/sympy/issues/13945>`_
:param variables:
:return:
"""
from sympy import Add
    k = 0
    # If we encounter a zero, which is a special type, increase k
    # (bounds check first so an all-zero list does not raise IndexError)
    while k < len(variables) and isinstance(variables[k], Zero):
        k += 1
    if k == len(variables):
        # everything is 0
        return 0
if isinstance(variables[k], GenericVariable):
return Add(*[x.variable for x in variables])
elif isinstance(variables[k], optlang.interface.Variable) or \
isinstance(variables[k], sympy.Mul) or \
isinstance(variables[k], sympy.Add) or \
isinstance(variables[k], Number):
return Add(*variables)
else:
raise ValueError('Arguments should be of type Number, sympy.Add, or sympy.Mul, '
                         'or optlang.Variable, or GenericVariable')
def before_request():
"""Make sure we are connected to the database each request and look
up the current user so that we know he's there.
"""
g.db = connect_db()
g.user = None
if 'user_id' in session:
g.user = query_db('select * from user where user_id = ?',
                          [session['user_id']], one=True)
def any_specified_encoding(sequence: bytes, search_zone: int = 4096) -> Optional[str]:
"""
Extract using ASCII-only decoder any specified encoding in the first n-bytes.
"""
if not isinstance(sequence, bytes):
raise TypeError
seq_len = len(sequence) # type: int
results = findall(
RE_POSSIBLE_ENCODING_INDICATION,
sequence[: seq_len if seq_len <= search_zone else search_zone].decode(
"ascii", errors="ignore"
),
) # type: List[str]
if len(results) == 0:
return None
for specified_encoding in results:
specified_encoding = specified_encoding.lower().replace("-", "_")
for encoding_alias, encoding_iana in aliases.items():
if encoding_alias == specified_encoding:
return encoding_iana
if encoding_iana == specified_encoding:
return encoding_iana
    return None
def maf(genotypes):
"""Computes the MAF and returns a boolean indicating if the minor allele
is currently the coded allele.
"""
warnings.warn("deprecated: use 'Genotypes.maf'", DeprecationWarning)
g = genotypes.genotypes
maf = np.nansum(g) / (2 * np.sum(~np.isnan(g)))
if maf > 0.5:
maf = 1 - maf
return maf, False
    return maf, True
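import numpy as np
import warnings
# Quick check with a minimal stand-in for the genotypes container: maf() only
# touches the .genotypes array, so the real class (presumably the Genotypes
# class named in the deprecation warning) is not needed here.
class _FakeGenotypes:
    def __init__(self, g):
        self.genotypes = np.asarray(g, dtype=float)
freq, coded_is_minor = maf(_FakeGenotypes([0, 1, 2, 2, np.nan]))
# coded-allele frequency = 5 / (2 * 4) = 0.625 > 0.5, so it is flipped
assert np.isclose(freq, 0.375) and coded_is_minor is False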
def test_extract_media_extracts_file() -> None:
"""Ensure extract_media extracts a FileDescription and sets attachment correctly."""
extracted_media, extracted_file = extract_media(InputMediaPhoto(Path('test.jpg')))
assert isinstance(extracted_media.media, str)
assert extracted_media.media.startswith('attach://')
assert extracted_file is not None
    assert extracted_media.media.replace('attach://', '') == extracted_file.basename
def lower_bound_jensen_shannon(logu, joint_sample_mask=None,
validate_args=False, name=None):
"""Lower bound on Jensen-Shannon (JS) divergence.
This lower bound on JS divergence is proposed in
[Goodfellow et al. (2014)][1] and [Nowozin et al. (2016)][2].
When estimating lower bounds on mutual information, one can also use
different approaches for training the critic w.r.t. estimating
mutual information [(Poole et al., 2018)][3]. The JS lower bound is
used to train the critic with the standard lower bound on the
Jensen-Shannon divergence as used in GANs, and then evaluates the
critic using the NWJ lower bound on KL divergence, i.e. mutual information.
As Eq.7 and Eq.8 of [Nowozin et al. (2016)][2], the bound is given by
```none
I_JS = E_p(x,y)[log( D(x,y) )] + E_p(x)p(y)[log( 1 - D(x,y) )]
```
where the first term is the expectation over the samples from joint
distribution (positive samples), and the second is for the samples
from marginal distributions (negative samples), with
```none
D(x, y) = sigmoid(f(x, y)),
log(D(x, y)) = softplus(-f(x, y)).
```
`f(x, y)` is a critic function that scores all pairs of samples.
Example:
`X`, `Y` are samples from a joint Gaussian distribution, with
correlation `0.8` and both of dimension `1`.
```python
batch_size, rho, dim = 10000, 0.8, 1
y, eps = tf.split(
value=tf.random.normal(shape=(2 * batch_size, dim), seed=7),
num_or_size_splits=2, axis=0)
mean, conditional_stddev = rho * y, tf.sqrt(1. - tf.square(rho))
x = mean + conditional_stddev * eps
# Scores/unnormalized likelihood of pairs of samples `x[i], y[j]`
# (For JS lower bound, the optimal critic is of the form `f(x, y) = 1 +
# log(p(x | y) / p(x))` [(Poole et al., 2018)][3].)
conditional_dist = tfd.MultivariateNormalDiag(
mean, scale_identity_multiplier=conditional_stddev)
conditional_scores = conditional_dist.log_prob(y[:, tf.newaxis, :])
marginal_dist = tfd.MultivariateNormalDiag(tf.zeros(dim), tf.ones(dim))
marginal_scores = marginal_dist.log_prob(y)[:, tf.newaxis]
scores = 1 + conditional_scores - marginal_scores
# Mask for joint samples in the score tensor
# (The `scores` has its shape [x_batch_size, y_batch_size], i.e.
# `scores[i, j] = f(x[i], y[j]) = log p(x[i] | y[j])`.)
joint_sample_mask = tf.eye(batch_size, dtype=bool)
# Lower bound on Jensen Shannon divergence
lower_bound_jensen_shannon(logu=scores, joint_sample_mask=joint_sample_mask)
```
Args:
logu: `float`-like `Tensor` of size `[batch_size_1, batch_size_2]`
representing critic scores (scores) for pairs of points (x, y) with
`logu[i, j] = f(x[i], y[j])`.
joint_sample_mask: `bool`-like `Tensor` of the same size as `logu`
masking the positive samples by `True`, i.e. samples from joint
distribution `p(x, y)`.
Default value: `None`. By default, an identity matrix is constructed as
the mask.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'lower_bound_jensen_shannon').
Returns:
lower_bound: `float`-like `scalar` for lower bound on JS divergence.
#### References:
[1]: Ian J. Goodfellow, et al. Generative Adversarial Nets. In
_Conference on Neural Information Processing Systems_, 2014.
https://arxiv.org/abs/1406.2661.
[2]: Sebastian Nowozin, Botond Cseke, Ryota Tomioka. f-GAN: Training
Generative Neural Samplers using Variational Divergence Minimization.
In _Conference on Neural Information Processing Systems_, 2016.
https://arxiv.org/abs/1606.00709.
[3]: Ben Poole, Sherjil Ozair, Aaron van den Oord, Alexander A. Alemi,
George Tucker. On Variational Bounds of Mutual Information. In
_International Conference on Machine Learning_, 2019.
https://arxiv.org/abs/1905.06922.
"""
with tf.name_scope(name or 'lower_bound_jensen_shannon'):
with tf.control_dependencies(
_maybe_assert_float_matrix(logu, validate_args)):
if joint_sample_mask is None:
logu = tf.convert_to_tensor(
logu, dtype_hint=tf.float32, name='logu')
logu_diag = tf.linalg.diag_part(logu)
joint_samples_nll = -tf.reduce_mean(
tf.nn.softplus(-logu_diag), axis=[-1])
n, m = tf.unstack(tf.cast(tf.shape(logu)[-2:], dtype=logu.dtype))
marginal_samples_nll = (
(tf.reduce_sum(tf.nn.softplus(logu), axis=[-2, -1])
- tf.reduce_sum(tf.nn.softplus(logu_diag), axis=[-1]))
/ (n * (m - 1.)))
return joint_samples_nll - marginal_samples_nll
logu, joint_sample_mask = _check_and_get_mask(
logu, joint_sample_mask, validate_args=validate_args)
joint_samples = tf.boolean_mask(logu, joint_sample_mask)
lower_bound = -tf.reduce_mean(tf.math.softplus(-joint_samples),
axis=[-1])
marginal_samples = tf.boolean_mask(
logu, ~joint_sample_mask) # pylint: disable=invalid-unary-operand-type
lower_bound -= tf.reduce_mean(tf.math.softplus(marginal_samples),
axis=[-1])
return lower_bound | 26,340 |
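To make the diagonal/off-diagonal bookkeeping of the unmasked branch concrete, here is a minimal NumPy sketch of the same computation; the 8x8 score matrix is made up for illustration and is not part of the original code.
import numpy as np

rng = np.random.default_rng(0)
scores = rng.normal(size=(8, 8))            # made-up critic scores; diagonal = joint samples
softplus = lambda t: np.logaddexp(0.0, t)   # log(1 + exp(t)), numerically stable
n, m = scores.shape
diag = np.diag(scores)
joint_term = -np.mean(softplus(-diag))      # E_p(x,y)[log D(x, y)]
marginal_term = (softplus(scores).sum() - softplus(diag).sum()) / (n * (m - 1.))
print(joint_term - marginal_term)           # mirrors the unmasked TF branch above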
def start_data_package_loader():
"""
Adds the data package loader to the module loaders.
"""
sys.meta_path.append(DataPackageFinder()) | 26,341 |
def random_splits(sets: List, out_path: str, out_size: int, num: int, cat: str=None):
"""Make `num` random splits of the datasets in `sets` containing `out_size` images each."""
for n in range(num):
concat_sets(sets, out_path + '_' + str(out_size) + id_string(n) + '.json', out_size, cat=cat) | 26,342 |
def competition_ranking(data, dtype="int32"):
"""
Ranks the given data in increasing order and resolving duplicates using the
lowest common rank and skipping as many ranks as there are duplicates, i.e.,
[0.5, 1.2, 3.4, 1.2, 1.2] -> [1, 2, 5, 2, 2].
Parameters
----------
data: numpy.array
data to be ranked, should behave like a numpy.array
dtype: str (optional)
string describing the data type of the numpy.array storing the ranks
Returns
-------
numpy.array:
ranks of the data as explained above
Notes
-----
The given data should be one-dimensional. This can be achieved using
numpy.ravel and then reshaping the result as necessary.
If the data contains `nan` or other undesirable values, masked arrays may be
your solution.
"""
ranks = numpy.zeros(data.size, dtype=dtype)
order = data.argsort()
ranks[order] = numpy.arange(1, data.size + 1)
# returns repeats and their count
repeats = scipy.stats.mstats.find_repeats(data)[0]
for r in repeats:
condition = data == r
# all repeats have the same minimal rank
# using the first element works iff sorting was stable
# ranks[condition] = ranks[condition][0]
ranks[condition] = ranks[condition].min()
return ranks | 26,343 |
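A quick check against the docstring's own example (assuming the numpy/scipy imports the snippet relies on):
import numpy
data = numpy.array([0.5, 1.2, 3.4, 1.2, 1.2])
print(competition_ranking(data))  # -> [1 2 5 2 2], matching the docstring example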
def fmt_dashes(name: str) -> str:
"""
Converts name to words separated by dashes. Words are identified by
capitalization, dashes, and underscores.
"""
return '-'.join([word.lower() for word in split_words(name)]) | 26,344 |
def stringify_dict_key(_dict):
"""
Ensure that every key in _dict is of type str.
:param _dict:
:return:
"""
for key, value in _dict.copy().items():
if isinstance(value, dict):
value = stringify_dict_key(value)
if not isinstance(key, str):
del _dict[key]
_dict[str(key)] = value
return _dict | 26,345 |
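For example, integer and tuple keys are converted recursively while string keys are left alone:
d = {1: 'a', (2, 3): {'x': 1, 4: 'b'}}
print(stringify_dict_key(d))
# {'1': 'a', '(2, 3)': {'x': 1, '4': 'b'}}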
def bestIndividual(hof, X, y):
"""
Get the best individual
"""
maxAccuracy = 0.0
for individual in hof:
# fitness.values is a tuple; compare on the first objective value
if individual.fitness.values[0] > maxAccuracy:
maxAccuracy = individual.fitness.values[0]
_individual = individual
_individualHeader = [list(X)[i] for i in range(
len(_individual)) if _individual[i] == 1]
return _individual.fitness.values, _individual, _individualHeader | 26,346 |
def output_with_grid_search(kalman_filter, key, grid_params, annotations):
"""パラメータをグリッドサーチして, xiごとに1つの図に出力.
Args:
kalman_filter (KalmanFilterSteadyState): kalman filterクラス.
key (str): グリッドサーチ対象の(kalman_filterクラスの)メンバ変数.
grid_params (list): グリッドサーチに使う値リスト.
annotations (list): grid_paramsごとにグラフに表示する注釈.
"""
k = kalman_filter.k
x_stack = []
for param in grid_params:
kf = deepcopy(kalman_filter)
kf[key] = param
kf.run()
estimated_x_list = kf.estimated_x_list
x_stack.append(estimated_x_list)
x_stack = np.array(x_stack)
x_stack = x_stack.transpose((2, 0, 1)) # Reorder the axes of x_stack from (0, 1, 2) to (2, 0, 1).
for i, xi_stack in enumerate(x_stack):
output_multi_graphs(x=k, y_list=xi_stack, x_label='k', y_label=f'x{i}', annotations=annotations,
title=f'kalman_filter_change_{key}',
file_name=f'graph/kalman_filter_change_{key}_graph_x{i}.png') | 26,347 |
def rename_fasta(file: str, out: str, new_locus_tag_prefix: str, old_locus_tag_prefix: str = None, validate: bool = False):
"""
Change the locus tags in a protein/nucleotide FASTA file
:param file: input file
:param out: output file
:param new_locus_tag_prefix: desired locus tag
:param old_locus_tag_prefix: locus tag to replace
:param validate: if true, perform sanity check
"""
FastaFile(
file=file
).rename(
out=out,
new_locus_tag_prefix=new_locus_tag_prefix,
old_locus_tag_prefix=old_locus_tag_prefix,
validate=validate
) | 26,348 |
def make_new_paste(devkey, paste_text, user_key=None, paste_title=None, paste_format=None, paste_type=None, paste_expiry: int=None):
"""This function creates a new paste
on pastebin with the given arguments."""
data = {'api_dev_key': devkey, 'api_option': 'paste', 'api_paste_code': paste_text, 'api_paste_name': paste_title, 'api_paste_private': paste_type, 'api_paste_expire_date': f'{paste_expiry}M', 'api_paste_format': paste_format, 'api_user_key': user_key}
r = requests.post('https://pastebin.com/api/api_post.php', data=data)
return r.text | 26,349 |
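A hypothetical call for illustration; the developer key below is a placeholder, and pastebin returns the paste URL on success or an error string otherwise.
url = make_new_paste(
    devkey='YOUR_PASTEBIN_DEV_KEY',   # placeholder, not a real credential
    paste_text='hello from the API',
    paste_title='demo paste',
    paste_format='python',
    paste_expiry=10,                  # expires after 10 minutes
)
print(url)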
def not_none(value):
"""
This function ensures that passed value is not None:
>>> schema = Schema(not_none)
>>> assert 1 == schema(1)
>>> try:
... schema(None)
... assert False, "an exception should've been raised"
... except MultipleInvalid:
... pass
"""
if value is None:
raise NoneInvalid('value is None')
else:
return value | 26,350 |
def before_request():
"""
before request
"""
if 'is_admin' in session:
g.is_admin = 1
else:
g.is_admin = None | 26,351 |
def j1c_dblprime(amplitudes):
"""Calculate j''1c angular observable"""
[_, _, _, _, a_0_l, a_0_r, a_00_l, a_00_r] = amplitudes
return (2 / tf.sqrt(3.0)) * (
tf.math.real(a_00_l * tf.math.conj(a_0_l) * bw_k700_k892) +
tf.math.real(a_00_r * tf.math.conj(a_0_r) * bw_k700_k892)
) | 26,352 |
def preprocess(img):
"""Changes RGB [0,1] valued image to BGR [0,255] with mean subtracted."""
mean_bgr = load_mean_bgr()
print('mean blue', np.mean(mean_bgr[:, :, 0]))
print('mean green', np.mean(mean_bgr[:, :, 1]))
print('mean red', np.mean(mean_bgr[:, :, 2]))
out = np.copy(img) * 255.0
out = out[:, :, [2, 1, 0]] # swap channel from RGB to BGR
out -= mean_bgr
return out | 26,353 |
def test_get_breach(requests_mock):
"""Tests darktrace-get-breach command function.
Configures requests_mock instance to generate the appropriate
get_alerts API response, loaded from a local JSON file. Checks
the output of the command function with the expected output.
"""
from Darktrace import Client, get_breach_command
# GIVEN an integration is configured to Darktrace
mock_api_response = util_load_json('test_data/get_breach.json')
requests_mock.get('https://mock.darktrace.com/modelbreaches?pbid=95',
json=mock_api_response)
client = Client(
base_url='https://mock.darktrace.com',
verify=False,
auth=('examplepub', 'examplepri')
)
# WHEN the desired model breach has id 95
args = {
'pbid': '95',
}
integration_response = get_breach_command(client, args)
expected_response = util_load_json('test_data/formatted_get_breach.json')
# THEN the response should be returned and formatted
assert integration_response.outputs == expected_response
assert integration_response.outputs_prefix == 'Darktrace.ModelBreach'
assert integration_response.outputs_key_field == 'pbid' | 26,354 |
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the WorldTidesInfo Custom sensor."""
# Get data from configuration.yaml
name = config.get(CONF_NAME)
lat = config.get(CONF_LATITUDE, hass.config.latitude)
lon = config.get(CONF_LONGITUDE, hass.config.longitude)
if None in (lat, lon):
_LOGGER.error("Latitude or longitude not set in Home Assistant config")
return
key = config.get(CONF_API_KEY)
vertical_ref = config.get(CONF_VERTICAL_REF)
plot_color = config.get(CONF_PLOT_COLOR)
plot_background = config.get(CONF_PLOT_BACKGROUND)
# worldides_request_interval = config.get(CONF_WORLDTIDES_REQUEST_INTERVAL)
tide_station_distance = config.get(CONF_STATION_DISTANCE)
# what is the unit used
if config.get(CONF_UNIT) == HA_CONF_UNIT and hass.config.units == IMPERIAL_SYSTEM:
unit_to_display = IMPERIAL_CONF_UNIT
elif config.get(CONF_UNIT) == IMPERIAL_CONF_UNIT:
unit_to_display = IMPERIAL_CONF_UNIT
else:
unit_to_display = METRIC_CONF_UNIT
show_on_map = True
live_position_management = STATIC_CONF
live_position_sensor_update_distance = DEFAULT_SENSOR_UPDATE_DISTANCE
source = None
source_attr_lat = None
source_attr_long = None
tides_sensors = setup_sensor(
hass,
name,
lat,
lon,
key,
vertical_ref,
plot_color,
plot_background,
tide_station_distance,
unit_to_display,
show_on_map,
live_position_management,
live_position_sensor_update_distance,
source,
source_attr_lat,
source_attr_long,
)
# for tides in tides_sensors:
# tides.update()
# if tides._worldtide_data_coordinator.no_data():
# _LOGGER.error(f"No data available for this location: {name}")
# return
add_entities(tides_sensors) | 26,355 |
def get_modules(folder):
"""Find (and import) all valid modules in the given submodule of this file.
@return: all loaded valid modules
@rtype: iterator of module
"""
mod = importlib.import_module(".." + folder, __name__)
prefix = mod.__name__ + "."
modules = [m[1] for m in pkgutil.iter_modules(mod.__path__, prefix)]
for elm in _get_all_modules_pyinstaller():
if elm.startswith(prefix):
modules.append(elm)
for name in modules:
try:
yield importlib.import_module(name)
except ImportError as msg:
out.error("could not load module %s: %s" % (name, msg)) | 26,356 |
def profile_detail(request, username, template_name='userena/profile_detail.html', extra_context=None, **kwargs):
"""
Detailed view of an user.
:param username:
String of the username of which the profile should be viewed.
:param template_name:
String representing the template name that should be used to display
the profile.
:param extra_context:
Dictionary of variables which should be supplied to the template. The
``profile`` key is always the current profile.
**Context**
``profile``
Instance of the currently viewed ``Profile``.
"""
user = get_object_or_404(User,
username__iexact=username)
profile = user.get_profile()
if not profile.can_view_profile(request.user):
return HttpResponseForbidden(_("You don't have permission to view this profile."))
if not extra_context: extra_context = dict()
extra_context['profile'] = user.get_profile()
return direct_to_template(request,
template_name,
extra_context=extra_context,
**kwargs) | 26,357 |
def prepare_ternary(figsize, scale):
"""Help function to ternary plot"""
fig, ax = plt.subplots(figsize=figsize)
tax = ternary.TernaryAxesSubplot(ax=ax, scale=scale)
ax.axis('off')
gm = 0.1 * scale
blw = 1
tlw = 1
# Draw Boundary and Gridlines
tax.boundary(linewidth=blw)
tax.gridlines(color='grey', multiple=gm, alpha=0.8)
# Set Axis labels and Title
tax.bottom_axis_label(
r"Retweets $\rightarrow$", offset=-0.08, fontsize='small')
tax.right_axis_label(r"$\leftarrow$Replies", offset=0.2, fontsize='small')
tax.left_axis_label(r"$\leftarrow$Tweets", offset=0.2, fontsize='small')
# ticks
locations = range(0, scale + 1, 4)
ticks = ['{}'.format(x * 10) for x in range(0, 11, 2)]
tax.ticks(
axis='lbr',
ticks=ticks,
locations=locations,
linewidth=tlw,
offset=0.03,
fsize=9,
clockwise=False)
return tax | 26,358 |
def parse_spreadsheet(hca_spreadsheet: Workbook, entity_dictionary: Dict):
"""
Parse the spreadsheet and fill the metadata with accessions.
:param hca_spreadsheet: Workbook object of the spreadsheet
:param entity_dictionary: Dictionary mapping by entity UUID to the proper archiveEntity
:return: Accessioned spreadsheet
"""
# Parse each sheet for the UUIDs
for sheet in hca_spreadsheet.sheetnames:
for row in hca_spreadsheet[sheet].rows:
if row[0].value in entity_dictionary:
# Get fqk, search for it, add accession based on the entity dictionary
fqk = (accession_mapping[entity_dictionary[row[0].value]['type']]['fqk']
.replace("{}", sheet.lower().replace(" ", "_")))
coordinate_column = search_fqk_in_sheet(hca_spreadsheet[sheet], fqk, 4)
coordinate_row = row[0].coordinate[1:]
cell_coordinate = f'{coordinate_column}{coordinate_row}'
hca_spreadsheet[sheet][cell_coordinate].value = entity_dictionary[row[0].value]['accession']
return hca_spreadsheet | 26,359 |
def test_a_extra_tokens():
"""Test a theoretical A format with extra tokens in first section."""
msg = ".AR SHPP1 20210925 Z DH06 DC202109210148/DUE/DQG/QIIFE 151000.0"
res = process_message_a(msg)
assert res[0].qualifier == "G" | 26,360 |
def test__rules__std_L016_L036_long_line_fix2():
"""Verify that a long line that causes a clash between L016 and L036 does not add multiple newlines (see #1424)."""
sql = "SELECT\n 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\n"
result = sqlfluff.fix(sql)
assert (
result
== "SELECT 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\n"
) | 26,361 |
def decode(data):
"""Decode JSON serialized string, with possible embedded Python objects.
"""
return _decoder.decode(data) | 26,362 |
def p_correction(p_values):
"""
Corrects p_values for multiple testing.
:param p_values: Dictionary storing p_values with corresponding feature names as keys.
:return: DataFrame which shows the results of the analysis; p-value, corrected p-value and boolean indicating \
significance.
"""
p_trans = _transform_p_dict(p_values)
# get and drop features which are NaN to skip them in multitest correction
nan_features = p_trans[pd.isnull(p_trans[0])]
p_trans = p_trans.dropna(axis=0, subset=[0])
# extract p_value column to pass into multiple testing correction
p_val_col = p_trans[0].sort_values()
# add NaN features back to p_trans to include them into result table later on
p_trans = pd.concat([p_trans, nan_features])
# raise an error if no p_values were calculated that can be passed into multiple test correction
if p_val_col.values.size == 0:
# unpack the p_values which are stored in 2 layer nested dicts.
nested_values = []
for value in p_values.values():
nested_values.append(*value.values())
# if all p_values are nan, return an all nan result table
if pd.isnull(nested_values).all():
result_table = _create_result_table(None, p_val_col, p_trans, conf_invs, counts)
return result_table.sort_index()
raise ValueError("No p_values have been submitted into multiple test correction.")
# correct p-values
result = multipletests(p_val_col.values)
return result, p_val_col, p_trans | 26,363 |
def GetFlippedPoints3(paths, array):
"""same as first version, but doesnt flip locations: just sets to -1
used for random walks with self intersections - err type 6"""
# this may not work for double ups?
for i in paths:
for j in i: # for the rest of the steps...
array[j[0]][j[1]][j[2]] = -1 # flip initial position
return(array) | 26,364 |
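A small usage sketch with a 3-D occupancy array; the array shape and the walk paths are made up for illustration.
import numpy as np
array = np.zeros((3, 3, 3), dtype=int)
paths = [[(0, 0, 0), (0, 1, 0)], [(2, 2, 2)]]
GetFlippedPoints3(paths, array)
print(array[0, 0, 0], array[0, 1, 0], array[2, 2, 2])  # -1 -1 -1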
def reset(context):
""" Recreates database.
"""
import django
from django.conf import settings
django.setup()
if not settings.DEBUG:
print("DEBUG is False!")
Exit()
return
user = settings.DATABASES['default']['USER']
password = settings.DATABASES['default']['PASSWORD']
name = settings.DATABASES['default']['NAME']
context.run('mysql -u{} -p{} -f -e "DROP DATABASE IF EXISTS {}"'.format(
user, password, name
))
context.run('mysqladmin -u{} -p{} create {}'.format(user, password, name))
context.run('{} migrate'.format(manage_file), pty=True) | 26,365 |
async def _silent_except(f: Callable, *args, **kwargs):
"""
Helper Function that calls a function or coroutine and returns its result excepting all errors
"""
try:
called = f(*args, **kwargs)
except:
return
if isawaitable(called):
try:
result = await called
except:
return
else:
return result
else:
return called | 26,366 |
def get_concat_h(im1, im2):
"""Concatenate two images horizontally."""
dst = Image.new("RGB", (im1.width + im2.width, im1.height))
dst.paste(im1, (0, 0))
dst.paste(im2, (im1.width, 0))
return dst | 26,367 |
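Usage with two solid-colour images, for illustration:
from PIL import Image
left = Image.new("RGB", (40, 30), "red")
right = Image.new("RGB", (60, 30), "blue")
combined = get_concat_h(left, right)
print(combined.size)  # (100, 30): widths add up, height taken from the first image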
def visualize_one_channel_dataset(images_original, images_transformed, labels):
"""
Helper function to visualize one channel grayscale images
"""
num_samples = len(images_original)
for i in range(num_samples):
plt.subplot(2, num_samples, i + 1)
# Note: Use squeeze() to convert (H, W, 1) images to (H, W)
plt.imshow(images_original[i].squeeze(), cmap=plt.cm.gray)
plt.title(PLOT_TITLE_DICT[1][0] + ":" + str(labels[i]))
plt.subplot(2, num_samples, i + num_samples + 1)
plt.imshow(images_transformed[i].squeeze(), cmap=plt.cm.gray)
plt.title(PLOT_TITLE_DICT[1][1] + ":" + str(labels[i]))
plt.show() | 26,368 |
def get_taxname(taxid):
"""Return scientific name for NCBI Taxonomy ID."""
if get_taxname.id_name_map is None:
get_taxname.id_name_map = load_taxid_name_map('data/taxnames.tsv')
if get_taxname.id_name_map is None: # assume fail, fallback
get_taxname.id_name_map = TAXID_NAME_MAP
return get_taxname.id_name_map.get(taxid, '<UNKNOWN>') | 26,369 |
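The result is cached on a function attribute, which therefore has to exist before the first call; presumably the module initialises it along these lines (a sketch, not the original wiring):
get_taxname.id_name_map = None  # first call loads data/taxnames.tsv (or falls back to TAXID_NAME_MAP)
print(get_taxname('9606'))      # e.g. 'Homo sapiens' if the taxid is in the map, else '<UNKNOWN>'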
def _get_movies(dir):
"""Gets the movies from the specified directory"""
movieList = []
directories = os.listdir(dir)
for d in directories:
# We need to skip past directories without instruction sets
if '__' not in d:
continue
files = os.listdir("{root}/{subdir}".format(root=dir, subdir=d))
for f in files:
# Don't add .mkv's that are handbrake encodes.
if '--converted' not in f and '.mkv' in f:
movie = Movie(dir, d, f)
movieList.append(movie)
return movieList | 26,370 |
def lrfn(epoch):
"""
lrfn(epoch)
This function creates a custom piecewise linear-exponential learning rate function for a custom learning rate scheduler. It is linear to a max, then exponentially decays
* INPUTS: current `epoch` number
* OPTIONAL INPUTS: None
* GLOBAL INPUTS:`START_LR`, `MIN_LR`, `MAX_LR`, `RAMPUP_EPOCHS`, `SUSTAIN_EPOCHS`, `EXP_DECAY`
* OUTPUTS: the function lr with all arguments passed
"""
def lr(epoch, START_LR, MIN_LR, MAX_LR, RAMPUP_EPOCHS, SUSTAIN_EPOCHS, EXP_DECAY):
if epoch < RAMPUP_EPOCHS:
lr = (MAX_LR - START_LR)/RAMPUP_EPOCHS * epoch + START_LR
elif epoch < RAMPUP_EPOCHS + SUSTAIN_EPOCHS:
lr = MAX_LR
else:
lr = (MAX_LR - MIN_LR) * EXP_DECAY**(epoch-RAMPUP_EPOCHS-SUSTAIN_EPOCHS) + MIN_LR
return lr
return lr(epoch, START_LR, MIN_LR, MAX_LR, RAMPUP_EPOCHS, SUSTAIN_EPOCHS, EXP_DECAY) | 26,371 |
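A sketch of how the schedule might be used; the global constants below are illustrative values, not the originals, and the Keras callback mention is only one possible way to apply it per epoch.
# Illustrative globals expected by lrfn
START_LR, MIN_LR, MAX_LR = 1e-7, 1e-6, 1e-4
RAMPUP_EPOCHS, SUSTAIN_EPOCHS, EXP_DECAY = 5, 5, 0.9

schedule = [lrfn(e) for e in range(20)]
print(schedule[0], schedule[5], schedule[19])  # ramp-up start, plateau at MAX_LR, decayed value
# tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True) would apply it each epoch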
def get_balances():
"""
Get the balances of the configured validator (if possible)
"""
balances = account.get_balance_on_all_shards(validator_config['validator-addr'], endpoint=node_config['endpoint'])
for bal in balances:
bal['balance'] = float(numbers.convert_atto_to_one(bal['balance']))
return balances | 26,372 |
def test_duplicate():
"""verify duplicate table detection"""
with pytest.raises(ValueError) as ex:
A.query.join(C).join(C)
assert ex.value.args[0] == "duplicate table 'c'"
A.query.join(C).join(C, alias='CC') | 26,373 |
def _make_ordered_node_map(
pipeline: pipeline_pb2.Pipeline
) -> 'collections.OrderedDict[str, pipeline_pb2.PipelineNode]':
"""Prepares the Pipeline proto for DAG traversal.
Args:
pipeline: The input Pipeline proto, which must already be topologically
sorted.
Returns:
An OrderedDict that maps node_ids to PipelineNodes.
"""
result = collections.OrderedDict()
for pipeline_or_node in pipeline.nodes:
node_id = pipeline_or_node.pipeline_node.node_info.id
result[node_id] = pipeline_or_node.pipeline_node
return result | 26,374 |
def energy(_x, _params):
"""Kinetic and Potential Energy of point mass pendulum.
_x is an array/list in the following order:
q1: Angle of first pendulum link relative to vertical (0 downwards)
u1: A[1] measure number of the inertial angular velocity of the first link.
_params is an array/list in the following order (matching the unpacking below):
m: Mass of first pendulum point mass.
g: Gravitational constant.
l: Length of first pendulum link.
b: Additional parameter (not used in the energy computation).
Returns a list/array of kinetic energy and potential energy, respectively.
"""
# Unpack function arguments
q1, u1 = _x
# Unpack function parameters
m, g, l, b = _params
# Trigonometric functions
c1 = cos(q1)
# Calculate return values
ke = m*l**2*u1**2/2
pe = g*l*m*(1 - c1)
# Return calculated values
return [ke, pe] | 26,375 |
def gelu(input_tensor):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
input_tensor: float Tensor to perform activation.
Returns:
`input_tensor` with the GELU activation applied.
"""
# math.sqrt needed for bfloat16 compatibility
cdf = 0.5 * (1.0 + tf.erf(input_tensor / math.sqrt(2.0)))
return input_tensor * cdf | 26,376 |
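The same formula evaluated with NumPy/SciPy as a quick sanity check (values approximate):
import numpy as np
from scipy.special import erf

x = np.array([-1.0, 0.0, 1.0])
print(x * 0.5 * (1.0 + erf(x / np.sqrt(2.0))))  # approx [-0.159, 0.0, 0.841]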
def cli():
"""Query threat report from APIs, or submit for analysis.""" | 26,377 |
def KPConv_ops(query_points,
support_points,
neighbors_indices,
features,
K_points,
K_values,
KP_extent,
KP_influence,
aggregation_mode):
"""
This function defines the Kernel Point Convolution operations (a PyTorch port of the original
TensorFlow implementation). See the KPConv function above for a description of each parameter.
:param query_points: [n_points, dim]
:param support_points: [n0_points, dim]
:param neighbors_indices: [n_points, n_neighbors]
:param features: [n_points, in_fdim]
:param K_points: [n_kpoints, dim]
:param K_values: [n_kpoints, in_fdim, out_fdim]
:param KP_extent: float32
:param KP_influence: string
:param aggregation_mode: string
:return: [n_points, out_fdim]
"""
# Get variables
n_kp = int(K_points.shape[0])
# print(support_points.shape)
# Add a fake point in the last row for shadow neighbors
shadow_point = torch.ones_like(support_points[:1, :]) * 1e6
support_points = torch.cat([support_points, shadow_point], axis=0)
# Get neighbor points [n_points, n_neighbors, dim]
# print(shadow_point.shape)
# print(support_points.shape)
# print(neighbors_indices.shape)
neighbors = support_points[neighbors_indices]
# Center every neighborhood
neighbors = neighbors - query_points.unsqueeze(1)
# Get all difference matrices [n_points, n_neighbors, n_kpoints, dim]
neighbors = neighbors.unsqueeze(2)
neighbors = neighbors.repeat([1, 1, n_kp, 1])
differences = neighbors - K_points
# Get the square distances [n_points, n_neighbors, n_kpoints]
sq_distances = torch.sum(differences ** 2, axis=3)
# Get Kernel point influences [n_points, n_kpoints, n_neighbors]
if KP_influence == 'constant':
# Every point get an influence of 1.
all_weights = torch.ones_like(sq_distances)
all_weights = all_weights.permute(0, 2, 1)
elif KP_influence == 'linear':
# Influence decrease linearly with the distance, and get to zero when d = KP_extent.
all_weights = torch.relu(1 - torch.sqrt(sq_distances) / KP_extent)
all_weights = all_weights.permute(0, 2, 1)
elif KP_influence == 'gaussian':
# Influence in gaussian of the distance.
sigma = KP_extent * 0.3
all_weights = radius_gaussian(sq_distances, sigma)
all_weights = all_weights.permute(0, 2, 1)
else:
raise ValueError('Unknown influence function type (config.KP_influence)')
# In case of closest mode, only the closest KP can influence each point
if aggregation_mode == 'closest':
# Index of the closest kernel point for each (point, neighbor) pair
neighbors_1nn = torch.argmin(sq_distances, dim=2)
# One-hot mask over the kernel-point axis
# (PyTorch equivalent of tf.one_hot(neighbors_1nn, n_kp, axis=1))
all_weights *= torch.zeros_like(all_weights).scatter_(
1, neighbors_1nn.unsqueeze(1), 1.)
elif aggregation_mode != 'sum':
raise ValueError("Unknown convolution mode. Should be 'closest' or 'sum'")
features = torch.cat([features, torch.zeros_like(features[:1, :])], axis=0)
# Get the features of each neighborhood [n_points, n_neighbors, in_fdim]
neighborhood_features = features[neighbors_indices]
# Apply distance weights [n_points, n_kpoints, in_fdim]
weighted_features = torch.matmul(all_weights, neighborhood_features)
# Apply network weights [n_kpoints, n_points, out_fdim]
weighted_features = weighted_features.permute(1, 0, 2)
kernel_outputs = torch.matmul(weighted_features, K_values)
# Convolution sum to get [n_points, out_fdim]
output_features = torch.sum(kernel_outputs, axis=0)
return output_features | 26,378 |
def listen_keyboard():
"""
This function will listen the keyboard and save the event.
:return: None
"""
data_path = 'Data/Train_Data/Keyboard'
if not os.path.exists(data_path):
os.makedirs(data_path)
def on_press(key):
save_event_keyboard(data_path, 1, key)
def on_release(key):
save_event_keyboard(data_path, 2, key)
with key_listener(on_press=on_press, on_release=on_release) as listener:
listener.join() | 26,379 |
async def invert(message: discord.Message, image_arg: image):
""" Invert the colors of an image. """
image_arg.modify(ImageOps.invert, convert="RGB")
await send_image(message, image_arg, quality=100) | 26,380 |
def kerneleval(X_test, X_train, kernel):
"""
This function computes the pairwise distances between
each row in X_test and X_train using the kernel
specified in 'kernel'
X_test, X_train: 2d np.arrays
kernel: kernel parameters
"""
if kernel is None:
return X_train
fn = kernel['fn']
if fn == 'rbf':
return rbf(X_train, X_test, gamma=kernel['gamma'])
elif fn == 'poly':
return poly(X_train, X_test, degree=kernel['degree'])
elif fn == 'linear':
return linear(X_train, X_test) | 26,381 |
def aks_show_snapshot_table_format(result):
"""Format a snapshot as summary results for display with "-o table"."""
return [_aks_snapshot_table_format(result)] | 26,382 |
def get_all_monitors() -> List[Dict[str, Any]]:
"""
:return: all monitors array list sorted from left to right.
i.e: [
{'hr': 1366, 'vr': 768, 'ho': 0, 'vo': 914, 'name': 'eDP-1-1'},
{'hr': 2560, 'vr': 1440, 'ho': 1366, 'vo': 0, 'name': 'HDMI-1-1'},
]
hr: Horizontal resolution
vr: Vertical resolution
ho: Horizontal offset
vo: Vertical offset
name: The screen name
"""
# all_monitors_xrand_resp_ is string like this:
# Monitors: 2
# 0: +*HDMI-1-1 2560/621x1440/341+1366+0 HDMI-1-1
# 1: +eDP-1-1 1366/309x768/174+0+45 eDP-1-1
all_monitors_xrand_resp_ = subprocess.getoutput("xrandr --listmonitors")
monitors_ = []
for line_ in all_monitors_xrand_resp_.split(": ")[2:]:
monitor = {
# Horizontal resolution. i.e 2560
"hr": atoi(line_.split(" ")[1].split("/")[0]),
# Vertical resolution. i.e 1440
"vr": atoi(line_.split(" ")[1].split("/")[1].split("x")[1].split("/")[0]),
# Horizontal offset. i.e 1366
"ho": atoi(line_.split(" ")[1].split("+")[1]),
# Vertical offset. i.e 0
"vo": atoi(line_.split(" ")[1].split("+")[2]),
# Monitor name. i.e HDMI-1-1
"name": line_.replace(" ", " ").rsplit(" ")[0].replace("+", "").replace("*", ""),
}
monitors_.append(monitor)
return sorted(monitors_, key=lambda i: i['ho']) | 26,383 |
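Tracing the parsing against the sample `xrandr --listmonitors` output shown in the comment above, the call would return something like the sketch below; actual values depend on the machine.
# [{'hr': 1366, 'vr': 768,  'ho': 0,    'vo': 45, 'name': 'eDP-1-1'},
#  {'hr': 2560, 'vr': 1440, 'ho': 1366, 'vo': 0,  'name': 'HDMI-1-1'}]
monitors = get_all_monitors()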
def poly_prem(f, g, *symbols):
"""Returns polynomial pseudo-remainder. """
return poly_pdiv(f, g, *symbols)[1] | 26,384 |
def reset_accumulators():
"""
Simply reset all accumulators for scalars.
"""
_accumulators.clear() | 26,385 |
def gen_input_code(question, id):
"""
Returns the html code for rendering the appropriate input
field for the given question.
Each question is identified by name=id
"""
qtype = question['type']
if qtype == 'text':
return """<input type="text" class="ui text" name="{0}"
placeholder="your answer..." />""".format(id)
elif qtype == 'code':
return '<textarea class="ui text" name="{0}"></textarea>'.format(id)
else:
button_template = '<input type="radio" name="{0}" value="{1}"> {1}<br>'
code = ''
for choice in question['choices']:
code = code + button_template.format(id, choice)
return code | 26,386 |
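For example, a multiple-choice question renders as a group of radio buttons (the returned value is one string, shown wrapped here):
question = {'type': 'choice', 'choices': ['yes', 'no']}
print(gen_input_code(question, 'q1'))
# <input type="radio" name="q1" value="yes"> yes<br>
# <input type="radio" name="q1" value="no"> no<br>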
def orbit_position(data, body='sun'):
"""calculate orbit position of sun or moon for instrument position at each time in 'data' using :class:`ephem`
Args:
data: :class:`xarray.Dataset`, commonly Measurement.data
body (optional): name of astronomical body to calculate orbit from ('sun' or 'moon'). Defaults to 'sun'
Returns:
tuple containing:
ele: :class:`numpy.ndarray` of elevations of the body for each time step
azi: :class:`numpy.ndarray` of azimuths of the body for each time step
"""
obs = ephem.Observer()
if body == 'sun':
obj = ephem.Sun()
elif body == 'moon':
obj = ephem.Moon()
else:
raise NotImplementedError("function only implemented for 'body' in ['sun', 'moon']")
ele = np.full(data['time'].shape, np.nan)
azi = np.full(data['time'].shape, np.nan)
for ind, time in enumerate(data['time']):
# observer settings
obs.lat = str(data['lat'][ind].values) # needs to be string to be interpreted as degrees
obs.lon = str(data['lon'][ind].values) # needs to be string to be interpreted as degrees
obs.elevation = data['altitude'][ind].values
obs.date = str(time.dt.strftime('%Y/%m/%d %H:%M:%S').values)
# get object's position in degrees
obj.compute(obs)
ele[ind] = np.rad2deg(obj.alt)
azi[ind] = np.rad2deg(obj.az)
return ele, azi | 26,387 |
def test_by_type_failing(db):
"""
Ensure that when incorrect brewery type is requested, correct error
response returned.
"""
# GIVEN FastAPI GET request to breweries endpoint
# WHEN GET response to invalid brewery type `by_type` query parameter
response = client.get("/breweries", params={"by_type": "something"})
# THEN assert HTTPException returned
assert response.status_code == 422
response_dict = response.json()
# THEN assert correct error detail is returned
assert response_dict["detail"] == "something is not a brewery type." | 26,388 |
def stop():
"""Stop any currently playing presets and return to the prior state."""
for bulb in BULBS:
bulb.stop_flow() | 26,389 |
def list_inventory (inventory):
"""
:param inventory: dict - an inventory dictionary.
:return: list of tuples - list of key, value pairs from the inventory dictionary.
"""
result = []
for element, quantity in inventory.items():
if quantity > 0:
result.append ((element, quantity))
return result | 26,390 |
def mcf_classical_atom(modal_context, fml, clausal_form_dict, id_mc):
""" Takes classical literal (fml), and adds to relevant disjunction.
"""
assert not u.is_complex(fml)
create_mc(modal_context, id_mc, clausal_form_dict, fml) | 26,391 |
def wsd_is_duplicated_msg(msg_id):
"""
Check for a duplicated message.
Implements SOAP-over-UDP Appendix II Item 2
"""
if msg_id in wsd_known_messages:
return True
wsd_known_messages.append(msg_id)
if len(wsd_known_messages) > WSD_MAX_KNOWN_MESSAGES:
wsd_known_messages.popleft()
return False | 26,392 |
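The function relies on two module-level globals; a minimal sketch of how they might be declared and used (the constant value is illustrative, not the original):
import collections

WSD_MAX_KNOWN_MESSAGES = 10            # illustrative bound on the history window
wsd_known_messages = collections.deque()

print(wsd_is_duplicated_msg('urn:uuid:1'))  # False, first time seen
print(wsd_is_duplicated_msg('urn:uuid:1'))  # True, duplicate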
def match_diagnostic_plot(V1, V2, pair_ix, tf=None, new_figure=False):
"""
Show the results of the pair matching from `match_catalog_quads`.
"""
import matplotlib.pyplot as plt
if new_figure:
fig = plt.figure(figsize=[4,4])
ax = fig.add_subplot(111)
else:
ax = plt.gca()
# Original catalog points
ax.scatter(V1[:,0], V1[:,1], marker='o', alpha=0.1, color='k',
label='V1, N={0}'.format(V1.shape[0]))
ax.scatter(V2[:,0], V2[:,1], marker='o', alpha=0.1, color='r',
label='V2, N={0}'.format(V2.shape[0]))
if tf is not None:
# First catalog matches
tf_mat = V1[pair_ix[:,0],:]
ax.plot(tf_mat[:,0], tf_mat[:,1], marker='o', alpha=0.1,
color='k', linewidth=2)
# Transformed first catalog
tf_mat = tf(V1[pair_ix[:,0],:])
ax.plot(tf_mat[:,0], tf_mat[:,1], marker='o', alpha=0.8, color='k',
linewidth=2, label='Transform:\n'+' shift=[{0:.2f}, {1:.2f}]\n rotation={2:.4f}'.format(tf.translation[0],
tf.translation[1], tf.rotation))
else:
# First catalog matches
tf_mat = V1[pair_ix[:,0],:]
ax.plot(tf_mat[:,0], tf_mat[:,1], marker='o', alpha=0.8, color='k',
linewidth=2)
# Second catalog matches
ax.plot(V2[pair_ix[:,1],0], V2[pair_ix[:,1],1], marker='.', alpha=0.8,
color='r', linewidth=0.8,
label='{0} pairs'.format(pair_ix.shape[0]))
ax.legend(fontsize=8)
if new_figure:
fig.tight_layout(pad=0.2)
return fig | 26,393 |
def sliding_tile_state():
"""
Return the current state of the puzzle
:return: JSON object representing the state of the maze puzzle
"""
json_state = {'sliding_tile': sliding_tile.array(), 'solver': sliding_tile_str_solver, 'steps': sliding_tile_steps,
'search_steps': sliding_tile_search_steps, 'size1': sliding_tile.size1, 'size2': sliding_tile.size2}
return json.dumps(json_state) | 26,394 |
def test_plot_raw_filtered():
"""Test filtering of raw plots."""
raw = _get_raw()
assert_raises(ValueError, raw.plot, lowpass=raw.info['sfreq'] / 2.)
assert_raises(ValueError, raw.plot, highpass=0)
assert_raises(ValueError, raw.plot, lowpass=1, highpass=1)
assert_raises(ValueError, raw.plot, lowpass=1, filtorder=0)
assert_raises(ValueError, raw.plot, clipping='foo')
raw.plot(lowpass=1, clipping='transparent')
raw.plot(highpass=1, clipping='clamp')
raw.plot(highpass=1, lowpass=2) | 26,395 |
def _json_to_numpy(string_like, dtype=None): # type: (str) -> np.array
"""Convert a JSON object to a numpy array.
Args:
string_like (str): JSON string.
dtype (dtype, optional): Data type of the resulting array. If None, the dtypes will be determined by the
contents of each column, individually. This argument can only be used to
'upcast' the array. For downcasting, use the .astype(t) method.
Returns:
(np.array): numpy array
"""
data = json.loads(string_like)
return np.array(data, dtype=dtype) | 26,396 |
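For example (assuming the json/numpy imports the snippet relies on):
arr = _json_to_numpy('[[1, 2], [3, 4]]', dtype='float32')
print(arr, arr.dtype)  # [[1. 2.] [3. 4.]] float32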
def connect(**kwargs):
"""
A strategy to connect a bot.
:param kwargs: strategy, listener, and orders_queue
:return: the input strategy with a report
"""
strategy = kwargs['strategy']
listener = kwargs['listener']
orders_queue = kwargs['orders_queue']
assets = kwargs['assets']
logger = log.get_logger(__name__, strategy['bot'])
if support_functions.get_profile(strategy['bot'])['banned']:
logger.warning('{} has been banned'.format(strategy['bot']))
strategy['report'] = {
'success': False,
'details': {'Execution time': 0, 'Reason': '{} has been banned'.format(strategy['bot'])}
}
log.close_logger(logger)
return strategy
if 'connected' in listener.game_state.keys():
if listener.game_state['connected']:
logger.info('Bot connected in {}s'.format(0))
strategy['report'] = {
'success': True,
'details': {'Execution time': 0}
}
log.close_logger(logger)
return strategy
bot_profile = strategies.support_functions.get_profile(strategy['bot'])
order = {
'command': 'connect',
'parameters': {
'name': bot_profile['name'],
'username': bot_profile['username'],
'password': bot_profile['password'],
'serverId': assets['server_2_id'][bot_profile['server']],
}
}
logger.info('Sending order to bot API: {}'.format(order))
orders_queue.put((json.dumps(order),))
start = time.time()
timeout = 40 if 'timeout' not in strategy.keys() else strategy['timeout']
waiting = True
while waiting and time.time() - start < timeout:
if 'connected' in listener.game_state.keys() and 'api_outdated' in listener.game_state.keys():
if 'pos' in listener.game_state.keys() or listener.game_state['api_outdated'] or listener.game_state['banned']:
# Actually wait for the map to load and not just a connection confirmation
waiting = False
time.sleep(0.05)
execution_time = time.time() - start
if waiting:
logger.warn('Failed connecting in {}s'.format(execution_time))
strategy['report'] = {
'success': False,
'details': {'Execution time': execution_time, 'Reason': 'Timeout'}
}
log.close_logger(logger)
return strategy
if listener.game_state['api_outdated']:
logger.warn('Your BlackFalconAPI is outdated. Try to get the latest one or contact the BlackFalcon team if you already have the latest version')
strategy['report'] = {
'success': False,
'details': {'Execution time': execution_time, 'Reason': 'Your BlackFalconAPI is outdated. Try to get the latest one or contact the BlackFalcon team if you already have the latest version'}
}
log.close_logger(logger)
return strategy
if listener.game_state['banned']:
logger.warn('{} has been banned'.format(strategy['bot']))
strategy['report'] = {
'success': False,
'details': {'Execution time': execution_time, 'Reason': '{} has been banned'.format(strategy['bot'])}
}
log.close_logger(logger)
return strategy
logger.info('Connected {} in {}s'.format(strategy['bot'], execution_time))
strategy['report'] = {
'success': True,
'details': {'Execution time': execution_time}
}
log.close_logger(logger)
return strategy | 26,397 |
def create_env(env, render=False, shared=False, maddpg=False, evaluate=False):
"""Return, and potentially create, the environment.
Parameters
----------
env : str or gym.Env
the environment, or the name of a registered environment.
render : bool
whether to render the environment
shared : bool
specifies whether agents in an environment are meant to share policies.
This is solely used by multi-agent Flow environments.
maddpg : bool
whether to use an environment variant that is compatible with the
MADDPG algorithm
evaluate : bool
specifies whether this is a training or evaluation environment
Returns
-------
gym.Env or list of gym.Env or None
gym-compatible environment(s). Set to None if no environment is being
returned.
array_like or list of array_like or None
the observation(s) from the environment(s) upon reset. Set to None if
no environment is being returned.
"""
if env is None:
# No environment (for evaluation environments).
return None, None
elif isinstance(env, str):
if env in ENV_ATTRIBUTES.keys() or env.startswith("multiagent"):
# Handle multi-agent environments.
multiagent = env.startswith("multiagent")
if multiagent:
env = env[11:]
env = ENV_ATTRIBUTES[env]["env"](
evaluate, render, multiagent, shared, maddpg)
elif env.startswith("flow:"):
# environments in flow/examples
env = import_flow_env(env, render, shared, maddpg, evaluate)
else:
# This is assuming the environment is registered with OpenAI gym.
env = gym.make(env)
# Reset the environment.
if isinstance(env, list):
obs = [next_env.reset() for next_env in env]
else:
obs = env.reset()
return env, obs | 26,398 |
def measure_approximate_cost(structure):
""" Various bits estimate the size of the structures they return. This makes that consistent. """
if isinstance(structure, (list, tuple)): return 1 + sum(map(measure_approximate_cost, structure))
elif isinstance(structure, dict): return len(structure) + sum(map(measure_approximate_cost, structure.values()))
elif isinstance(structure, int) or structure is None: return 1
else: assert False, type(structure) | 26,399 |
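A small illustration of the size heuristic:
structure = {'a': [1, 2, 3], 'b': None}
print(measure_approximate_cost(structure))  # 2 (dict keys) + 4 (list) + 1 (None) = 7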