content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def compile():
    """
    Build the C extension as a shared library.

    Invokes gcc to compile ``_c_ext_c_path`` into ``_c_ext_so_path``
    (C99, position-independent code), running from ``_dir_path``.
    """
    command = [
        'gcc', '-shared', '-o', _c_ext_so_path,
        '-std=c99', '-fPIC', _c_ext_c_path,
    ]
    subprocess.run(command, cwd=_dir_path)
def has_edit_metadata_permission(user, record):
    """Return whether *user* is allowed to update *record*'s metadata."""
    permission = EditMetadataPermission(user, record)
    return permission.can()
def login_user(client, user):
    """Log in a specified user by seeding the test client's session."""
    with client.session_transaction() as session:
        # A None user clears the stored id (effectively logged out).
        session['user_id'] = user.id if user else None
        session['_fresh'] = True
def truncate_chars_middle(text, limit, sep="..."):
    """
    Truncate **text** in the middle so that **limit** of its characters are
    kept (plus **sep**) when its length exceeds **limit**; otherwise return
    **text** unchanged.

    Since this is a template filter, no exceptions are raised when they would
    normally be: falsy text or a negative limit yields "".

    :param text: the text to truncate.
    :param limit: the maximum number of characters of **text** to keep.
    :param sep: the separator to display in place of the truncated characters.
    :return: a truncated version of **text**.
    """
    if not text or limit < 0:
        return ""
    length = len(text)
    # Bug fix: use <= so a string of exactly `limit` characters is returned
    # unchanged instead of having `sep` spliced into it.
    if length <= limit:
        return text
    # Keep ceil(limit/2) chars from the front and floor(limit/2) from the end.
    first_half = ceil(limit / 2)
    second_half = length - floor(limit / 2)
    return text[:first_half] + sep + text[second_half:]
def parse_block(block, site_name, site_num, year):
    """Parse a main data block from a BBC file.

    Repairs recurring OCR errors in the block text via a table of literal
    string replacements, then splits it on the known section headers into a
    dictionary.

    Args:
        block: Raw OCR text of one census block.
        site_name: Name of the census site, stored under 'SiteName'.
        site_num: Site number; combined with `year` into 'SiteNumInCensus'.
        year: Census year.

    Returns:
        Dict mapping each section header (e.g. 'Location', 'Census',
        'Observers') to its body text, plus the 'SiteName' and
        'SiteNumInCensus' keys.
    """
    # Cleanup difficult issues manually
    # Combination of difficult \n's and OCR mistakes
    replacements = {'Cemus': 'Census',
                    'Description of plot': 'Description of Plot',
                    'Description Oi Plot': 'Description of Plot',
                    'Acknowledgmentsz': 'Acknowledgments: ',
                    'Other Observers:]': 'Other Observers: ',
                    'Other 0berservers': 'Other Observers: ',
                    '0ther Observerers': 'Other Observers: ',
                    'Other 0bservers': 'Other Observers: ',
                    'Other Observers.': 'Other Observers:',
                    'Other Observers]': 'Other Observers:',
                    'Continnity': 'Continuity',
                    'lViagnolia': 'Magnolia',
                    'lVildlife': 'Wildlife',
                    'Mallard ): American Black Duck hybrid': 'Mallard x American Black Duck hybrid',
                    'Observerszj': 'Observers',
                    'Bobolink; 9.0 territories': 'Bobolink, 9.0 territories',
                    "37°38'N, 121°46lW": "37°38'N, 121°46'W",
                    'Common Yellowthroat, 4.5, Northern Flicker, 3.0': 'Common Yellowthroat, 4.5; Northern Flicker, 3.0',
                    'Red-bellied Woodpecker, 2.0, Carolina Chickadee, 2.0': 'Red-bellied Woodpecker, 2.0; Carolina Chickadee, 2.0',
                    'Winter 1992': ' ', #One header line in one file got OCR'd for some reason
                    'nuLquu “1:10': ' ',
                    'nululuu 1:1:1.)': ' ',
                    '20.9 h; 8 Visits (8 sunrise), 8, 15, 22, 29 April; 6, 13, 20, 27 May.': '20.9 h; 8 Visits (8 sunrise); 8, 15, 22, 29 April; 6, 13, 20, 27 May.',
                    '19.3 h; 11 visits (11 sunrise;': '19.3 h; 11 visits (11 sunrise);',
                    'Foster Plantation; 42"7’N': 'Foster Plantation; 42°7’N',
                    'Hermit Thrush, 4.5 (18), Black-throatcd Green Warbler': 'Hermit Thrush, 4.5 (18); Black-throated Green Warbler', # Fixes both delimiter and selling of throated
                    '39"] 2‘N, 76°54’W': '39°12‘N, 76°54’W',
                    "42°“7'N, 77°45’W": "42°7'N, 77°45’W",
                    '41°4\'N, 76"7’W': "41°4'N, 76°7’W",
                    'w‘sits': 'visits',
                    '79513’W': '79°13’W',
                    'Continuity.': 'Continuity:',
                    'Continuity"': 'Continuity:',
                    "40°44'N, 7 D50’W": "40°44'N, 75°50’W",
                    "41350'N, 71°33'W": "41°50'N, 71°33'W",
                    '44°57’N, 68D41’W': '44°57’N, 68°41’W',
                    '18.8 11; 11 Visits': '18.8 h; 11 Visits',
                    "Descripn'on of Plot": "Description of Plot",
                    '41 c’42’N, 73°13’VV': "41°42'N, 73°13'W",
                    'Northern Rough-winged Swallow. 0.5': 'Northern Rough-winged Swallow, 0.5',
                    'Warbling Vireo, 1.0, Northern Cardinal, 1.0': 'Warbling Vireo, 1.0; Northern Cardinal, 1.0',
                    'Wood Thrush, 3.0 (18), American Redstart, 3.0': 'Wood Thrush, 3.0; American Redstart, 3.0',
                    'study-hrs': 'study-hours',
                    'studyhours': 'study-hours',
                    'Nuttall’s Woodpecker, 3 (9; 2N),':'Nuttall’s Woodpecker, 3 (9; 2N);',
                    '38°35’45”N\', 76°45’46"W': '38°35’45”N, 76°45’46"W',
                    'Northern Parula 8': 'Northern Parula, 8',
                    '47°08’N, 99°] 5’ W': '47°08’N, 99°15’ W',
                    'Yellow Warbler, 1,’ Clay-colored Sparrow, 1,Savannah Sparrow, 1;': 'Yellow Warbler, 1; Clay-colored Sparrow, 1; Savannah Sparrow, 1;',
                    'Established 1993; 2 )n‘.': 'Established 1993; 2.',
                    'Established l983': 'Established 1983',
                    'Established 1978; 18 you': 'Established 1978; 18 yr.',
                    'This plot is part of a larger plot that was first censused in 1981.': '',
                    'Ruby-throatcd Hummingbird': 'Ruby-throated Hummingbird',
                    'RuHed Grouse': 'Ruffed Grouse',
                    '\Varbler': "Warbler",
                    'VVarbler': "Warbler",
                    'Common Yellowthroat 3': 'Common Yellowthroat, 3',
                    'all known to breed in immediate vicinity': '',
                    'and a number of vagrants': '',
                    "Utner Ubservers": "Other Observers",
                    'Dovmy': 'Downy',
                    "W'oodpecker": "Woodpecker",
                    "\700d Thrush": "Wood Thrush",
                    "\form-eating Warbler": "Worm-eating Warbler",
                    "Clifl' Swallow": "Cliff Swallow",
                    'Clifl\ Swallow"': 'Cliff Swallow',
                    'Downy Woodpecknululuu I JHJ er': 'Downy Woodpecker',
                    'unidentified Accipiter': 'Accipiter sp.',
                    "Traill’s Flycatcher": "Willow Flycatcher",
                    'Eastern Titmouse': 'Tufted Titmouse',
                    'Common Barn Owl': 'Barn Owl',
                    'Common Bushtit': 'Bushtit',
                    'Yellow-shafted Flicker': 'Northern Flicker',
                    'Yellowshafted Flicker': 'Northern Flicker',
                    'Common Barn-Owl': 'Barn Owl',
                    'Northern Parula Warbler': 'Northern Parula',
                    'Yellow-rumped,': 'Yellow-rumped Warbler,',
                    'Common Crow': 'American Crow',
                    ', Raven,': ', Common Raven,',
                    '; Raven,': '; Common Raven,',
                    '+_': '+',
                    'chickadee sp.;': 'chickadee sp.,',
                    'Yellow Warbler, 0.5, Common Yellowthroat, 0.5.': 'Yellow Warbler, 0.5; Common Yellowthroat, 0.5.',
                    'Whip-poor-will, 1.0, European Starling, 1.0': 'Whip-poor-will, 1.0; European Starling, 1.0',
                    '80(9\'45"': '80°9\'45"',
                    'American Crow; 1.0;': 'American Crow, 1.0;',
                    "47°08'N7 99°15'W;": "47°08'N 99°15'W;",
                    "', 7'6°45": ", 76°45",
                    "43°] 6’N": "43°16'N",
                    "121°461W": "121°46'W",
                    "39.] h;": "39.1 h;",
                    "74°ll": "74°11",
                    "40°] 1": "40°11",
                    "Estao lished": "Established",
                    "Estabo lished": "Established",
                    "Estab lished": "Established",
                    "79°O": "79°0",
                    "79°]": "79°1",
                    "12.] h;": "12.1 h;",
                    "terfitories": "territories"
    }
    block = get_cleaned_string(block)
    # Apply each literal OCR fix; log the ones that actually fire so new
    # OCR issues can be spotted when parsing fails downstream.
    for replacement in replacements:
        if replacement in block:
            print("Replacing {} with {}".format(replacement, replacements[replacement]))
            block = block.replace(replacement, replacements[replacement])
    block = get_clean_block(block)
    # Split on the known section headers; re.split with a capture group
    # yields [header, body, header, body, ...] after dropping the leading
    # empty string.
    p = re.compile(r'((?:Site Number|Location|Continuity|Previously called|Size|Description of Plot|Edge|Topography and Elevation|Weather|Coverage|Census|Fledglings|Nests and Fledglings|Fledglings Seen|Fledglings Noted|Total|Visitors|Nests Found|Remarks|Observers|Other Observers|Other Observer|Acknowledgments)):')
    split_block = p.split(block)[1:] #discard first value; an empty string
    block_dict = {split_block[i]: split_block[i+1] for i in range(0, len(split_block), 2)}
    block_dict['SiteName'] = site_name
    # Encode site number and year into a single sortable integer key.
    block_dict['SiteNumInCensus'] = site_num * 10000 + year
    return block_dict
def isempty(s):
    """
    Return True if the input object (string) counts as empty:
    None, the empty string, a lone dash "-", or an empty list.
    """
    return s in (None, "", "-", [])
def get_client(host, port=None, username=None,
               password=None, tenant=None,
               auth_url=None, auth_strategy=None,
               auth_token=None, region=None,
               is_silent_upload=False, insecure=True,
               aws_access_key=None, aws_secret_key=None):
    """
    Returns a new boto Cloudformation client connection to a heat server.

    Only the AWS keys and port are forwarded to boto; the remaining
    parameters are accepted for interface compatibility. Passing None keys
    makes boto read /etc/boto.cfg or ~/.boto instead (set is_secure=0 in
    that config to disable https). Exits the process if no connection
    object is obtained.
    """
    connection = BotoClient(aws_access_key_id=aws_access_key,
                            aws_secret_access_key=aws_secret_key,
                            port=port,
                            path="/v1")
    if not connection:
        logger.error("Error establishing Cloudformation connection!")
        sys.exit(1)
    logger.debug("Got CF connection object OK")
    return connection
def raw(text):
    """Returns a raw string representation of text.

    Each character present in ``trans_map`` is replaced by its mapped
    escape; all other characters pass through unchanged.
    """
    def _translate(char):
        try:
            return trans_map[char]
        except KeyError:
            return char

    return ''.join(_translate(char) for char in text)
def airtovac(wave_air):
    """
    Convert air wavelengths to vacuum wavelengths.

    Ported from the IDL astrolib routine AIRTOVAC (W. Landsman 1991;
    Ciddor-formula and iteration updates 2011). Wavelengths are corrected
    for the index of refraction of air under standard conditions using the
    relation of Ciddor (1996, Applied Optics 62, 958). Values below
    2000 Angstroms are returned unaltered; take care within 1 A of 2000 A.

    Args:
        wave_air: Wavelength(s) in Angstroms; scalar or numpy array.

    Returns:
        Vacuum wavelength(s) in Angstroms, same shape as the input.
        The input itself is not modified.

    Example:
        For the Krypton line at W = 6056.125 (air), the vacuum
        wavelength is W ~= 6057.8019.
    """
    wave_vac = wave_air * 1.0  # float copy so the caller's input is untouched
    g = wave_vac > 2000  # boolean mask (array input) or plain bool (scalar); only modify above 2000 A
    if np.sum(g):
        # Two fixed-point iterations: the conversion factor is re-evaluated
        # at the current vacuum-wavelength estimate for better precision.
        for iter in [0, 1]:
            if isinstance(g, np.ndarray):
                sigma2 = (1e4/wave_vac[g])**2. #Convert to wavenumber squared
                # Compute conversion factor (Ciddor 1996)
                fact = 1. + 5.792105e-2 / (238.0185 - sigma2) + \
                    1.67917e-3 / (57.362 - sigma2)
                wave_vac[g] = wave_air[g] * fact #Convert Wavelength
            else: # scalar version
                sigma2 = (1e4/wave_vac)**2. #Convert to wavenumber squared
                # Compute conversion factor (Ciddor 1996)
                fact = 1. + 5.792105e-2 / (238.0185 - sigma2) + \
                    1.67917e-3 / (57.362 - sigma2)
                wave_vac = wave_air * fact #Convert Wavelength
    return wave_vac
def make_qa_plot(cset, kind='image', outfilen='qa_plot'):
    """
    Generate Quality Assurance plots by visualizing each channel where
    significant emission occurs in the image cube of interest (i.e.,
    `.image` or `.residual`).

    Parameters
    ----------
    cset : CubeSet
    kind : str
        Kind of image data to plot:
            'image' : restored/cleaned image
            'residual' : residual image
    outfilen : str
        Base output filename; '_<kind>' (and '_pageN' when paginated) is
        appended before saving.

    Raises
    ------
    ValueError
        If `kind` is not 'image' or 'residual'.
    """
    log_post(':: Making QA plots for: {0} ({1})'.format(cset.stem, kind))
    # Configuration options specific to image or residual plots
    if kind == 'image':
        cmap = plt.cm.afmhot
        mask_cont_color = 'cyan'
        vmin, vmax = -3 * cset.rms, 10 * cset.rms
    elif kind == 'residual':
        cmap = plt.cm.RdBu_r
        mask_cont_color = 'black'
        vmin, vmax = -5 * cset.rms, 5 * cset.rms
    else:
        raise ValueError('Invalid plot kind: "{0}"'.format(kind))
    # Set NaN color for colormap, '0.2' for darker gray
    cmap.set_bad('0.5', 1.0)
    # Plot configuration options
    ncols = 5
    subplot_size = 1.3  # inch; good for five across on a 8.5x11 page
    max_rows_per_page = 14  # m*n -> 70 plot per page
    max_plots_per_page = ncols * max_rows_per_page
    tick_pos = cset.calc_tick_loc(ang_tick=5)
    # Determine the number of pages that need to be created from the
    # required number of channels to plot.
    if cset.ngood <= max_plots_per_page:
        plots_per_page = [cset.ngood]
    else:
        n_full_pages = cset.ngood // max_plots_per_page
        n_mod_plots = cset.ngood % max_plots_per_page
        plots_per_page = n_full_pages * [max_plots_per_page] + [n_mod_plots]
    n_pages = len(plots_per_page)
    iter_planes = cset.iter_planes()
    # get pages per plot
    for i_page, n_plots in enumerate(plots_per_page):
        nempty = n_plots % ncols
        nrows = n_plots // ncols + (1 if nempty > 0 else 0)
        figsize = (ncols * subplot_size, nrows * subplot_size)
        fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True,
                sharey=True, figsize=figsize, dpi=300)
        for j_plot in range(n_plots):
            try:
                planes, chan_ix = next(iter_planes)
            except StopIteration:
                break
            ax = axes.flat[j_plot]
            image, residual, mask, pbeam = planes
            data = image if kind == 'image' else residual
            # Show colorscale of image data
            ax.imshow(data, vmin=vmin, vmax=vmax, cmap=cmap, origin='lower')
            # Show HPBW contour for primary beam data
            ax.contour(pbeam, levels=[0.5], colors='0.5',
                    linestyles='dashed', linewidths=0.4)
            # Show contours for high-SNR emission
            if kind == 'image':
                high_snr_levels = list(cset.rms * np.array([10, 20, 40, 80]))
                ax.contourf(data, levels=high_snr_levels, cmap=plt.cm.rainbow)
            # Show contour for the clean mask
            if mask.any():
                # Bug fix: the keyword is `levels`, not `level` -- the invalid
                # keyword meant the intended 0.5 contour level was never used.
                ax.contour(mask, levels=[0.5], colors=mask_cont_color,
                        linestyles='solid', linewidths=0.2)
            # show nice channel label in the top-right corner
            text = ax.annotate(str(chan_ix), (0.83, 0.91),
                    xycoords='axes fraction', fontsize='xx-small')
            text.set_path_effects([
                    path_effects.withStroke(linewidth=2, foreground='white')])
            # show axis ticks as relative offsets in fixed arcsec interval
            ax.set_xticks(tick_pos)
            ax.set_yticks(tick_pos)
            ax.set_xticklabels([])
            ax.set_yticklabels([])
            del image, residual, mask, pbeam
        # zoom the window to crop out some of the PB NaNs around the edges;
        # axes share limits, so setting the last axis applies to all
        ax.set_xlim(0.12*cset.pix_width, 0.88*cset.pix_width)
        ax.set_ylim(0.12*cset.pix_width, 0.88*cset.pix_width)
        plt.tight_layout(pad=0.8)
        # Hide unused plots
        if nempty > 0:
            for ax in axes.flat[-nempty:]:
                ax.set_visible(False)
        # Only use paginated file names if multiple pages are used
        if n_pages == 1:
            outfilen_ext = '{0}_{1}'.format(outfilen, kind)
        else:
            outfilen_ext = '{0}_{1}_page{2}'.format(outfilen, kind, i_page+1)
        savefig(outfilen_ext)
def clamp(val, min_, max_):
    """Clamp `val` to between `min_` and `max_` inclusive.

    The lower bound is checked first, so it wins if the bounds overlap.
    """
    if val < min_:
        return min_
    return max_ if val > max_ else val
def plot_confusion_matrix(cm, labels=3, fname=''):
    """Plot the confusion matrix and save to file.

    Args:
        cm (np.ndarray): The confusion matrix
        labels (int): Number of labels (rendered as 0..labels-1)
        fname (str): Path to file for saving (written as PDF)
    """
    disp = ConfusionMatrixDisplay(confusion_matrix=cm,
                                  display_labels=range(labels))
    # disp.plot() returns the display object; the Figure lives on `figure_`.
    disp = disp.plot(include_values=True, cmap=plt.cm.Blues, ax=None,
                     xticks_rotation='horizontal', values_format=None)
    fig = disp.figure_
    # Bug fix: the savefig keyword is `pad_inches` (was misspelled
    # `pad_inces`, so the zero padding was never applied).
    fig.savefig(fname, format='pdf', bbox_inches='tight', pad_inches=0)
def GetUserLink(provider, email):
    """Retrieves a url to the profile of the specified user on the given provider.

    Args:
        provider: The name of the provider.
        email: The email alias of the user.

    Returns:
        Str of the url to the profile of the user (empty string when no
        link can be constructed), encoded to ASCII.
    """
    if email and provider == Provider.ISSUETRACKER:
        alias = email.split('@')[0]
        link = 'http://code.google.com/u/' + alias
    else:
        link = ''
    return encoding_util.EncodeToAscii(link)
def _as_bytes0(path):
    """Convert *path* to bytes; crashes translation if the path contains
    NUL characters."""
    encoded = _as_bytes(path)
    rstring.check_str0(encoded)
    return encoded
def updateCompareProjectShoppingList():
    """Updates the shopping list for the working project in the compareTab.

    Reads the selected project from ``selectedCompareProject2``, fetches its
    materials, and repopulates the ``alltreeCompareProject`` treeview.
    (Removed the unused ``treeOutput`` alias that shadowed the treeview.)
    """
    workingProject = selectedCompareProject2.get()
    result = JobMaterials.getAllProjectMaterials(workingProject)
    # Clear existing rows before inserting the fresh material list.
    alltreeCompareProject.delete(*alltreeCompareProject.get_children())
    for row in result:
        alltreeCompareProject.insert("", END, values=row)
def get_ip(request):
    """Determines user IP address.

    Args:
        request: request object exposing `remote_addr`.

    Return:
        The requesting machine's (public) IP address.
    """
    return request.remote_addr
def _is_valid_new_style_arxiv_id(identifier):
"""Determine if the given identifier is a valid new style arXiv ID."""
split_identifier = identifier.split('v')
if len(split_identifier) > 2:
return False
elif len(split_identifier) == 2:
identifier, version = split_identifier
if not version.isnumeric():
return False
else:
identifier = split_identifier[0]
split_identifier = identifier.split('.')
if len(split_identifier) != 2:
return False
prefix, suffix = split_identifier
if not prefix.isnumeric() or not suffix.isnumeric():
return False
if len(prefix) != 4 or len(suffix) not in {4, 5}:
return False
month = prefix[2:4]
if int(month) > 12:
return False
return True | 31,016 |
def download_suite(request, domain, app_id):
    """
    See Application.create_suite
    """
    app = request.app
    # Form versions are only (re)set on the editable app, not on a build copy.
    if not app.copy_of:
        app.set_form_versions(None)
    return HttpResponse(app.create_suite())
def get_graph(mol):
    """ Converts `rdkit.Chem.Mol` object to `PreprocessingGraph`.

    Returns None when `mol` is None. (Bug fix: previously `molecular_graph`
    was only assigned inside the `if`, so a None input raised
    UnboundLocalError at the return statement.)
    """
    if mol is None:
        return None
    # Kekulize unless aromatic bonds are to be kept as such.
    if not C.use_aromatic_bonds:
        rdkit.Chem.Kekulize(mol, clearAromaticFlags=True)
    return PreprocessingGraph(molecule=mol, constants=C)
def dict_fetchall(cursor):
    """
    Returns all rows from a cursor as a list of dicts keyed by column name.
    """
    columns = [col[0] for col in cursor.description]
    return [dict(zip(columns, row)) for row in cursor.fetchall()]
def test_tvh_widget_popup(tvh_manager):
    """Test the popup displays the correct information."""
    # First click on the bar toggles the widget's popup open.
    tvh_manager.c.bar["top"].fake_button_press(0, "top", 0, 0, 1)
    # eval returns a (status, value) tuple; we only care about the text.
    _, text = tvh_manager.c.widget["tvhwidget"].eval("self.popup.text")
    assert text == (
        "Upcoming recordings:\n"
        "Fri 26 Nov 18:55: TVH Widget Test 1\n"
        "Fri 26 Nov 19:05: TVH Widget Test 2"
    )
    # Popup hides when clicked again.
    tvh_manager.c.bar["top"].fake_button_press(0, "top", 0, 0, 1)
    _, result = tvh_manager.c.widget["tvhwidget"].eval("self.popup is None")
    # eval stringifies the result, hence the comparison against "True".
    assert result == "True"
def count_vowels(s):
    """Count the vowels (a, e, i, o, u) in the sequence, case-insensitively."""
    return sum(1 for ch in s.lower() if ch in "aeiou")
def _is_valid_dtype(matrix, complex_dtype=False, all_dtype=False):
    """ Check to see if `matrix` has a usable float (or complex) dtype.

    `all_dtype` accepts both float and complex dtypes and takes precedence
    over `complex_dtype`, which restricts to complex only.
    """
    if all_dtype:
        allowed = NUMPY_FLOAT_DTYPES + NUMPY_COMPLEX_DTYPES
    elif complex_dtype:
        allowed = NUMPY_COMPLEX_DTYPES
    else:
        allowed = NUMPY_FLOAT_DTYPES
    return matrix.dtype in allowed
def test_health_check_custom_check():
    """
    Should return Custom health check results.
    """
    # Build a graph with the health convention enabled but build info off.
    loader = load_from_dict(
        health_convention=dict(
            include_build_info="false",
        ),
    )
    graph = create_object_graph(name="example", testing=True, loader=loader)
    graph.use("health_convention")
    client = graph.flask.test_client()
    # Register a custom check; its return value becomes the check's message.
    graph.health_convention.checks["foo"] = lambda graph: "hi"
    response = client.get("/api/health")
    assert_that(response.status_code, is_(equal_to(200)))
    data = loads(response.get_data().decode("utf-8"))
    # The payload should echo the service name, overall status, and the
    # per-check result for "foo".
    assert_that(data, is_(equal_to({
        "name": "example",
        "ok": True,
        "checks": {
            "foo": {
                "message": "hi",
                "ok": True,
            },
        },
    })))
def assess_edge(self, edge, fsmStack, request, **kwargs):
    """
    Try to transition to ASSESS, or WAIT_ASSESS if not ready,
    or jump to ASK if a new question is being asked.

    Returns the FSM node to transition to, based on the instructor's
    linked state and the student's self-evaluation.
    """
    fsm = edge.fromNode.fsm
    if not fsmStack.state.linkState: # instructor detached
        return fsm.get_node('END')
    elif fsmStack.state.linkState.fsmNode.node_name_is_one_of('QUESTION'):
        # Instructor is on a question: wait if it is the lesson we are
        # already on, otherwise follow them to the new question.
        if fsmStack.state.unitLesson == fsmStack.state.linkState.unitLesson:
            return fsm.get_node('WAIT_ASSESS')
        else: # jump to the new question
            fsmStack.state.unitLesson = fsmStack.state.linkState.unitLesson
            fsmStack.state.save()
            return fsm.get_node('TITLE')
    else: # pragma: no cover
        # Route on the student's self-evaluation of their response.
        if not fsmStack.next_point.response_to_check.selfeval:
            return edge.toNode
        if fsmStack.next_point.response_to_check.selfeval != 'correct':
            return fsm.get_node('INCORRECT_ANSWER')
        elif fsmStack.next_point.response_to_check.selfeval == 'correct':
            return fsm.get_node('CORRECT_ANSWER')
        # NOTE(review): unreachable -- the branches above cover every
        # selfeval value once the falsy case has returned; confirm intent.
        return edge.toNode
def viable_source_types_for_generator_real (generator):
    """ Returns the list of source types, which, when passed to 'run'
        method of 'generator', has some change of being eventually used
        (probably after conversion by other generators)
    """
    declared = generator.source_types ()
    # A generator with no declared source types (e.g. builtin.lib-generator)
    # just relays to other generators, so any source type might be usable;
    # '*' signals that we cannot narrow it down.
    if not declared:
        return ['*']
    result = []
    for source_type in declared:
        viable_sources = viable_source_types(source_type)
        if viable_sources == "*":
            result = ["*"]
            break
        result.extend(type.all_derived(source_type) + viable_sources)
    return unique(result)
def negSamplingCostAndGradient(predicted, target, outputVectors, dataset,
                               K=10):
    """ Negative sampling cost function for word2vec models

    Implement the cost and gradients for one predicted word vector
    and one target word vector as a building block for word2vec
    models, using the negative sampling technique. K is the sample
    size.

    Note: See test_word2vec below for dataset's initialization.

    Arguments:
        predicted -- center word vector v_c, shape (d,)
        target -- integer index of the outside (target) word
        outputVectors -- output word vector matrix, shape (V, d)
        dataset -- provides negative-sample token indices
        K -- number of negative samples

    Returns:
        cost -- scalar negative-sampling loss
        gradPred -- gradient w.r.t. `predicted`, shape (d,)
        grad -- gradient w.r.t. `outputVectors`, shape (V, d)
    """
    # Sampling of indices is done for you. Do not modify this if you
    # wish to match the autograder and receive points!
    indices = [target]
    indices.extend(getNegativeSamples(target, dataset, K))

    gradPred = np.zeros_like(predicted)
    grad = np.zeros_like(outputVectors)

    # Positive (target) term: -log sigmoid(u_o . v_c)
    u_o = outputVectors[target]
    z = sigmoid(np.dot(u_o, predicted))
    cost = -np.log(z)
    gradPred += (z - 1.0) * u_o
    grad[target] += (z - 1.0) * predicted

    # Negative samples: -log sigmoid(-u_k . v_c) for each sampled index k.
    # The same index may be drawn more than once, hence the += accumulation.
    for k in indices[1:]:
        u_k = outputVectors[k]
        z_k = sigmoid(-np.dot(u_k, predicted))
        cost -= np.log(z_k)
        gradPred += (1.0 - z_k) * u_k
        grad[k] += (1.0 - z_k) * predicted

    return cost, gradPred, grad
def get_start_block(block):
    """
    Gets the deepest block to use as the starting block, following the
    first child at every level.
    """
    current = block
    while current.get('children'):
        current = current['children'][0]
    return current
def transform_rank_list(lam_ref, A, b, rank):
    """
    Build an operator of the desired rank by summing the first `rank`
    elements of the matrix list `A`, then evaluate it at `lam_ref`.

    Returns the summed operator, the (unchanged) offset `b`, and the
    predicted data ``operator @ lam_ref + b``.
    """
    operator = sum(A[:rank])
    offset = b
    data = operator @ lam_ref + offset
    assert np.linalg.matrix_rank(operator) == rank, "Unexpected rank mismatch"
    return operator, offset, data
def FindWindowsWithTitle(title_to_search):
    """Finds windows with given title.

    Args:
        title_to_search: Window title substring to search, case-insensitive.

    Returns:
        A list of HWND that match the search condition.
    """
    # A None desktop handle makes the search cover all top-level windows.
    return FindWindowsWithText(None, title_to_search)
def main_update(next_image_step):
    """
    Advance the simulation by one timestep and manage scheduled file output.

    This includes some functionality for image / file writing at a specified
    frequency. If the numerical dt would step past the next scheduled write
    time, dt is clipped so that time is hit exactly and the write flag is set.

    NOTE(review): relies on module-level globals -- `md`, `advDiff`,
    `advector`, `marker`, `_pressure`, `pressureField`, `surfLength`,
    `time`, `step`, `files_freq` -- confirm they are defined by the driver
    script before this is called.

    Args:
        next_image_step: Simulation time of the next scheduled image/file dump.

    Returns:
        Tuple of (new_time, new_step, files_this_step, next_image_step).
    """
    if md.thermal:
        dt = advDiff.get_max_dt()*md.courantFac #additional md.courantFac helps stabilise advDiff
        advDiff.integrate(dt)
    else:
        dt = advector.get_max_dt()
    #This relates to file writing at set period:
    #override dt make sure we hit certain time values
    #Set some flags so that image / file writing proceeds
    if step == 0:
        files_this_step = True
    else:
        files_this_step = False
    if time + dt >= next_image_step:
        dt = next_image_step - time
        files_this_step = True
        next_image_step += files_freq #increment time for our next image / file dump
    #Do advection
    advector.integrate(dt)
    marker.advection(dt)
    #remove drift in pressure
    pressureSurf = _pressure.evaluate()[0]
    pressureField.data[:] -= pressureSurf/surfLength
    return time+dt, step+1, files_this_step, next_image_step
def record(packets: Iterable[Packet],
           pcap_path: str,
           *,
           src_ip: str = "127.0.0.1",
           dst_ip: str = "127.0.0.1",
           lidar_port: int = 7502,
           imu_port: int = 7503,
           use_sll_encapsulation: bool = False) -> int:
    """Record a sequence of sensor packets to a pcap file.

    Args:
        packets: A (finite!) sequence of packets
        pcap_path: Path of the output pcap file
        src_ip: Source IP to use for all packets
        dst_ip: Destination IP to use for all packets
        lidar_port: Src/dst port to use for lidar packets
        imu_port: Src/dst port to use for imu packets
        use_sll_encapsulation: Use sll encapsulation for pcaps (ouster
            studio can not read)

    Returns:
        Number of packets captured

    Raises:
        ValueError: On an unrecognized packet type, or if timestamped and
            untimestamped packets are mixed in one sequence.
    """
    # Tri-state: None until the first packet fixes whether this capture
    # uses packet-supplied timestamps or wall-clock time.
    has_timestamp = None
    error = False
    buf_size = 2**16  # pcap write buffer size
    n = 0
    handle = _pcap.record_initialize(pcap_path, src_ip, dst_ip, buf_size,
                                     use_sll_encapsulation)
    try:
        for packet in packets:
            # Route lidar and imu packets to their respective ports.
            if isinstance(packet, LidarPacket):
                src_port = lidar_port
                dst_port = lidar_port
            elif isinstance(packet, ImuPacket):
                src_port = imu_port
                dst_port = imu_port
            else:
                raise ValueError("Unexpected packet type")
            # Enforce a consistent timestamp convention across the capture.
            if has_timestamp is None:
                has_timestamp = (packet.capture_timestamp is not None)
            elif has_timestamp != (packet.capture_timestamp is not None):
                raise ValueError("Mixing timestamped/untimestamped packets")
            ts = packet.capture_timestamp or time.time()
            _pcap.record_packet(handle, src_port, dst_port, packet._data, ts)
            n += 1
    except Exception:
        error = True
        raise
    finally:
        _pcap.record_uninitialize(handle)
        # Remove an empty output file if we failed before writing anything.
        if error and os.path.exists(pcap_path) and n == 0:
            os.remove(pcap_path)
    return n
def assemble_batches(inputs, crop_mode='center_only'):
    """
    Assemble DataFrame of image crops for feature computation.

    Input:
        inputs: list of filenames (center_only, corners, and selective_search
            mode) OR input DataFrame (list mode)
        crop_mode: string
            'list': take the image windows from the input as-is
            'center_only': take the CROPPED_DIM middle of the image windows
            'corners': take CROPPED_DIM-sized boxes at 4 corners and center of
                the image windows, as well as their flipped versions: a total
                of 10.
            'selective_search': run Selective Search region proposal on the
                image windows, and take each enclosing subwindow.

    Output:
        df_batches: list of DataFrames, each one of BATCH_SIZE rows.
            Each row has 'image', 'filename', and 'window' info.
            Column 'image' contains (X x 3 x CROPPED_DIM x CROPPED_IM) ndarrays.
            Column 'filename' contains source filenames.
            Column 'window' contains [ymin, xmin, ymax, xmax] ndarrays.
            If 'filename' is None, then the row is just for padding.

    Note: for increased efficiency, increase the batch size (to the limit of
    gpu memory) to avoid the communication cost
    """
    if crop_mode == 'list':
        images_df = _assemble_images_list(inputs)
    elif crop_mode == 'center_only':
        images_df = _assemble_images_center_only(inputs)
    elif crop_mode == 'corners':
        images_df = _assemble_images_corners(inputs)
    elif crop_mode == 'selective_search':
        images_df = _assemble_images_selective_search(inputs)
    else:
        raise Exception("Unknown mode: not in {}".format(CROP_MODES))
    # Make sure the DataFrame has a multiple of BATCH_SIZE rows:
    # just fill the extra rows with NaN filenames and all-zero images.
    N = images_df.shape[0]
    remainder = N % BATCH_SIZE
    if remainder > 0:
        zero_image = np.zeros_like(images_df['image'].iloc[0])
        zero_window = np.zeros((1, 4), dtype=int)
        remainder_df = pd.DataFrame([{
            'filename': None,
            'image': zero_image,
            'window': zero_window
        }] * (BATCH_SIZE - remainder))
        # Bug fix: DataFrame.append was removed in pandas 2.0; pd.concat is
        # the drop-in replacement and keeps the (duplicated) integer index.
        images_df = pd.concat([images_df, remainder_df])
        N = images_df.shape[0]
    # Split into batches of BATCH_SIZE.
    # Bug fix: use floor division -- under Python 3, `/` yields floats,
    # which break both the batch-index mask and range().
    ind = np.arange(N) // BATCH_SIZE
    df_batches = [images_df[ind == i] for i in range(N // BATCH_SIZE)]
    return df_batches
def __VF2_feasible(graph1, graph2, vertex1, vertex2, map21, map12, terminals1,
                   terminals2, subgraph):
    """
    Returns :data:`True` if two vertices `vertex1` and `vertex2` from graphs
    `graph1` and `graph2`, respectively, are feasible matches. `mapping21` and
    `mapping12` are the current state of the mapping from `graph1` to `graph2`
    and vice versa, respectively. `terminals1` and `terminals2` are lists of
    the vertices that are directly connected to the already-mapped vertices.
    `subgraph` is :data:`True` if graph2 is to be treated as a potential
    subgraph of graph1. i.e. graph1 is a specific case of graph2.

    Uses the VF2 algorithm of Vento and Foggia. The feasibility is assessed
    through a series of semantic and structural checks. Only the combination
    of the semantic checks and the level 0 structural check are both
    necessary and sufficient to ensure feasibility. (This does *not* mean that
    vertex1 and vertex2 are always a match, although the level 1 and level 2
    checks preemptively eliminate a number of false positives.)
    """
    # Static type declarations for the compiled (cython) build; these are
    # no-ops when run as pure Python.
    cython.declare(vert1=Vertex, vert2=Vertex, edge1=Edge, edge2=Edge, edges1=dict, edges2=dict)
    cython.declare(i=cython.int)
    cython.declare(term1Count=cython.int, term2Count=cython.int, neither1Count=cython.int, neither2Count=cython.int)
    if not subgraph:
        # To be feasible the connectivity values must be an exact match
        if vertex1.connectivity1 != vertex2.connectivity1: return False
        if vertex1.connectivity2 != vertex2.connectivity2: return False
        if vertex1.connectivity3 != vertex2.connectivity3: return False
    # Semantic check #1: vertex1 and vertex2 must be equivalent
    if subgraph:
        if not vertex1.isSpecificCaseOf(vertex2): return False
    else:
        if not vertex1.equivalent(vertex2): return False
    # Get edges adjacent to each vertex
    edges1 = graph1.edges[vertex1]
    edges2 = graph2.edges[vertex2]
    # Semantic check #2: adjacent vertices to vertex1 and vertex2 that are
    # already mapped should be connected by equivalent edges
    for vert2 in edges2:
        if vert2 in map12:
            vert1 = map12[vert2]
            if not vert1 in edges1: # atoms not joined in graph1
                return False
            edge1 = edges1[vert1]
            edge2 = edges2[vert2]
            if subgraph:
                if not edge1.isSpecificCaseOf(edge2): return False
            else: # exact match required
                if not edge1.equivalent(edge2): return False
    # there could still be edges in graph1 that aren't in graph2.
    # this is ok for subgraph matching, but not for exact matching
    if not subgraph:
        for vert1 in edges1:
            if vert1 in map21:
                vert2 = map21[vert1]
                if not vert2 in edges2: return False
    # Count number of terminals adjacent to vertex1 and vertex2
    term1Count = 0; term2Count = 0; neither1Count = 0; neither2Count = 0
    for vert1 in edges1:
        if vert1 in terminals1: term1Count += 1
        elif vert1 not in map21: neither1Count += 1
    for vert2 in edges2:
        if vert2 in terminals2: term2Count += 1
        elif vert2 not in map12: neither2Count += 1
    # Level 2 look-ahead: the number of adjacent vertices of vertex1 and
    # vertex2 that are non-terminals must be equal
    if subgraph:
        if neither1Count < neither2Count: return False
    else:
        if neither1Count != neither2Count: return False
    # Level 1 look-ahead: the number of adjacent vertices of vertex1 and
    # vertex2 that are terminals must be equal
    if subgraph:
        if term1Count < term2Count: return False
    else:
        if term1Count != term2Count: return False
    # Level 0 look-ahead: all adjacent vertices of vertex2 already in the
    # mapping must map to adjacent vertices of vertex1
    for vert2 in edges2:
        if vert2 in map12:
            vert1 = map12[vert2]
            if vert1 not in edges1: return False
    # Also, all adjacent vertices of vertex1 already in the mapping must map to
    # adjacent vertices of vertex2, unless we are subgraph matching
    if not subgraph:
        for vert1 in edges1:
            if vert1 in map21:
                vert2 = map21[vert1]
                if vert2 not in edges2: return False
    # All of our tests have been passed, so the two vertices are a feasible
    # pair
    return True
async def place_rectangle(
    interface, element, x, y, width, height, include_all_sides=True, variant=None
):
    """Place a rectangle of an element.

    Parameters
    ----------
    interface
        The editor interface.
    element
        The element to place.
    x
        X coordinate of the upper left corner.
    y
        Y coordinate of the upper left corner.
    width
        Width of the rectangle.
    height
        Height of the rectangle.
    include_all_sides
        If False, skip returning the left, right and lower sides. Use this
        when placing walls and you are only interested in the insides.
    variant
        The variant of element to place.

    Returns
    -------
    A list of tuples (element, direction, x, y, variant) of placed elements.
    Some may be omitted, if `include_all_sides` is false.
    """
    await interface.place_element(
        element,
        Direction.NONE,
        (x, y),
        (x + width - 1, y + height - 1),
        variant=variant,
    )
    if include_all_sides:
        columns = range(x, x + width)
        rows = range(y, y + height)
    else:
        # Drop the left, right and bottom edges; the top row is retained.
        columns = range(x + 1, x + width - 1)
        rows = range(y, y + height - 1)
    return [
        (element, Direction.NONE, placed_x, placed_y, variant)
        for placed_x in columns
        for placed_y in rows
    ]
def rainbow_strokes(strokes: bpy.types.GPencilStrokes):
    """
    Set vertex colors on each stroke according to its index in *strokes*.

    Returns the list of per-stroke results from `colorize_stroke`.
    """
    return [colorize_stroke(stroke, index, True)
            for index, stroke in enumerate(strokes)]
def calc_Q_hat_hs_d_t(Q, A_A, V_vent_l_d_t, V_vent_g_i, mu_H, mu_C, J_d_t, q_gen_d_t, n_p_d_t, q_p_H, q_p_CS, q_p_CL, X_ex_d_t, w_gen_d_t, Theta_ex_d_t, L_wtr, region):
    """Equations (40-1a)(40-1b)(40-2a)(40-2b)(40-2c)(40-3).

    Hourly heat-source output used to size the heat-source air flow rate.

    Args:
        Q: Heat loss coefficient of the dwelling (W/(m2*K))
        A_A: Total floor area (m2)
        V_vent_l_d_t: Local ventilation volume at date d, hour t (m3/h)
        V_vent_g_i: General ventilation volume of HVAC zone i (m3/h);
            only the first 5 zones are summed below
        mu_H: Solar heat gain coefficient in the heating season
            ((W/m2)/(W/m2)); may be None, in which case the heating
            term is left at zero
        mu_C: Solar heat gain coefficient in the cooling season ((W/m2)/(W/m2))
        J_d_t: Horizontal global solar irradiance at date d, hour t (W/m2)
        q_gen_d_t: Internal heat generation at date d, hour t (W)
        n_p_d_t: Number of occupants at date d, hour t (persons)
        q_p_H: Sensible heat generation per person, heating season (W/person)
        q_p_CS: Sensible heat generation per person, cooling season (W/person)
        q_p_CL: Latent heat generation per person, cooling season (W/person)
        X_ex_d_t: Outdoor absolute humidity at date d, hour t (kg/kg(DA))
        w_gen_d_t: Internal moisture generation at date d, hour t
            (presumably g/h given the * 10**3 scaling below -- confirm)
        Theta_ex_d_t: Outdoor temperature at date d, hour t (degC)
        L_wtr: Latent heat of evaporation of water (kJ/kg)
        region: Region classification

    Returns:
        Hourly heat-source output for computing the heat-source air flow
        rate at date d, hour t (MJ/h), as an array of length 24*365
    """
    # H/C/M are boolean masks for heating / cooling / intermediate seasons.
    H, C, M = get_season_array_d_t(region)
    c_p_air = get_c_p_air()
    rho_air = get_rho_air()
    Theta_set_H = get_Theta_set_H()
    Theta_set_C = get_Theta_set_C()
    X_set_C = get_X_set_C()
    Q_hat_hs_d_t = np.zeros(24 * 365)
    Q_hat_hs_H_d_t = np.zeros(24 * 365)
    Q_hat_hs_CS_d_t = np.zeros(24 * 365)
    Q_hat_hs_CL_d_t = np.zeros(24 * 365)
    # Heating season (40-1b)
    if mu_H is not None:
        Q_hat_hs_H_d_t[H] = (((Q - 0.35 * 0.5 * 2.4) * A_A + (c_p_air * rho_air * (V_vent_l_d_t[H] + np.sum(V_vent_g_i[:5]))) / 3600) * (Theta_set_H - Theta_ex_d_t[H]) \
                        - mu_H * A_A * J_d_t[H] - q_gen_d_t[H] - n_p_d_t[H] * q_p_H) * 3600 * 10 ** -6
    # (40-1a): negative (no heating demand) hours are clipped to zero.
    Q_hat_hs_d_t[H] = np.clip(Q_hat_hs_H_d_t[H], 0, None)
    # Cooling season, sensible load (40-2b)
    Q_hat_hs_CS_d_t[C] = (((Q - 0.35 * 0.5 * 2.4) * A_A + (c_p_air * rho_air * (V_vent_l_d_t[C] + np.sum(V_vent_g_i[:5]))) / 3600) * (Theta_ex_d_t[C] - Theta_set_C) \
                        + mu_C * A_A * J_d_t[C] + q_gen_d_t[C] + n_p_d_t[C] * q_p_CS) * 3600 * 10 ** -6
    # Cooling season, latent load (40-2c)
    Q_hat_hs_CL_d_t[C] = ((rho_air * (V_vent_l_d_t[C] + np.sum(V_vent_g_i[:5])) * (X_ex_d_t[C] - X_set_C) * 10 ** 3 + w_gen_d_t[C]) \
                        * L_wtr + n_p_d_t[C] * q_p_CL * 3600) * 10 ** -6
    # (40-2a): sensible and latent parts are clipped separately, then summed.
    Q_hat_hs_d_t[C] = np.clip(Q_hat_hs_CS_d_t[C], 0, None) + np.clip(Q_hat_hs_CL_d_t[C], 0, None)
    # Intermediate season (40-3): no heat-source output.
    Q_hat_hs_d_t[M] = 0
    return Q_hat_hs_d_t
def test_install_galaxy_role(runtime_tmp: Runtime) -> None:
    """Check install role with empty galaxy file."""
    # Create an empty galaxy.yml plus a minimal role meta/main.yml so the
    # project looks like a role without a valid galaxy name.
    pathlib.Path(f"{runtime_tmp.project_dir}/galaxy.yml").touch()
    pathlib.Path(f"{runtime_tmp.project_dir}/meta").mkdir()
    pathlib.Path(f"{runtime_tmp.project_dir}/meta/main.yml").touch()
    # role_name_check=1: this should only raise a warning
    runtime_tmp._install_galaxy_role(runtime_tmp.project_dir, role_name_check=1)
    # role_name_check=2: this should test the bypass role name check path
    runtime_tmp._install_galaxy_role(runtime_tmp.project_dir, role_name_check=2)
    # role_name_check=0: this should raise an error
    with pytest.raises(
        InvalidPrerequisiteError, match="does not follow current galaxy requirements"
    ):
        runtime_tmp._install_galaxy_role(runtime_tmp.project_dir, role_name_check=0)
def test_runrecipe_load_valid() -> None:
    """Test loading a valid run recipe from file."""
    expected = RunRecipe(
        script=["ping example.com", "git init"],
        ports=[(3, 2), (7, 204)],
        sockets=[("/var/opt/example", "/var/opt/otherthing")],
    )
    with open(RUNRECIPE_VALID) as fp:
        loaded = RunRecipe.load_from_file(fp)
    assert loaded == expected
def get_all_infoproviders():
    """
    Endpoint `/infoproviders`.

    The response contains information about every infoprovider stored in
    the database; on failure a JSON error message is returned with HTTP 400.
    """
    try:
        return flask.jsonify(queries.get_infoprovider_list())
    except Exception:
        logger.exception("An error occurred: ")
        payload = {"err_msg": "An error occurred while loading all infoproviders"}
        return flask.jsonify(payload), 400
def maidenhead(dec_lat, dec_lon):
    """Convert latitude and longitude to an 8-character Maidenhead grid locator.

    Returns '' for non-numeric, non-finite, or out-of-range inputs.
    """
    try:
        dec_lat = float(dec_lat)
        dec_lon = float(dec_lon)
    except ValueError:
        return ''
    if _non_finite(dec_lat) or _non_finite(dec_lon):
        return ''
    if 90 < math.fabs(dec_lat) or 180 < math.fabs(dec_lon):
        return ''
    if 89.99999 < dec_lat:
        # force North Pole to just inside lat_sq 'R'
        dec_lat = 89.99999
    if 179.99999 < dec_lon:
        # force 180 to just inside lon_sq 'R'
        dec_lon = 179.99999
    # shift to non-negative ranges: lat 0..180, lon 0..360
    adj_lat = dec_lat + 90.0
    adj_lon = dec_lon + 180.0
    # divide into 18 zones (fields) each 20 degrees lon, 10 degrees lat
    grid_lat_sq = chr(int(adj_lat / 10) + 65)
    grid_lon_sq = chr(int(adj_lon / 20) + 65)
    # divide into 10 zones (squares) each 2 degrees lon, 1 degrees lat
    grid_lat_field = str(int(adj_lat % 10))
    grid_lon_field = str(int((adj_lon / 2) % 10))
    # remainder in minutes
    adj_lat_remainder = (adj_lat - int(adj_lat)) * 60
    adj_lon_remainder = ((adj_lon) - int(adj_lon / 2) * 2) * 60
    # divide into 24 zones (subsquares) each 5 minutes lon, 2.5 minutes lat
    grid_lat_subsq = chr(97 + int(adj_lat_remainder / 2.5))
    grid_lon_subsq = chr(97 + int(adj_lon_remainder / 5))
    # remainder in seconds
    adj_lat_remainder = (adj_lat_remainder % 2.5) * 60
    adj_lon_remainder = (adj_lon_remainder % 5.0) * 60
    # divide into 10 zones (extended squares) each 30 secs lon, 15 secs lat
    grid_lat_extsq = chr(48 + int(adj_lat_remainder / 15))
    grid_lon_extsq = chr(48 + int(adj_lon_remainder / 30))
    # locator alternates longitude/latitude characters, coarse to fine
    return (grid_lon_sq + grid_lat_sq +
            grid_lon_field + grid_lat_field +
            grid_lon_subsq + grid_lat_subsq +
            grid_lon_extsq + grid_lat_extsq)
def parse_worker_string(miner, worker):
    """
    Parses a worker string and returns the coin address and worker ID.

    A worker string may be "address.worker", "address.middle.worker" (only
    the first and last dot-separated parts are kept), or a bare "worker"
    with no dot, in which case the whole string doubles as the coin
    address.  The parsed values are also stored on ``miner``.

    Returns:
        String, String
    """
    parts = worker.split('.')
    if len(parts) > 1:
        # First part is the coin address, last part the worker name; any
        # middle parts are discarded.
        coin_address = parts[0]
        worker = parts[-1]
    else:
        coin_address = worker
    # Keep the miner record in sync.  The original code special-cased
    # empty/None/differing values but assigned the same value in every
    # branch, so an unconditional assignment is equivalent.
    if coin_address is not None:
        miner.coin_address = coin_address
    if worker is not None:
        miner.worker_name = worker
    return coin_address, worker
def test_table_answer_element_deser_no_rows():
    """Check deserialization creates empty table when no rows are present."""
    raw_answer = {
        "answerElements": [
            {"metadata": {"columnMetadata": [{"name": "col1", "schema": "Node"}]}}
        ]
    }
    table = TableAnswer(raw_answer)
    column_metadata = table.metadata.column_metadata
    assert len(column_metadata) == 1
    assert column_metadata[0].name == "col1"
    assert len(table.rows) == 0
    assert table.frame().empty
    assert table.frame().columns == [meta.name for meta in column_metadata]
def get_inference_sequence(file_path):
    """
    Build an inference data dict from 2D detections stored next to an image.

    :param file_path: path (without extension) of the 2D bounding box JSON
        file and its matching ``.jpg`` image
    :return: dict with keys 'rgb_path', 'camera', 'boxes' and 'sequence_id'
    """
    # The `with` block closes the file; the original also called f.close()
    # redundantly inside it.
    with open(file_path + '.json', 'r') as f:
        detected_bdbs = json.load(f)
    # Fixed pinhole intrinsics used for all inference images.
    camera = {'K': np.array([[529.5, 0., 365.], [0, 529.5, 265.], [0, 0, 1]])}
    boxes_out = list()
    for bdb2d in detected_bdbs:
        classname = bdb2d['class']
        # Skip detections whose class is not in the test categories.
        if classname not in OBJ_CATEGORY_TEST:
            continue
        x1, y1, x2, y2 = bdb2d['bbox'][:4]
        boxes_out.append({
            'bdb_pos': [x1, y1, x2, y2],
            'size_cls': OBJ_CATEGORY_CLEAN.index(classname),
        })
    data = dict()
    data['rgb_path'] = file_path + '.jpg'
    data['camera'] = camera
    data['boxes'] = list_of_dict_to_dict_of_list(boxes_out)
    # Sequence id is the numeric final path component.
    data['sequence_id'] = int(file_path.split('/')[-1])
    return data
def _DC_GetBoundingBox(self):
"""
GetBoundingBox() -> (x1,y1, x2,y2)
Returns the min and max points used in drawing commands so far.
"""
return (self.MinX(), self.MinY(), self.MaxX(), self.MaxY()) | 31,044 |
def map_text(
    text: Union[str, Text, Iterable[str], Iterable[Text]],
    mapping: StringMapper
) -> Union[str, List[str]]:
    """
    Replace text if it matches one of the dictionary keys.

    :param text: Text instance(s) to map.
    :param mapping: Mappings to replace text.
    """
    if isinstance(text, Text):
        text = text.get_text()
    if not isinstance(text, str):
        # Recurse over iterables of text, mapping each element.
        return [map_text(str(item), mapping) for item in text]
    if mapping is None:
        return text
    if isinstance(mapping, (dict, Series)):
        return mapping[text] if text in mapping.keys() else text
    if callable(mapping):
        return mapping(text)
    raise TypeError('mapping must be a dict or callable')
def _load_components(config: ConfigType) -> ConfigType:
    """Resolve the special "_load" key of a config.

    Args:
        config (ConfigType)

    Returns:
        ConfigType
    """
    special_key = "_load"
    if config is None or special_key not in config:
        return config
    # NOTE: pop() mutates the caller's config.
    base_config = read_config_file(config.pop(special_key))
    merged_config = OmegaConf.merge(base_config, config)
    assert isinstance(merged_config, ConfigType)
    return merged_config
def _value_to_variant(value: typing.Union[bytes, int, float, str]) -> GLib.Variant:
    """
    Automatically convert a Python value to a GLib.Variant by guessing the
    matching variant type.
    """
    # bool must be tested before int, since bool is a subclass of int.
    type_codes = ((bool, "b"), (bytes, "y"), (int, "x"), (float, "d"), (str, "s"))
    for py_type, variant_code in type_codes:
        if isinstance(value, py_type):
            return GLib.Variant(variant_code, value)
    raise ValueError("Unknown value type", value)
def demo(num_sents=2000, max_rules=200, min_score=3,
         error_output="errors.out", rule_output="rules.yaml",
         randomize=False, train=.8, trace=3):
    """
    Brill Tagger Demonstration

    :param num_sents: how many sentences of training and testing data to use
    :type num_sents: int
    :param max_rules: maximum number of rule instances to create
    :type max_rules: int
    :param min_score: the minimum score for a rule in order for it to
        be considered
    :type min_score: int
    :param error_output: the file where errors will be saved
    :type error_output: str
    :param rule_output: the file where rules will be saved
    :type rule_output: str
    :param randomize: whether the training data should be a random subset
        of the corpus
    :type randomize: bool
    :param train: the fraction of the corpus to be used for training
        (1=all)
    :type train: float
    :param trace: the level of diagnostic tracing output to produce (0-4)
    :type trace: int
    """
    from nltk.corpus import treebank
    from nltk import tag
    from nltk.tag import brill

    # Fallback tagger: numbers -> CD, everything else -> NN.
    nn_cd_tagger = tag.RegexpTagger([(r'^-?[0-9]+(.[0-9]+)?$', 'CD'),
                                     (r'.*', 'NN')])

    # train is the proportion of data used in training; the rest is reserved
    # for testing.
    print("Loading tagged data... ")
    tagged_data = treebank.tagged_sents()
    if randomize:
        # BUGFIX: the original shuffled an undefined name `sents`; the
        # corpus view must also be materialized before shuffling.
        tagged_data = list(tagged_data)
        random.seed(len(tagged_data))
        random.shuffle(tagged_data)
    cutoff = int(num_sents*train)
    training_data = tagged_data[:cutoff]
    gold_data = tagged_data[cutoff:num_sents]
    testing_data = [[t[0] for t in sent] for sent in gold_data]
    print("Done loading.")

    # Unigram tagger
    print("Training unigram tagger:")
    unigram_tagger = tag.UnigramTagger(training_data,
                                       backoff=nn_cd_tagger)
    if gold_data:
        print("    [accuracy: %f]" % unigram_tagger.evaluate(gold_data))

    # Bigram tagger
    print("Training bigram tagger:")
    bigram_tagger = tag.BigramTagger(training_data,
                                     backoff=unigram_tagger)
    if gold_data:
        print("    [accuracy: %f]" % bigram_tagger.evaluate(gold_data))

    # Brill tagger templates: tag- and word-based context rules.
    templates = [
        brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (1,1)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (2,2)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (1,2)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (1,3)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (1,1)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (2,2)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (1,2)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (1,3)),
        brill.ProximateTokensTemplate(brill.ProximateTagsRule, (-1, -1), (1,1)),
        brill.ProximateTokensTemplate(brill.ProximateWordsRule, (-1, -1), (1,1)),
        ]
    trainer = brill.FastBrillTaggerTrainer(bigram_tagger, templates, trace)
    #trainer = brill.BrillTaggerTrainer(u, templates, trace)
    brill_tagger = trainer.train(training_data, max_rules, min_score)

    if gold_data:
        print(("\nBrill accuracy: %f" % brill_tagger.evaluate(gold_data)))

    # NOTE(review): printing rules only when trace <= 1 looks inverted, but
    # the original behavior is kept pending confirmation.
    if trace <= 1:
        print("\nRules: ")
        for rule in brill_tagger.rules():
            print((str(rule)))

    # BUGFIX: the original used the Python 2 `file()` builtin and never
    # closed the handles; use open() with context managers instead.
    with open(rule_output, 'w') as print_rules:
        yaml.dump(brill_tagger, print_rules)

    testing_data = brill_tagger.batch_tag(testing_data)
    with open(error_output, 'w') as error_file:
        error_file.write('Errors for Brill Tagger %r\n\n' % rule_output)
        for e in error_list(gold_data, testing_data):
            error_file.write(e+'\n')
    print(("Done; rules and errors saved to %s and %s." %
           (rule_output, error_output)))
def ha_rise_set(el_limit, lat, dec):
    """
    Hour angle from transit for rising and setting.

    Returns pi for a source that never sets and 0 for a source always below
    the horizon.

    @param el_limit : the elevation limit in radians
    @type  el_limit : float

    @param lat : the observatory latitude in radians
    @type  lat : float

    @param dec : the source declination in radians
    @type  dec : float

    @return: hour angle from transit in radians
    """
    numerator = math.sin(el_limit) - math.sin(lat) * math.sin(dec)
    denominator = math.cos(lat) * math.cos(dec)
    cos_ha = numerator / denominator
    if cos_ha <= -1:
        # circumpolar: never sets
        return math.pi
    if cos_ha >= 1:
        # never rises above the elevation limit
        return 0
    return math.acos(cos_ha)
def create(transactions, user=None):
    """# Create Transactions

    Send a list of Transaction objects for creation in the Stark Bank API.

    ## Parameters (required):
    - transactions [list of Transaction objects]: list of Transaction objects to be created in the API

    ## Parameters (optional):
    - user [Project object]: Project object. Not necessary if starkbank.user was set before function call

    ## Return:
    - list of Transaction objects with updated attributes
    """
    created = rest.post_multi(resource=_resource, entities=transactions, user=user)
    return created
def get_quote(symbol):
    """
    Returns today's stock price
    """
    page = get_content(symbol)
    ticker = page('.time_rtq_ticker span')
    return ticker.text()
def load_pil(data, is_file = False):
    """Parse a string or file written in PIL notation.

    Returns a 4-tuple ``(complexes, macrostates, detailed, condensed)``.
    """
    # We only assign reactions in a postprocessing step,
    # because there are no macrostates in nuskell.
    set_io_objects(D = NuskellDomain, C = NuskellComplex)
    out = dsd_read_pil(data, is_file)
    clear_io_objects()
    # Shallow-copy everything out before clearing, since the parser's
    # containers are emptied below.
    cxs = dict(out['complexes'])
    rms = dict(out['macrostates'])
    det = set(out['det_reactions'])
    con = set(out['con_reactions'])
    # Use a plain loop (not a side-effect list comprehension) to clear.
    for container in out.values():
        container.clear()
    out.clear()
    return cxs, rms, det, con
def get(obj, key, default=None, pattern_default=(), apply_transforms=True):
    """
    Get a value specified by the dotted key. If dotted is a pattern,
    return a tuple of all matches

    >>> d = {'hello': {'there': [1, '2', 3]}}
    >>> get(d, 'hello.there[1]|int')
    2
    >>> get(d, 'hello.there[1:]')
    ['2', 3]
    >>> get([{'a': 1}, {'a':2}], '[*].a')
    (1, 2)
    """
    ops = parse(key)
    values = el.gets(ops, obj)
    if apply_transforms:
        values = ( ops.apply(v) for v in values )
    matches = tuple(values)
    if is_pattern(ops):
        return matches if matches else pattern_default
    return matches[0] if matches else default
def movep(src, dst, overlay = True):
    """ Move a file or directory tree into the directory ``dst``.

    overlay: True / False; True means existing destination files are
    automatically overwritten. """
    if not os.path.isdir(dst): raise TypeError("dst must be a directory.")
    # Move a single file
    if os.path.isfile(src):
        dst_dir = os.path.join(dst, os.path.basename(src))
        if os.path.exists(dst_dir):
            # Destination already exists: bail out unless overlaying.
            if not overlay: return
            os.remove(dst_dir)
        shutil.move(src, dst_dir)
        return
    # Move a directory tree
    for folder in os.walk(src):
        # Join dst, the path separator, and the sub-path of this folder
        # under `src` into the corresponding destination path.
        dst_dir = dst + os.sep + os.path.basename(src) + folder[0].split(src, 1)[-1]
        # If the destination folder already exists, move files one by one,
        # deleting any pre-existing destination file first (when overlaying).
        if os.path.exists(dst_dir):
            for exs_file in folder[-1]:
                abs_path = os.path.join(dst_dir, exs_file)
                if os.path.exists(abs_path):
                    if not overlay: continue
                    os.remove(abs_path)
                shutil.move(os.path.join(folder[0], exs_file), os.path.join(dst_dir, exs_file))
        elif not os.path.exists(dst_dir): shutil.move(folder[0], dst_dir)
    # Remove the now-empty source folders left behind after the move.
    if os.path.exists(src) and overlay: shutil.rmtree(src)
def write_proba_predictions(ids, y_proba_pred,
                            predpath=os.path.join(data_path, 'id_ssl.csv')):
    """Write the probability of belonging to each class in a csv file.

    :param ids: sequence of sample identifiers (one per row of y_proba_pred)
    :param y_proba_pred: array of shape (len(ids), 4) with class probabilities
    :param predpath: destination CSV path
    """
    assert len(ids) == y_proba_pred.shape[0]
    with open(predpath, 'w') as f:
        f.write('Id,Class1,Class2,Class3,Class4\n')
        # BUGFIX: the original iterated `for i in len(ids)` (a TypeError),
        # never wrote a newline after each row, and logged the undefined
        # name `predictions`.
        for i in range(len(ids)):
            row = ','.join(
                [str(ids[i])] + [str(y_proba_pred[i, j]) for j in range(4)]
            )
            f.write(row + '\n')
    logger.info('Wrote %i predictions into %s' % (len(ids), predpath))
def str_view(request):
    """
    A simple test view that returns a string.
    """
    response_xml = '<Response><Message>Hi!</Message></Response>'
    return response_xml
def _run_script(script, start_with_ctty, args, kwargs):
    """
    Meant to be called inside a python subprocess, do NOT call directly.

    Attaches the subprocess to a PTY, runs *script* with the given
    positional and keyword arguments, and returns its result.
    """
    enter_pty(start_with_ctty)
    result = script(*args, **kwargs)
    # Python-spawned subprocesses do not call exit funcs - https://stackoverflow.com/q/34506638/2907819
    # NOTE: atexit._run_exitfuncs is a private CPython API.
    atexit._run_exitfuncs()
    return result
def get_feed_list(feeds):
    """Build a proto FeedList message from an iterable of feed objects."""
    feed_messages = []
    for feed in feeds:
        feed_messages.append(feeds_pb2.Feed(**_get_valid_fields_feed(feed)))
    return feeds_pb2.FeedList(data=feed_messages)
def group_interpellet_interval_plot(FEDs, groups, kde, logx, **kwargs):
    """
    FED3 Viz: Plot the interpellet intervals as a histogram, first aggregating
    the values for devices in a Groups.

    Parameters
    ----------
    FEDs : list of FED3_File objects
        FED3 files (loaded by load.FED3_File)
    groups : list of strings
        Groups to plot (based on the group attribute of each FED3_File)
    kde : bool
        Whether or not to include kernel density estimation, which plots
        probability density (rather than count) and includes a fit line (see
        seaborn.distplot)
    logx : bool
        When True, plots on a logarithmic x-axis
    **kwargs :
        ax : matplotlib.axes.Axes
            Axes to plot on, a new Figure and Axes are
            created if not passed
        date_filter : array
            A two-element array of datetimes (start, end) used to filter
            the data
        **kwargs also allows FED3 Viz to pass all settings to all functions.

    Returns
    -------
    fig : matplotlib.figure.Figure
    """
    if not isinstance(FEDs, list):
        FEDs = [FEDs]
    for FED in FEDs:
        assert isinstance(FED, FED3_File),'Non FED3_File passed to interpellet_interval_plot()'
    if 'ax' not in kwargs:
        fig, ax = plt.subplots(figsize=(4,5), dpi=125)
    else:
        ax = kwargs['ax']
    bins=[]
    if logx:
        # Log axis: bins of width 0.1 in log10(minutes), from 10**-2 to 10**5.
        lowest = -2
        highest = 5
        ax.set_xticks(range(lowest,highest))
        ax.set_xticklabels([10**num for num in range(-2,5)])
        c=0
        while c <= highest:
            bins.append(round(lowest+c,2))
            c+=0.1
    else:
        # Linear axis: 50 equal-width bins over 0-900 minutes.
        ax.set_xticks([0,300,600,900])
        div = 900/50
        bins = [i*div for i in range(50)]
        ax.set_xlim(-100,1000)
    for group in groups:
        all_vals = []
        for FED in FEDs:
            # NOTE: membership test — FED.group holds the groups this file
            # belongs to.
            if group in FED.group:
                df = FED.data
                if 'date_filter' in kwargs:
                    s, e = kwargs['date_filter']
                    df = df[(df.index >= s) &
                            (df.index <= e)].copy()
                # Only positive intervals are plotted; on a log axis they
                # are transformed to log10 to match the bins above.
                y = list(df['Interpellet_Intervals'][df['Interpellet_Intervals'] > 0])
                if logx:
                    y = [np.log10(val) for val in y if not pd.isna(val)]
                all_vals += y
        sns.distplot(all_vals,bins=bins,label=group,ax=ax,norm_hist=False,
                     kde=kde)
    ax.legend(fontsize=8)
    ylabel = 'Density Estimation' if kde else 'Count'
    ax.set_ylabel(ylabel)
    ax.set_xlabel('minutes between pellets')
    ax.set_title('Interpellet Interval Plot')
    plt.tight_layout()
    return fig if 'ax' not in kwargs else None
def filter(args):
    """
    %prog filter frgfile idsfile

    Removes the reads from frgfile that are indicated as duplicates in the
    clstrfile (generated by CD-HIT-454). `idsfile` includes a set of names to
    include in the filtered frgfile. See apps.cdhit.ids().
    """
    p = OptionParser(filter.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    frgfile, idsfile = args
    assert frgfile.endswith(".frg")

    # BUGFIX: the original never closed its file handles; context managers
    # guarantee they are released.
    with open(idsfile) as fp:
        allowed = set(x.strip() for x in fp)
    logging.debug("A total of {0} allowed ids loaded.".format(len(allowed)))

    newfrgfile = frgfile.replace(".frg", ".filtered.frg")
    nfrags, discarded_frags = 0, 0
    nmates, discarded_mates = 0, 0
    with open(frgfile) as fp, open(newfrgfile, "w") as fw:
        for rec in iter_records(fp):
            if rec.type == "FRG":
                readname = rec.get_field("acc")
                # Strip the mate-pair suffix before the membership test.
                readname = readname.rstrip("ab")
                nfrags += 1
                if readname not in allowed:
                    discarded_frags += 1
                    continue
            if rec.type == "LKG":
                readname = rec.get_field("frg")
                readname = readname.rstrip("ab")
                nmates += 1
                if readname not in allowed:
                    discarded_mates += 1
                    continue
            print(rec, file=fw)

    # Print out a summary
    survived_frags = nfrags - discarded_frags
    survived_mates = nmates - discarded_mates
    print("Survived fragments: {0}".\
            format(percentage(survived_frags, nfrags)), file=sys.stderr)
    print("Survived mates: {0}".\
            format(percentage(survived_mates, nmates)), file=sys.stderr)
def _beta(x, p):
    """Helper function for `pdf_a`, beta = pi * d(1 - omega(x), omega(p))."""
    circ_distance = _circ_dist(1 - _amplitude_to_angle(x), _amplitude_to_angle(p))
    return np.pi * circ_distance
def set_reporting_max_width(w):
    """
    Set the max width for reported parameters. This is used so that failures
    don't overflow terminals in the event arguments are dumped.

    :param w: The new max width to enforce for the module
    :type w: int
    :return: True
    """
    new_width = int(w)
    _REPR_MAX_WIDTH[0] = new_width
    return True
def get_version():
    """Get LanguageTool version."""
    version = _get_attrib().get('version')
    if version:
        return version
    # Fall back to parsing the version out of the install directory name.
    match = re.search(r"LanguageTool-?.*?(\S+)$", get_directory())
    return match.group(1) if match else version
def get_mnsp_offer_index(data) -> list:
    """Get MNSP offer index"""
    period = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
              .get('PeriodCollection').get('Period'))
    interconnectors = (period.get('InterconnectorPeriodCollection')
                       .get('InterconnectorPeriod'))

    # Collect (InterconnectorID, RegionID) pairs for every MNSP offer.
    offer_index = []
    for interconnector in interconnectors:
        # Non-MNSP interconnectors do not have an MNSPOfferCollection attribute
        offer_collection = interconnector.get('MNSPOfferCollection')
        if offer_collection is None:
            continue
        for offer in offer_collection.get('MNSPOffer'):
            offer_index.append(
                (interconnector['@InterconnectorID'], offer['@RegionID'])
            )
    return offer_index
def main() -> None:
    """
    Executes the main bulk of this function: fetches hourly forecasts for
    every location on the route, decides whether an umbrella is needed, and
    publishes a summary notification via SNS.

    :raises RuntimeError: If something goes wrong.
    """
    if len(_ROUTE) == 0:
        raise RuntimeError('The route must contain at least one location')
    if _NOW.time() > _DAY_END:
        raise RuntimeError('There are no more working hours today')
    session = requests.session()
    # One forecast request per route location, flattened into one sample stream.
    samples = itertools.chain.from_iterable(
        filter_samples(
            darksky.location_hourly(session, _DARK_SKY_SECRET_KEY, lat, long_),
            _NOW, _DAY_BEGIN, _DAY_END)
        for lat, long_ in _ROUTE)
    # we need to iterate over the whole sequence at least twice due to the min
    # and max temp calculation - laziness doesn't buy anything
    samples = list(samples)
    over_threshold = [sample for sample in samples
                      if sample.umbrella_score >= _SCORE_THRESHOLD]
    umbrella = len(over_threshold) > 0
    logger.info('Umbrella required? %s', umbrella)
    if umbrella:
        logger.info('%d samples were over the threshold of %f',
                    len(over_threshold), _SCORE_THRESHOLD)
        # in case we say an umbrella is required, but it turns out not to be
        for sample in over_threshold:
            logger.debug(sample)
    # NOTE(review): min()/max() raise ValueError when `samples` is empty —
    # confirm filter_samples always yields at least one sample per location.
    temperatures = [sample.apparent_temp for sample in samples]
    low = min(temperatures)
    high = max(temperatures)
    logger.info('Low: %.2f, High: %.2f', low, high)
    summary = 'You need to take your umbrella' \
        if umbrella else 'You don\'t need your umbrella!'
    message = {
        'app': _PUSHOVER_APP_TOKEN,
        'body': f'{summary} (Low: {round(low)}°C, High: {round(high)}°C)',
        'url': f'https://darksky.net/forecast/{_ROUTE[0][0]},{_ROUTE[0][1]}'
               f'/uk224/en'
    }
    response = _SNS.publish(
        TopicArn=_NOTIFICATION_TOPIC_ARN,
        Message=json.dumps(message, ensure_ascii=False))
    logger.info(f"Published message {response['MessageId']}")
def _find_clusters(
    data,
    cluster_range: Tuple[int, int] = None,
    metric: str = "silhouette_score",
    target=None,
    **kwargs,
):
    """Finds the optimal number of clusters for K-Means clustering using the selected metric.

    Args:
        data: The data.
        cluster_range: A tuple of the minimum and maximum cluster
            search range. Defaults to (2, 20).
        metric: The metric to optimize (from sklearn.metrics).
        target: (For supervised clustering) The labels, as a 1-D array.
        **kwargs: Keyword arguments to be passed into the K-Means estimator.

    Raises:
        ValueError: Max of cluster range greater than the min.

    Returns:
        clusters, KMeansFit
    """
    cluster_range = cluster_range or (2, 20)
    if not cluster_range[0] < cluster_range[1]:
        raise ValueError(
            "cluster_range expected to be (min_cluster, max_cluster), but the min was >= the max"
        )
    unsupervised_metrics = {
        "silhouette_score",
        "davies_bouldin_score",
        "calinski_harabasz_score",
    }

    scores = []
    widgets = []
    for n_clusters in range(*cluster_range):
        clusterwidget = _fit_kmeans(data, n_clusters, **kwargs)
        score_fn = getattr(sklearn.metrics, metric)
        if metric in unsupervised_metrics:
            score = score_fn(data, clusterwidget.clusters)
        else:
            if target is None:
                raise ValueError("'target' must be specified for supervised clustering")
            score = score_fn(target, clusterwidget.clusters)
        scores.append(score)
        widgets.append(clusterwidget)

    # Keep the fit with the highest score and record the search metadata.
    best = widgets[np.argmax(scores)]
    best.search = True
    best.cluster_range = cluster_range
    best.metric = metric
    best.scores = scores
    if target is not None:
        best.target = target
    return best
def _gate_objectives_li_pe(basis_states, gate, H, c_ops):
    """Objectives for two-qubit local-invariants or perfect-entangler
    optimization"""
    if len(basis_states) != 4:
        raise ValueError(
            "Optimization towards a two-qubit gate requires 4 basis_states"
        )
    b0, b1, b2, b3 = basis_states
    # Bell states as in "Theorem 1" in
    # Y. Makhlin, Quantum Inf. Process. 1, 243 (2002)
    bell_states = [
        (b0 + b3) / np.sqrt(2),
        (1j * b1 + 1j * b2) / np.sqrt(2),
        (b1 - b2) / np.sqrt(2),
        (1j * b0 - 1j * b3) / np.sqrt(2),
    ]
    return [
        Objective(initial_state=psi, target=gate, H=H, c_ops=c_ops)
        for psi in bell_states
    ]
def get_pip_package_list(path: str) -> Generator[pkg_resources.Distribution, None, None]:
    """Get the Pip package list of a Python virtual environment.

    Must be a path like: /project/venv/lib/python3.9/site-packages
    """
    return pkg_resources.find_distributions(path)
def write_ruassump(rufail,rulist):
    """Writes rectifier environment assumption

    Appends a spec assumption stating that at least
    ``len(rulist) - rufail`` rectifiers are up at any one time.

    Parameters
    ----------
    rufail : int
        how many rectifiers may fail at one time
    rulist : list
        list of all rectifier ids

    Notes
    -----
    Writes to the module-level file handle ``f`` — it must be open for
    writing before this is called.  Does nothing when ``rulist`` is empty.
    """
    if len(rulist) > 0:
        f.write('assumptions += '"'"'&\\n\\t[]((ru'+str(rulist[0]))
        for i in range(1,len(rulist)):
            f.write(' + ru'+str(rulist[i]))
        f.write(') >= '+str(len(rulist)-rufail)+')'"'")
        f.write('\n')
def hasGLUExtension( specifier ):
    """Given a string specifier, check for extension being available"""
    from OpenGL.GLU import gluGetString, GLU_EXTENSIONS
    if not AVAILABLE_GLU_EXTENSIONS:
        # Populate the module-level cache on first use.
        AVAILABLE_GLU_EXTENSIONS[:] = gluGetString( GLU_EXTENSIONS )
    normalized = specifier.replace(as_8_bit('.'), as_8_bit('_'))
    return normalized in AVAILABLE_GLU_EXTENSIONS
def do_slots_information(parser, token):
    """Calculates some context variables based on displayed slots.
    """
    bits = token.contents.split()
    # The tag takes no arguments: only the tag name itself may appear.
    if len(bits) != 1:
        raise TemplateSyntaxError(_('%s tag needs no argument') % bits[0])
    return SlotsInformationNode()
def get_region_of_interest(img, sx=0.23, sy=0.15, delta=200, return_vertices=False):
    """
    :param img: image to extract ROI from (2-D, single channel)
    :param sx: X-axis factor for ROI bottom base
    :param sy: Y-axis factor for ROI top base
    :param delta: ROI top base length
    :param return_vertices: whether to return the ROI vertices
    :return: ROI (optional: vertices)
    """
    assert len(img.shape) == 2
    h, w = img.shape
    # Trapezoid: short centered top base of length `delta`, bottom base
    # inset horizontally by sx*w on each side.
    vertices = np.array(
        [
            [0.5 * (w - delta), sy * h],
            [0.5 * (w + delta), sy * h],
            [(1 - sx) * w, h - 1],
            [sx * w, h - 1],
        ]
    )
    fill_color = 255
    mask = np.zeros(img.shape)
    cv2.fillPoly(mask, np.array([vertices], dtype=np.int32), fill_color)
    # Keep only the pixels inside the trapezoid.
    roi = mask.astype(np.uint8) & img.astype(np.uint8)
    return (roi, vertices) if return_vertices else roi
def load_search_freq(fp=None):
    """
    Load the search_freq from a JSON file into a Counter.

    :param fp: path to the JSON file; defaults to SEARCH_FREQ_JSON.
        The default is resolved lazily (at call time) instead of at
        import time, so later rebinding of SEARCH_FREQ_JSON is honoured
        and explicit callers never touch it.
    :return: Counter of search frequencies; empty if the file is missing.
    """
    if fp is None:
        fp = SEARCH_FREQ_JSON
    try:
        with open(fp, encoding="utf-8") as f:
            return Counter(json.load(f))
    except FileNotFoundError:
        return Counter()
def _sum_of_squares(a, axis=0):
    """
    Square each element of the input array, and return the sum(s) of that.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    sum_of_squares : ndarray
        The sum along the given axis for (a**2).

    See also
    --------
    _square_of_sums : The square(s) of the sum(s) (the opposite of
    `_sum_of_squares`).
    """
    arr, axis = _chk_asarray(a, axis)
    return np.sum(arr * arr, axis)
def assert_equal(actual: numpy.ndarray, desired: list):
    """
    usage.orange3: 1
    usage.scipy: 7
    usage.statsmodels: 1
    usage.xarray: 1
    """
    # NOTE(review): body is intentionally `...` — this appears to be a
    # recorded API-usage stub (signature plus per-project call counts),
    # not runtime code.
    ...
def get_preprocess_fn(pp_pipeline, remove_tpu_dtypes=True):
  """Transform an input string into the preprocessing function.

  The minilanguage is as follows:
    fn1|fn2(arg, arg2,...)|...
  And describes the successive application of the various `fn`s to the input,
  where each function can optionally have one or more arguments, which are
  either positional or key/value, as dictated by the `fn`.
  The output preprocessing function expects a dictinary as input. This
  dictionary should have a key "image" that corresponds to a 3D tensor
  (height x width x channel).

  Args:
    pp_pipeline: A string describing the pre-processing pipeline.
    remove_tpu_dtypes: Whether to remove TPU incompatible types of data.

  Returns:
    preprocessing function.

  Raises:
    ValueError: if preprocessing function name is unknown
  """

  def _preprocess_fn(data):
    """The preprocessing function that is returned."""

    # Validate input
    if not isinstance(data, dict):
      raise ValueError("Argument `data` must be a dictionary, "
                       "not %s" % str(type(data)))

    # Apply all the individual steps in sequence.
    # SECURITY NOTE: each pipeline step is eval()'d — pp_pipeline must
    # only ever come from trusted configuration, never from user input.
    logging.info("Data before pre-processing:\n%s", data)
    for fn_name in pp_pipeline.split("|"):
      data = eval(fn_name)(data)  # pylint: disable=eval-used

    if remove_tpu_dtypes:
      # Remove data that are TPU-incompatible (e.g. filename of type tf.string).
      for key in list(data.keys()):
        if data[key].dtype not in TPU_SUPPORTED_DTYPES:
          tf.logging.warning(
              "Removing key '{}' from data dict because its dtype {} is not in "
              " the supported dtypes: {}".format(key, data[key].dtype,
                                                 TPU_SUPPORTED_DTYPES))
          del data[key]
    logging.info("Data after pre-processing:\n%s", data)
    return data

  return _preprocess_fn
def check_udev_properties(yaml_data, uinput):
    """
    Compare the properties our new uinput device has with the ones from the
    recording and ring the alarm bell if one of them is off.
    """
    yaml_udev_section = fetch(yaml_data, "udev")
    yaml_udev_props = fetch(yaml_udev_section, "properties")
    # Recording stores properties as "NAME=value" strings; split on the
    # first '=' only, since values may themselves contain '='.
    yaml_props = {
        k: v for (k, v) in [prop.split("=", maxsplit=1) for prop in yaml_udev_props]
    }
    try:
        # We don't assign this one to virtual devices
        del yaml_props["LIBINPUT_DEVICE_GROUP"]
    except KeyError:
        pass

    # give udev some time to catch up
    time.sleep(0.2)
    context = pyudev.Context()
    udev_device = pyudev.Devices.from_device_file(context, uinput.devnode)
    for name, value in udev_device.properties.items():
        if name in yaml_props:
            if yaml_props[name] != value:
                error(
                    f"Warning: udev property mismatch: recording has {name}={yaml_props[name]}, device has {name}={value}"
                )
            # Matched properties are removed so the leftover dict below
            # contains only properties missing from the live device.
            del yaml_props[name]
        else:
            # The list of properties we add to the recording, see libinput-record.c
            prefixes = (
                "ID_INPUT",
                "LIBINPUT",
                "EVDEV_ABS",
                "MOUSE_DPI",
                "POINTINGSTICK_",
            )
            for prefix in prefixes:
                if name.startswith(prefix):
                    error(f"Warning: unexpected property: {name}={value}")

    # the ones we found above were removed from the dict
    for name, value in yaml_props.items():
        error(f"Warning: device is missing recorded udev property: {name}={value}")
def check_with(ctx, check, what, *args, **kwargs):
    """
    Perform `check`, also looking at directories specified by the --with-X
    commandline option and the X_HOME environment variable (X = what.upper()).

    Extra candidate directories may be supplied through the ``extra_paths``
    keyword argument. Each candidate is probed in turn with ``ctx.find_at``;
    the first hit wins. If none match, `check` is run against the local
    environment as a fallback.
    """
    import os
    from os.path import abspath
    # adds 'extra_paths' and other defaults...
    kwargs = ctx._findbase_setup(kwargs)
    with_dir = getattr(ctx.options, "with_" + what, None)
    env_dir = os.environ.get(what.upper() + "_HOME", None)
    # Candidate search roots, in priority order: --with-X, X_HOME, extra_paths.
    paths = [with_dir, env_dir] + kwargs.pop("extra_paths", [])
    WHAT = what.upper()
    kwargs["uselib_store"] = kwargs.get("uselib_store", WHAT)
    kwargs["use"] = waflib.Utils.to_list(kwargs.get("use", [])) + \
        waflib.Utils.to_list(kwargs["uselib_store"])
    # Skip empty candidates; expand waf variables and normalize to abs paths.
    for path in [abspath(ctx.hwaf_subst_vars(p)) for p in paths if p]:
        # NOTE(review): in_msg is a waf-internal message-nesting counter;
        # resetting it presumably keeps each probe's output clean — confirm.
        ctx.in_msg = 0
        ctx.to_log("Checking for %s in %s" % (what, path))
        if ctx.find_at(check, what, path, **kwargs):
            #print ">> found %s at %s" % (what, path)
            ctx.in_msg = 0
            ctx.msg("Found %s at" % what, path, color="WHITE")
            # Export X_HOME so downstream tasks see where X was found.
            ctx.hwaf_declare_runtime_env(WHAT + "_HOME")
            return
        pass
    # No candidate directory matched: fall back to the plain check against
    # the local environment (system paths).
    ctx.in_msg = 0
    check(**kwargs)
    ctx.in_msg = 0
    ctx.msg("Found %s at" % what, "(local environment)", color="WHITE")
    # FIXME: handle windows ?
    ctx.env[WHAT + "_HOME"] = "/usr"
    ctx.hwaf_declare_runtime_env(WHAT + "_HOME")
    return
def mock_successful_encode_job(mocker):
    """Mock everything required for a successful transcode"""
    completed_job = {"Id": "1498220566931-qtmtcu", "Status": "Complete"}
    # Stub out the actual encode call and make the job poll report success.
    mocker.patch("cloudsync.api.VideoTranscoder.encode")
    mocker.patch("cloudsync.api.get_et_job", return_value=completed_job)
def nodeset(v):
    """Return *v* unchanged if it is a node-set.

    Raises:
        XPathTypeError: if *v* is not a node-set.
    """
    if not nodesetp(v):
        # Instantiate the exception explicitly: the old two-expression
        # `raise X, "msg"` form is Python 2-only syntax; this form is
        # valid in both Python 2 and 3.
        raise XPathTypeError("value is not a node-set")
    return v
def get_char_embs(char_emb_path, char_emb_size, alphabet_size=1422):
    """Get pretrained character embeddings and a dict mapping characters to IDs.

    Input:
      char_emb_path: path to glove.840B.{char_emb_size}d-char.txt.
      char_emb_size: expected size of each character embedding vector.
      alphabet_size: expected number of lines in the file; used only as the
        progress-bar total, not enforced.

    Returns:
      char_emb_matrix: numpy float32 array of shape (num_chars, char_emb_size),
        one row per character read from the file.
      char2id: dict mapping each char (string) to its row index (int).

    NOTE(review): an earlier docstring claimed IDs 0 and 1 are reserved for
    PAD/UNK and a fixed shape of (1426, size); the code assigns IDs starting
    at 0 and the shape follows the file length — confirm against consumers.
    """
    print("Loading char embeddings from file: {}...".format(char_emb_path))
    char_emb_matrix = []
    char2id = {}
    # GloVe files are UTF-8; be explicit so loading does not depend on the
    # platform's default encoding.
    with open(char_emb_path, 'r', encoding='utf-8') as fh:
        for idx, line in enumerate(tqdm(fh, total=alphabet_size)):
            parts = line.strip().split(" ")
            char = parts[0]
            vector = list(map(float, parts[1:]))
            if char_emb_size != len(vector):
                # ValueError (a subclass of Exception) is the precise error
                # for a malformed input line; existing catchers still work.
                raise ValueError(
                    "Expected vector of size {}, but got vector of size {}.".format(
                        char_emb_size, len(vector)))
            char_emb_matrix.append(vector)
            char2id[char] = idx
    char_emb_matrix = np.array(char_emb_matrix, dtype=np.float32)
    print("Loaded char embedding matrix with shape {}.".format(char_emb_matrix.shape))
    return char_emb_matrix, char2id
def _get_reverse_complement(seq):
"""
Get the reverse compliment of a DNA sequence.
Parameters:
-----------
seq
Returns:
--------
reverse_complement_seq
Notes:
------
(1) No dependencies required. Pure python.
"""
complement_seq = ""
for i in seq:
if i == "C":
complement_seq += "G"
elif i == "G":
complement_seq += "C"
elif i == "A":
complement_seq += "T"
elif i == "T":
complement_seq += "A"
elif i == "N":
complement_seq += "N"
reverse_complement_seq = complement_seq[::-1]
return reverse_complement_seq | 31,082 |
def wave_reduce_min_all(val):
    """
    Wave-wide minimum reduction whose result is delivered to every thread.
    """
    # Reduce first, then broadcast the reduced value from lane 0 to all lanes.
    return broadcast(wave_reduce_min(val), 0)
def predict_class(all_headlines):
    """
    Predict whether each headline is negative or positive.

    :param all_headlines: all headlines (objects exposing ``to_array()`` and a
        writable ``predicted_class`` attribute)
    :return: the same headline objects, with ``predicted_class`` set on each
    """
    clf, v = load_classifier("SVM")
    df = pd.DataFrame([h.to_array() for h in all_headlines])
    df.columns = \
        [
            'headline',
            'origin',
            'semantic_value',
            'pos',
            'neg',
            'neu',
            'published_at'
        ]
    # Normalise the text the same way the classifier was trained.
    df['headline'] = df['headline'].map(lambda x: strip_punctuation(x))
    df['headline'] = df['headline'].map(lambda x: x.lower())
    df['headline'] = df['headline'].map(lambda x: filter_stop_words(x))
    df['published_at'] = df['published_at'].map(lambda x: to_epoch(x))
    df = normalise_column(df, 'published_at')
    tr_counts = v.transform(df['headline'])
    tr = pd.DataFrame(tr_counts.todense())
    # BUG FIX: DataFrame.join returns a new frame; the previous code discarded
    # the result, so the vectorised text features were never passed to the
    # model. Keep the joined frame.
    df = df.join(tr)
    output = clf.predict(df.drop(["headline", "origin"], axis=1)).astype(int)
    df['predicted_class'] = output
    for i, h in enumerate(all_headlines):
        # Positional access — df has a default RangeIndex at this point.
        h.predicted_class = df['predicted_class'].iloc[i]
    return all_headlines
def find_tickets_for_seat_manager(
    user_id: UserID, party_id: PartyID
) -> list[DbTicket]:
    """Return the tickets for that party whose respective seats the user
    is entitled to manage.

    A user may manage a ticket's seat if the ticket either has no
    dedicated seat manager and is owned by the user, or explicitly names
    the user as its seat manager. Revoked tickets are excluded.
    """
    # NOTE: `== False` / `== None` are intentional — SQLAlchemy overloads
    # these operators to build SQL expressions; `is None` would not work.
    return db.session \
        .query(DbTicket) \
        .filter(DbTicket.party_id == party_id) \
        .filter(DbTicket.revoked == False) \
        .filter(
            (
                # no dedicated seat manager: the ticket owner manages the seat
                (DbTicket.seat_managed_by_id == None) &
                (DbTicket.owned_by_id == user_id)
            ) |
            # ... or the user was explicitly made seat manager
            (DbTicket.seat_managed_by_id == user_id)
        ) \
        .options(
            # eager-load the seat to avoid N+1 queries in callers
            db.joinedload(DbTicket.occupied_seat),
        ) \
        .all()
def insert_features_from_iters(dataset_path, insert_features, field_names, **kwargs):
    """Insert features into dataset from iterables.

    Args:
        dataset_path (str): Path of the dataset.
        insert_features (iter of iter): Collection of iterables representing
            features. A generator function is also accepted; it will be
            called to obtain the iterable.
        field_names (iter): Collection of field names to insert. These must
            match the order of their attributes in the insert_features items.
        **kwargs: Arbitrary keyword arguments. See below.

    Keyword Args:
        use_edit_session (bool): Flag to perform updates in an edit session.
            Default is False.
        log_level (str): Level to log the function at. Defaults to 'info'.

    Returns:
        collections.Counter: Feature counts per operation (currently only
            the 'inserted' key). (Earlier docs claimed a str path here,
            which did not match the code.)
    """
    kwargs.setdefault('use_edit_session', False)
    log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
    log("Start: Insert features into %s from iterables.", dataset_path)
    meta = {'dataset': dataset_metadata(dataset_path)}
    # Normalize field names into a tuple once; used as the cursor row spec.
    keys = {'row': tuple(contain(field_names))}
    # Allow callers to pass a generator function instead of an iterable.
    if inspect.isgeneratorfunction(insert_features):
        insert_features = insert_features()
    session = Editor(meta['dataset']['workspace_path'], kwargs['use_edit_session'])
    cursor = arcpy.da.InsertCursor(dataset_path, field_names=keys['row'])
    feature_count = Counter()
    # Both the edit session and the cursor are closed by the with-block.
    with session, cursor:
        for row in insert_features:
            cursor.insertRow(tuple(row))
            feature_count['inserted'] += 1
    log("%s features inserted.", feature_count['inserted'])
    log("End: Insert.")
    return feature_count
def test_solution() -> None:
    """Solution to Challenge 3."""
    ciphertext_hex = "1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736"
    expected_plaintext = b"Cooking MC's like a pound of bacon"
    ciphertext = bytes.fromhex(ciphertext_hex)
    recovered_key = attacks.find_single_xor_key(ciphertext)
    assert ciphers.SingleXor(recovered_key).decrypt(ciphertext) == expected_plaintext
def test_failed_resolve(webapp):
    """Test basic failure to resolve an IP for a domain - because it's
    unregistered.
    """
    unregistered = Domain('imprettysurethatthisdomaindoesnotexist.com')
    response = webapp.get('/api/ip/{}'.format(unregistered.to_hex()))
    # The endpoint answers 200 with both flags unset rather than erroring.
    assert response.status_code == 200
    assert response.json['ip'] is False
    assert response.json['error'] is False
def check_table(conn, table, interconnect):
    """
    Search for an Interconnect in a table in the database.

    :param conn: connection instance for the database (DB-API, qmark style)
    :param table: name of the table you want to check; must come from
        trusted code, as identifiers cannot be bound as parameters
    :param interconnect: name of the Interconnect you are looking for
    :return: first matching row, or None if the Interconnect is absent
    """
    cur = conn.cursor()
    # SECURITY FIX: bind the Interconnect value as a query parameter instead
    # of interpolating it into the SQL string (SQL injection). The table
    # name still has to be interpolated — parameters cannot name tables.
    sql_search = "SELECT * FROM %s WHERE Interconnect=?" % table
    found = cur.execute(sql_search, (interconnect,)).fetchone()
    return found
def ensure_folder(path: str):
    """Recursively create the parent directory of *path* if it is missing."""
    make_dirs(dirname(realpath(path)))
def get_host_config_tar_response(host):
    """
    Build the tar.gz attachment response for the GetHostConfig view.
    Note: This is re-used to download host config from the admin interface.
    :returns: HttpResponseAttachment
    """
    filename = '{host}_v{version}.tar.gz'.format(
        host=host.path_str(),
        version=host.config_version)
    # The response object doubles as the file-like object the gzip'd tar
    # stream is written into.
    resp = HttpResponseAttachment(filename=filename, content_type='application/gzip')
    tar = tarfile.open(mode='w:gz', fileobj=resp)
    try:
        config_tar.generate_host_config_tar(host, TarWriter(tar))
    finally:
        # Always close so the gzip trailer is flushed into the response.
        tar.close()
    return resp
def deploy(*slugs):
    """
    Deploy the latest app(s) to S3 and, if configured, to our servers.

    Each positional argument is an app slug. Invoking with a single empty
    slug (e.g. fabric's "deploy:" with no argument) prints usage help and
    does nothing.
    """
    if slugs[0] == '':
        # print() with one argument is valid in both Python 2 and 3,
        # unlike the Python-2-only print statement used previously.
        print('You must specify at least one slug, like this: "deploy:slug" or "deploy:slug,slug"')
        return
    for slug in slugs:
        deploy_single(slug)
async def update_product_remove_tag_by_id(
    *,
    product_id: int,
    session: Session = Depends(get_session),
    db_product: Product = Depends(get_product_or_404),
    db_tag: Tag = Depends(get_tag_or_404),
):
    """
    Remove tag from product.

    Detaches the tag resolved by ``get_tag_or_404`` from the product
    resolved by ``get_product_or_404``, commits, and returns the
    refreshed product.

    Raises:
        HTTPException(404): if detaching or committing fails for any
            reason (e.g. the tag is not attached to the product).
    """
    # The dependency helpers return dicts bundling the ORM object with
    # request metadata (start_time, username) used below for profiling —
    # NOTE(review): confirm the exact dict shape against the dependencies.
    existing_product = db_product["db_product"]
    existing_tag = db_tag["db_tag"]
    try:
        existing_product.tags.remove(existing_tag)
        session.add(existing_product)
        session.commit()
        session.refresh(existing_product)
        # Record timing/usage metrics for this endpoint invocation.
        profiling_api(
            f"Product:update:remove_tag:by_id:{product_id}",
            db_product["start_time"],
            db_product["username"],
        )
    except Exception as message:
        # Broad catch: any failure (missing association, DB error) is
        # logged and reported to the client as a 404.
        logger.error(message)
        logger.exception(message)
        raise HTTPException(
            status_code=404,
            detail="Impossible to remove the tag: product or tag not existing",
        )
    return existing_product
def aggregate_results_data(results, include_raw=False):
    """Aggregate the results of an archive/unarchive operation into an easy-to-parse dictionary.

    .. versionchanged:: 4.1.1
       This function can now properly handle the ``ARCHIVED`` status when returned.

    .. versionadded:: 4.1.0

    :param results: The results from an archive or unarchive operation
    :type results: list, dict
    :param include_raw: Includes the raw API response in the aggregated data dictionary
                        under the ``raw`` key (``False`` by default)
    :type include_raw: bool
    :returns: A dictionary with fields for ``status``, ``archived``, ``unarchived``,
              ``failed`` and ``unknown`` or the raw response when the API call
              completely fails, with the optional raw data when requested
    """
    aggregate_data = {'status': 'success'}
    archived, unarchived, failed = [], [], []
    unknown = 0
    if isinstance(results, dict) and results.get('status') == 'error':
        # Entire API call failed: surface the raw error response fields.
        # TODO: Record a log entry for the failed API call
        aggregate_data.update(results)
    elif isinstance(results, list):
        for entry in results:
            is_dict = isinstance(entry, dict)
            if is_dict and entry.get('archivalStatus') in ('ARCHIVING', 'ARCHIVED'):
                archived.append(str(entry.get('msgUid')))
            elif is_dict and entry.get('unarchivalStatus') == 'UNARCHIVING':
                unarchived.append(str(entry.get('msgUid')))
            elif is_dict and entry.get('msgUid'):
                failed.append(str(entry.get('msgUid')))
            else:
                # TODO: Record a log entry for the unknown result
                unknown += 1
    aggregate_data['archived'] = archived
    aggregate_data['unarchived'] = unarchived
    aggregate_data['failed'] = failed
    aggregate_data['unknown'] = unknown
    if include_raw:
        aggregate_data['raw'] = results
    return aggregate_data
def post_save_create_interaction_activity(sender, instance, created, **kwargs):
    """post_save handler for rebuzz instances.

    On creation: create the interaction and notification rows, record a
    POST activity for the author, then compute and store sentiment scores
    on the instance.
    """
    if created:
        RebuzzInteractions.objects.create(rebuzz=instance)
        RebuzzNotification.objects.create(rebuzz=instance, user=instance.author)
        _create_activity(
            instance.author,
            UserActivity.Actions.POST,
            ContentType.objects.get_for_model(instance),
            instance.id,
        )
        instance.sentiment_value = calculate_sentiment_index(instance.content)
        instance.textblob_value = calculate_textblob_value(instance.content)
        # This save() re-fires post_save, but created is False on that pass,
        # so the handler becomes a no-op and there is no recursion.
        instance.save()
def checkpoint_valid(epoch: int):
    """Persist the current model as a checkpoint for a valid run."""
    destination = "model_valid_{}.pth".format(epoch)
    # Serialize the module-level model object to the epoch-stamped file.
    torch.save(model, destination)
    logger.debug("Checkpoint saved to {}".format(destination))
def hard_sigmoid(x: tf.Tensor) -> tf.Tensor:
    """Hard sigmoid activation function.

    ```plot-activation
    activations.hard_sigmoid
    ```

    A piecewise-linear approximation of the sigmoid: the input is shifted
    by 0.5 and the result clamped to the unit interval.

    # Arguments
        x: Input tensor.

    # Returns
        Hard sigmoid activation.
    """
    shifted = x + 0.5
    return tf.clip_by_value(shifted, clip_value_min=0.0, clip_value_max=1.0)
def get_image_as_np_array(filename: str):
    """Return the image at *filename* as a numpy array.

    :param filename: path to an image file readable by PIL
    :return: numpy array of the image's pixel data
    """
    # FIX: close the file handle. Image.open is lazy and previously the
    # handle was never released; np.asarray forces the pixel data to load
    # before the context manager closes the file.
    with Image.open(filename) as img:
        return np.asarray(img)
def update_move_local_plot():
    """
    This function just keeps our local plot moves off the same drive as our remote plot moves so
    we don't saturate a single drive with multiple inbound plots.
    """
    log.debug("update_move_local_plot() Started")
    try:
        # Snapshot both values once: the original called
        # get_internal_plot_drive_to_use() repeatedly (racy if the selection
        # changes mid-run), and we need the pre-update value for logging.
        current_drive = chianas.current_internal_drive
        selected_drive = get_internal_plot_drive_to_use()[0]
        if current_drive == selected_drive:
            log.debug(f'Currently Configured Internal Plot Drive: {current_drive}')
            log.debug(f'System Selected Internal Plot Drive: {selected_drive}')
            log.debug('Configured and Selected Drives Match!')
            log.debug(f'No changes necessary to Internal Plotting Drive')
            log.debug(
                f'Plots left available on configured Internal plotting drive: {get_drive_info("space_free_plots_by_mountpoint", current_drive)}')
        else:
            notify('Internal Plot Drive Updated',
                   f'Internal Plot Drive Updated: Was: {current_drive}, Now: {selected_drive}')
            chianas.update_current_internal_drive(selected_drive)
            # BUG FIX: log the drive captured *before* the update. The old
            # code re-read chianas.current_internal_drive after updating it,
            # so the "Was:" field showed the new drive instead of the old one.
            log.info(f'Updated Internal Plot Drive, Was: {current_drive}, Now: {selected_drive}')
    except TypeError:
        # Raised when get_internal_plot_drive_to_use() yields nothing usable.
        log.debug('No Additional Drives found to be used as internal plot drives!')
        log.debug('Please add additional drive manually or via auto_drive.py and try again!')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.