| content (string, lengths 22-815k) | id (int64, 0-4.91M) |
|---|---|
def decrypt(private, ciphertext, output):
"""Decrypt ciphertext with private key.
Requires PRIVATE key file and the CIPHERTEXT encrypted with
the corresponding public key.
"""
privatekeydata = json.load(private)
assert 'pub' in privatekeydata
pub = load_public_key(privatekeydata['pub'])
log("Loading private key")
private_key_error = "Invalid private key"
assert 'key_ops' in privatekeydata, private_key_error
assert "decrypt" in privatekeydata['key_ops'], private_key_error
assert 'p' in privatekeydata, private_key_error
assert 'q' in privatekeydata, private_key_error
assert privatekeydata['kty'] == 'DAJ', private_key_error
_p = phe.util.base64_to_int(privatekeydata['p'])
_q = phe.util.base64_to_int(privatekeydata['q'])
private_key = phe.PaillierPrivateKey(pub, _p, _q)
log("Decrypting ciphertext")
enc = load_encrypted_number(ciphertext, pub)
out = private_key.decrypt(enc)
print(out, file=output)
| 24,000
|
def nearest(a, num):
"""
Finds the array's nearest value to a given num.
Args:
a (ndarray): An array.
num (float): The value to find the nearest to.
Returns:
float. The value in the array nearest to num.
"""
a = np.array(a, dtype=float)
return a.flat[np.abs(a - num).argmin()]
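A minimal usage sketch (only numpy is assumed, as in the function itself):
import numpy as np

values = [[1.0, 4.0], [7.5, 10.0]]
print(nearest(values, 6.0))  # -> 7.5, the flattened element closest to 6.0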
| 24,001
|
def hex_string(data):
"""Return a hex dump of a string as a string.
The output produced is in the standard 16 characters per line hex +
ascii format:
00000000: 40 00 00 00 00 00 00 00 40 00 00 00 01 00 04 80 @....... @.......
00000010: 01 01 00 00 00 00 00 01 00 00 00 00 ........ ....
"""
pos = 0 # Position in data
line = 0 # Line of data
hex = "" # Hex display
ascii = "" # ASCII display
result = ""
while pos < len(data):
# Start with header
if pos % 16 == 0:
hex = "%08x: " % (line * 16)
ascii = ""
# Add character
hex = hex + "%02x " % (ord(data[pos]))
if ord(data[pos]) < 32 or ord(data[pos]) > 176:
ascii = ascii + '.'
else:
ascii = ascii + data[pos]
pos = pos + 1
# Add separator if half way
if pos % 16 == 8:
hex = hex + " "
ascii = ascii + " "
# End of line
if pos % 16 == 0:
result = result + "%s %s\n" % (hex, ascii)
line = line + 1
# Leftover bits
if pos % 16 != 0:
# Pad hex string
for i in range(0, (16 - (pos % 16))):
hex = hex + " "
# Half way separator
if (pos % 16) < 8:
hex = hex + " "
result = result + "%s %s\n" % (hex, ascii)
return result
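A quick sketch of a call (Python 3: indexing a str yields 1-character strings, so ord() works; bytes would need decoding first):
dump = hex_string("@\x00\x00\x00ABCDEFGHIJKL")
print(dump)
# one line of output, roughly:
# "00000000: 40 00 00 00 41 42 43 44  45 46 47 48 49 4a 4b 4c   @...ABCD EFGHIJKL"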
| 24,002
|
def team_2_adv():
"""
Google Doc: https://docs.google.com/document/d/19jYjDRX_WR4pPynsAYmeglDpQWDvNVMDgYvd4EUWqm8/edit?usp=sharing
Goes through the berea section of the story. Asks if you want to go to dining or go do homework.
Choosing either leads to one of two outcomes: being "dead" or being asked how much money you have.
That answer then either makes you "dead" or continues the "story" on to the next function.
"""
global dead
print()
print()
direction = input ("As a Berean, no matter where you end up, you have to constantly make this hard decision: Go to dining or Go do homework, what do you say? [Go to dining/Go do homework]")
if direction == "Go to dining" or "go to dining": # Good choice! asking if they want to go to dining
print()
print("you get tired of this trivial cave and leave and head to dining to meet up friends")
print()
sleep(delay*3)
elif direction == "Go do homework" or "go do homework": # Bad choice! asking if they want to do homework
print()
sleep(delay*3)
print("You think you are making the right choice to conform to this capitalist world.")
sleep(delay*3)
print()
print("When you, at heart, are a leftist Marxist")
sleep(delay*3)
print()
print("You know you shouldn't be enslaved by the institution that is generally accepted in the form of")
sleep(delay*3)
print()
print("'all american college'")
sleep(delay*3)
print()
print("You graduate with a degree.")
sleep(delay*3)
print()
print("Work an 8-5 job.")
sleep(delay*3)
print()
print("Have two children.")
sleep(delay*3)
print()
print("Get old.")
sleep(delay*3)
print()
print("And, when sitting at your white wood porch, looking at your old pictures, quietly sob realizing...")
sleep(delay*3)
print()
print("that you should have gone to the dining hall")
sleep(delay*3)
print()
print("You die a victim of this machine.")
print()
print()
dead = True
else: #user inputs something other than the dining or homework answers
sleep(delay)
print()
print("You are not a true Berean.")
print()
sleep(delay)
if dead == True: #end of the homework answer
sleep(delay)
print("Oh no. You are dead. Bye.")
quit()
money = int(input ("How much is your family income?")) #ask what family income is
if money < 30000: #does the print if they answer with something below 30000
sleep (delay)
print()
print("Berea, berea, beloved")
elif money > 30000: #does the print if they answer with something above 30000
sleep(delay)
print()
print("you don't belong here")
dead = True
else: #catches any answers that are not the ones we want
sleep(delay)
print("illiterate. bye. *middle finger emoji* *clown emoji* *middle finger emoji*")
if dead == True:
print("go back to the centre college where you belong.")
quit()
| 24,003
|
def load_codes_mat(backup_dir, savefile=False, thread_num=1):
""" load all the code mat file in the experiment folder and summarize it into nparrays"""
if "codes_all.npz" in os.listdir(backup_dir):
# if the summary table exist, just read from it!
with np.load(os.path.join(backup_dir, "codes_all.npz")) as data:
codes_all = data["codes_all"]
generations = data["generations"]
return codes_all, generations
codes_fns = sorted([fn for fn in os.listdir(backup_dir) if "_code.mat" in fn])
codes_all = []
img_ids = []
for i, fn in enumerate(codes_fns[:]):
matdata = loadmat(os.path.join(backup_dir, fn))
codes_all.append(matdata["codes"])
img_ids.extend(list(matdata["ids"]))
codes_all = np.concatenate(tuple(codes_all), axis=0)
img_ids = np.concatenate(tuple(img_ids), axis=0)
img_ids = [img_ids[i][0] for i in range(len(img_ids))]
generations = [int(re.findall("gen(\d+)", img_id)[0]) if 'gen' in img_id else -1 for img_id in img_ids]
if savefile:
np.savez(os.path.join(backup_dir, "codes_all.npz"), codes_all=codes_all, generations=generations)
return codes_all, generations
| 24,004
|
def extract_ego_time_point(history: SimulationHistory) -> npt.NDArray[int]:
"""
Extract time point in simulation history.
:param history: Simulation history.
:return: An array of times in microseconds.
"""
time_point = np.array(
[sample.ego_state.time_point.time_us for sample in history.data]
)
return time_point
| 24,005
|
def compare_data_identifiers(a, b):
"""Checks if all the identifiers match, besides those that are not in both lists"""
a = {tuple(key): value for key, value in a}
b = {tuple(key): value for key, value in b}
matching_keys = a.keys() & b.keys()
a = {k: v for k, v in a.items() if k in matching_keys}
b = {k: v for k, v in b.items() if k in matching_keys}
return a == b
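A small sketch of the expected input shape: an iterable of (key, value) pairs where each key is list-like:
a = [([1, 'x'], 'foo'), ([2, 'y'], 'bar')]
b = [([1, 'x'], 'foo'), ([3, 'z'], 'baz')]
print(compare_data_identifiers(a, b))  # True: only the shared key (1, 'x') is compared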
| 24,006
|
def expand_abbr(abbr, doc_type = 'html'):
"""
Expands an abbreviation.
@param abbr: Abbreviation
@type abbr: str
@return: str
"""
tree = parse_into_tree(abbr, doc_type)
if tree:
result = tree.to_string(True)
if result:
result = re.sub(r'\|', insertion_point, result, 1)
return re.sub(r'\|', sub_insertion_point, result)
return ''
| 24,007
|
def SaveSettings (event=None, SettingsNotebook=None, filename = "settings.hdf5", title="Open HDF5 file to save settings", OpenDialog=True ) :
"""
Method for saving settings to an HDF5 file.
"""
if OpenDialog :
# Ask user to select the file
openFileDialog = wx.FileDialog(SettingsNotebook, title, "", filename, "HDF5 files (*.hdf5)|*.hdf5",
wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT | wx.FD_CHANGE_DIR)
# Check whether user cancelled
if openFileDialog.ShowModal() == wx.ID_CANCEL: return None
filename = openFileDialog.GetPath()
with h5py.File (filename, 'a') as file_settings :
# Create the group if it does not exist
try : parameters_grp = file_settings["settings"]
except KeyError : parameters_grp = file_settings.create_group("settings")
# Loop over all settings tabs
for SettingsTabName, SettingsTab in SettingsNotebook.settings_to_tabs.items() :
# Save all settings on a given tab
try : del parameters_grp[SettingsTabName]
except KeyError : pass
grp = parameters_grp.create_group(SettingsTabName)
for key, value in SettingsTab.GetSettings().items() : grp[key] = value
# return valid filename
return filename
| 24,008
|
def primes_above(x):
"""Convenience function that yields primes strictly greater than x.
>>> next(primes_above(200))
211
"""
_validate_num(x)
it = primes()
# Consume the primes below x as fast as possible, then yield the rest.
p = next(it)
while p <= x:
p = next(it)
yield p
for p in it:
yield p
| 24,009
|
def getAfinityCenter(width, height, point, center, radius=7, img_affinity=None):
"""
Function to create the affinity maps,
e.g., vector maps pointing toward the object center.
Args:
width: image width
height: image height
point: (x,y)
center: (x,y)
radius: pixel radius
img_affinity: tensor to add to
return:
return a tensor
"""
tensor = torch.zeros(2,height,width).float()
# Create the canvas for the affinity output
imgAffinity = Image.new("RGB", (width,height), "black")
totensor = transforms.Compose([transforms.ToTensor()])
draw = ImageDraw.Draw(imgAffinity)
r1 = radius
p = point
draw.ellipse((p[0]-r1,p[1]-r1,p[0]+r1,p[1]+r1),(255,255,255))
del draw
# Compute the array to add the affinity
array = (np.array(imgAffinity)/255)[:,:,0]
angle_vector = np.array(center) - np.array(point)
angle_vector = normalize(angle_vector)
affinity = np.concatenate([[array*angle_vector[0]],[array*angle_vector[1]]])
# print (tensor)
if img_affinity is not None:
# Find the angle vector
# print (angle_vector)
if length(angle_vector) >0:
angle=py_ang(angle_vector)
else:
angle = 0
# print(angle)
c = np.array(colorsys.hsv_to_rgb(angle/360,1,1)) * 255
draw = ImageDraw.Draw(img_affinity)
draw.ellipse((p[0]-r1,p[1]-r1,p[0]+r1,p[1]+r1),fill=(int(c[0]),int(c[1]),int(c[2])))
del draw
re = torch.from_numpy(affinity).float() + tensor
return re, img_affinity
| 24,010
|
def win_array_to_buf_32(data_array, memhandle, first_point, count):
"""Copies 32-bit data from an array into a Windows memory buffer.
Parameters
----------
data_array : POINTER(c_ushort)
The C array containing the data to be copied
memhandle : int
This must be a memory handle that was returned by :func:`.win_buf_alloc_32` when the buffer
was allocated. The data will be copied into this buffer.
first_point : int
Index of first point in memory buffer where data will be copied to
count : int
Number of data points to copy
Notes
-----
This function copies data from an array to a Windows global memory buffer. This would typically
be used to initialize the buffer with data before doing an output scan. Using the first_point
and count parameters it is possible to fill a portion of the buffer. This can be useful if you
want to send new data to the buffer after a :const:`~pyulmcculwms.ScanOptions.BACKGROUND` +
:const:`~pyulmcculwms.ScanOptions.CONTINUOUS` output scan has been started - for example,
during circular buffering.
"""
_check_err(_cbw.cbWinArrayToBuf32(
data_array, memhandle, first_point, count))
| 24,011
|
def test_add_pass_existing():
"""Add to existing set."""
context = Context({
'arbset': {1, 2},
'add': {
'set': 'arbset',
'addMe': 3
}})
add.run_step(context)
context['add']['addMe'] = 4
add.run_step(context)
assert context['arbset'] == {1, 2, 3, 4}
assert len(context) == 2
| 24,012
|
def unwrap_phase_iterative_fft(mat, iteration=4, win_for=None, win_back=None,
weight_map=None):
"""
Unwrap a phase image using an iterative FFT-based method as described in
Ref. [1].
Parameters
----------
mat : array_like
2D array. Wrapped phase-image in the range of [-Pi; Pi].
iteration : int
Number of iterations.
win_for : array_like
2D array. FFT-window for the forward transform. Generated if None.
win_back : array_like
2D array. FFT-window for the backward transform. Making sure there are
no zero-values. Generated if None.
weight_map : array_like
2D array. Using a weight map if provided.
Returns
-------
array_like
2D array. Unwrapped phase-image.
References
----------
.. [1] https://doi.org/10.1364/AO.56.007079
"""
height, width = mat.shape
if win_for is None:
win_for = _make_window(2 * height, 2 * width, direction="forward")
if win_back is None:
win_back = _make_window(2 * height, 2 * width, direction="backward")
if weight_map is None:
weight_map = np.ones_like(mat)
mat_unwrap = unwrap_phase_based_fft(mat * weight_map, win_for, win_back)
for i in range(iteration):
mat_wrap = _wrap_to_pi(mat_unwrap)
mat_diff = mat - mat_wrap
nmean = np.mean(mat_diff)
mat_diff = _wrap_to_pi(mat_diff - nmean)
phase_diff = unwrap_phase_based_fft(mat_diff * weight_map, win_for,
win_back)
mat_unwrap = mat_unwrap + phase_diff
return mat_unwrap
| 24,013
|
def wallunderground(idf, bsdobject, deletebsd=True, setto000=False):
"""return a wall:underground if bsdobject (buildingsurface:detailed) is an
underground wall"""
# ('WALL:UNDERGROUND', Wall, s.startswith('Ground'))
# test if it is an underground wall
if bsdobject.Surface_Type.upper() == 'WALL': # Surface_Type == wall
if bsdobject.Outside_Boundary_Condition.upper().startswith('GROUND'): # Outside_Boundary_Condition startswith 'ground'
simpleobject = idf.newidfobject('WALL:UNDERGROUND')
simpleobject.Name = bsdobject.Name
simpleobject.Construction_Name = bsdobject.Construction_Name
simpleobject.Zone_Name = bsdobject.Zone_Name
simpleobject.Azimuth_Angle = bsdobject.azimuth
simpleobject.Tilt_Angle = bsdobject.tilt
surforigin = bsdorigin(bsdobject, setto000=setto000)
simpleobject.Starting_X_Coordinate = surforigin[0]
simpleobject.Starting_Y_Coordinate = surforigin[1]
simpleobject.Starting_Z_Coordinate = surforigin[2]
simpleobject.Length = bsdobject.width
simpleobject.Height = bsdobject.height
if deletebsd:
idf.removeidfobject(bsdobject)
return simpleobject
return None
| 24,014
|
def loadBar(self, days, symbol='', exchange=''):
"""载入1分钟K线"""
symbol = self.vtSymbol if symbol == '' else symbol
exchange = self.exchange if exchange == '' else exchange
url = 'http://122.144.129.233:60007/hismin?instrumentid={}&datatype=0&exchangeid={}&startday={}&secretkey=1&daynum={}&rtnnum=20'.format(
symbol, exchange, datetime.datetime.now().strftime('%Y%m%d'), days)
r = requests.post(url)
try:
l = json.loads(r.text)
for d in reversed(l):
bar = VtBarData()
bar.vtSymbol = self.vtSymbol
bar.symbol = self.vtSymbol
bar.exchange = self.exchange
bar.open = d['OpenPrice']
bar.high = d['HighestPrice']
bar.low = d['LowestPrice']
bar.close = d['ClosePrice']
bar.volume = d['Volume']
bar.turnover = d['Turnover']
bar.datetime = datetime.datetime.strptime(d['ActionDay'] + d['UpdateTime'], '%Y%m%d%H:%M:%S')
self.onBar(bar)
except Exception:
self.output(u'Failed to fetch historical data; initializing with live market data instead')
| 24,015
|
def canonical_order(match):
"""
It does not make sense to define a separate bond between atoms 1 and 2,
and between atoms 2 and 1. This function will swap the atoms in the bond
if the first atom > second atom.
"""
# match[0][0:2] contains the ID numbers for the 2 atoms in the match
atom0 = match[0][0]
atom1 = match[0][1]
# match[1][0:1] contains the ID numbers for the 1 bond
bond0 = match[1][0]
if atom0 < atom1:
# return ((atom0, atom1), (bond0)) same thing as:
return match
else:
return ((atom1, atom0), (bond0,))  # trailing comma keeps the bond part a tuple, matching match[1]
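A small sketch of the normalization, assuming the match layout described in the comments above:
print(canonical_order(((5, 2), (7,))))  # -> ((2, 5), (7,)) - atoms reordered, bond part unchanged
print(canonical_order(((2, 5), (7,))))  # -> ((2, 5), (7,)) - already canonical, returned as-is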
| 24,016
|
def grant_db_access_to_role(role, db): # pylint: disable=invalid-name
"""Grant the role 'database_name', returns grant permission."""
return grant_obj_permission_to_role(role, db, 'database_access')
| 24,017
|
def execute_compute_job():
"""Call the execution of a workflow.
---
tags:
- services
consumes:
- application/json
parameters:
- name: consumerAddress
in: query
description: The consumer address.
required: true
type: string
- name: serviceAgreementId
in: query
description: The ID of the service agreement.
required: true
type: string
- name: signature
in: query
description: Signature of the documentId to verify that the consumer has rights to download the asset.
type: string
- name: workflowDID
in: query
description: DID of the workflow that is going to start to be executed.
type: string
responses:
200:
description: Call to the operator-service was successful.
400:
description: One of the required attributes is missing.
401:
description: Invalid asset data.
500:
description: Error
"""
data = request.args
required_attributes = [
'serviceAgreementId',
'consumerAddress',
'signature',
'workflowDID'
]
msg, status = check_required_attributes(required_attributes, data, 'consume')
if msg:
return msg, status
if not (data.get('signature')):
return 'signature is required in the call to "consume".', 400
try:
agreement_id = data.get('serviceAgreementId')
consumer_address = data.get('consumerAddress')
asset_id = keeper_instance().agreement_manager.get_agreement(agreement_id).did
did = id_to_did(asset_id)
if not was_compute_triggered(agreement_id, did, consumer_address, keeper_instance()):
msg = (
'Checking if the compute was triggered failed. Either consumer address does not '
'have permission to execute this workflow or consumer address and/or service '
'agreement id is invalid.')
logger.warning(msg)
return msg, 401
workflow = DIDResolver(keeper_instance().did_registry).resolve(data.get('workflowDID'))
body = {"serviceAgreementId": agreement_id, "workflow": workflow.as_dictionary()}
response = requests_session.post(
get_config().operator_service_url + '/api/v1/operator/init',
data=json.dumps(body),
headers={'content-type': 'application/json'})
return jsonify({"workflowId": response.content.decode('utf-8')})
except Exception as e:
logger.error(f'Error- {str(e)}', exc_info=1)
return f'Error : {str(e)}', 500
| 24,018
|
def files_filter_ext(files: List[Path], ext: str) -> List[Path]:
"""Filter files from a list matching a extension.
Args:
files: List of files.
ext: Extension to filter.
Returns:
List of files that have the extension.
"""
return [f for f in files if f.suffix == ext]
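A minimal usage sketch:
from pathlib import Path

files = [Path('a.py'), Path('b.txt'), Path('c.py')]
print(files_filter_ext(files, '.py'))  # -> [Path('a.py'), Path('c.py')] (printed as PosixPath/WindowsPath)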
| 24,019
|
def update(locale_dir, pot_dir, languages, line_width=76):
"""
Update specified language's po files from pot.
:param unicode locale_dir: path for locale directory
:param unicode pot_dir: path for pot directory
:param tuple languages: languages to update po files
:param number line_width: maximum line width of po files
:return: {'create': 0, 'update': 0, 'notchanged': 0}
:rtype: dict
"""
status = {
'create': 0,
'update': 0,
'notchanged': 0,
}
for dirpath, dirnames, filenames in os.walk(pot_dir):
for filename in filenames:
pot_file = os.path.join(dirpath, filename)
base, ext = os.path.splitext(pot_file)
if ext != ".pot":
continue
basename = relpath(base, pot_dir)
for lang in languages:
po_dir = os.path.join(locale_dir, lang, 'LC_MESSAGES')
po_file = os.path.join(po_dir, basename + ".po")
cat_pot = c.load_po(pot_file)
if os.path.exists(po_file):
cat = c.load_po(po_file)
msgids = set([m.id for m in cat if m.id])
c.update_with_fuzzy(cat, cat_pot)
new_msgids = set([m.id for m in cat if m.id])
if msgids != new_msgids:
added = new_msgids - msgids
deleted = msgids - new_msgids
status['update'] += 1
click.echo('Update: {0} +{1}, -{2}'.format(
po_file, len(added), len(deleted)))
c.dump_po(po_file, cat, line_width)
else:
status['notchanged'] += 1
click.echo('Not Changed: {0}'.format(po_file))
else: # new po file
status['create'] += 1
click.echo('Create: {0}'.format(po_file))
c.dump_po(po_file, cat_pot, line_width)
return status
| 24,020
|
def interp_to_grid(tran,v,expand_x=True,expand_y=True):
"""
Return dense matrix for X,Y and V (from v, or tran[v] if v is str)
expand_x: defaults to 1 more value in the X dimension than in V, suitable for
passing to pcolormesh.
expand_y: defaults to 1 more value in the Y dimension than in V, for pcolormesh
"""
if isinstance(v,six.string_types):
v=tran[v]
x,y,scal,dz=xr.broadcast(get_d_sample(tran),tran.z_ctr,v,get_z_dz(tran))
# important to use .values, as xarray will otherwise muck with
# the indexing
# coll_u=plot_utils.pad_pcolormesh(x.values,y.values,scal.values,ax=ax)
# But we have some additional information on how to pad Y, so do that
# here.
# Move to numpy land
X=x.values
Y=y.values
Dz=dz.values
if expand_y:
# Expands the vertical coordinate in the vertical
Ybot=Y-0.5*Dz
Yexpand=np.concatenate( (Ybot,Ybot[:,-1:]), axis=1)
Yexpand[:,-1]=np.nan
Yexpand[:,1:]=np.where( np.isfinite(Yexpand[:,1:]),
Yexpand[:,1:],
Y+0.5*Dz)
# Expands the horizontal coordinate in the vertical
Xexpand=np.concatenate( (X,X[:,-1:]), axis=1)
else:
Yexpand=Y
Xexpand=X
# And expand in the horizontal
def safe_midpnt(a,b):
ab=0.5*(a+b)
invalid=np.isnan(ab)
ab[invalid]=a[invalid]
invalid=np.isnan(ab)
ab[invalid]=b[invalid]
return ab
if expand_x:
dx=utils.center_to_interval(X[:,0])
Xexpand2=np.concatenate( (Xexpand-0.5*dx[:,None], Xexpand[-1:,:]+0.5*dx[-1:,None]), axis=0)
Yexpand2=np.concatenate( (Yexpand[:1,:],
safe_midpnt(Yexpand[:-1],Yexpand[1:]),
Yexpand[-1:,:]), axis=0)
else:
Xexpand2=Xexpand
Yexpand2=Yexpand
return Xexpand2,Yexpand2,scal.values
| 24,021
|
def simulation(G, tau, gamma, rho, max_time, number_infected_before_release, release_number, background_inmate_turnover,
stop_inflow_at_intervention, p, death_rate, percent_infected, percent_recovered, social_distance,
social_distance_tau, initial_infected_list):
"""Runs a simulation on SIR model.
Args:
G: Networkx graph
tau: transmission rate
gamma: recovery rate
rho: percent of inmates that are initially infected
max_time: # of time steps to run simulation
number_infected_before_release: number of infected at which to perform release on next integer time
release_number: # of inmates to release at release intervention
background_inmate_turnover: background # of inmates added/released at each time step
stop_inflow_at_intervention: should we stop the background inflow of inmates at intervention time?
p: probability of contact between inmate and other inmates
death_rate: percent of recovered inmates that die
percent_infected: percent of general population that is infected
percent_recovered: percent of general population that is recovered
social_distance: boolean flag, if we lower transmission rate after major release
social_distance_tau: new transmission rate after major release
initial_infected_list: sets node numbers of initial infected (default is 0, this parameter is arbitrary)
Returns:
t: array of times at which events occur
S: # of susceptible inmates at each time
I: # of infected inmates at each time
R: # of recovered inmates at each time
D: # of dead inmates at each time step
"""
print('Starting simulation...')
release_occurred = False
background_release_number = background_inmate_turnover
data_list = []
recovered_list = []
delta_recovered_list = []
# Check we are using initial_infected_list
if initial_infected_list is not None:
print('Using initial infected list to set initial infected.')
infected_list = initial_infected_list.copy()
else: # Choose random initial infections based on rho
print('Using rho to set initial infected.')
infected_list = list(np.random.choice(list(G.nodes), int(np.ceil(rho * len(G.nodes))), replace=False))
# Loop over time
for i in range(max_time):
# Run 1 time unit of simulation
data = EoN.fast_SIR(G, tau, gamma, initial_infecteds=infected_list, initial_recovereds=recovered_list,
tmin=i, tmax=i + 1, return_full_data=True)
data_list.append(data)
# Update infected and recovered inmate lists
infected_list, recovered_list = get_infected(data, i + 1), get_recovered(data, i + 1)
# Check if release condition has been met
if not release_occurred and len(infected_list) >= number_infected_before_release:
background_inmate_turnover, r_n, tau = enact_interventions(background_inmate_turnover,
background_release_number, i + 1,
infected_list, release_number,
social_distance,
social_distance_tau,
stop_inflow_at_intervention,
tau)
release_occurred = True
else: # If not, use background release rate
r_n = background_release_number
# Add and release inmates
G, infected_list, recovered_list, delta_recovered = recalibrate_graph(G, infected_list, recovered_list,
background_inmate_turnover, r_n, p,
percent_infected, percent_recovered,
death_rate)
# Track the number of recovered inmates added or released at each time step
delta_recovered_list.append(delta_recovered)
# Process raw data into t, S, I, R, D arrays
t, S, I, R, D = process_data(data_list, delta_recovered_list, death_rate)
print('Simulation completed.\n')
return t, S, I, R, D
| 24,022
|
def relative(link : str):
"""Convert relative link to absolute"""
return f"#{document.URL.split('#')[1]}/{link}"
| 24,023
|
def pypiserver_cmd(root, cmd='run', *args):
"""Yield a command to run pypiserver.
:param Union[str, pathlib.Path] root: the package root from which
to serve packages.
:param args: extra arguments for ``pypiserver <cmd>``
:returns: arguments suitable for passing to ``Popen()``
:rtype: Iterator[str]
"""
# yield '{}/bin/pypiserver'.format(venv_dir)
yield 'pypiserver'
yield cmd
yield str(root)
for arg in args:
yield arg
| 24,024
|
def test_normalized_frequency_outliers_for_multi_columns(agencies):
"""Test frequency outlier detection using normalized frequencies for tuples
of values from multiple columns.
"""
# Use default divide by total as the normalization function. The pair
# ('BK', 'NY') occurs in 6 out of ten rows in the agencies dataset.
# The state value 'NY' occurs in 9 out of 10 rows in the agencies dataset.
outlier = frequency_outliers(agencies, ['borough', 'state'], ge(0.9))
assert len(outlier.values()) == 0
outlier = frequency_outliers(agencies, ['borough', 'state'], ge(0.6))
assert len(outlier.values()) == 1
assert outlier[0]['metadata'] == {'count': 6, 'frequency': 0.6}
| 24,025
|
def _get_axes_names(ndim: int) -> Tuple[List[str], List[str]]:
"""Get needed axes names given the number of dimensions.
Parameters
----------
ndim : int
Number of dimensions.
Returns
-------
axes : List[str]
Axes names.
coords : List[str]
Coordinates names.
"""
if ndim == 2:
axes = [axis for axis in AXES if axis != Axes.ZPLANE.value]
coords = [coord for coord in COORDS if coord != Coordinates.Z.value]
elif ndim == 3:
axes = AXES
coords = COORDS
else:
raise TypeError('expected 2- or 3-D image')
return axes, coords
| 24,026
|
def multi_replace(text, replace_dict):
"""Perform multiple replacements in one go using the replace dictionary
in format: { 'search' : 'replace' }
:param text: Text to replace
:type text: `str`
:param replace_dict: The replacement strings in a dict
:type replace_dict: `dict`
:return: `str`
:rtype:
"""
new_text = text
for search, replace in list(replace_dict.items()):
new_text = new_text.replace(search, str(replace))
return new_text
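A minimal usage sketch; note that replacement values are passed through str(), so non-string values are fine:
text = "Hello NAME, you have COUNT new messages."
print(multi_replace(text, {'NAME': 'Ada', 'COUNT': 3}))
# -> "Hello Ada, you have 3 new messages."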
| 24,027
|
def function_expr(fn: str, args_expr: str = "") -> str:
"""
DEPRECATED. Please do not add anything else here. In order to manipulate the
query, create a QueryProcessor and register it into your dataset.
Generate an expression for a given function name and an already-evaluated
args expression. This is a place to define convenience functions that evaluate
to more complex expressions.
"""
if fn.startswith("apdex("):
match = APDEX_FUNCTION_RE.match(fn)
if match:
return "(countIf({col} <= {satisfied}) + (countIf(({col} > {satisfied}) AND ({col} <= {tolerated})) / 2)) / count()".format(
col=escape_identifier(match.group(1)),
satisfied=match.group(2),
tolerated=int(match.group(2)) * 4,
)
raise ValueError("Invalid format for apdex()")
elif fn.startswith("impact("):
match = IMPACT_FUNCTION_RE.match(fn)
if match:
apdex = "(countIf({col} <= {satisfied}) + (countIf(({col} > {satisfied}) AND ({col} <= {tolerated})) / 2)) / count()".format(
col=escape_identifier(match.group(1)),
satisfied=match.group(2),
tolerated=int(match.group(2)) * 4,
)
return "(1 - {apdex}) + ((1 - (1 / sqrt(uniq({user_col})))) * 3)".format(
apdex=apdex, user_col=escape_identifier(match.group(3)),
)
raise ValueError("Invalid format for impact()")
elif fn.startswith("failure_rate("):
match = FAILURE_RATE_FUNCTION_RE.match(fn)
if match:
return "countIf(notIn(transaction_status, tuple({ok}, {cancelled}, {unknown}))) / count()".format(
ok=SPAN_STATUS_NAME_TO_CODE["ok"],
cancelled=SPAN_STATUS_NAME_TO_CODE["cancelled"],
unknown=SPAN_STATUS_NAME_TO_CODE["unknown"],
)
raise ValueError("Invalid format for failure_rate()")
# For functions with no args, (or static args) we allow them to already
# include them as part of the function name, eg, "count()" or "sleep(1)"
if not args_expr and fn.endswith(")"):
return fn
# Convenience topK function eg "top10", "top3" etc.
topk = TOPK_FUNCTION_RE.match(fn)
if topk:
return "topK({})({})".format(topk.group(1), args_expr)
# turn uniq() into ifNull(uniq(), 0) so it doesn't return null where
# a number was expected.
if fn == "uniq":
return "ifNull({}({}), 0)".format(fn, args_expr)
# emptyIfNull(col) is a simple pseudo function supported by Snuba that expands
# to the actual clickhouse function ifNull(col, '') Until we figure out the best
# way to disambiguate column names from string literals in complex functions.
if fn == "emptyIfNull" and args_expr:
return "ifNull({}, '')".format(args_expr)
# Workaround for https://github.com/ClickHouse/ClickHouse/issues/11622
# Some distributed queries fail when arrays are passed as array(1,2,3)
# and work when they are passed as [1, 2, 3]
if get_config("format_clickhouse_arrays", 1) and fn == "array":
return f"[{args_expr}]"
# default: just return fn(args_expr)
return "{}({})".format(fn, args_expr)
| 24,028
|
def node_delete(node):
"""Delete node.
If the node does not exist, a NotFoundError will be raised.
"""
get_auth_backend().require_admin()
node = get_or_404(model.Node, node)
if node.project:
raise errors.BlockedError(
"Node %r is part of project %r; remove from "
"project before deleting" % (node.label, node.project.label))
if node.nics != []:
raise errors.BlockedError(
"Node %r has nics; remove them before deleting %r." % (node.label,
node.label))
db.session.delete(node)
db.session.commit()
| 24,029
|
def entry_point(action_name: str):
"""Entry point for execute an action"""
__logger.info('Launch action <%s>...', action_name)
action_inst = utils.collection.list.pnext(__actions, lambda a: a.name == action_name)
if action_inst:
action_inst.entry_point(args)
else:
raise Exception('Unknown action: <{}>'.format(action_name))
| 24,030
|
def many_to_one(nrows=[1000], N=5, percentages=[0.025], p=0.5):
"""
"""
def update_joinable_relation_rows(table1, nrows, selecivity_percentage, \
N, relation_from_percentage=-1, \
relation_to_percentage=-1):
"""
Sample rows for percentage and update the sampled rows
return: updated table 1, table2
"""
prows = nrows * selecivity_percentage
tbl1_sample = sample(table1, int(prows))
rpercentage = nrows
if relation_to_percentage > 0:
rpercentage = nrows * relation_to_percentage
NumOfP1s = rpercentage / N
# print(NumOfP1s, prows, rpercentage)
tbl1_sample = tbl1_sample.reset_index(drop=True)
P1ForJoin = sample(tbl1_sample, int(NumOfP1s+0.7))
values = list(set([row[1]['P1'] for row in P1ForJoin.iterrows()]))
values = values * N
if len(values) > nrows:
values = values[:nrows]
table2 = generate_table(nrows, P1start=nrows+1)
tbl2_sample = sample(table2, len(values))
# print(len(values), len(list(set((tbl2_sample.index)))))
for i, j in zip(values, list(tbl2_sample.index)):
table2.loc[j, 'P1'] = i
return table1, table2
# Number of rows per table
# nrows = [1000, 3000, 10000, 50000, 100000]
# value of N (relation size)
# N = [5, 10, 15]
# 50 % selectivity - percentage of rows, overall, involved in the join from table1 to table2
# p = 0.5
# percentage of rows that are involved in 1-N relation
# percentages = [0.25 , 0.5]
for nrow in nrows:
subprocess.check_call('mkdir -p ../data/relation_type/N-one/'+ str(int(nrow/1000)) + 'k_rows', shell=True)
table1 = generate_table(nrow)
table1.to_csv('../data/relation_type/N-one/'+ str(int(nrow/1000)) + 'k_rows/table2.csv', index=False )
for rp in percentages:
for n in N:
table1, table2 = update_joinable_relation_rows(table1, nrow, p, n, -1, rp)
table2.to_csv('../data/relation_type/N-one/'+ str(int(nrow/1000)) + 'k_rows/table1_' + \
str(int(100*p)) + "_" + str(n) + "_" + str(int(100*rp)) + '_percent.csv', index=False )
| 24,031
|
def distribute(tensor: np.ndarray,
grid_shape: Sequence[int],
pmap: bool = True) -> pxla.ShardedDeviceArray:
"""
Convert a numpy array into a ShardedDeviceArray (distributed according to
`grid_shape`). It is assumed that the dimensions of `tensor`
are evenly divided by `grid`.
Args:
tensor: A numpy array to be converted into a
ShardedDeviceArray distributed across devices.
grid_shape: The shape of the processor grid
according to which `tensor` is distributed.
Returns:
ShardedDeviceArray: The distributed tensor
Raises:
ValueError: If `tensor.shape` is not evenly divisible by `grid_shape`
"""
if not np.all([s % p == 0 for s, p in zip(tensor.shape, grid_shape)]):
raise ValueError(f"tensor.shape = {tensor.shape} not evenly divisible "
f"by grid_shape = {grid_shape}.")
ndim = tensor.ndim
pshape = np.asarray(grid_shape)
shape = misc.flatten(
[p, s] for s, p in zip(np.array(tensor.shape) // pshape, pshape))
perm = list(range(0, 2 * ndim, 2)) + list(range(1, 2 * ndim, 2))
reshaped = tensor.reshape(shape).transpose(perm)
final_shape = (np.prod(reshaped.shape[:ndim]), *reshaped.shape[ndim:])
A = reshaped.reshape(final_shape)
if not pmap:
return A
return jax.pmap(lambda x: x, devices=jax.local_devices())(A)
| 24,032
|
def input_files_exist(paths):
"""Ensure all the input files actually exist.
Args:
paths (list): List of paths.
Returns:
bool: True if they all exist, False otherwise.
"""
for path in paths:
if not os.path.isfile(path):
return False
return True
| 24,033
|
def predict(w, X):
"""
Returns a vector of predictions.
"""
return expit(X.dot(w))
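A small sketch, assuming w is a weight vector, X a design matrix, and expit the logistic sigmoid from scipy.special:
import numpy as np
from scipy.special import expit

w = np.array([0.5, -1.0])
X = np.array([[1.0, 0.0], [0.0, 1.0]])
print(predict(w, X))  # -> approximately [0.622 0.269]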
| 24,034
|
def get_argument_parser():
"""
Parse CLI arguments and return a map of options.
"""
parser = argparse.ArgumentParser(
description="DC/OS Install and Configuration Utility")
mutual_exc = parser.add_mutually_exclusive_group()
mutual_exc.add_argument(
'--hash-password',
action=ArgsAction,
dest='password',
metavar='password',
nargs='?',
help='Hash a password and print the results to copy into a config.yaml.'
)
mutual_exc.add_argument(
'--generate-node-upgrade-script',
action=ArgsAction,
metavar='installed_cluster_version',
dest='installed_cluster_version',
nargs='?',
help='Generate a script that upgrades DC/OS nodes running installed_cluster_version'
)
mutual_exc.add_argument(
'--generate-node-upgrade-win-script',
action=ArgsAction,
metavar='installed_cluster_version',
dest='installed_cluster_version',
nargs='?',
help='Generate a powershell script that upgrades Windows nodes running installed_cluster_version'
)
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='Verbose log output (DEBUG).')
parser.add_argument(
'-p',
'--port',
type=int,
default=9000,
help=argparse.SUPPRESS)
def add_mode(name, help_msg):
mutual_exc.add_argument(
'--{}'.format(name),
action='store_const',
const=name,
dest='action',
help=help_msg)
# Add all arg modes
for name, value in dispatch_dict_simple.items():
add_mode(name, value[2])
parser.set_defaults(action='genconf')
return parser
| 24,035
|
def run_report_results(args, probe, dataset, model, loss, reporter, regimen):
"""
Reports results from a structural probe according to args.
By default, does so only for dev set.
Requires a simple code change to run on the test set.
"""
probe_params_path = os.path.join(args['reporting']['root'],args['probe']['params_path'])
probe.load_state_dict(torch.load(probe_params_path))
probe.eval()
dev_dataloader = dataset.get_dev_dataloader()
dev_predictions = regimen.predict(probe, model, dev_dataloader)
reporter(dev_predictions, dev_dataloader, 'dev')
#train_dataloader = dataset.get_train_dataloader(shuffle=False)
#train_predictions = regimen.predict(probe, model, train_dataloader)
#reporter(train_predictions, train_dataloader, 'train')
# Uncomment to run on the test set
#test_dataloader = dataset.get_test_dataloader()
#test_predictions = regimen.predict(probe, model, test_dataloader)
#reporter(test_predictions, test_dataloader, 'test')
| 24,036
|
def move_wm_id_to_d(wm_id, d_num):
"""
Given a wmctrl window manager ID `wm_id`, move the corresponding window to the
desktop number `d_num`, by calling `wmctrl -i -r WM_ID -t D_NUM`.
N.B. wmctrl does not give an informative error code so no way to check it succeeded
without querying wmctrl again for the ID to verify that its desktop number has
successfully updated to the target `d_num` (this function doesn't handle that yet).
"""
wmctrl_cmd = f"wmctrl -i -r {wm_id} -t {d_num}".split()
print(f"Moving {wm_id} to {d_num}")
run(wmctrl_cmd)
return
| 24,037
|
def test_eval_policy(config, tmpdir):
"""Smoke test for imitation.scripts.eval_policy"""
config_updates = {
'render': False,
'log_root': tmpdir,
}
config_updates.update(config)
run = eval_policy_ex.run(config_updates=config_updates,
named_configs=['fast'])
assert run.status == 'COMPLETED'
wrapped_reward = 'reward_type' in config
_check_rollout_stats(run.result, wrapped_reward)
| 24,038
|
def maskrgb_to_class(mask, class_map):
""" decode rgb mask to classes using class map"""
h, w, channels = mask.shape[0], mask.shape[1], mask.shape[2]
mask_out = -1 * np.ones((h, w), dtype=int)
for k in class_map:
matches = np.zeros((h, w, channels), dtype=bool)
for c in range(channels):
matches[:, :, c] = mask[:, :, c] == k[c]
matches_total = np.sum(matches, axis=2)
valid_idx = matches_total == channels
mask_out[valid_idx] = class_map[k]
return mask_out
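A small sketch with a 1x2 RGB mask and a color-to-class map keyed by RGB tuples (the tuple keys are an assumption inferred from how k[c] is indexed):
import numpy as np

class_map = {(255, 0, 0): 1, (0, 255, 0): 2}
mask = np.array([[[255, 0, 0], [0, 255, 0]]])  # shape (1, 2, 3)
print(maskrgb_to_class(mask, class_map))       # -> [[1 2]]; unmatched pixels stay -1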
| 24,039
|
def translate_error_code(error_code):
"""
Return the related Cloud error code for a given device error code
"""
return CLOUD_ERROR_CODES.get(error_code, error_code)
| 24,040
|
def download(local_dir, project_name):
""" Copy from S3 to local directory
"""
print()
| 24,041
|
def generate_oi_quads():
"""Return a list of quads representing a single OI, OLDInstance.
"""
old_instance, err = domain.construct_old_instance(
slug='oka',
name='Okanagan OLD',
url='http://127.0.0.1:5679/oka',
leader='',
state=domain.NOT_SYNCED_STATE,
is_auto_syncing=False)
old_instance_quads = aol_mod.instance_to_quads(
old_instance, domain.OLD_INSTANCE_TYPE)
aol = []
for quad in old_instance_quads:
aol = aol_mod.append_to_aol(aol, quad)
return aol
| 24,042
|
def graph_intersection(pred_graph, truth_graph):
"""
Use sparse representation to compare the predicted graph
and the truth graph so as to label the edges in the predicted graph
to be 1 as true and 0 as false.
"""
array_size = max(pred_graph.max().item(), truth_graph.max().item()) + 1
l1 = pred_graph.cpu().numpy()
l2 = truth_graph.cpu().numpy()
e_1 = sp.sparse.coo_matrix((np.ones(l1.shape[1]), l1), shape=(array_size, array_size)).tocsr()
e_2 = sp.sparse.coo_matrix((np.ones(l2.shape[1]), l2), shape=(array_size, array_size)).tocsr()
e_intersection = (e_1.multiply(e_2) - ((e_1 - e_2)>0)).tocoo()
new_pred_graph = torch.from_numpy(np.vstack([e_intersection.row, e_intersection.col])).long().to(device)
y = e_intersection.data > 0
return new_pred_graph, y
| 24,043
|
def get_qid_for_title(title):
"""
Gets the best Wikidata candidate from the title of the paper.
"""
api_call = f"https://www.wikidata.org/w/api.php?action=wbsearchentities&search={title}&language=en&format=json"
api_result = requests.get(api_call).json()
if api_result["success"] == 1:
return(api_result["search"][0]["id"])
| 24,044
|
def experiment(L, T, dL, dT, dLsystm = 0):
"""
Performs a g-measurement experiment
Args:
L: A vector of length measurements of the pendulum
T: A vector of period measurements of the pendulum
dL: The error in length measurement
dT: The error in period measurement
dLsystm: Systematic error of length measurement, default value 0
Returns:
A dictionary with the mean value of g ('g') and the
error of that mean ('dg')
"""
L = L + dLsystm # Add systematic error, if it exists
g = np.power(2*np.pi, 2) * L / np.power(T, 2) # Indirect g measurement from
# length and period
dg = gError(L, T, dL, dT) # g measurement error
gMean = np.sum(g)/g.size # Mean value of g measurements
dgMean = np.sqrt(np.sum(dg*dg))/dg.size # Error of mean value of g
return {'g':gMean, 'dg':dgMean}
| 24,045
|
def find_jobs(schedd=None, attr_list=None, **constraints):
"""Query the condor queue for jobs matching the constraints
Parameters
----------
schedd : `htcondor.Schedd`, optional
open scheduler connection
attr_list : `list` of `str`
list of attributes to return for each job, defaults to all
all other keyword arguments should be ClassAd == value constraints to
apply to the scheduler query
Returns
-------
jobs : `list` of `classad.ClassAd`
the job listing for each job found
"""
if schedd is None:
schedd = htcondor.Schedd()
qstr = ' && '.join(['%s == %r' % (k, v) for
k, v in constraints.items()]).replace("'", '"')
if not attr_list:
attr_list = []
return list(schedd.query(qstr, attr_list))
| 24,046
|
def create_vocab(sequences, min_count, counts):
"""Generate character-to-idx mapping from list of sequences."""
vocab = {const.SOS: const.SOS_IDX, const.EOS: const.EOS_IDX,
const.SEP: const.SEP_IDX}
for seq in sequences:
for token in seq:
for char in token:
if char not in vocab and counts[char] >= min_count:
vocab[char] = len(vocab)
vocab[const.UNK] = len(vocab)
return vocab
| 24,047
|
def get_ind_sphere(mesh, ind_active, origin, radius):
"""Retreives the indices of a sphere object coordintes in a mesh."""
return (
(mesh.gridCC[ind_active, 0] <= origin[0] + radius)
& (mesh.gridCC[ind_active, 0] >= origin[0] - radius)
& (mesh.gridCC[ind_active, 1] <= origin[1] + radius)
& (mesh.gridCC[ind_active, 1] >= origin[1] - radius)
& (mesh.gridCC[ind_active, 2] <= origin[2] + radius)
& (mesh.gridCC[ind_active, 2] >= origin[2] - radius)
)
| 24,048
|
def find_frame_times(eegFile, signal_idx=-1, min_interval=40, every_n=1):
"""Find imaging frame times in LFP data using the pockels blanking signal.
Due to inconsistencies in the fame signal, we look for local maxima. This
avoids an arbitrary threshold that misses small spikes or includes two
nearby time points that are part of the same frame pulse.
Parameters
----------
eegFile : str
Path to eeg data file
signal_idx : int
Index of the pockels signal, e.g. eeg[signal_idx, :], default -1
min_interval : int
Minimum radius around local maxima to enforce, default 40
every_n : int
Return every nth frame time, useful for multiplane data, default 1
Returns
-------
frame times : array, shape (n_frame_times, )
"""
pc_signal = loadEEG(eegFile.replace('.eeg', ''))['EEG'][signal_idx, :]
# break ties for local maxima by increasing first point by 1
same_idx = np.where(np.diff(pc_signal) == 0)[0]
pc_signal[same_idx] += 1
pc_signal = np.abs(np.diff(pc_signal))
frame_times = argrelextrema(pc_signal, np.greater, order=min_interval)[0]
return frame_times[::every_n]
| 24,049
|
def _interpolate_signals(signals, sampling_times, verbose=False):
"""
Interpolate signals at given sampling times.
"""
# Reshape all signals to one-dimensional array object (e.g. AnalogSignal)
for i, signal in enumerate(signals):
if signal.ndim == 2:
signals[i] = signal.flatten()
elif signal.ndim > 2:
raise ValueError('elements in fir_rates must have 2 dimensions')
if verbose:
print('create time slices of the rates...')
# Interpolate in the time bins
interpolated_signal = np.vstack([_analog_signal_step_interp(
signal, sampling_times).rescale('Hz').magnitude
for signal in signals]) * pq.Hz
return interpolated_signal
| 24,050
|
def mean_standard_error_residuals(A, b):
"""
Mean squared error of the residuals. The sum of squared residuals
divided by the residual degrees of freedom.
"""
n, k = A.shape
ssr = sum_of_squared_residuals(A, b)
return ssr / (n - k)
| 24,051
|
def cel2gal(ra, dec):
"""
Convert celestial coordinates (J2000) to Galactic
coordinates. (Much faster than astropy for small arrays.)
Parameters
----------
ra : `numpy.array`
dec : `numpy.array`
Celestial coordinates (in degrees)
Returns
-------
glon : `numpy.array`
glat : `numpy.array`
Galactic Coordinates (in degrees)
"""
dec = np.radians(dec)
sin_dec = np.sin(dec)
cos_dec = np.cos(dec)
ra = np.radians(ra)
ra_gp = np.radians(192.85948)
de_gp = np.radians(27.12825)
sin_ra_gp = np.sin(ra - ra_gp)
cos_ra_gp = np.cos(ra - ra_gp)
lcp = np.radians(122.932)
sin_b = (np.sin(de_gp) * sin_dec) + (np.cos(de_gp) * cos_dec * cos_ra_gp)
lcpml = np.arctan2(cos_dec * sin_ra_gp,
(np.cos(de_gp) * sin_dec)
- (np.sin(de_gp) * cos_dec * cos_ra_gp))
glat = np.arcsin(sin_b)
glon = (lcp - lcpml + (2. * np.pi)) % (2. * np.pi)
return np.degrees(glon), np.degrees(glat)
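A quick sanity-check sketch: the approximate celestial position of the Galactic center should map to Galactic coordinates near (0, 0):
import numpy as np

glon, glat = cel2gal(np.array([266.405]), np.array([-28.936]))
print(glon, glat)  # longitude close to 0 (or just under 360) degrees, latitude close to 0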
| 24,052
|
def index(request):
"""Renders main website with welcome message"""
return render(request, 'mapper/welcome.html', {})
| 24,053
|
def tidy_output(differences):
"""Format the output given by other functions properly."""
out = []
if differences:
out.append("--ACLS--")
out.append("User Path Port Protocol")
for item in differences:
#if item[2] != None: # In some cases processes showed up with port None
out.append("%s %s %s %s" % item)
# item still contains one extra element, the protocol;
# it is not used in the normal output
return out
| 24,054
|
def get_issues_overview_for(db_user: User, app_url: str) -> Dict[str, Collection]:
"""
Returns dictionary with keywords 'user' and 'others', which got lists with dicts with infos
IMPORTANT: URL's are generated for the frontend!
:param db_user: User
:param app_url: current applications url
:return: dict
"""
if not db_user or db_user.nickname == nick_of_anonymous_user:
return {
'user': [],
'other': []
}
if db_user.is_admin():
db_issues_other_users = DBDiscussionSession.query(Issue).filter(Issue.author != db_user).all()
else:
db_issues_other_users = [issue for issue in db_user.accessible_issues if issue.author != db_user]
db_issues_of_user = DBDiscussionSession.query(Issue).filter_by(author=db_user).order_by(
Issue.uid.asc()).all()
return {
'user': [__create_issue_dict(issue, app_url) for issue in db_issues_of_user],
'other': [__create_issue_dict(issue, app_url) for issue in db_issues_other_users]
}
| 24,055
|
def load_file(file):
"""Returns an AdblockRules object using the rules specified in file."""
with open(file) as f:
rules = f.readlines()
return AdblockRules(rules)
| 24,056
|
def isSV0_QSO(gflux=None, rflux=None, zflux=None, w1flux=None, w2flux=None,
gsnr=None, rsnr=None, zsnr=None, w1snr=None, w2snr=None,
dchisq=None, maskbits=None, objtype=None, primary=None):
"""Target Definition of an SV0-like QSO. Returns a boolean array.
Parameters
----------
See :func:`~desitarget.cuts.set_target_bits`.
Returns
-------
:class:`array_like` or :class:`float`
``True`` if and only if the object is an SV-like QSO target.
If `floats` are passed, a `float` is returned.
Notes
-----
- Current version (10/14/19) is version 112 on `the SV wiki`_.
- Hardcoded for south=False.
- Combines all QSO-like SV classes into one bit.
"""
if primary is None:
primary = np.ones_like(rflux, dtype='?')
qsocolor_north = isQSO_cuts(
primary=primary, zflux=zflux, rflux=rflux, gflux=gflux,
w1flux=w1flux, w2flux=w2flux,
dchisq=dchisq, maskbits=maskbits,
objtype=objtype, w1snr=w1snr, w2snr=w2snr,
south=False
)
qsorf_north = isQSO_randomforest(
primary=primary, zflux=zflux, rflux=rflux, gflux=gflux,
w1flux=w1flux, w2flux=w2flux,
dchisq=dchisq, maskbits=maskbits,
objtype=objtype, south=False
)
qsohizf_north = isQSO_highz_faint(
primary=primary, zflux=zflux, rflux=rflux, gflux=gflux,
w1flux=w1flux, w2flux=w2flux,
dchisq=dchisq, maskbits=maskbits,
objtype=objtype, south=False
)
qsocolor_high_z_north = isQSO_color_high_z(
gflux=gflux, rflux=rflux, zflux=zflux,
w1flux=w1flux, w2flux=w2flux, south=False
)
qsoz5_north = isQSOz5_cuts(
primary=primary, gflux=gflux, rflux=rflux, zflux=zflux,
gsnr=gsnr, rsnr=rsnr, zsnr=zsnr,
w1flux=w1flux, w2flux=w2flux, w1snr=w1snr, w2snr=w2snr,
dchisq=dchisq, maskbits=maskbits, objtype=objtype,
south=False
)
qsocolor_highz_north = (qsocolor_north & qsocolor_high_z_north)
qsorf_highz_north = (qsorf_north & qsocolor_high_z_north)
qsocolor_lowz_north = (qsocolor_north & ~qsocolor_high_z_north)
qsorf_lowz_north = (qsorf_north & ~qsocolor_high_z_north)
qso_north = (qsocolor_lowz_north | qsorf_lowz_north | qsocolor_highz_north
| qsorf_highz_north | qsohizf_north | qsoz5_north)
# ADM The individual routines return arrays, so we need
# ADM a check to preserve the single-object case.
if _is_row(rflux):
return qso_north[0]
return qso_north
| 24,057
|
def show_df_nans(df, columns=None, plot_width=10, plot_height=8):
"""
Input: df (pandas dataframe), collist (list)
Output: seaborn heatmap plot
Description: Create a data frame for features which may be nan. Set NaN values be 1 and
numeric values to 0. Plots a heat map where dark squares/lines show where
data is missing. The columns to plot can be specified with an input param. Otherwise
all columns will be plotted, which can look crowded.
"""
if not columns:
plot_cols = df.columns
else:
plot_cols = columns
df_viznan = pd.DataFrame(data=1, index=df.index, columns=plot_cols)
df_viznan[~pd.isnull(df[plot_cols])] = 0
plt.figure(figsize=(plot_width, plot_height))
plt.title('Dark values are nans')
return sns.heatmap(df_viznan.astype(float))
| 24,058
|
def nocheck():
"""Test client for an app that ignores the IP and signature."""
app = flask.Flask(__name__)
app.config['DEBUG'] = True
app.config['VALIDATE_IP'] = False
app.config['VALIDATE_SIGNATURE'] = False
return app.test_client()
| 24,059
|
def evaluate_accuracy(file1, file2):
"""
evaluate accuracy
"""
count = 0
same_count = 0
f1 = open(file1, 'r')
f2 = open(file2, 'r')
while 1:
line1 = f1.readline().strip('\n')
line2 = f2.readline().strip('\n')
if (not line1) or (not line2):
break
count += 1
if int(float(line1)) == int(1 if float(line2) > 0.5 else 0):
same_count += 1
logger.info("evaluate accuracy: ")
logger.info(float(same_count) / count)
return float(same_count) / count
| 24,060
|
def append_binified_csvs(old_binified_rows: DefaultDict[tuple, list],
new_binified_rows: DefaultDict[tuple, list],
file_for_processing: FileToProcess):
""" Appends binified rows to an existing binified row data structure.
Should be in-place. """
for data_bin, rows in new_binified_rows.items():
old_binified_rows[data_bin][0].extend(rows) # Add data rows
old_binified_rows[data_bin][1].append(file_for_processing.pk)
| 24,061
|
def find_files(top, exts):
"""Return a list of file paths with one of the given extensions.
Args:
top (str): The top level directory to search in.
exts (tuple): a tuple of extensions to search for.
Returns:
a list of matching file paths.
"""
return [os.path.join(dirpath, name)
for dirpath, dirnames, filenames in os.walk(top)
for name in filenames
if name.endswith(exts)]
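A minimal usage sketch:
import os

matches = find_files(os.getcwd(), ('.py', '.txt'))
print(len(matches), 'matching files found')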
| 24,062
|
def add_hp_label(merged_annotations_column, label_type):
"""Adds prefix to annotation labels that identify the annotation as
belonging to the provided label_type (e.g. 'h@' for host proteins).
Parameters
----------
merged_annotations_column : array-like (pandas Series))
An array containing sets of annotations that need to be labeled.
e.g.
0 {GO:0010008, GO:0070062, IPR036865, GO:0048471...
1 {GO:0006351, GO:0070062, GO:0007623, GO:004851...
2 {GO:0019888, GO:0006470, GO:0001754, GO:009024...
label_type : str
The prefix to be appended (without the "@" separator).
Returns
-------
labeled_annotations : array-like (pandas Series)
A new pandas Series where all annotations have received a prefix.
"""
labeled_annotations = merged_annotations_column.map(
lambda x: set([label_type + '@' + i for i in x]))
return labeled_annotations
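A small usage sketch with pandas (set ordering in the printed output is arbitrary):
import pandas as pd

annotations = pd.Series([{'GO:0010008', 'IPR036865'}, {'GO:0006351'}])
print(add_hp_label(annotations, 'h'))
# each annotation in each set now carries the 'h@' prefix, e.g. 'h@GO:0006351'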
| 24,063
|
def decal_component_test():
"""
Basic test for the Decal(Atom) component attached to an entity.
"""
# Create child entity 'decal_1' under Default entity and add decal component to it
component_name = "Decal (Atom)"
search_filter = azlmbr.entity.SearchFilter()
search_filter.names = ['default_level']
default_level_id = azlmbr.entity.SearchBus(azlmbr.bus.Broadcast, 'SearchEntities', search_filter)[0]
decal_1_entity_name = "decal_1"
decal_1 = hydra.Entity(decal_1_entity_name)
decal_1.create_entity(math.Vector3(3.0, 0.0, 1.0), components=[component_name], parent_id=default_level_id)
# Set the Material Property in decal component to "airship_symbol_decal.material" and take screenshot
asset_value = hydra.get_asset_by_path(
os.path.join("Materials", "decal", "airship_symbol_decal.azmaterial")
)
hydra.get_set_test(decal_1, 0, "Controller|Configuration|Material", asset_value)
general.idle_wait(1.0)
hydra.take_screenshot_game_mode("Decal_1", decal_1_entity_name)
# Change the Uniform scale value in Transform component to: 3.0 and take screenshot
azlmbr.components.TransformBus(azlmbr.bus.Event, "SetLocalUniformScale", decal_1.id, 3.0)
general.idle_wait(1.0)
hydra.take_screenshot_game_mode("Decal_2", decal_1_entity_name)
# Set the Attenuation Angle to: 0.75 in Decal component and take screenshot
hydra.get_set_test(decal_1, 0, "Controller|Configuration|Attenuation Angle", 0.75)
general.idle_wait(1.0)
hydra.take_screenshot_game_mode("Decal_3", decal_1_entity_name)
# Set the Set Opacity to: 0.03 in Decal component and take screenshot
hydra.get_set_test(decal_1, 0, "Controller|Configuration|Opacity", 0.03)
general.idle_wait(1.0)
hydra.take_screenshot_game_mode("Decal_4", decal_1_entity_name)
# Set Opacity back to 1.0
hydra.get_set_test(decal_1, 0, "Controller|Configuration|Opacity", 1.0)
# Create another child entity 'decal_2' under Default entity and add decal component to it
decal_2_entity_name = "decal_2"
decal_2 = hydra.Entity(decal_2_entity_name)
decal_2.create_entity(math.Vector3(5.0, 0.0, 0.5), components=[component_name], parent_id=default_level_id)
# Set the material value to "valenaactor_helmetmat.material", Sort Key value to: 0 and take screenshot
asset_value = hydra.get_asset_by_path(
os.path.join("Valena", "valenaactor_helmetmat.azmaterial")
)
hydra.get_set_test(decal_2, 0, "Controller|Configuration|Material", asset_value)
hydra.get_set_test(decal_2, 0, "Controller|Configuration|Sort Key", 0.0)
general.idle_wait(1.0)
hydra.take_screenshot_game_mode("Decal_5", decal_2_entity_name)
# Set the Sort Key value of decal_2 to: 50 and take screenshot
hydra.get_set_test(decal_2, 0, "Controller|Configuration|Sort Key", 50.0)
general.idle_wait(1.0)
hydra.take_screenshot_game_mode("Decal_6", decal_2_entity_name)
| 24,064
|
def JsonObj(data):
""" Returns json object from data
"""
try:
if sys.version >= '3.0':
return json.loads(str(data))
else:
return compat_json(json.loads(str(data), object_hook=compat_json),
ignore_dicts=True)
except Exception as e: # noqa FIXME(sneak)
try:
return data.__str__()
except: # noqa FIXME(sneak)
raise ValueError('JsonObj could not parse %s:\n%s' %
(type(data).__name__, data.__class__))
| 24,065
|
def _serialize_value(
target_expr: str, value_expr: str, a_type: mapry.Type,
auto_id: mapry.py.generate.AutoID, py: mapry.Py) -> str:
"""
Generate the code to serialize a value.
The code serializes the ``value_expr`` into the ``target_expr``.
:param target_expr: Python expression of the JSONable to be set
:param value_expr: Python expression of the value to be serialized
:param a_type: the mapry type of the value
:param auto_id: generator of unique identifiers
:param py: Python settings
:return: generated serialization code
"""
result = ''
serialization_expr = _serialization_expr(
value_expr=value_expr, a_type=a_type, py=py)
if serialization_expr is not None:
result = '{} = {}'.format(target_expr, serialization_expr)
elif isinstance(a_type, mapry.Array):
result = _serialize_array(
target_expr=target_expr,
value_expr=value_expr,
a_type=a_type,
auto_id=auto_id,
py=py)
elif isinstance(a_type, mapry.Map):
result = _serialize_map(
target_expr=target_expr,
value_expr=value_expr,
a_type=a_type,
auto_id=auto_id,
py=py)
else:
raise NotImplementedError(
"Unhandled serialization of type: {}".format(a_type))
return result
| 24,066
|
def return_union_close():
"""union of statements, close statement"""
return " return __result"
| 24,067
|
def parse_unchanged(value: Union[str, List[str]]) -> Tuple[bool, Union[str, List[str]]]:
"""Determine if a value is 'unchanged'.
Args:
value: value supplied by user
"""
unchanges = [
SETTING_UNCHANGED,
str(SETTING_UNCHANGED),
SETTING_UNCHANGED[0],
str(SETTING_UNCHANGED[0]),
]
if value in unchanges:
return True, SETTING_UNCHANGED
return False, value
| 24,068
|
def get_val(tup):
"""Get the value from an index-value pair"""
return tup[1]
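A typical use is as a key function when ranking index-value pairs:
pairs = [(0, 3.2), (1, 7.5), (2, 1.1)]
print(max(pairs, key=get_val))  # -> (1, 7.5)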
| 24,069
|
def get_slurm_job_nodes():
"""Query the SLURM job environment for the number of nodes"""
nodes = os.environ.get('SLURM_JOB_NUM_NODES')
if nodes is None:
nodes = os.environ.get('SLURM_NNODES')
if nodes:
return int(nodes)
print("Warning: could not determine the number of nodes in this SLURM job (%d). Only using 1" % (get_job_id()))
return 1
| 24,070
|
def log(level: str, *messages: str) -> None:
"""Log a message to ``qualichat`` logger.
Parameters
----------
level: :class:`str`
The logger level to send.
*messages: :class:`str`
The log messages to send.
"""
for message in messages:
getattr(logger, level)(message)
| 24,071
|
def headers(**kwargs):
"""
Send several headers at once
"""
for name, value in kwargs.items():
header(name, value)
print()
| 24,072
|
def getresuid(*args, **kwargs): # real signature unknown
""" Return a tuple of the current process's real, effective, and saved user ids. """
pass
| 24,073
|
def insert_item(store: dict, cache: dict):
"""
Function Name: insert_item
Input:
:param dict store: Categories created by load_categories().
:param dict cache: The cache of the queries.
Output: None
Function Operation: The function gets a list of categories from the admin as well as an item.
The function adds the item to the categories specified.
"""
# Getting the query.
query = input()
    # Splitting the query by colon.
expected_number_of_colons = 1
query = query.split(":", expected_number_of_colons)
    # Validating that there is enough data.
assert len(query) == expected_number_of_colons + 1, "Error: not enough data."
# Getting the requested categories, item and price.
requested_categories, item_and_price = query
requested_categories = [category.lstrip() for category in requested_categories.split(",")]
expected_number_of_commas = 1
# Validating that the price exists.
item_and_price = item_and_price.split(',', expected_number_of_commas)
assert len(item_and_price) == expected_number_of_commas + 1, "Error: not enough data."
# Getting the item and price.
item, price = map(str.lstrip, item_and_price)
# Making sure all categories exist.
assert set(requested_categories).issubset(store.keys()), "Error: one of the categories does not exist."
# Checking if price is legal (positive integer).
min_price = 0
assert price.isdigit() and int(price) > min_price, "Error: price is not a positive integer."
# Getting the list of categories the item is in.
matching_categories = [category for category in store if item in store[category]]
if matching_categories:
# Updating the item.
for category in matching_categories:
store[category][item] = price
else:
# Adding the item to the store.
for category in requested_categories:
store[category][item] = price
print(f"Item \"{item}\" added.")
# Clearing the cache.
cache.clear()
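A hedged usage sketch of the expected admin query format ("category1, category2: item, price"); input() is stubbed only to make the example self-contained, and the store mirrors what load_categories() would produce:
import builtins

store = {"fruit": {}, "snacks": {}}
cache = {"old query": "old result"}
builtins.input = lambda: "fruit, snacks: apple, 3"   # simulated admin input
insert_item(store, cache)                            # prints: Item "apple" added.
assert store == {"fruit": {"apple": "3"}, "snacks": {"apple": "3"}}
assert cache == {}                                   # cache is cleared after the insert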
| 24,074
|
def repoinit(testconfig, profiler=None):
"""Determines revision and sets up the repo. If given the profiler optional
    argument, will init the profiler repo instead of the default one."""
revision = ''
#Update the repo
if profiler == "gnu-profiler":
if testconfig.repo_prof is not None:
os.chdir(testconfig.repo_prof)
else:
raise ValueError('Profiling repo is not defined')
elif profiler == "google-profiler":
if testconfig.repo_gprof is not None:
os.chdir(testconfig.repo_gprof)
else:
raise ValueError('Profiling repo is not defined')
else:
os.chdir(testconfig.repo)
#Checkout specific branch, else maintain main branch
if testconfig.branch != 'master':
subprocess.call(['git', 'checkout', testconfig.branch])
rev, _ = subprocess.Popen(['git', 'rev-parse', 'HEAD'],\
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
revision = str(rev).replace("\\n'", '').replace("b'", '')
else:
subprocess.call(['git checkout master'], shell=True)
#Check a specific revision. Else checkout master.
if testconfig.revision:
subprocess.call(['git', 'checkout', testconfig.revision])
revision = testconfig.revision
elif testconfig.branch == 'master':
subprocess.call(['git pull'], shell=True)
rev, _ = subprocess.Popen(['git rev-parse HEAD'], stdout=subprocess.PIPE,\
stderr=subprocess.PIPE, shell=True).communicate()
revision = str(rev).replace("\\n'", '').replace("b'", '')
return revision
| 24,075
|
def test_export_formatted_data(
ref_tree,
res_tree_equal,
ref_csv,
res_csv_equal,
tmp_conftest,
pytester,
do_export,
export_suffix,
registry_reseter,
):
"""Test that the formatted files are properly exported."""
args = []
if not do_export:
expected_dir = """res_path.with_name(res_path.name + "_FORMATTED")"""
tester = f"""assert not {expected_dir}.exists()"""
else:
if export_suffix is None:
suffix = "_FORMATTED"
else:
suffix = export_suffix
expected_dir = f"""res_path.with_name(res_path.name + "{suffix}")"""
args.append("--dcd-export-formatted-data")
if export_suffix is not None:
args.append("--dcd-export-suffix")
args.append(export_suffix)
tester = """assert list(expected_dir.iterdir()) == [expected_dir / "file.csv"]"""
expected_dir_str = f"""expected_dir = {expected_dir}"""
remover = """rmtree(expected_dir, ignore_errors=True)"""
# create a temporary conftest.py file
pytester.makeconftest(tmp_conftest)
# create a temporary pytest test file
pytester.makepyfile(
f"""
from shutil import rmtree
import dir_content_diff
import dir_content_diff.pandas
from dir_content_diff import assert_equal_trees
dir_content_diff.reset_comparators()
dir_content_diff.pandas.register()
def test_export_formatted_data_default(ref_path, res_path):
{expected_dir_str}
{remover}
assert_equal_trees(ref_path, res_path)
{tester}
def test_export_formatted_data_no_suffix(ref_path, res_path):
expected_dir = res_path.with_name(res_path.name + "_FORMATTED")
rmtree(expected_dir, ignore_errors=True)
assert_equal_trees(ref_path, res_path, export_formatted_files=True)
assert list(expected_dir.iterdir()) == [expected_dir / "file.csv"]
def test_export_formatted_data_suffix(ref_path, res_path):
expected_dir = res_path.with_name(res_path.name + "_NEW_SUFFIX")
rmtree(expected_dir, ignore_errors=True)
assert_equal_trees(ref_path, res_path, export_formatted_files="_NEW_SUFFIX")
assert list(expected_dir.iterdir()) == [expected_dir / "file.csv"]
"""
)
# run all tests with pytest
result = pytester.runpytest(*args)
# check that all 3 tests passed
result.assert_outcomes(passed=3)
| 24,076
|
def test_nested_evented_model_serialization():
"""Test that encoders on nested sub-models can be used by top model."""
class NestedModel(EventedModel):
obj: MyObj
class Model(EventedModel):
nest: NestedModel
m = Model(nest={'obj': {"a": 1, "b": "hi"}})
raw = m.json()
assert raw == r'{"nest": {"obj": {"a": 1, "b": "hi"}}}'
deserialized = Model.parse_raw(raw)
assert deserialized == m
| 24,077
|
def lnprior_d(d,L=default_L):
""" Expotentially declining prior. d, L in kpc (default L=0.5) """
if d < 0: return -np.inf
return -np.log(2) - 3*np.log(L) + 2*np.log(d) - d/L
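For reference, the expression above is the log of the normalized exponentially declining space-density prior with scale length L, written out here in LaTeX as a direct reading of the code:
p(d \mid L) = \frac{d^{2}}{2 L^{3}} \, e^{-d/L}, \qquad d \ge 0,
\ln p(d \mid L) = -\ln 2 - 3 \ln L + 2 \ln d - \frac{d}{L}.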
| 24,078
|
def test_isomorphism_known_outcome(node_data1, edges1, node_data2, edges2, expected):
"""
Tests for the function ``isomorphism``. May give a false failure (!) if
a different (but still correct) answer is returned due to an implementation
change.
"""
reference = basic_molecule(node_data1, edges1)
graph = basic_molecule(node_data2, edges2)
ism = vermouth.ismags.ISMAGS(reference, graph, node_match=nx.isomorphism.categorical_node_match('element', None))
found = list(ism.subgraph_isomorphisms_iter())
found = make_into_set(found)
expected = make_into_set(expected)
assert found == expected
| 24,079
|
def textile(text, **args):
"""This is Textile.
Generates XHTML from a simple markup developed by Dean Allen.
This function should be called like this:
textile(text, head_offset=0, validate=0, sanitize=0,
encoding='latin-1', output='ASCII')
"""
return Textiler(text).process(**args)
| 24,080
|
def binary_info_gain(df, feature, y):
"""
:param df: input dataframe
:param feature: column to investigate
:param y: column to predict
:return: information gain from binary feature column
"""
return float(sum(np.logical_and(df[feature], df[y])))/len(df[feature])
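A small illustration with a toy DataFrame; note that the returned value is the fraction of rows in which both binary columns are truthy:
import numpy as np  # module-level imports assumed by binary_info_gain
import pandas as pd

df = pd.DataFrame({"f": [1, 1, 0, 0], "y": [1, 0, 1, 0]})
print(binary_info_gain(df, "f", "y"))   # 0.25 (1 of 4 rows has f=1 and y=1)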
| 24,081
|
def draw_shapes(shapes):
"""
Renders the shapes to a separate display surface and then blits them to the main screen too
:arg
shapes: list of shapes classes to render
"""
    for shape in shapes:
        shape.draw_shape()
main_screen.blit(image_screen, (0, 0))
| 24,082
|
def load_inputs(mod, switch_data, inputs_dir):
"""
The cost penalty of unserved load in units of $/MWh is the only parameter
that can be inputted. The following file is not mandatory, because the
parameter defaults to a value of 500 $/MWh.
optional input files:
lost_load_cost.dat
unserved_load_penalty
"""
lost_load_path = os.path.join(inputs_dir, 'lost_load_cost.dat')
if os.path.isfile(lost_load_path):
switch_data.load(filename=lost_load_path)
| 24,083
|
def get_all_instances(region):
"""
    Returns a dict mapping each service type ('EC2', 'RDS') to the schedulable
    instances managed by the scheduler in the given region
"""
ec2 = boto3.resource('ec2', region_name=region)
rds = boto3.client('rds', region_name=region)
return {
'EC2': [EC2Schedulable(ec2, i) for i in ec2.instances.all()],
'RDS': [RDSSchedulable(rds, i) for i in rds.describe_db_instances()['DBInstances']]
}
| 24,084
|
def futures_navigating(urls: list, amap: bool = True) -> dict:
"""
    Asynchronously request the AMap (Gaode) routing API for each drive URL in the list and collect the route-planning results
    :param urls:
    :param amap: switch that enables the AMap requests
:return:
"""
data_collections = [None] * len(urls)
pack_data_result = {}
all_tasks = []
    # Preparation
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
try:
event_loop = asyncio.get_event_loop()
except Exception as _:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
event_loop = asyncio.get_event_loop()
    # Thread pool
# for idx in range(len(urls)):
# all_tasks.append(event_loop.run_in_executor(register.pool, request_navigating, urls[idx], idx, data_collections))
# event_loop.run_until_complete(asyncio.wait(all_tasks))
    # Asynchronous IO
if amap:
event_loop.run_until_complete(async_request_navigating(urls, data_collections))
    # Collect the results; only ['route']['paths'][0] is used, i.e. only the data of the first strategy
for idx in range(len(urls)):
        # If the request for this new URL failed
if not data_collections[idx]:
if amap:
register.logger.error(f"futures_navigating request failed,new url:{urls[idx]},url_idx:{idx}")
data_collections[idx] = default_data_with_navigating_url(urls[idx], idx, data_collections)
api_data_result = data_collections[idx]
if not pack_data_result:
pack_data_result = api_data_result
pack_data_result['route']['paths'] = [pack_data_result['route']['paths'][0]]
else:
pack_data_result['route']['destination'] = api_data_result['route']['destination']
pack_data_result['route']['taxi_cost'] = str(
float(pack_data_result['route']['taxi_cost']) + float(api_data_result['route']['taxi_cost']))
pack_data_result['route']['paths'][0]['distance'] = str(
float(pack_data_result['route']['paths'][0]['distance']) + float(api_data_result['route']['paths'][0]['distance']))
pack_data_result['route']['paths'][0]['duration'] = str(
float(pack_data_result['route']['paths'][0]['duration']) + float(api_data_result['route']['paths'][0]['duration']))
pack_data_result['route']['paths'][0]['tolls'] = str(
float(pack_data_result['route']['paths'][0]['tolls']) + float(api_data_result['route']['paths'][0]['tolls']))
pack_data_result['route']['paths'][0]['toll_distance'] = str(
float(pack_data_result['route']['paths'][0]['toll_distance']) + float(
api_data_result['route']['paths'][0]['toll_distance']))
pack_data_result['route']['paths'][0]['steps'].extend(api_data_result['route']['paths'][0]['steps'])
return pack_data_result
| 24,085
|
def getdim(s):
"""If s is a representation of a vector, return the dimension."""
if len(s) > 4 and s[0] == "[" and s[-1] == "]":
return len(splitargs(s[1:-1], ["(", "["], [")", "]"]))
else:
return 0
| 24,086
|
def unstruct2grid(coordinates,
quantity,
cellsize,
k_nearest_neighbors=3,
boundary=None,
crop=True):
"""Convert unstructured model outputs into gridded arrays.
Interpolates model variables (e.g. depth, velocity) from an
unstructured grid onto a Cartesian grid using inverse-distance-weighted
interpolation. Assumes projected (i.e. "flat") geographic coordinates.
Accepts coordinates in meters or decimal degrees. Extent of output
rasters are based on extent of coordinates.
(Function modeled after ANUGA plot_utils code)
**Inputs** :
coordinates : `list`
List [] of (x,y) pairs or tuples of coordinates at which the
interpolation quantities are located (e.g. centroids or vertices
of an unstructured hydrodynamic model).
quantity : `list`
List [] of data to be interpolated with indices matching
each (x,y) location given in coordinates. If quantity is
depth, list would be formatted as [d1, d2, ... , dn].
cellsize : `float or int`
Length along one square cell face.
k_nearest_neighbors : `int`, optional
Number of nearest neighbors to use in the interpolation.
If k>1, inverse-distance-weighted interpolation is used.
boundary : `list`, optional
List [] of (x,y) coordinates used to delineate the boundary of
interpolation. Points outside the polygon will be assigned as nan.
Format needs to match requirements of matplotlib.path.Path()
crop : `bool`, optional
If a boundary is specified, setting crop to True will eliminate
any all-NaN borders from the interpolated rasters.
**Outputs** :
interp_func : `function`
Nearest-neighbor interpolation function for gridding additional
quantities. Quicker to use this output function on additional
variables (e.g. later time-steps of an unsteady model) than
to make additional function calls to unstruct2grid. Function
assumes data have the same coordinates. It is used as follows:
"new_gridded_quantity = interp_func(new_quantity)".
gridded_quantity : `numpy.ndarray`
Array of quantity after interpolation.
"""
import matplotlib
import scipy
from scipy import interpolate
cellsize = float(cellsize)
# Make sure all input values are floats
x = [float(i) for i, j in coordinates]
y = [float(j) for i, j in coordinates]
quantity = np.array([float(i) for i in quantity])
if len(quantity) != len(x):
raise ValueError("Coordinate and quantity arrays must be equal length")
# Get some dimensions and make x,y grid
nx = int(np.ceil((max(x)-min(x))/cellsize)+1)
xvect = np.linspace(min(x), min(x)+cellsize*(nx-1), nx)
ny = int(np.ceil((max(y)-min(y))/cellsize)+1)
yvect = np.linspace(min(y), min(y)+cellsize*(ny-1), ny)
gridX, gridY = np.meshgrid(xvect, yvect)
inputXY = np.array([x[:], y[:]]).transpose()
gridXY_array = np.array([np.concatenate(gridX),
np.concatenate(gridY)]).transpose()
gridXY_array = np.ascontiguousarray(gridXY_array)
# If a boundary has been specified, create array to index outside it
if boundary is not None:
path = matplotlib.path.Path(boundary)
outside = ~path.contains_points(gridXY_array)
# Create Interpolation function
if k_nearest_neighbors == 1: # Only use nearest neighbor
index_qFun = interpolate.NearestNDInterpolator(inputXY,
np.arange(len(x), dtype='int64').transpose())
gridqInd = index_qFun(gridXY_array)
# Function to do the interpolation
def interp_func(data):
if isinstance(data, list):
data = np.array(data)
gridded_data = data[gridqInd].astype(float)
if boundary is not None:
gridded_data[outside] = np.nan # Crop to bounds
gridded_data.shape = (len(yvect), len(xvect))
gridded_data = np.flipud(gridded_data)
if boundary is not None and crop is True:
mask = ~np.isnan(gridded_data) # Delete all-nan border
gridded_data = gridded_data[np.ix_(mask.any(1),
mask.any(0))]
return gridded_data
else:
# Inverse-distance interpolation
index_qFun = scipy.spatial.cKDTree(inputXY)
NNInfo = index_qFun.query(gridXY_array, k=k_nearest_neighbors)
# Weights for interpolation
nn_wts = 1./(NNInfo[0]+1.0e-100)
nn_inds = NNInfo[1]
def interp_func(data):
if isinstance(data, list):
data = np.array(data)
denom = 0.
num = 0.
for i in list(range(k_nearest_neighbors)):
denom += nn_wts[:, i]
num += data[nn_inds[:, i]].astype(float)*nn_wts[:, i]
gridded_data = (num/denom)
if boundary is not None:
gridded_data[outside] = np.nan # Crop to bounds
gridded_data.shape = (len(yvect), len(xvect))
gridded_data = np.flipud(gridded_data)
if boundary is not None and crop is True:
mask = ~np.isnan(gridded_data) # Delete all-nan border
gridded_data = gridded_data[np.ix_(mask.any(1),
mask.any(0))]
return gridded_data
# Finally, call the interpolation function to create array:
gridded_quantity = interp_func(quantity)
return interp_func, gridded_quantity
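A minimal usage sketch on synthetic data (the four corners of a 10 m x 10 m square), assuming numpy is available as np at module level:
import numpy as np  # module-level import assumed by unstruct2grid

coords = [(0.0, 0.0), (10.0, 0.0), (0.0, 10.0), (10.0, 10.0)]
depth = [1.0, 2.0, 3.0, 4.0]
interp_func, grid = unstruct2grid(coords, depth, cellsize=5.0,
                                  k_nearest_neighbors=1)
print(grid.shape)                              # (3, 3) raster over the 10 m x 10 m extent
grid_t2 = interp_func([2.0, 4.0, 6.0, 8.0])    # regrid a later time step with the same coords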
| 24,087
|
def users_bulk_update(file, set_fields, jump_to_index, jump_to_user, limit,
workers):
"""
Bulk-update users from a CSV or Excel (.xlsx) file
The CSV file *must* contain a "profile.login" OR an "id" column.
All columns which do not contain a dot (".") are ignored. You can only
update fields of sub-structures, not top level fields in okta (e.g. you
*can* update "profile.site", but you *cannot* update "id").
"""
def excel_reader():
wb = load_workbook(filename=file)
rows = wb.active.rows
# Get the header values as keys and move the iterator to the next item
keys = [c.value for c in next(rows)]
num_keys = len(keys)
for row in rows:
values = [c.value for c in row]
rv = dict(zip(keys, values[:num_keys]))
if any(rv.values()):
yield rv
def csv_reader():
with open(file, "r", encoding="utf-8") as infile:
dialect = csv.Sniffer().sniff(infile.read(4096))
infile.seek(0)
dr = csv.DictReader(infile, dialect=dialect)
for row in dr:
if any(row.values()):
yield row
def file_reader():
dr = excel_reader() \
if splitext(file)[1].lower() == ".xlsx" else csv_reader()
if jump_to_user:
tmp = next(dr)
while jump_to_user not in (
tmp.get("profile.login", ""), tmp.get("id", "")):
tmp = next(dr)
elif jump_to_index:
# prevent both being used at the same time :)
for _ in range(jump_to_index):
next(dr)
_cnt = 0
for row in dr:
if limit and _cnt == limit:
break
yield row
_cnt += 1
def update_user_parallel(_row, index):
user_id = None
# this is a closure, let's use the outer scope's variables
# Set preference to "id" first
for field in ("id", "profile.login"):
if field in _row and user_id is None:
user_id = _row.pop(field)
# you can't set top-level fields. pop all of them.
_row = {k: v for k, v in _row.items() if k.find(".") > -1}
# fields_dict - from outer scope.
final_dict = _dict_flat_to_nested(_row, defaults=fields_dict)
# user_id check
if user_id is None:
upd_err.append((
index + jump_to_index,
final_dict,
"missing user_id column (id or profile.login)"
))
return
try:
upd_ok.append(okta_manager.update_user(user_id, final_dict))
except RequestsHTTPError as e:
upd_err.append((index + jump_to_index, final_dict, str(e)))
print("Bulk update might take a while. Please be patient.", flush=True)
upd_ok = []
upd_err = []
fields_dict = {k: v for k, v in map(lambda x: x.split("="), set_fields)}
dr = file_reader()
with ThreadPoolExecutor(max_workers=workers) as ex:
runs = {idx: ex.submit(update_user_parallel, row, idx)
for idx, row in enumerate(dr)}
for job in as_completed(runs.values()):
pass
print(f"{len(runs)} - done.", file=sys.stderr)
tmp = {"ok": upd_ok, "errors": upd_err}
timestamp_str = dt.now().strftime("%Y%m%d_%H%M%S")
rv = ""
for name, results in tmp.items():
if len(results):
file_name = f"okta-bulk-update-{timestamp_str}-{name}.json"
with open(file_name, "w") as outfile:
outfile.write(json.dumps(results, indent=2, sort_keys=True))
rv += f"{len(results):>4} {name:6} - {file_name}\n"
else:
rv += f"{len(results):>4} {name:6}\n"
return rv + f"{len(upd_ok) + len(upd_err)} total"
| 24,088
|
def numToString(num: int) -> str:
"""Write a number in base 36 and return it as a string
:param num: number to encode
:return: number encoded as a base-36 string
"""
base36 = ''
while num:
num, i = divmod(num, 36)
base36 = BASE36_ALPHABET[i] + base36
return base36 or BASE36_ALPHABET[0]
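A quick sketch; BASE36_ALPHABET is defined elsewhere in the module, so the digits-then-letters alphabet used here is an assumption:
BASE36_ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz"  # assumed definition

assert numToString(0) == "0"
assert numToString(35) == "z"
assert numToString(36) == "10"
assert numToString(1295) == "zz"   # 35*36 + 35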
| 24,089
|
def real_check(*args, stacklevel=2):
"""Check if arguments are *real numbers* (``int`` and/or ``float``).
Args:
*args: Arguments to check.
stacklevel (int): Stack level to fetch originated function name.
Raises:
RealError: If any of the arguments is **NOT** *real number* (``int`` and/or ``float``).
"""
for var in args:
if not isinstance(var, numbers.Real):
name = type(var).__name__
func = inspect.stack()[stacklevel][3]
raise RealError(f'Function {func} expected real number, {name} got instead.')
| 24,090
|
def cir(request):
"""
    Render the current compass ("cir") plot and return it as an SVG/XML string
"""
config={
"markerSizeFactor": float(request.GET.get('marker-size-factor', 1)),
"markerColorMap": request.GET.get('marker-color-map', 'winter'),
"xAxisLabels": [
(math.radians(10), 'Kontra'),
(math.radians(90), 'Unentschieden'),
(math.radians(170), 'Pro')
]
}
data = get_data(request)
compass = Compass(data=data, config=config)
xml = compass.plotAsXml()
# XML POST-MODIFICATIONS
    # the following nodes are rendered last (for on-top/vertical ordering)
selectedNodeIds = [60]
selectedNodeEls = []
# set 100% size
    root = ET.fromstring(xml) # Convert to an XML element tree
root.attrib['width'] = '100%'
root.attrib.pop('height')
# Add interactivity to dot-nodes
# TODO: do more efficient xpath...
scatgrid = root.find('.//*[@id="scatgrid"]')
if scatgrid:
nodes = scatgrid.findall('.//*[@clip-path]/*')
for i in range(len(nodes)):
node = nodes[i]
node.attrib['id'] = "dot%s" % i # Temporary
node.attrib['value'] = "%s" % round(compass.dots[i].value) # Temporary
node.attrib['pos'] = "%s" % i # Original Position in List (used for z-index reordering)
node.attrib['onclick'] = "dmclick(this, %s);" % i
node.attrib['onmouseover'] = "dmover(this, %s);" % i
if i in selectedNodeIds:
selectedNodeEls.append(node)
for sel in selectedNodeEls:
g = sel.getparent()
scatgrid.append(g)
# test_list.insert(0, test_list.pop())
pass
    # Add a new element
# ET.SubElement(root,"use", id='placeholder')
# Append Background to XML Image
# z = compass.config['zoomFactor']/2
# x, y, r = compass._matplotlib_get_polar_chart_position()
# bgEl = ET.fromstring("""<g id="bgpattern">
# <defs>
# <path id="meab67247b1" d="M 0 7.284288 C 1.931816 7.284288 3.784769 6.516769 5.150769 5.150769 C 6.516769 3.784769 7.284288 1.931816 7.284288 0 C 7.284288 -1.931816 6.516769 -3.784769 5.150769 -5.150769 C 3.784769 -6.516769 1.931816 -7.284288 0 -7.284288 C -1.931816 -7.284288 -3.784769 -6.516769 -5.150769 -5.150769 C -6.516769 -3.784769 -7.284288 -1.931816 -7.284288 0 C -7.284288 1.931816 -6.516769 3.784769 -5.150769 5.150769 C -3.784769 6.516769 -1.931816 7.284288 0 7.284288 z " style="stroke: #1f77b4; stroke-opacity: 0.75"/>
# <linearGradient id="myGradient" >
# <stop offset="0%%" stop-color="gold" />
# <stop offset="100%%" stop-color="blue" />
# </linearGradient>
# </defs>
# <circle cx="%s" cy="%s" r="%s" fill="url('#myGradient')" />
# </g>""" % (x*z, y*z, r*z))
# axes1El = root.find('.//*[@id="axes_1"]')
# axes1El.insert(1, bgEl)
# export XML
content = ET.tostring(root, xml_declaration=True, encoding="UTF-8")
return content.decode("utf-8")
| 24,091
|
def parse_enumeration(enumeration_bytes):
"""Parse enumeration_bytes into a list of test_ids."""
# If subunit v2 is available, use it.
if bytestream_to_streamresult is not None:
return _v2(enumeration_bytes)
else:
return _v1(enumeration_bytes)
| 24,092
|
def freeze_blobs(input_file, output_file):
"""Write the frozen blobs to the output file"""
blob_lines = []
for line in input_file:
if line.startswith('//!'):
output_file.write(b'const char '
+ bytearray(line[4:].strip(), 'ascii')
+ b'[] = {\n')
xxd = Popen(['xxd', '-i'], stdout=PIPE, stdin=PIPE, stderr=PIPE)
output_file.write(xxd.communicate(input=bytearray(
''.join(blob_lines), 'ascii'))[0])
output_file.write(b'};\n\n')
blob_lines = []
else:
blob_lines.append(line)
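A short usage sketch, assuming Popen and PIPE come from subprocess at module level and the xxd binary is on the PATH; every "//! name" line flushes the accumulated blob lines into a C array called name:
import io
from subprocess import PIPE, Popen  # module-level imports assumed by freeze_blobs

source = io.StringIO("hello\n//! greeting_blob\n")
with open("blobs.c", "wb") as out:
    freeze_blobs(source, out)
# blobs.c now holds roughly: const char greeting_blob[] = { 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x0a };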
| 24,093
|
def register_user(username, passwd, email): # type: (str, str, str) -> Optional[str]
"""Returns an error message or None on success."""
if passwd == "":
return "The password can't be empty!"
if email: # validate the email only if it is provided
result = validate_email_address(email)
if result:
return result
username = username.strip()
if not re.match(config.get('nick_regex'), username):
return "Invalid username!"
crypted_pw = encrypt_pw(passwd)
with crawl_db(config.get('password_db')) as db:
db.c.execute("select username from dglusers where username=? collate nocase",
(username,))
result = db.c.fetchone()
if result:
return "User already exists!"
with crawl_db(config.get('password_db')) as db:
query = """
INSERT INTO dglusers
(username, email, password, flags, env)
VALUES
(?, ?, ?, 0, '')
"""
db.c.execute(query, (username, email, crypted_pw))
db.conn.commit()
return None
| 24,094
|
def _request_helper_chunked(rxgobj, tid):
"""Private helper to get requests with chunks.
Potentially multiple threads will execute this, each with a unique tid."""
thisrequest = rxgobj.get_next_xorrequest(tid)
#the socket is fixed for each thread, so we only need to do this once
socket = thisrequest[0]['sock']
rqtype = thisrequest[3] #the request type is also fixed
# go until there are no more requests
while thisrequest != ():
chunks = thisrequest[2]
try:
# request the XOR block...
if rqtype == 1: # chunks and seed expansion
lib.request_xorblock_chunked_rng(socket, chunks)
elif rqtype == 2: # chunks, seed expansion and parallel
lib.request_xorblock_chunked_rng_parallel(socket, chunks)
else: # only chunks (redundancy)
lib.request_xorblock_chunked(socket, chunks)
except Exception as e:
            if 'socket' in str(e):
rxgobj.notify_failure(thisrequest)
sys.stdout.write('F')
sys.stdout.flush()
else:
# otherwise, re-raise...
raise
# regardless of failure or success, get another request...
thisrequest = rxgobj.get_next_xorrequest(tid)
# and that's it!
return
| 24,095
|
def refresh_kubeconfig(ctx,
**_):
"""
Refresh access token in kubeconfig for cloudify.nodes.aws.eks.Cluster
target node type.
"""
if utils.is_node_type(ctx.target.node,
CLUSTER_TYPE):
resource_config = ctx.target.instance.runtime_properties.get(
'resource_config',
{})
iface = EKSCluster(ctx.target.node,
logger=ctx.logger,
resource_id=utils.get_resource_id(
node=ctx.target.node,
instance=ctx.target.instance,
raise_on_missing=True))
if ctx.target.node.properties['store_kube_config_in_runtime']:
_store_kubeconfig_in_runtime_properties(
node=ctx.target.node,
instance=ctx.target.instance,
iface=iface,
params=resource_config)
| 24,096
|
def _train_n_hmm(data: _Array, m_states: int, n_trails: int):
"""Trains ``n_trails`` HMMs each initialized with a random tpm.
Args:
        data: Possibly unprocessed input data set.
m_states: Number of states.
n_trails: Number of trails.
Returns:
Best model regarding to log-likelihood.
"""
feat = data.round().astype(int)
trails = []
for i in range(n_trails):
hmm = PoissonHmm(feat, m_states, init_gamma='softmax')
hmm.fit(feat)
if hmm.success:
trails.append(hmm)
if len(trails) == 0:
return None
return min(trails, key=lambda hmm: abs(hmm.quality.nll))
| 24,097
|
def get_metric_function(name):
"""
Get a metric from the supported_sklearn_metric_functions dictionary.
Parameters
----------
name : str
The name of the metric to get.
Returns
-------
metric : function
The metric function.
"""
if name in supported_sklearn_metric_functions:
return supported_sklearn_metric_functions[name]
raise ValueError(
"The metric {} is not supported. Supported metrics are: {}".format(
name, list(supported_sklearn_metrics)
)
)
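A hedged sketch; supported_sklearn_metric_functions and supported_sklearn_metrics are assumed to be module-level registries mapping names to scikit-learn callables:
from sklearn.metrics import accuracy_score

supported_sklearn_metric_functions = {"accuracy": accuracy_score}  # assumed registry contents
supported_sklearn_metrics = list(supported_sklearn_metric_functions)

metric = get_metric_function("accuracy")
print(metric([0, 1, 1], [0, 1, 0]))   # 0.666...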
| 24,098
|
def ping(enode, count, destination, interval=None, quiet=False, shell=None):
"""
Perform a ping and parse the result.
:param enode: Engine node to communicate with.
:type enode: topology.platforms.base.BaseNode
:param int count: Number of packets to send.
:param str destination: The destination host.
:param float interval: The wait interval in seconds between each packet.
:param str shell: Shell name to execute commands. If ``None``, use the
Engine Node default shell.
:rtype: dict
:return: The parsed result of the ping command in a dictionary of the form:
::
{
'transmitted': 0,
'received': 0,
'errors': 0,
'loss_pc': 0,
'time_ms': 0
}
"""
assert count > 0
assert destination
addr = ip_address(destination)
cmd = 'ping'
if addr.version == 6:
cmd = 'ping6'
cmd = [cmd, '-c', str(count), destination]
if interval is not None:
assert interval > 0
cmd.append('-i')
cmd.append(str(interval))
if quiet:
cmd.append('-q')
ping_raw = enode(' '.join(cmd), shell=shell)
assert ping_raw
for line in ping_raw.splitlines():
m = match(PING_RE, line)
if m:
return {
k: (int(v) if v is not None else 0)
for k, v in m.groupdict().items()
}
raise Exception('Could not parse ping result')
| 24,099
|