| content (string, lengths 22-815k) | id (int64, 0-4.91M) |
|---|---|
def notebook(request, id=0):
"""
:param request:
:param id:
:return:
"""
get_notebook = JupyterNotebooks.objects.get(id=id)
return render(request, "customdashboard/notebook.html",
{'get_notebook': get_notebook})
| 12,100
|
def encrypt_decrypt(data_string, password, mode='encrypt'):
"""Encrypts OR Decrypts data_string w.r.t password based on mode specified
Parameters:
data_string: Text that needs to be encoded. passed in string format
password: a string to encrypt data before encoding into an image.
mode:
'encrypt' --> encrypts the data
'decrypt' --> decrypts the data
Returns:
Data string either encrypted or decrypted based on mode specified
"""
_hash = md5(password.encode())
hash_value = _hash.hexdigest()
key = urlsafe_b64encode(hash_value.encode())
cipher = Fernet(key) # 32-byte key - URLsafe - base64-encoded
if mode=='encrypt':
data_bytes = data_string.encode()
encrypted_bytes = cipher.encrypt(data_bytes)
encrypted_data_string = encrypted_bytes.decode()
return encrypted_data_string
elif mode=='decrypt':
encrypted_bytes = data_string.encode()
decrypted_bytes = cipher.decrypt(encrypted_bytes)
decrypted_data_string = decrypted_bytes.decode()
return decrypted_data_string
else:
raise InvalidModeError("Expected 'encrypt' OR 'decrypt' ")
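# Illustrative roundtrip sketch (assumes the imports used above are in scope:
# hashlib.md5, base64.urlsafe_b64encode and cryptography.fernet.Fernet).
secret = encrypt_decrypt("hello world", "my-password", mode='encrypt')
plain = encrypt_decrypt(secret, "my-password", mode='decrypt')
assert plain == "hello world"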
| 12,101
|
def test_atanh():
"""Test atanh function behavior."""
dep_vector = np.array([0.1, 0.98, 0.5])
strict_compare_waves(dep_vector, peng.atanh, "atanh", "", np.arctanh)
| 12,102
|
def compute_alphabet(sequences):
"""
Returns the alphabet used in a set of sequences.
"""
alphabet = set()
for s in sequences:
alphabet = alphabet.union(set(s))
return alphabet
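# Illustrative usage: the alphabet of a few DNA strings.
print(compute_alphabet(["ACGT", "AAAC", "GGT"]))  # {'A', 'C', 'G', 'T'} (set order varies)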
| 12,103
|
def numpy_dtype_arg_type(string: str) -> np.dtype:
"""argument type for string reps of numpy dtypes"""
try:
ret = np.dtype(string)
except TypeError as error:
raise argparse.ArgumentTypeError(str(error))
return ret
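# Hypothetical argparse wiring (assumes `import argparse` and `import numpy as np`).
parser = argparse.ArgumentParser()
parser.add_argument("--dtype", type=numpy_dtype_arg_type, default=np.dtype("float32"))
print(parser.parse_args(["--dtype", "int16"]).dtype)  # int16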
| 12,104
|
def Diff(a, b):
"""Returns the number of different elements between 2 interables.
Args:
a(iterable): first iterable.
b(iterable): second iterable.
Returns:
int: the number of different elements.
"""
return sum(map(lambda x, y: bool(x-y), a, b))
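# Illustrative usage: position-wise comparison of two numeric iterables.
print(Diff([1, 2, 3], [1, 5, 3]))  # 1 -- only the middle elements differ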
| 12,105
|
def create_piechart(AttrJson, FnameImage, Title=None, Yunit="perc", LabelsMap=None, Silent=False):
"""
todo: add cycle-free subset to plot using pairs of similar colours
todo: add unit tests
Creates a pie chart of the basins of attraction specified in *AttrJson*.
Requires that *AttrJson* has been extended with basins information by :ref:`compute_basins`.
Requires https://matplotlib.org.
**arguments**:
* *AttrJson* (dict): json attractor data, see :ref:`attractors_compute_json`
* *FnameImage* (str): create image for pie chart
* *Title* (str): optional title of plot
* *Yunit* (str): "perc" for percentage of state space and "size" for number of states
* *LabelsMap* (function): a map from minimal trap space dictionary of attractor to label str
* *Silent* (bool): print infos to screen
**returns**:
* *None*
**example**::
>>> attrs = Attractors.compute_json(primes, update)
>>> compute_basins(attrs)
>>> create_piechart(attrs, "piechart.pdf")
created piechart.pdf
"""
import matplotlib.pyplot
Primes = AttrJson["primes"]
Attrs = AttrJson["attractors"]
assert(all(basin in x for basin in ["weak_basin", "strong_basin", "cyclefree_basin"] for x in Attrs))
assert(Yunit in ["perc", "size"])
if not Silent: print("Basins.create_piechart(..)")
total = PyBoolNet.StateTransitionGraphs.size_state_space(Primes)
strong = sum(x["strong_basin"]["size"] for x in Attrs)
outside = total - strong
indeces = list(range(len(Attrs)))
indeces.sort(key=lambda i: Attrs[i]["strong_basin"]["perc"], reverse=True)
figure = matplotlib.pyplot.figure()
sizes = [Attrs[i]["strong_basin"]["size"] for i in indeces] + [outside]
if len(Attrs)<=9:
colors = [PIE_COLORS[i] for i,x in enumerate(Attrs)]
else:
colors = [matplotlib.pyplot.cm.rainbow(1.*x/(len(indeces)+1)) for x in range(len(indeces)+2)][1:-1]
colors.append(BASIN_COLORS[0]) # for slice that represents "outside" states
explode = [0]*len(indeces)+[.08]
if not LabelsMap:
labels = [Attrs[i]["mintrapspace"]["str"] for i in indeces] + [""]
else:
labels = [LabelsMap(Attrs[i]["mintrapspace"]["dict"]) for i in indeces] + [""]
autopct = (lambda p: '{:.0f}'.format(p * total / 100)) if Yunit=="size" else "%1.1f%%"
stuff = matplotlib.pyplot.pie(sizes, explode=explode, labels=labels, colors=colors, autopct=autopct, shadow=True, startangle=140)
patches = stuff[0] # required because matplotlib.pyplot.pie returns variable number of things depending on autopct!!
for i, patch in enumerate(patches):
patch.set_ec("black")
matplotlib.pyplot.axis('equal')
if Title==None:
Title = 'Strong Basins of Attraction'
matplotlib.pyplot.title(Title, y=1.08)
matplotlib.pyplot.tight_layout()
figure.savefig(FnameImage, bbox_inches='tight')
matplotlib.pyplot.close(figure)
if not Silent:
print("created %s"%FnameImage)
| 12,106
|
def load_description(model):
"""Load description of the <model>."""
desc = get_available_pkgyaml(model)
entry = read_mlhubyaml(desc)
return entry
| 12,107
|
def timolo():
"""
Main timelapse and/or motion tracking
initialization and logic loop
"""
# Counter for showDots() display if not motion found
# shows system is working
dotCount = 0
checkMediaPaths()
timelapseNumCount = 0
motionNumCount = 0
tlstr = "" # Used to display if timelapse is selected
mostr = "" # Used to display if motion is selected
moCnt = "non"
tlCnt = "non"
daymode = False # Keep track of night and day based on dayPixAve
# Forcing motion if no motion for motionForce time exceeded
forceMotion = False
motionFound = False
takeTimeLapse = True
stopTimeLapse = False
takeMotion = True
stopMotion = False
firstTimeLapse = True
timelapseStart = datetime.datetime.now()
timelapseExitStart = timelapseStart
checkMotionTimer = timelapseStart
startTL = getSchedStart(timelapseStartAt)
startMO = getSchedStart(motionStartAt)
trackLen = 0.0
if spaceTimerHrs > 0:
lastSpaceCheck = datetime.datetime.now()
if timelapseOn:
tlstr = "TimeLapse"
# Check if timelapse subDirs required and create one if none exists
tlPath = subDirChecks(timelapseSubDirMaxHours,
timelapseSubDirMaxFiles,
timelapseDir, timelapsePrefix)
if timelapseNumOn:
timelapseNumCount = getCurrentCount(timelapseNumPath,
timelapseNumStart)
tlCnt = str(timelapseNumCount)
else:
logging.warn("Timelapse is Suppressed per timelapseOn=%s",
timelapseOn)
stopTimeLapse = True
logging.info("Start PiVideoStream ....")
vs = PiVideoStream().start()
vs.camera.rotation = imageRotation
vs.camera.hflip = imageHFlip
vs.camera.vflip = imageVFlip
time.sleep(1)
if motionTrackOn:
mostr = "Motion Tracking"
# Check if motion subDirs are required and
# create one if none exists
moPath = subDirChecks(motionSubDirMaxHours,
motionSubDirMaxFiles,
motionDir,
motionPrefix)
if motionNumOn:
motionNumCount = getCurrentCount(motionNumPath, motionNumStart)
moCnt = str(motionNumCount)
trackTimeout = time.time()
trackTimer = TRACK_TIMEOUT
startPos = []
startTrack = False
image1 = vs.read()
image2 = vs.read()
grayimage1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
else:
image2 = vs.read() # use video stream to check for daymode
logging.info("Motion Tracking is Suppressed per variable motionTrackOn=%s",
motionTrackOn)
stopMotion = True
daymode = checkIfDayStream(daymode, image2)
pixAve = getStreamPixAve(image2)
if timelapseOn and motionTrackOn:
tlstr = " and " + tlstr
displayInfo(moCnt, tlCnt) # Display config.py settings
if logDataToFile:
logging.info("logDataToFile=%s Logging to Console Disabled.",
logDataToFile)
logging.info("Sending Console Messages to %s", logFilePath)
logging.info("Entering Loop for %s%s", mostr, tlstr)
else:
if pluginEnable:
logging.info("plugin %s - Start %s%s Loop ...",
pluginName, mostr, tlstr)
else:
logging.info("Start %s%s Loop ... ctrl-c Exits", mostr, tlstr)
if motionTrackOn and not checkSchedStart(startMO):
logging.info('Motion Track: motionStartAt = "%s"', motionStartAt)
logging.info("Motion Track: Sched Start Set For %s Please Wait ...",
startMO)
if timelapseOn and not checkSchedStart(startTL):
logging.info('Timelapse : timelapseStartAt = "%s"', timelapseStartAt)
logging.info("Timelapee : Sched Start Set For %s Please Wait ...",
startTL)
logging.info("daymode=%s motionDotsOn=%s ", daymode, motionDotsOn)
dotCount = showDots(motionDotsMax) # reset motion dots
while True: # Start main program Loop.
motionFound = False
forceMotion = False
if (motionTrackOn and (not motionNumRecycle)
and (motionNumCount > motionNumStart + motionNumMax)
and (not stopMotion)):
logging.warning("motionNumRecycle=%s and motionNumCount %i Exceeds %i",
motionNumRecycle, motionNumCount,
motionNumStart + motionNumMax)
logging.warn("Suppressing Further Motion Tracking")
logging.warn("To Reset: Change %s Settings or Archive Images",
configName)
logging.warn("Then Delete %s and Restart %s \n",
motionNumPath, progName)
takeMotion = False
stopMotion = True
if stopTimeLapse and stopMotion:
logging.warn("NOTICE: Both Motion and Timelapse Disabled")
logging.warn("per Num Recycle=False and "
"Max Counter Reached or timelapseExitSec Settings")
logging.warn("Change %s Settings or Archive/Save Media Then",
configName)
logging.warn("Delete appropriate .dat File(s) to Reset Counter(s)")
logging.warn("Exiting %s %s \n", progName, progVer)
sys.exit(1)
# if required check free disk space and delete older files (jpg)
if spaceTimerHrs > 0:
lastSpaceCheck = freeDiskSpaceCheck(lastSpaceCheck)
# use image2 to check daymode as image1 may be average
# that changes slowly, and image1 may not be updated
if motionTrackOn:
if daymode != checkIfDayStream(daymode, image2):
daymode = not daymode
image2 = vs.read()
image1 = image2
else:
image2 = vs.read()
else:
image2 = vs.read()
# if daymode has changed, reset background
# to avoid false motion trigger
if daymode != checkIfDayStream(daymode, image2):
daymode = not daymode
pixAve = getStreamPixAve(image2)
rightNow = datetime.datetime.now() # refresh rightNow time
if not timeToSleep(daymode):
# Don't take images if noNightShots
# or noDayShots settings are valid
if timelapseOn and checkSchedStart(startTL):
# Check for a scheduled date/time to start timelapse
if firstTimeLapse:
firstTimeLapse = False
takeTimeLapse = True
else:
takeTimeLapse = checkForTimelapse(timelapseStart)
if ((not stopTimeLapse) and takeTimeLapse and
timelapseExitSec > 0):
if ((datetime.datetime.now() -
timelapseExitStart).total_seconds() >
timelapseExitSec):
logging.info("timelapseExitSec=%i Exceeded.",
timelapseExitSec)
logging.info("Suppressing Further Timelapse Images")
logging.info("To RESET: Restart %s to Restart "
"timelapseExitSec Timer. \n", progName)
# Suppress further timelapse images
takeTimeLapse = False
stopTimeLapse = True
if ((not stopTimeLapse) and timelapseNumOn
and (not timelapseNumRecycle)):
if (timelapseNumMax > 0 and
timelapseNumCount > (timelapseNumStart + timelapseNumMax)):
logging.warn("timelapseNumRecycle=%s and Counter=%i Exceeds %i",
timelapseNumRecycle, timelapseNumCount,
timelapseNumStart + timelapseNumMax)
logging.warn("Suppressing Further Timelapse Images")
logging.warn("To RESET: Change %s Settings or Archive Images",
configName)
logging.warn("Then Delete %s and Restart %s \n",
timelapseNumPath, progName)
# Suppress further timelapse images
takeTimeLapse = False
stopTimeLapse = True
if takeTimeLapse and (not stopTimeLapse):
if motionDotsOn and motionTrackOn:
# reset motion dots
dotCount = showDots(motionDotsMax + 2)
else:
print("")
if pluginEnable:
if timelapseExitSec > 0:
exitSecProgress = (datetime.datetime.now() -
timelapseExitStart).total_seconds()
logging.info("%s Sched TimeLapse daymode=%s Timer=%i sec"
" ExitSec=%i/%i Status",
pluginName, daymode, timelapseTimer,
exitSecProgress, timelapseExitSec)
else:
logging.info("%s Sched TimeLapse daymode=%s"
" Timer=%i sec ExitSec=%i 0=Continuous",
pluginName, daymode,
timelapseTimer, timelapseExitSec)
else:
if timelapseExitSec > 0:
exitSecProgress = (datetime.datetime.now() -
timelapseExitStart).total_seconds()
logging.info("Sched TimeLapse daymode=%s Timer=%i sec"
" ExitSec=%i/%i Status",
daymode, timelapseTimer,
exitSecProgress, timelapseExitSec)
else:
logging.info("Sched TimeLapse daymode=%s Timer=%i sec"
" ExitSec=%i 0=Continuous",
daymode, timelapseTimer,
timelapseExitSec)
imagePrefix = timelapsePrefix + imageNamePrefix
filename = getImageName(tlPath, imagePrefix,
timelapseNumOn, timelapseNumCount)
logging.info("Stop PiVideoStream ...")
vs.stop()
time.sleep(motionStreamStopSec)
# reset time lapse timer
timelapseStart = datetime.datetime.now()
if daymode:
takeDayImage(filename, timelapseCamSleep)
else:
takeNightImage(filename, pixAve)
timelapseNumCount = postImageProcessing(timelapseNumOn,
timelapseNumStart,
timelapseNumMax,
timelapseNumCount,
timelapseNumRecycle,
timelapseNumPath,
filename, daymode)
if timelapseRecentMax > 0:
saveRecent(timelapseRecentMax, timelapseRecentDir,
filename, imagePrefix)
if timelapseMaxFiles > 0:
deleteOldFiles(timelapseMaxFiles, timelapseDir,
imagePrefix)
dotCount = showDots(motionDotsMax)
logging.info("Restart PiVideoStream ....")
vs = PiVideoStream().start()
vs.camera.rotation = imageRotation
vs.camera.hflip = imageHFlip
vs.camera.vflip = imageVFlip
time.sleep(1)
tlPath = subDirChecks(timelapseSubDirMaxHours,
timelapseSubDirMaxFiles,
timelapseDir, timelapsePrefix)
if motionTrackOn and checkSchedStart(startMO) and takeMotion and (not stopMotion):
# IMPORTANT - Night motion tracking may not work very well
# due to long exposure times and low light
image2 = vs.read()
grayimage2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
movePoint1 = trackPoint(grayimage1, grayimage2)
grayimage1 = grayimage2
if movePoint1 and not startTrack:
startTrack = True
trackTimeout = time.time()
startPos = movePoint1
image2 = vs.read()
grayimage2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
movePoint2 = trackPoint(grayimage1, grayimage2)
if movePoint2 and startTrack: # Two sets of movement required
trackLen = trackDistance(startPos, movePoint2)
# wait until track well started
if trackLen > TRACK_TRIG_LEN_MIN:
# Reset tracking timer object moved
trackTimeout = time.time()
if motionTrackInfo:
logging.info("Track Progress From(%i,%i) To(%i,%i) trackLen=%i/%i px",
startPos[0], startPos[1],
movePoint2[0], movePoint2[1],
trackLen, TRACK_TRIG_LEN)
# Track length triggered
if trackLen > TRACK_TRIG_LEN:
# reduce chance of two objects at different positions
if trackLen > TRACK_TRIG_LEN_MAX:
motionFound = False
if motionTrackInfo:
logging.info("TrackLen %i px Exceeded %i px Max Trig Len Allowed.",
trackLen, TRACK_TRIG_LEN_MAX)
else:
motionFound = True
if pluginEnable:
logging.info("%s Motion Triggered Start(%i,%i)"
" End(%i,%i) trackLen=%.i/%i px",
pluginName, startPos[0], startPos[1],
movePoint2[0], movePoint2[1],
trackLen, TRACK_TRIG_LEN)
else:
logging.info("Motion Triggered Start(%i,%i)"
" End(%i,%i) trackLen=%i/%i px",
startPos[0], startPos[1],
movePoint2[0], movePoint2[1],
trackLen, TRACK_TRIG_LEN)
image1 = vs.read()
image2 = image1
grayimage1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
grayimage2 = grayimage1
startTrack = False
startPos = []
trackLen = 0.0
# Track timed out
if (time.time() - trackTimeout > trackTimer) and startTrack:
image1 = vs.read()
image2 = image1
grayimage1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
grayimage2 = grayimage1
if motionTrackInfo:
logging.info("Track Timer %.2f sec Exceeded. Reset Track",
trackTimer)
startTrack = False
startPos = []
trackLen = 0.0
rightNow = datetime.datetime.now()
timeDiff = (rightNow - checkMotionTimer).total_seconds()
if motionForce > 0 and timeDiff > motionForce:
image1 = vs.read()
image2 = image1
grayimage1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
grayimage2 = grayimage1
dotCount = showDots(motionDotsMax + 2) # New Line
logging.info("No Motion Detected for %s minutes. "
"Taking Forced Motion Image.",
(motionForce / 60))
checkMotionTimer = rightNow
forceMotion = True
if motionFound or forceMotion:
imagePrefix = motionPrefix + imageNamePrefix
if motionTrackQuickPic: # Do not stop PiVideoStream
filename = getImageName(moPath,
imagePrefix,
motionNumOn,
motionNumCount)
takeTrackQuickPic(image2, filename)
motionNumCount = postImageProcessing(motionNumOn,
motionNumStart,
motionNumMax,
motionNumCount,
motionNumRecycle,
motionNumPath,
filename, daymode)
if motionRecentMax > 0:
saveRecent(motionRecentMax,
motionRecentDir,
filename,
imagePrefix)
else:
if motionTrackOn:
logging.info("Stop PiVideoStream ...")
vs.stop()
time.sleep(motionStreamStopSec)
checkMotionTimer = rightNow
if forceMotion:
forceMotion = False
# check if motion Quick Time Lapse option is On.
# This option supersedes motionVideoOn
if motionQuickTLOn and daymode:
filename = getImageName(moPath,
imagePrefix,
motionNumOn,
motionNumCount)
with picamera.PiCamera() as camera:
camera.resolution = (imageWidth, imageHeight)
camera.vflip = imageVFlip
camera.hflip = imageHFlip
# valid rotation values 0, 90, 180, 270
camera.rotation = imageRotation
time.sleep(motionCamSleep)
# This uses yield to loop through time lapse
# sequence but does not seem to be faster
# due to writing images
camera.capture_sequence(takeQuickTimeLapse(moPath,
imagePrefix,
motionNumOn,
motionNumCount,
daymode,
motionNumPath))
camera.close()
motionNumCount = getCurrentCount(motionNumPath,
motionNumStart)
else:
if motionVideoOn:
filename = getVideoName(motionPath,
imagePrefix,
motionNumOn,
motionNumCount)
takeVideo(filename, motionVideoTimer,
motionVideoFPS)
else:
filename = getImageName(moPath,
imagePrefix,
motionNumOn,
motionNumCount)
if daymode:
takeDayImage(filename, motionCamSleep)
else:
takeNightImage(filename, pixAve)
motionNumCount = postImageProcessing(motionNumOn,
motionNumStart,
motionNumMax,
motionNumCount,
motionNumRecycle,
motionNumPath,
filename,
daymode)
if motionRecentMax > 0:
if not motionVideoOn:
# prevent h264 video files from
# being copied to recent
saveRecent(motionRecentMax,
motionRecentDir,
filename,
imagePrefix)
if motionTrackOn:
logging.info("Restart PiVideoStream ....")
vs = PiVideoStream().start()
vs.camera.rotation = imageRotation
vs.camera.hflip = imageHFlip
vs.camera.vflip = imageVFlip
time.sleep(1)
image1 = vs.read()
image2 = image1
grayimage1 = cv2.cvtColor(image1,
cv2.COLOR_BGR2GRAY)
grayimage2 = grayimage1
trackLen = 0.0
trackTimeout = time.time()
startPos = []
startTrack = False
forceMotion = False
moPath = subDirChecks(motionSubDirMaxHours,
motionSubDirMaxFiles,
motionDir, motionPrefix)
if motionFound and motionCode:
# ===========================================
# Put your user code in userMotionCode() function
# In the File user_motion_code.py
# ===========================================
try:
user_motion_code.userMotionCode(filename)
dotCount = showDots(motionDotsMax)
except ValueError:
logging.error("Problem running userMotionCode function from File %s",
userMotionFilePath)
else:
# show progress dots when no motion found
dotCount = showDots(dotCount)
| 12,108
|
def test_server_cow_sends_400_response():
"""test cow route sends 400"""
response = requests.get('http://127.0.0.1:3000/cow?msg=hello')
assert response.status_code == 400
assert response.text == 'Incorrect format'
| 12,109
|
def show_statistics(simulation_type, results, client_results, client_counters):
"""
Show statistics from queue.
"""
title = '\n\n\n{} Avg Delays for {} simulation {}'.format(
10 * '*', simulation_type, 10 * '*')
print(title)
if simulation_type == SimType.COM_SIM:
avg_delays, p0_x, p0_y, p0l2, p0l4, p0l6 = calculate_delays_pO_values(results)
avg_delay = calculate_avg_delay_wait(avg_delays)
for lambda_rate in avg_delay:
rate = lambda_rate/c.MI_RATE
avg = avg_delay[lambda_rate]
# Calculate analytical delay.
delay_analytical = 1 / (c.MI_RATE - lambda_rate)
stat = '{}. - rate, {:f} - avg, {:f} - avg analytical '.format(
rate, avg, delay_analytical)
print(stat)
# draw plot
x1 = np.array(p0_x[LAMBDA_RATES[0]])
y1 = np.array(p0_y[LAMBDA_RATES[0]])
p1 = np.poly1d(np.polyfit(x1, y1, 3))
x2 = np.array(p0_x[LAMBDA_RATES[1]])
y2 = np.array(p0_y[LAMBDA_RATES[1]])
p2 = np.poly1d(np.polyfit(x2, y2, 3))
x3 = np.array(p0_x[LAMBDA_RATES[2]])
y3 = np.array(p0_y[LAMBDA_RATES[2]])
p3 = np.poly1d(np.polyfit(x3, y3, 3))
t = np.linspace(0, 200, 200)
xp0 = [0, 200]
yp0l2 = [p0l2, p0l2]
yp0l4 = [p0l4, p0l4]
yp0l6 = [p0l6, p0l6]
mpl.plot(t, p1(t), 'g-', t, p2(t), 'b-', t, p3(t), 'r-', xp0, yp0l2, 'g-o', xp0, yp0l4, 'b-o', xp0, yp0l6,
'r-o')
mpl.xlabel('Time')
mpl.ylabel('Probability')
mpl.title("p0(t) for lambda = [2.0, 4.0, 6.0]")
mpl.legend(
['p0(t) for lambda 2.0', 'p0(t) for lambda 4.0', 'p0(t) for lambda 6.0', 'p0 for lambda 2.0',
'p0 for lambda 4.0',
'p0 for lambda 6.0'], prop={'size': 5}, loc='lower right')
elif simulation_type == SimType.CON_SIM:
print()
qu_times, avg_delays = calculate_avg_time_in_queue(results)
avg_delay = calculate_avg_delay_wait(avg_delays)
avg_qu = calculate_avg_delay_wait(qu_times)
cl_stats = calculate_client_stats(client_results)
cl_counter_stats = calculate_client_counters(client_counters)
for lambda_rate in avg_delay:
avg_d = avg_delay[lambda_rate]
avg_q = avg_qu[lambda_rate]
# Calculate analytical delay and wait time.
rate = lambda_rate / c.MI_RATE
delay_analytical = ((2 - rate) * rate) / (lambda_rate * (1 - rate))
wait_analytical = rate / (lambda_rate * (1 - rate))
stat = '\n{}. - rate, {:f} - avg_delay, {:f} - avg_delay_analytical'.format(
rate, avg_d, delay_analytical)
print(stat)
stat = '{:f} - avg_wait, {:f} - avg_wait_analytical'.format(
avg_q, wait_analytical)
print(stat)
# Calculate analytical clients rates.
in_sys, in_q = (
cl_stats[lambda_rate]["in_system"], cl_stats[lambda_rate]["in_queue"])
an_nr_clients_in_queue = rate / (1 - rate)
an_nr_clients_in_system = ((2 - rate) * rate) / (1 - rate)
stat = '{:f} - avg_client_in_system, {:f} - avg_client_in_system_analytical'.format(
in_sys, an_nr_clients_in_system)
print(stat)
stat = '{:f} - avg_client_in_queue, {:f} - avg_client_in_queue_analytical'.format(
in_q, an_nr_clients_in_queue)
print(stat)
# Calculate clients counters.
im_clients = cl_counter_stats[lambda_rate][ClientType.IMAGINED_CLIENT]
real_clients = cl_counter_stats[lambda_rate][ClientType.REAL_CLIENT]
im_probability = im_clients / (im_clients + real_clients)
stat = '{:f} - imaginary client probability\n'.format(im_probability)
print(stat)
| 12,110
|
def generate_star(rect: RectType, line_count: int = 20) -> vp.LineCollection:
"""Generate a set of line from a random point."""
orig_x = np.random.uniform(rect[0], rect[0] + rect[2])
orig_y = np.random.uniform(rect[1], rect[1] + rect[3])
r = math.hypot(rect[2], rect[3])
angles = np.linspace(0, 2 * math.pi, num=line_count, endpoint=False)
phase = np.random.normal(0, math.pi / 4)
mls = MultiLineString(
[
([orig_x, orig_y], [orig_x + r * math.cos(a), orig_y + r * math.sin(a)])
for a in angles + phase
]
)
return vp.LineCollection(mls.intersection(rect_to_polygon(rect)))
| 12,111
|
def failure(request):
"""Display failure message"""
return HttpResponse(f"Failure! {request.session['failure message'] if request.session['failure message'] is not None else ''}")
| 12,112
|
def visualize_image(cam, rgb_img, target_category):
"""
Visualize output for given image
"""
input_tensor = preprocess_image(rgb_img)
grayscale_cam = cam(input_tensor=input_tensor, target_category=target_category)
grayscale_cam = grayscale_cam[0, :]
output = cam.activations_and_grads(input_tensor)
softmax = torch.nn.Softmax(dim = 1)
print("PRED: ", softmax(output).tolist())
visualization = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)
return visualization
| 12,113
|
def read_csr_matrix(f):
"""Read A in compressed sparse row format from file f. Return dense ndarray.
Text file format:
- one number per line
- First number is m = number of rows
- Second number is n = number of columns
- Next m+1 numbers are the row pointers (int, zero-based)
- Next nnz numbers are the column indices (int, zero-based)
- Next nnz numbers are the matrix values (float)
"""
logging.debug("attempting to read matrix in CSR format...")
# strip comment header
row = next(f).strip()
while row[0] == '#':
row = next(f).strip()
# read shape
m = int(row)
row = next(f).strip()
n = int(row)
logging.debug("attempting to read (%s x %s) matrix in CSR format...", m, n)
# read m+1 row pointers
counter = 0
rowPointers = np.empty(m + 1, dtype=int) # we store nnz in the last place
for i in range(m + 1):
rowPointers[i] = int(next(f).strip())
nnz = rowPointers[m]
# read nnz column indices
colIndices = np.empty(nnz, dtype=int)
for i in range(nnz):
colIndices[i] = int(next(f).strip())
if colIndices[i] >= n:
errMsg = "Inconsistent dims, col idx %d > %d" % (colIndices[i], n)
logging.error(errMsg)
raise RuntimeError(errMsg)
# read nnz values
values = np.empty(nnz)
for i in range(nnz):
values[i] = float(next(f).strip())
# populate matrix
res = np.zeros((m, n))
for i in range(m):
for nzi in range(rowPointers[i], rowPointers[i + 1]):
res[i, colIndices[nzi]] = values[nzi]
logging.info(
"successfully read (%s x %s) matrix in CSR format with nnz %s.",
m, n, nnz)
return res
| 12,114
|
def test_analysis():
"""Test the full analysis of a portfolio."""
asset1_close = [22.9, 24.8, 40.3]
asset2_close = [101.3, 129.9, 104.5]
analysis = Analysis.PortfolioAnalysis(Analysis.Portfolio([Datasource.DataSource(asset1_close, "Asset 1"),
Datasource.DataSource(asset2_close, "Asset 2")]))
assert analysis is not None
assert analysis.get_portfolio() is not None
assert analysis.get_portfolio().num_assets() == 2
biscan = analysis.scan_asset_pairs()
assert type(biscan) is list
assert len(biscan) == 100
minvar = analysis.minimum_variance_montecarlo()
assert type(minvar) is tuple
assert len(minvar) == 2
| 12,115
|
def _get_random_hangul(count=(0xd7a4 - 0xac00)):
"""Generate a sequence of random, unique, valid Hangul characters.
Returns all possible modern Hangul characters by default.
"""
valid_hangul = [chr(_) for _ in range(0xac00, 0xd7a4)]
return random.sample(valid_hangul, count)
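# Illustrative usage (assumes `import random`): five random Hangul syllables.
print(''.join(_get_random_hangul(5)))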
| 12,116
|
def test_prepare_for_install_on_installed(install_mockery, monkeypatch):
"""Test of _prepare_for_install's early return for installed task path."""
const_arg = installer_args(['dependent-install'], {})
installer = create_installer(const_arg)
request = installer.build_requests[0]
install_args = {'keep_prefix': True, 'keep_stage': True, 'restage': False}
task = create_build_task(request.pkg, install_args)
installer.installed.add(task.pkg_id)
monkeypatch.setattr(inst.PackageInstaller, '_ensure_install_ready', _noop)
installer._prepare_for_install(task)
| 12,117
|
def test_execute(fake: Faker, Context: MagicMock = None, **kwargs) -> None:
"""Test the Execution of a Chain.
Ensures that we can execute a chain given a specific context.
"""
# Given
context = Context()
prev_args, prev_kwargs = (tuple(), dict())
function = MagicMock(return_value=None)
context_merge = MagicMock()
chain_split_output = MagicMock(return_value=(prev_args, prev_kwargs))
chain = Chain(function)
prop_context_merge = PropertyMock(return_value=context_merge)
prop_context_output = PropertyMock(return_value=fake.word())
chain.initial_state = fake.word()
chain._Chain__split_output = chain_split_output
type(context).output = prop_context_output
type(context).merge_context = prop_context_merge
# When
result = chain.execute(context=context)
# Then
chain_split_output.assert_called_once_with(context.output)
context_merge.assert_called_once_with(chain.initial_state)
function.assert_called_once_with(*prev_args, **prev_kwargs, context=context.current)
assert result == context
| 12,118
|
def build(dir, **kwargs):
"""run cmake to generate a project buildsystem
Parameters:
----------
dir str: Location of the CMake build directory
Keyword Args:
----------
parallel int: The maximum number of concurrent processes to use when building. Default: 1 less than
the number of available logical cores.
target str: Path to directory which CMake will use as the root of build directory.
config str: For multi-configuration tools, choose specified configuration
flags seq(str): Sequence of flags (or any other unlisted argument). Include preceding dash(es).
tooloptions seq(str): Sequence of options to be passed onto the build tool
env: A mapping that defines the environment variables for the new process
"""
# prune empty entries
kwargs = {key: value for key, value in kwargs.items() if value}
# add defaults if not specified
if not "parallel" in kwargs:
kwargs["parallel"] = _getWorkerCount()
# build cmake arguments
args = [findexe("cmake"), "--build", dir]
env = None
for key, value in kwargs.items():
if key in ("parallel", "target", "config"):
args.append(f"--{key}")
args.append(f"{value}")
elif key == "flags":
for f in value:
args.append(f)
elif key == "env":
env = value
elif key is not "tooloptions":
raise KeyError
if "tooloptions" in kwargs:
args.append("--")
for f in value:
args.append(f)
return run(args, env=env).check_returncode()
| 12,119
|
def is_builtin_model(target: type) -> bool:
"""returns ``True`` if the given type is a model subclass"""
return is_builtin_class_except(target, ["MetaModel", "Model", "DataBag"])
| 12,120
|
def runQ(qparsed, params=dict(), nbcircuits=1, nbqubits = None):
"""
qparsed: qlang circuit (already parsed)
params:{x:np.array, t:np.array}
"""
#*** verify if parameters are ok for qparsed circuit ****
_ , vector_parameters = parseqlang.parameters_from_gates(qparsed)
for pname, dimension in vector_parameters.items():
if pname not in params:
raise Exception(f'Vector parameter "{pname}" not provided')
if params[pname].shape[0] != dimension:
raise Exception(f"Vector parameter {pname} is of dimension {dimension} but %d are provided"%params[pname].shape[0])
if len(params[pname].shape)==1: nb_rows = 1
else: nb_rows =params[pname].shape[1]
if nbcircuits==1 and nb_rows>1: nbcircuits= nb_rows
elif nbcircuits != nb_rows and nb_rows != 1:
raise Exception(f"{pname}: got {nb_rows} rows ({nbcircuits} expected)")
#*** determine nb qubits ****
qbits = parseqlang.nbqubits_from_gates(qparsed)
if(nbqubits==None): nbqubits = qbits
elif nbqubits<qbits: raise Exception(f"{nbqubits} qubits asked, but {qbits} qubits are needed")
#*** run circuit(s) with manyq ****
initQreg(nbqubits,n=nbcircuits)
for gate in qparsed:
gname = gate[0]
gparam = gate[1]
qbit0 = gparam[0]
# print(gate)
# print(f"qbit0: {qbit0}")
if gname in {"CZ","CX"}:
qbit1 = gparam[1]
# print(f"Running {gname}({qbit0},{qbit1})")
globals()[gname](qbit0,qbit1)
continue
pname = gparam[1][0]
pindex = gparam[1][1]
# print(f"Running {gname}({qbit0},{pname}_{pindex})")
globals()[gname](qbit0,params.get(pname)[pindex])
return Qreg.outQ
| 12,121
|
def add_CNNB_loss_v2(true_labels,
hidden,
embed_model,
bsz=512,
dataset='imagenet2012',
hidden_norm=True,
temperature=1.0,
strategy=None,
loss_type='ce',
clip_min=0,
method='onehot'):
"""Compute loss for model.
Args:
true_labels: vector of labels.
hidden: hidden vector (`Tensor`) of shape (bsz, dim).
hidden_norm: whether or not to use normalization on the hidden vector.
temperature: a `floating` number for temperature scaling.
strategy: context information for tpu.
Returns:
A loss scalar.
The logits for contrastive prediction task.
The labels for contrastive prediction task.
"""
# Get (normalized) hidden1 and hidden2.
if hidden_norm:
hidden = tf.math.l2_normalize(hidden, -1)
hidden1, hidden2 = tf.split(hidden, 2, 0)
batch_size = tf.shape(hidden1)[0]
# Gather hidden1/hidden2 across replicas and create local labels.
if strategy is not None:
hidden1_large = tpu_cross_replica_concat(hidden1, strategy)
hidden2_large = tpu_cross_replica_concat(hidden2, strategy)
enlarged_batch_size = tf.shape(hidden1_large)[0]
# TODO(iamtingchen): more elegant way to convert u32 to s32 for replica_id.
replica_context = tf.distribute.get_replica_context()
reps = strategy.num_replicas_in_sync
sims=get_batch_sims(true_labels, embed_model, bsz//reps, dataset, method)
sims=tf.cast(sims > clip_min, sims.dtype) * sims
#sims.set_shape([512//reps, 512//reps])
replica_id = tf.cast(
tf.cast(replica_context.replica_id_in_sync_group, tf.uint32), tf.int32)
labels_idx = tf.range(batch_size) + replica_id * batch_size
labels1=tf.concat([sims if i==replica_id else tf.zeros(sims.shape) for i in range(reps)],1)
labels2=tf.concat([sims-tf.linalg.diag(tf.linalg.diag_part(sims)) if i==replica_id else tf.zeros(sims.shape) for i in range(reps)],1)
labels=tf.concat([labels1,labels2],1)
masks = tf.one_hot(labels_idx, enlarged_batch_size)
else:
#sims.set_shape([batch_size, batch_size])
sims=get_batch_sims(true_labels, embed_model, bsz, dataset, method)
sims=tf.cast(sims > clip_min, sims.dtype) * sims
hidden1_large = hidden1
hidden2_large = hidden2
labels=tf.concat([sims,sims-tf.linalg.diag(tf.linalg.diag_part(sims))],1)
masks = tf.one_hot(tf.range(batch_size), batch_size)
slabels=tf.split(labels, 2, axis=1)
#Calculate similarity between hidden representations from aug1 and from aug1
logits_aa = tf.matmul(hidden1, hidden1_large, transpose_b=True) / temperature
# tf.print(true_labels)
# tf.print(logits_aa)
#Calculate similarity between hidden representations from aug2 and from aug2
logits_bb = tf.matmul(hidden2, hidden2_large, transpose_b=True) / temperature
if loss_type not in ['fro']:
#Mask out entries corresponding to diagonal (self-similarity) so they are 0 once softmaxed
logits_aa = logits_aa - masks * LARGE_NUM
#Mask out entries corresponding to diagonal (self-similarity) so they are 0 once softmaxed
logits_bb = logits_bb - masks * LARGE_NUM
else:
logits_aa = logits_aa - masks * logits_aa
logits_bb = logits_bb - masks * logits_bb
#Calculate similarity between hidden representations from aug1 and from aug2
logits_ab = tf.matmul(hidden1, hidden2_large, transpose_b=True) / temperature
#Calculate similarity between hidden representations from aug2 and from aug1
#-> identical to above case if using single GPU
logits_ba = tf.matmul(hidden2, hidden1_large, transpose_b=True) / temperature
#Calculate loss for aug1 samples by taking softmax over logits and then applying cross_entropy
# tf.print(slabels[0].shape)
# tf.print(slabels[1].shape)
# tf.print(logits_ab.shape)
# tf.print(logits_aa.shape)
if loss_type=='ce':
loss_fn = tf.nn.softmax_cross_entropy_with_logits
loss_a = tf.reduce_mean(loss_fn(slabels[0],logits_ab)+loss_fn(slabels[1]-masks*slabels[1],logits_aa))
loss_b = tf.reduce_mean(loss_fn(slabels[0],logits_ba)+loss_fn(slabels[1]-masks*slabels[1],logits_bb))
elif loss_type=='softmax-ce':
loss_fn = tf.nn.softmax_cross_entropy_with_logits
slabels[0]=tf.nn.softmax(slabels[0]/temperature)
slabels[1]=tf.nn.softmax((slabels[1]/temperature)-masks*LARGE_NUM)
loss_a = tf.reduce_mean(loss_fn(slabels[0],logits_ab)+loss_fn(slabels[1],logits_aa))
loss_b = tf.reduce_mean(loss_fn(slabels[0],logits_ba)+loss_fn(slabels[1],logits_bb))
elif loss_type=='kl': # Consider softmaxing labels here
loss_fn = KLDivergence(tf.keras.losses.Reduction.NONE)
loss_a = tf.reduce_mean(loss_fn(slabels[0],tf.nn.softmax(logits_ab))+loss_fn(slabels[1]-masks*slabels[1],tf.nn.softmax(logits_aa)))
loss_b = tf.reduce_mean(loss_fn(slabels[0],tf.nn.softmax(logits_ba))+loss_fn(slabels[1]-masks*slabels[1],tf.nn.softmax(logits_bb)))
elif loss_type=='klsoft':
loss_fn = KLDivergence(tf.keras.losses.Reduction.NONE)
slabels[0]=tf.nn.softmax(slabels[0]/temperature)
slabels[1]=tf.nn.softmax((slabels[1]/temperature)-masks*LARGE_NUM)
loss_a = tf.reduce_mean(loss_fn(slabels[0],tf.nn.softmax(logits_ab))+loss_fn(slabels[1],tf.nn.softmax(logits_aa)))
loss_b = tf.reduce_mean(loss_fn(slabels[0],tf.nn.softmax(logits_ba))+loss_fn(slabels[1],tf.nn.softmax(logits_bb)))
elif loss_type=='fro': #Consider softmaxing labels here
loss_fn=tf.norm
loss_a = tf.reduce_mean(loss_fn(slabels[0]-logits_ab, ord='fro', axis=(0,1))+loss_fn(slabels[1]-logits_aa, ord='fro', axis=(0,1)))
loss_b = tf.reduce_mean(loss_fn(slabels[0]-logits_ba, ord='fro', axis=(0,1))+loss_fn(slabels[1]-logits_bb, ord='fro', axis=(0,1)))
loss = tf.reduce_mean(loss_a + loss_b)
return loss, logits_ab, labels
| 12,122
|
def create_container_group_multi(aci_client, resource_group,
container_group_name,
container_image_1, container_image_2):
"""Creates a container group with two containers in the specified
resource group.
Arguments:
aci_client {azure.mgmt.containerinstance.ContainerInstanceManagementClient}
-- An authenticated container instance management client.
resource_group {azure.mgmt.resource.resources.models.ResourceGroup}
-- The resource group in which to create the container group.
container_group_name {str}
-- The name of the container group to create.
container_image_1 {str}
-- The first container image name and tag, for example:
microsoft/aci-helloworld:latest
container_image_2 {str}
-- The second container image name and tag, for example:
microsoft/aci-tutorial-sidecar:latest
"""
print("Creating container group '{0}'...".format(container_group_name))
# Configure the containers
container_resource_requests = ResourceRequests(memory_in_gb=2, cpu=1.0)
container_resource_requirements = ResourceRequirements(
requests=container_resource_requests)
container_1 = Container(name=container_group_name + '-1',
image=container_image_1,
resources=container_resource_requirements,
ports=[ContainerPort(port=80)])
container_2 = Container(name=container_group_name + '-2',
image=container_image_2,
resources=container_resource_requirements)
# Configure the container group
ports = [Port(protocol=ContainerGroupNetworkProtocol.tcp, port=80)]
group_ip_address = IpAddress(
ports=ports, dns_name_label=container_group_name, type='Public')
group = ContainerGroup(location=resource_group.location,
containers=[container_1, container_2],
os_type=OperatingSystemTypes.linux,
ip_address=group_ip_address)
# Create the container group
aci_client.container_groups.create_or_update(resource_group.name,
container_group_name, group)
# Get the created container group
container_group = aci_client.container_groups.get(resource_group.name,
container_group_name)
print("Once DNS has propagated, container group '{0}' will be reachable at"
" http://{1}".format(container_group_name,
container_group.ip_address.fqdn))
| 12,123
|
def simulation_wells(self):
"""Get a list of all simulation wells for a case
Returns:
:class:`rips.generated.generated_classes.SimulationWell`
"""
wells = self.descendants(SimulationWell)
return wells
| 12,124
|
def getSmartMeter() -> Optional[str]:
"""Return smartmeter name used in recording."""
mapping = getDeviceMapping()
# Identifier for smartmeter is meter with phase 0
try: return next(key for key in mapping if mapping[key]["phase"] == 0)
except StopIteration: return None
| 12,125
|
def olbp(array, point):
"""Perform simple local binary pattern calculation with a fixed
3x3 neighbourhood. Thanks to:
http://www.bytefish.de/blog/local_binary_patterns/
for a really nice explanation of LBP.
Won't return correct results around the image boundaries.
Because it's only a 3x3 neighbourhood, probably very susceptible
to noise.
TODO: Bigger neighbourhood. Variable size maybe?
Returns: A single decimal number (the encoded pattern)
"""
x, y = point
# Make sure we're within the array bounds.
if x < 1:
x = 1
if x > (array.shape[0] - 2):
x = array.shape[0] - 2
if y < 1:
y = 1
if y > (array.shape[1] - 2):
y = array.shape[1] - 2
center = array[x, y]
code = 0
code |= (array[x - 1, y - 1] > center) << 7
code |= (array[x - 1, y] > center) << 6
code |= (array[x - 1, y + 1] > center) << 5
code |= (array[x, y - 1] > center) << 4
code |= (array[x, y + 1] > center) << 3
code |= (array[x + 1, y - 1] > center) << 2
code |= (array[x + 1, y] > center) << 1
code |= (array[x + 1, y + 1] > center) << 0
return code
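# Minimal sketch (assumes `import numpy as np`): encode the pattern at the centre
# of a 3x3 array; each neighbour brighter than the centre sets one bit.
img = np.array([[9, 1, 9],
[1, 5, 1],
[9, 1, 9]])
print(olbp(img, (1, 1)))  # 0b10100101 == 165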
| 12,126
|
def handle_ref(p):
"""
References
"""
if p.text:
if is_bold(p): # heading
reference + PRef(R16(p.text))
else:
reference + PRef(p.text)
else:
pass
| 12,127
|
def view_index(
request: http.HttpRequest,
workflow: Optional[models.Workflow] = None,
) -> http.HttpResponse:
"""Render the list of views attached to a workflow.
:param request: Http request received.
:param workflow: Workflow being processed
:return: HTTP response with the table
"""
# Get the views
views = workflow.views.values(
'id',
'name',
'description_text',
'modified')
# Build the table only if there is anything to show (prevent empty table)
return render(
request,
'table/view_index.html',
{
'query_builder_ops': workflow.get_query_builder_ops_as_str(),
'table': services.ViewTable(views, orderable=False),
},
)
| 12,128
|
def inverseLPSB(data, mu, g):
"""Compute regularized L-PSB step."""
mUpd = data.mUpd
gamma = data.gamma
gammah = data.gamma + mu
Q22 = np.tril(data.STY[:mUpd, :mUpd], -1) + np.tril(data.STY[:mUpd, :mUpd], -1).T + \
np.diag(np.diag(data.STY[:mUpd, :mUpd])) + \
gamma * np.diag(np.diag(data.STS[:mUpd, :mUpd]))
Q = np.block([
[np.zeros((mUpd, mUpd)), np.triu(data.STS[:mUpd, :mUpd])],
[np.triu(data.STS[:mUpd, :mUpd]).T, Q22]
])
Q += 1/gammah * np.block([
[data.STS[:mUpd, :mUpd], data.STY[:mUpd, :mUpd]],
[data.STY[:mUpd, :mUpd].T, data.YTY[:mUpd, :mUpd]]
])
ATg = np.block([data.S[:, :mUpd].T @ g, data.Y[:, :mUpd].T @ g])
p = np.linalg.solve(Q, ATg)
#p = scipy.linalg.solve(Q, ATg, assume_a='sym')
Ap = data.S[:, :mUpd] @ p[:mUpd] + data.Y[:, :mUpd] @ p[mUpd:]
d = 1/gammah**2 * Ap - 1/gammah * g
return d
| 12,129
|
def canPlay(request):
"""
Endpoint that returns the list of cards that can be played (for the Player)
rq : {
"cards_played" : [
{
"card_name": "As",
"value_non_atout": 0,
"value_atout": 0,
"id" : "A"
},
{
"card_name": "7s",
"value_non_atout": 0,
"value_atout": 0,
"id" : "7"
},
{
"card_name": "8s",
"value_non_atout": 0,
"value_atout": 0,
"id" : "8"
}
],
"atout" : "c",
"opening_color" : "s",
"remaining_cards": [
{
"card_name": "7d",
"value_non_atout": 0,
"value_atout": 0,
"id":"7"
},
{
"card_name": "Kh",
"value_non_atout": 4,
"value_atout": 4,
"id":"K"
},
{
"card_name": "Ks",
"value_non_atout": 4,
"value_atout": 4,
"id":"K"
},
{
"card_name": "Ac",
"value_non_atout": 11,
"value_atout": 11,
"id":"A"
},
{
"card_name": "9c",
"value_non_atout": 0,
"value_atout": 14,
"id":"9"
}
]
}
"""
body = json.loads(request.body)
cards_played = body['cards_played']
remaining_cards = body['remaining_cards']
opening_color = body['opening_color']
atout = body['atout']
can_play = []
cards_atout = []
order_no_a = ['7','8','9','J','Q','K','10','A']
order_a = ['7','8','Q','K','10','A','9','J']
if cards_played:
if opening_color == atout:
for x in remaining_cards:
if opening_color in x['card_name']:
cards_atout.append(x)
if not cards_atout:
can_play=remaining_cards
else:
max=0
if len(cards_played)==1:
max=order_a.index(cards_played[0]['idc'])
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
elif len(cards_played)==2:
max = order_a.index(cards_played[0]['idc'])
if atout in cards_played[1]['card_name']:
if order_a.index(cards_played[1]['idc']) > max :
max = order_a.index(cards_played[1]['idc'])
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
max = order_a.index(cards_played[0]['idc'])
if atout in cards_played[1]['card_name']:
if order_a.index(cards_played[1]['idc']) > max :
max = order_a.index(cards_played[1]['idc'])
if atout in cards_played[2]['card_name']:
if order_a.index(cards_played[2]['idc']) > max :
max = order_a.index(cards_played[2]['idc'])
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
if atout in cards_played[2]['card_name']:
if order_a.index(cards_played[2]['idc']) > max :
max = order_a.index(cards_played[2]['idc'])
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
if atout in cards_played[2]['card_name']:
if order_a.index(cards_played[2]['idc']) > max :
max = order_a.index(cards_played[2]['idc'])
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for x in remaining_cards:
if opening_color in x['card_name']:
can_play.append(x)
if not can_play:
i=0
for x in remaining_cards:
if atout in x['card_name']:
i+=1
cards_atout.append(x)
if i==0:
can_play=remaining_cards
else:
# The player holds a trump card; check which card is currently winning the trick
if len(cards_played)==3:
max=0
if atout in cards_played[1]['card_name']:
max = order_a.index(cards_played[1]['idc'])
if atout in cards_played[2]['card_name']:
if order_a.index(cards_played[2]['idc']) > max :
max = order_a.index(cards_played[2]['idc'])
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
can_play=remaining_cards
else:
if atout in cards_played[2]['card_name']:
max = order_a.index(cards_played[2]['idc'])
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
if order_no_a.index(cards_played[2]['idc'])<order_no_a.index(cards_played[1]['idc']) and order_no_a.index(cards_played[1]['idc']) >order_no_a.index(cards_played[0]['idc']):
can_play=remaining_cards
else:
can_play=cards_atout
elif len(cards_played)==1:
can_play=cards_atout
else:
max=0
if atout in cards_played[1]['card_name']:
max = order_a.index(cards_played[1]['idc'])
for e in cards_atout:
if order_a.index(e['idc']) > max:
can_play.append(e)
if not can_play:
can_play=cards_atout
else:
if order_no_a.index(cards_played[1]['idc'])<order_no_a.index(cards_played[0]['idc']):
can_play=remaining_cards
else:
can_play=cards_atout
else:
can_play=remaining_cards
return Response(can_play)
| 12,130
|
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Genie Aladdin component."""
return True
| 12,131
|
def _module_available(module_path: str) -> bool:
"""Testing if given module is avalaible in your env
>>> _module_available('os')
True
>>> _module_available('bla.bla')
False
"""
mods = module_path.split('.')
assert mods, 'nothing given to test'
# it has to be tested part by part
for i in range(len(mods)):
module_path = '.'.join(mods[:i + 1])
if importlib.util.find_spec(module_path) is None:
return False
return True
| 12,132
|
def delete_file_with_object(instance, **kwargs):
"""
Deletes files from system when UploadedFile object is deleted from database
:param instance: UploadedFile object (file that is being deleted)
:param kwargs:
:return:
"""
instance.file.delete()
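# Hypothetical wiring as a Django post_delete receiver; `UploadedFile` is assumed
# to be the model whose file should be cleaned up when a row is deleted.
from django.db.models.signals import post_delete
post_delete.connect(delete_file_with_object, sender=UploadedFile)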
| 12,133
|
def stale_token():
"""
Handler function for a no more fresh token
"""
message = 'The JWT Token is not fresh. Please request a new Token directly with the /auth resource.'
log_unauthorized(message)
abort(403, message)
| 12,134
|
def _GetPathBeforeFinalDir(uri):
"""
Returns the part of the path before the final directory component for the
given URI, handling cases for file system directories, bucket, and bucket
subdirectories. Example: for gs://bucket/dir/ we'll return 'gs://bucket',
and for file://dir we'll return file://
Args:
uri: StorageUri.
Returns:
String name of above-described path, sans final path separator.
"""
sep = uri.delim
# If the source uri argument had a wildcard and wasn't expanded by the
# shell, then uri.names_file() will always be true, so we check for
# this case explicitly.
assert ((not uri.names_file()) or ContainsWildcard(uri.object_name))
if uri.names_directory():
past_scheme = uri.uri[len('file://'):]
if past_scheme.find(sep) == -1:
return 'file://'
else:
return 'file://%s' % past_scheme.rstrip(sep).rpartition(sep)[0]
if uri.names_bucket():
return '%s://' % uri.scheme
# Else it names a bucket subdir.
return uri.uri.rstrip(sep).rpartition(sep)[0]
| 12,135
|
def extend_requests(request_id=None, workload_id=None, lifetime=30, session=None):
"""
extend an request's lifetime.
:param request_id: The id of the request.
:param workload_id: The workload_id of the request.
:param lifetime: The lifetime as a number of days.
:param session: The database session in use.
:raises NoObject: If no request is found.
:raises DatabaseException: If there is a database error.
"""
try:
query = session.query(models.Request)
if request_id:
query = query.filter_by(request_id=request_id)
else:
query = query.filter_by(workload_id=workload_id)
update_items = {'expired_at': datetime.datetime.utcnow() + datetime.timedelta(days=lifetime)}
query.update(update_items, synchronize_session=False)
except sqlalchemy.orm.exc.NoResultFound as error:
raise exceptions.NoObject('Request (workload_id: %s) cannot be found: %s' % (workload_id, error))
| 12,136
|
def iter_key_path_items(d, key_path_prefix=None, path_sep='.'):
"""
iterate through items of dict recursively, yielding (key_path, val) pairs for all nested values that are not dicts.
That is, if a value is a dict, it won't generate a yield, but rather, will be iterated through recursively.
:param d: input dict
:param key_path_prefix: string to be prepended to all key paths (for use in recursion, not meant for direct use)
:return: a (key_path, val) iterator
>>> input_dict = {
... 'a': {
... 'a': 'a.a',
... 'b': 'a.b',
... 'c': {
... 'a': 'a.c.a'
... }
... },
... 'b': 'b',
... 'c': 3
... }
>>> list(iter_key_path_items(input_dict))
[('a.a', 'a.a'), ('a.b', 'a.b'), ('a.c.a', 'a.c.a'), ('b', 'b'), ('c', 3)]
"""
if key_path_prefix is None:
for k, v in d.items():
if not isinstance(v, dict):
yield k, v
else:
for kk, vv in iter_key_path_items(v, k, path_sep):
yield kk, vv
else:
for k, v in d.items():
if not isinstance(v, dict):
yield key_path_prefix + path_sep + k, v
else:
for kk, vv in iter_key_path_items(v, k, path_sep):
yield key_path_prefix + path_sep + kk, vv
| 12,137
|
def fulfil_defaults_for_data_model_identifiers(
data_model_identifiers,
context
):
"""
Intelligently set default entries for partially specified data model identifiers.
:param data_model_identifiers:
An list (or iterable) of dictionaries, where each dictionary contains keyword arguments
to specify a data model product.
:param context:
The Airflow context dictionary. This is only used to infer the 'release' context,
if it is not given, based on the execution date.
:returns:
A list of data model identifiers, where all required parameters are provided.
:raises RuntimeError:
If all data model identifiers could not be fulfilled.
"""
try:
releases = infer_releases(context["ds"], context["next_ds"])
except:
log.exception(f"Could not infer release from context {context}")
default_release = None
else:
# Take the 'most recent' release.
default_release = releases[-1]
trees = {}
defaults = {
"sdss5": {
"apStar": {
"apstar": "stars",
"apred": "daily",
"telescope": lambda obj, **_: "apo25m" if "+" in obj else "lco25m",
"healpix": lambda obj, **_: str(healpix(obj)),
}
}
}
for dmi in data_model_identifiers:
try:
filetype = dmi["filetype"]
except KeyError:
raise KeyError(f"no filetype given for data model identifiers {dmi} "
f"-- set 'filetype': 'full' and use 'full': <PATH> to set explicit path")
except:
raise TypeError(f"data model identifiers must be dict-like object (not {type(dmi)}: {dmi}")
source = dmi.copy()
release = source.setdefault("release", default_release)
try:
tree = trees[release]
except KeyError:
trees[release] = tree = SDSSPath(release=release)
missing_keys = set(tree.lookup_keys(filetype)).difference(dmi)
for missing_key in missing_keys:
try:
default = defaults[release][filetype][missing_key]
except KeyError:
raise RuntimeError(f"no default function found for {missing_key} for {release} / {filetype}")
if callable(default):
default = default(**source)
log.warning(f"Filling '{missing_key}' with default value '{default}' for {source}")
source[missing_key] = default
yield source
| 12,138
|
def _add_default_arguments(parser: argparse.ArgumentParser):
"""Add the default arguments username, password, region to the parser."""
parser.add_argument("username", help="Connected Drive username")
parser.add_argument("password", help="Connected Drive password")
parser.add_argument("region", choices=valid_regions(), help="Region of the Connected Drive account")
| 12,139
|
def load_page_details(data, filename=None):
"""
# Raises
ValueError of (filename, error)
"""
try:
options = toml.loads(data)
except toml.TomlDecodeError as exc:
raise ValueError(filename, exc)
if not isinstance(options, dict):
raise ValueError(filename, 'page details could not be parsed into a JSON object')
return options
| 12,140
|
def nested_render(cfg, fully_rendered_cfgs, replacements):
"""
Template render the provided cfg by recursively replacing {{var}}'s with values
from the current "namespace".
The nested config is treated like nested namespaces where the inner variables
are only available in current block and further nested blocks.
Said the opposite way: the namespace with available vars that can be used
includes the current block's vars and parent block vars.
This means that you can do replacements for top-level
(global namespaced) config vars anywhere, but you can only use inner configs within
that block or further nested blocks.
An example is worth a thousand words:
---------------------------------------------------------------------------------
fence-config.yaml
--------------------------------------------------------------------------------
BASE_URL: 'http://localhost/user'
OPENID_CONNECT:
fence:
api_base_url: 'http://other_fence/user'
client_kwargs:
redirect_uri: '{{BASE_URL}}/login/fence/login'
authorize_url: '{{api_base_url}}/oauth2/authorize'
THIS_WONT_WORK: '{{api_base_url}}/test'
--------------------------------------------------------------------------------
"redirect_uri" will become "http://localhost/user/login/fence/login"
- BASE_URL is in the global namespace so it can be used in this nested cfg
"authorize_url" will become "http://other_fence/user/oauth2/authorize"
- api_base_url is in the current namespace, so it is available
"THIS_WONT_WORK" will become "/test"
- Why? api_base_url is not in the current namespace and so we cannot use that
as a replacement. the configuration (instead of failing) will replace with
an empty string
Args:
cfg (TYPE): Description
fully_rendered_cfgs (TYPE): Description
replacements (TYPE): Description
Returns:
dict: Configurations with template vars replaced
"""
try:
for key, value in cfg.iteritems():
replacements.update(cfg)
fully_rendered_cfgs[key] = {}
fully_rendered_cfgs[key] = nested_render(
value,
fully_rendered_cfgs=fully_rendered_cfgs[key],
replacements=replacements,
)
# new namespace, remove current vars (no longer available as replacements)
for old_cfg, value in cfg.iteritems():
replacements.pop(old_cfg, None)
return fully_rendered_cfgs
except AttributeError:
# it's not a dict, so lets try to render it. But only if it's
# truthy (which means there's actually something to replace)
if cfg:
t = Template(str(cfg))
rendered_value = t.render(**replacements)
try:
cfg = yaml_load(rendered_value)
except ScannerError:
# it's not loading into yaml, so let's assume it's a string with special
# chars such as: {}[],&*#?|:-<>=!%@\)
#
# in YAML, we have to "quote" a string with special chars.
#
# since yaml_load isn't loading from a file, we need to wrap the Python
# str in actual quotes.
cfg = yaml_load('"{}"'.format(rendered_value))
return cfg
| 12,141
|
def element_by_atomic_number(atomic_number):
"""Search for an element by its atomic number
Look up an element from a list of known elements by atomic number.
Return None if no match found.
Parameters
----------
atomic_number : int
Element atomic number that need to look for
if a string is provided, only numbers are considered during the search
Returns
-------
matched_element : element.Element
Return an element from the periodic table if we find a match,
otherwise raise GMSOError
"""
if isinstance(atomic_number, str):
atomic_number_trimmed = int(sub('[a-z -]', '', atomic_number.lower()).lstrip('0'))
msg = ('Letters and spaces are not considered when searching by element atomic number. '
'{} became {}'.format(atomic_number, atomic_number_trimmed))
warnings.warn(msg)
else:
atomic_number_trimmed = atomic_number
matched_element = atomic_dict.get(atomic_number_trimmed)
if matched_element is None:
raise GMSOError(f'Failed to find an element with atomic number {atomic_number_trimmed}')
return matched_element
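# Illustration of the string-trimming step above, using only the standard
# library: lowercase letters, spaces and hyphens are removed before the
# integer lookup (the input value here is purely illustrative).
from re import sub

print(int(sub('[a-z -]', '', 'Carbon-6'.lower()).lstrip('0')))   # 6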
| 12,142
|
def get_data_from_db(cursor):
"""
Get data from the database given a query-instantiated cursor
:param cursor: query-instantiated database cursor
:return: tuple of labels and training data
"""
training_data, labels = [], []
cols = [desc[0] for desc in cursor.description]
for record in tqdm(cursor, total=cursor.rowcount):
record = dict(record)
record['purposes'] = [purpose_to_english[p] for p in record['purposes']]
# just duplicate for house_number and year of construction
record['house_number_vec'] = record['house_number']
record['year_of_construction_vec'] = record['year_of_construction']
# one-hot encoding for house number addition
if record['house_number_addition']:
hna = np.zeros(shape=(len(record['house_number_addition']), len(VOCABULARY)))
for idx, char in enumerate(record['house_number_addition']):
hna[idx, VOCABULARY.index(char.lower())] = 1.
else:
hna = np.zeros(shape=(1, len(VOCABULARY)))
record['house_number_addition_vec'] = hna
# 'multi-hot' encoding for building purposes
purposes = np.zeros(shape=(len(PURPOSES,)))
for purpose in record['purposes']:
purposes[PURPOSES.index(purpose)] = 1.
record['purposes_vec'] = purposes
# character-level vectorization of postal code
pc = np.zeros((len(record['postal_code']), len(VOCABULARY)))
for idx, char in enumerate(record['postal_code']):
pc[idx, VOCABULARY.index(char.lower())] = 1.
record['postal_code_vec'] = pc
# building geometry vectorization
geom = record['geometry_crs84']
geom = vectorize_wkt(geom)
record['geometry_vec'] = geom
record['centroid_vec'] = vectorize_wkt(record['centroid_crs84'])[0, :2]
# vectorization of neighbouring buildings
neighbours = record['neighbouring_buildings_crs84']
neighbours = vectorize_wkt(neighbours)
record['neighbouring_buildings_vec'] = neighbours
rd = record['recorded_date']
record['recorded_date_vec'] = [rd.year, rd.month, rd.day, rd.weekday()]
rgd = record['registration_date']
record['registration_date_vec'] = [rgd.year, rgd.month, rgd.day, rgd.weekday()]
training_data.append(record)
labels.append({
'energy_performance_index': record['energy_performance_index'],
'energy_performance_label': record['energy_performance_label'],
'energy_performance_vec': ENERGY_CLASSES.index(record['energy_performance_label'])
})
return training_data, labels
| 12,143
|
def parse(input_str, file_path=True):
"""
Parse a GLM into an omf.feeder tree. This is so we can walk the tree,
change things in bulk, etc.
Input can be a file path or GLM string.
"""
tokens = _tokenize_glm(input_str, file_path)
return _parse_token_list(tokens)
| 12,144
|
def pred(model, x_pred_scaled, scaler_y):
"""
Predict
:param model: model for prediction
:param x_pred_scaled: scaled x values we need to predict for
:param scaler_y: scaler for y values
:return:
"""
MAX_PREDICT_SIZE = 10000
g_mean_full = g_std_full = None
start = 0
while start < len(x_pred_scaled):
end = start + MAX_PREDICT_SIZE
x_pred_scaled_slice = x_pred_scaled[start:end]
g_mean_scaled, g_std_scaled = model_gpflow.predict_gpflow(model, x_pred_scaled_slice)
g_mean = scaler_y.inverse_transform(g_mean_scaled)
g_std = g_std_scaled * scaler_y.scale_
if g_mean_full is None:
g_mean_full = g_mean
g_std_full = g_std
else:
g_mean_full = np.vstack((g_mean_full, g_mean))
g_std_full = np.vstack((g_std_full, g_std))
start = end
return g_mean_full, g_std_full
| 12,145
|
def metadataAbstractElementEmptyValuesTest3():
"""
Empty value for unknown attribute.
>>> doctestMetadataAbstractElementFunction(
... testMetadataAbstractElementEmptyValue,
... metadataAbstractElementEmptyValuesTest3(),
... requiredAttributes=["required1"],
... optionalAttributes=["optional1"])
[]
"""
metadata = """<?xml version="1.0" encoding="UTF-8"?>
<test required1="foo" optional1="foo" unknown1="" />
"""
return ElementTree.fromstring(metadata)
| 12,146
|
def orthonormal_initializer(input_size, output_size):
"""from https://github.com/patverga/bran/blob/32378da8ac339393d9faa2ff2d50ccb3b379e9a2/src/tf_utils.py#L154"""
I = np.eye(output_size)
lr = .1
eps = .05/(output_size + input_size)
success = False
tries = 0
while not success and tries < 10:
Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)
for i in range(100):
QTQmI = Q.T.dot(Q) - I
loss = np.sum(QTQmI**2 / 2)
Q2 = Q**2
Q -= lr*Q.dot(QTQmI) / (np.abs(Q2 + Q2.sum(axis=0,
keepdims=True) + Q2.sum(axis=1, keepdims=True) - 1) + eps)
if np.max(Q) > 1e6 or loss > 1e6 or not np.isfinite(loss):
tries += 1
lr /= 2
break
success = True
if success:
print('Orthogonal pretrainer loss: %.2e' % loss)
else:
print('Orthogonal pretrainer failed, using non-orthogonal random matrix')
Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)
return Q.astype(np.float32)
| 12,147
|
def stick_figure (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" General function for drawing stick based parts (e.g., ribozyme and protease sites).
"""
# Default options
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
x_extent = 5.0
y_extent = 10.0
linestyle = '-'
linetype = "";
shapetype = "";
if(type == "Ribozyme"):
linetype = 'dash'
headgroup = 'O'
elif(type == "Protease"):
linetype = 'dash'
headgroup = 'X'
elif(type == "ProteinStability"):
linetype = 'solid'
headgroup = 'O'
elif(type == "Ribonuclease"):
linetype = 'solid'
headgroup = 'X'
# Reset defaults if provided
if opts != None:
if 'color' in opts.keys():
color = opts['color']
if 'start_pad' in opts.keys():
start_pad = opts['start_pad']
if 'end_pad' in opts.keys():
end_pad = opts['end_pad']
if 'x_extent' in opts.keys():
x_extent = opts['x_extent']
if 'y_extent' in opts.keys():
y_extent = opts['y_extent']
if 'linestyle' in opts.keys():
linestyle = opts['linestyle']
if 'linewidth' in opts.keys():
linewidth = opts['linewidth']
if 'scale' in opts.keys():
scale = opts['scale']
# Check direction add start padding
final_end = end
final_start = prev_end
if start > end:
start = prev_end+end_pad+x_extent
end = prev_end+end_pad
final_end = start+start_pad
rbs_center = (end+((start-end)/2.0),-y_extent)
c1 = Circle(rbs_center, x_extent/2.0, linewidth=linewidth, edgecolor=color,
facecolor=(1,1,1), zorder=8)
x1 = Line2D([start,end],[-y_extent*1.25,-y_extent/1.5],
linewidth=linewidth, color=color, zorder=12, linestyle='-')
x2 = Line2D([start,end],[-y_extent/1.5,-y_extent*1.25],
linewidth=linewidth, color=color, zorder=12, linestyle='-')
dash1 = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0,-y_extent/4],
linewidth=linewidth, color=color, zorder=8, linestyle=linestyle)
dash2 = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[-y_extent/2,-y_extent+(x_extent/2.0)],
linewidth=linewidth, color=color, zorder=8, linestyle=linestyle)
solidO = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0,-y_extent+(x_extent/2.0)],
linewidth=linewidth, color=color, zorder=8, linestyle=linestyle)
solidX = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0,-y_extent],
linewidth=linewidth, color=color, zorder=8, linestyle=linestyle)
if(headgroup == "O" and linetype == "dash"):
ax.add_patch(c1)
ax.add_line(dash1)
ax.add_line(dash2)
elif(headgroup == "X" and linetype == "dash"):
ax.add_line(x1)
ax.add_line(x2)
ax.add_line(dash1)
ax.add_line(dash2)
elif(headgroup == "O" and linetype == "solid"):
ax.add_patch(c1)
ax.add_line(solidO)
elif(headgroup == "X" and linetype == "solid"):
ax.add_line(x1)
ax.add_line(x2)
ax.add_line(solidX)
else:
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
rbs_center = (start+((end-start)/2.0),y_extent)
c1 = Circle(rbs_center, x_extent/2.0, linewidth=linewidth, edgecolor=color,
facecolor=(1,1,1), zorder=8)
x1 = Line2D([start,end],[y_extent*1.25,y_extent/1.5],
linewidth=linewidth, color=color, zorder=12, linestyle='-')
x2 = Line2D([start,end],[y_extent/1.5,y_extent*1.25],
linewidth=linewidth, color=color, zorder=12, linestyle='-')
dash1 = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0,y_extent/4],
linewidth=linewidth, color=color, zorder=8, linestyle=linestyle)
dash2 = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[y_extent/2,y_extent-(x_extent/2.0)],
linewidth=linewidth, color=color, zorder=8, linestyle=linestyle)
solidO = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0,y_extent-(x_extent/2.0)],
linewidth=linewidth, color=color, zorder=8, linestyle=linestyle)
solidX = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0,y_extent],
linewidth=linewidth, color=color, zorder=8, linestyle=linestyle)
if(headgroup == 'O' and linetype == 'dash'):
ax.add_patch(c1)
ax.add_line(dash1)
ax.add_line(dash2)
elif(headgroup == "X" and linetype == "dash"):
ax.add_line(x1)
ax.add_line(x2)
ax.add_line(dash1)
ax.add_line(dash2)
elif(headgroup == "O" and linetype == "solid"):
ax.add_patch(c1)
ax.add_line(solidO)
elif(headgroup == "X" and linetype == "solid"):
ax.add_line(x1)
ax.add_line(x2)
ax.add_line(solidX)
if opts != None and 'label' in opts.keys():
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
| 12,148
|
def display_datetime(datetime_str, time_zone=None, verbose=True):
"""Returns a formatted datetime with TZ (if provided) or 'Error (Missing)"""
"""
>>> print(datetime.datetime.utcnow().strftime("%Y/%m/%d %a %I:%M %p"))
2019/05/19 Sun 01:10 AM
"""
if datetime_str: # and type(datetime_str) == datetime.datetime.now():
if verbose:
return f'{datetime_str.strftime("%Y/%m/%d %a %I:%M %p")}{f" ({time_zone})" if time_zone else ""}'
else:
return f'{datetime_str.strftime("%a %I:%M %p")}{f" ({time_zone})" if time_zone else ""}'
else:
return 'Error (Missing)'
| 12,149
|
def test1():
"""The test runs a loop to check the consistency of the random init file
generating process and the following simulation.
"""
for _ in range(1000):
random_init()
simulate("test.boupy.yml")
| 12,150
|
def mask_channels(mask_type, in_channels, out_channels, data_channels=3):
"""
Creates an autoregressive channel mask.
Input:
mask_type: str
Either 'A' or 'B'. 'A' for first layer of network, 'B' for all others.
in_channels: int
Number of input channels to layer.
out_channels: int
Number of output channels of layer.
data_channels: int
Number of channels in the input data, e.g. 3 for RGB images. (default = 3).
Output:
mask: torch.FloatTensor
Shape (out_channels, in_channels).
A mask with 0 in places for masked elements.
"""
in_factor = in_channels // data_channels + 1
out_factor = out_channels // data_channels + 1
base_mask = torch.ones([data_channels,data_channels])
if mask_type == 'A':
base_mask = base_mask.tril(-1)
else:
base_mask = base_mask.tril(0)
mask_p1 = torch.cat([base_mask]*in_factor, dim=1)
mask_p2 = torch.cat([mask_p1]*out_factor, dim=0)
mask = mask_p2[0:out_channels,0:in_channels]
return mask
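# A small sketch (assuming torch is imported and mask_channels above is in
# scope): a type-'A' mask for a first layer mapping 3 RGB channels to 6
# feature channels. Output rows cycle through R,G,B and may only see input
# channels belonging to strictly earlier data channels.
import torch

mask = mask_channels('A', in_channels=3, out_channels=6, data_channels=3)
print(mask.shape)   # torch.Size([6, 3])
print(mask)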
| 12,151
|
def patchCombinedLogFormatter() -> None:
"""
Patch the twisted.web.http.combinedLogFormatter to include USER.
"""
twisted.web.http.combinedLogFormatter = combinedLogFormatter
| 12,152
|
def dedent(text):
"""
Remove all common indentation from every line but the 0th.
This will avoid getting <code> blocks when rendering text via markdown.
Ignoring the 0th line will also allow the 0th line not to be aligned.
Args:
text: A string of text to dedent.
Returns:
String dedented by above rules.
For example:
assertEquals("bar\nline1\nline2", dedent("bar\n line1\n line2"))
assertEquals("bar\nline1\nline2", dedent(" bar\n line1\n line2"))
assertEquals("bar\n line1\nline2", dedent(" bar\n line1\n line2"))
"""
text = textwrap.dedent(text)
text_lines = text.split('\n')
text_not_first = "\n".join(text_lines[1:])
text_not_first = textwrap.dedent(text_not_first)
text = text_lines[0] + "\n" + text_not_first
return text
| 12,153
|
def MockConnRecord(conn_record):
"""Mock ConnRecord fixture."""
with mock.patch("didcomm_resolver.resolver.ConnRecord") as patched:
patched.retrieve_by_id = mock.CoroutineMock(return_value=conn_record)
yield patched
| 12,154
|
def open_text_file_for_write(output_directory:str, file_name:str, verbose=False) -> TextIOWrapper:
"""Open a text file for writing"""
if verbose:
print(f"opening text file for write: {output_directory}/{file_name}", file=stderr)
return open(f"{output_directory}/{file_name}", 'w', encoding='utf-8')
| 12,155
|
def dpsplit(n,k, sig):
""" Perform the dynamic programming optimal segmentation, using the sig function
to determine the cost of a segment sig(i,j) is the cost of the i,j segment. These
are then added together
"""
# Set up the tracking tables
K = k + 1
N = n
segtable = np.zeros((n,K)) + np.nan
segtable[:,0] = [ sig(0,j+1) for j in range(N) ]
segindtable = np.zeros((N,K), dtype='int') - 1
# fill up the table in a clever order
for k in range(1,K):
for j in range(k,N):
#fill the j,k element
ans = min( ( (segtable[l,k-1] + sig(l+1,j+1), l+1 )
                         for l in range(k-1,j) ) )
segtable[j,k] = ans[0]
segindtable[j,k] = ans[1]
# read out the path
current_pointer = segindtable[-1,K-1]
path = [current_pointer]
for k in range(K-2, 0, -1):
current_pointer = segindtable[current_pointer-1, k]
path.append(current_pointer)
return sorted(path + [N]), segtable[-1,K-1]
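# A small usage sketch (hypothetical data; assumes numpy is imported as np and
# dpsplit above is in scope). sig(i, j) scores the half-open slice data[i:j]
# by its within-segment sum of squared deviations; note that column k of the
# table corresponds to k+1 segments, so k=2 yields three segments here.
import numpy as np

data = np.array([1.0, 1.2, 0.9, 5.0, 5.1, 4.9, 9.0, 9.2])

def sig(i, j):
    seg = data[i:j]
    return float(((seg - seg.mean()) ** 2).sum())

path, cost = dpsplit(len(data), 2, sig)
print(path, cost)   # [3, 6, 8] and a small total cost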
| 12,156
|
def get_wind_tiles() -> List[Tile]:
"""return a list of four wind tiles
"""
return [Tile(Suit.JIHAI.value, Jihai.TON.value),
Tile(Suit.JIHAI.value, Jihai.NAN.value),
Tile(Suit.JIHAI.value, Jihai.SHAA.value),
Tile(Suit.JIHAI.value, Jihai.PEI.value)]
| 12,157
|
def test_custom_envvars(caplog):
"""Test using environment variables for configuration. """
root = Path(__file__).with_suffix('') / 'gh_pages_envvars'
env = {
'DOCTR_VERSIONS_MENU_LATEST': 'master',
'DOCTR_VERSIONS_MENU_DEBUG': "true",
'DOCTR_VERSIONS_MENU_VERSIONS': "<branches>, <releases>",
'DOCTR_VERSIONS_MENU_SUFFIX_LATEST': " [latest]",
'DOCTR_VERSIONS_MENU_WRITE_VERSIONS_PY': 'false',
'DOCTR_VERSIONS_MENU_WRITE_INDEX_HTML': 'false',
'DOCTR_VERSIONS_MENU_ENSURE_NO_JEKYLL': 'false',
'DOCTR_VERSIONS_MENU_DOWNLOADS_FILE': '',
'DOCTR_VERSIONS_MENU_WARNING': "post: <post-releases>; outdated: (<releases> < 0.2); prereleased:",
'DOCTR_VERSIONS_MENU_LABEL': "<releases>: {{ folder | replace('v', '', 1) }}; doc-testing: doc; testing: {{ folder }} (latest dev branch)",
}
runner = CliRunner()
caplog.set_level(logging.DEBUG)
with runner.isolated_filesystem():
cwd = Path.cwd()
subprocess.run(['git', 'init'], check=True)
copy_tree(str(root), str(cwd))
result = runner.invoke(doctr_versions_menu_command, env=env)
assert result.exit_code == 0
assert (cwd / 'versions.json').is_file()
assert not (cwd / 'versions.py').is_file()
assert not (cwd / 'index.html').is_file()
assert not (cwd / '.nojekyll').is_file()
with (cwd / 'versions.json').open() as versions_json:
versions_data = json.load(versions_json)
assert versions_data == {
'downloads': {
'doc-testing': [],
'master': [],
'testing': [],
'v0.1.0': [],
'v0.2.0': [],
'v1.0.0': [],
'v1.0.0+dev': [],
'v1.0.0-dev0': [],
'v1.0.0-post1': [],
'v1.0.0-rc1': [],
'v1.1.0-rc1': [],
},
'folders': [
'doc-testing',
'master',
'testing',
'v0.1.0',
'v0.2.0',
'v1.0.0',
'v1.0.0+dev',
'v1.0.0-dev0',
'v1.0.0-post1',
'v1.0.0-rc1',
'v1.1.0-rc1',
],
'labels': {
'v0.1.0': '0.1.0',
'v0.2.0': '0.2.0',
'v1.0.0-dev0': '1.0.0-dev0',
'v1.0.0-rc1': '1.0.0-rc1',
'v1.0.0': '1.0.0',
'v1.0.0+dev': '1.0.0+dev',
'v1.0.0-post1': '1.0.0-post1',
'v1.1.0-rc1': '1.1.0-rc1',
'doc-testing': 'doc',
'master': 'master [latest]',
'testing': 'testing (latest dev branch)',
},
'latest': 'master',
'versions': [
'v1.1.0-rc1',
'v1.0.0-post1',
'v1.0.0+dev',
'v1.0.0',
'v1.0.0-rc1',
'v1.0.0-dev0',
'v0.2.0',
'v0.1.0',
'testing',
'master',
'doc-testing',
],
'warnings': {
'doc-testing': ['unreleased'],
'master': ['unreleased'],
'testing': ['unreleased'],
'v0.1.0': ['outdated'],
'v0.2.0': [],
'v1.0.0': [],
'v1.0.0+dev': ['unreleased'],
'v1.0.0-dev0': [],
'v1.0.0-post1': ['post'],
'v1.0.0-rc1': [],
'v1.1.0-rc1': [],
},
}
| 12,158
|
def train_mnist_classifier(lr=0.001, epochs=50, model_dir='.'):
"""train mnist classifier for inception score"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device {0!s}".format(device))
train_loader = load_mnist(batchSize=100, train=True)
test_loader = load_mnist(batchSize=100, train=False)
model = LeNet().to(device)
def evaluate():
model.eval()
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
pred = output.argmax(dim=1)
correct += pred.eq(target).sum().item()
accuracy = 100. * correct / len(test_loader.dataset)
return accuracy
train_criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=lr)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
# training loop
print('Started training...')
best_test_acc = 0.0
best_test_epoch = 0
for epoch in range(1, epochs + 1):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data).squeeze(1)
loss = train_criterion(output, target)
loss.backward()
optimizer.step()
if batch_idx % 20 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
test_acc = evaluate()
print('Test Accuracy: {:.2f}\n'.format(test_acc))
if test_acc > best_test_acc:
best_test_epoch = epoch
best_test_acc = test_acc
torch.save(model.state_dict(), os.path.join(model_dir, "mnist_classifier.pt"))
print('Finished.')
print('Best: Epoch: {}, Test-Accuracy: {:.4f}\n'.format(best_test_epoch, best_test_acc))
| 12,159
|
def datetime_now_filename_string():
"""create a string representation for now() for use as part of the MHL filename"""
return datetime.datetime.strftime(datetime.datetime.now(), "%Y-%m-%d_%H%M%S")
| 12,160
|
def test_remote_backup(host):
"""Check if the remote backup runs successfully"""
cmd = host.run("/usr/local/bin/remote-backup.sh")
assert cmd.succeeded
| 12,161
|
def generate_simulation_dataset(path, runs, **kwargs):
"""Generate and save a simulation dataset.
Parameters
----------
path : str
Root path where simulation data will be stored.
runs : int, array
If int then number of runs to use. If array then
array must be of one dim more than simulation grid
dim.
kwargs :
run_multiple_sources kwargs.
Returns
-------
dataset : zarr.hierarchy.Group
Simulation dataset.
"""
# Convert path to pathlib path
path = Path(path)
# Create dataset
dataset = zarr.open(path.as_posix(), mode='w')
if not isinstance(runs, int):
full_speed_array = runs
runs = len(runs)
else:
full_speed_array = None
# Add dataset attributes
dataset.attrs['waver'] = True
dataset.attrs['dataset'] = True
dataset.attrs['runs'] = runs
# Add simulation attributes based on kwargs and defaults
parameters = inspect.signature(run_multiple_sources).parameters
for param, value in parameters.items():
if param in kwargs:
dataset.attrs[param] = kwargs[param]
else:
dataset.attrs[param] = value.default
# Initialize speed and wave arrays
speed_array = None
wave_array = None
# Move through runs
for run in tqdm(range(runs), leave=False):
if full_speed_array is not None:
kwargs['speed'] = full_speed_array[run]
wave, speed = run_multiple_sources(**kwargs)
if speed_array is None:
speed_array = dataset.zeros('speed', shape=(runs, ) + speed.shape, chunks=(1,) + (64,) * speed.ndim)
if wave_array is None:
wave_array = dataset.zeros('wave', shape=(runs, ) + wave.shape, chunks=(1,) + (64,) * wave.ndim)
speed_array[run] = speed
wave_array[run] = wave
return dataset
| 12,162
|
def num_sites(sequence, rule, **kwargs):
"""Count the number of sites where `sequence` can be cleaved using
the given `rule` (e.g. number of miscleavages for a peptide).
Parameters
----------
sequence : str
The sequence of a polypeptide.
rule : str or compiled regex
A regular expression describing the site of cleavage. It is recommended
to design the regex so that it matches only the residue whose
C-terminal bond is to be cleaved. All additional requirements should be
specified using `lookaround assertions
<http://www.regular-expressions.info/lookaround.html>`_.
labels : list, optional
A list of allowed labels for amino acids and terminal modifications.
Returns
-------
out : int
Number of cleavage sites.
"""
return len(_cleave(sequence, rule, **kwargs)) - 1
| 12,163
|
def get_packages(code: str) -> defaultdict:
"""Extracts the packages that were included in the file being inspected.
Source for this code: https://stackoverflow.com/questions/2572582/
Example:
input:
'from collections import Counter\n
import kivy\n
from stats import median as stats_median\n'
output:
defaultdict(<class 'list'>,
{'import_name': ['collections', 'kivy', 'stats'],
'import_from': ['Counter', 'median']}
)
"""
instructions = get_instructions(code)
import_instructions = [i for i in instructions if "IMPORT" in i.opname]
imports = defaultdict(list)
for instr in import_instructions:
imports[instr.opname.lower()].append(instr.argval)
return imports
| 12,164
|
def beam_name():
"""Session level fixture for beam path."""
return str(beam_path)
| 12,165
|
def get_mv_sandwich(a_blade_indices, b_blade_indices, signature, prod="gp"):
"""a b ~a"""
out_indices = []
out_blade_indices = []
out_signs = []
indices_a = []
indices_b = []
indices_a_r = []
blade_to_index = {}
for (i_a, index_a), (i_b, index_b), (i_a_r, index_a_r) in itertools.product(
enumerate(a_blade_indices),
enumerate(b_blade_indices),
enumerate(reverse_indices(a_blade_indices))
):
out_sign_1, out_index_1 = reduce_bases(index_a, index_b, signature)
out_sign_2, out_index = reduce_bases(out_index_1, index_a_r, signature)
out_sign = out_sign_1 * out_sign_2
if out_sign != 0 and (
prod == "gp" or
(prod == "op" and len(out_index) == abs(len(index_a) + len(index_b))) or
(prod == "ip" and len(out_index) == abs(len(index_a) - len(index_b)))
):
out_signs.append(out_sign)
indices_a.append(i_a)
indices_b.append(i_b)
indices_a_r.append(i_a_r)
if out_index in blade_to_index:
out_indices.append(blade_to_index[out_index])
else:
blade_to_index[out_index] = len(blade_to_index)
out_indices.append(blade_to_index[out_index])
out_blade_indices.append(out_index)
if len(out_indices) == 0:
def _values_mv_sandwich(a_values, b_values):
return jnp.zeros((), dtype=jnp.float32)
else:
out_size = max(out_indices) + 1
def _values_mv_sandwich(a_values, b_values):
out_batch_shape = jnp.broadcast_shapes(
a_values.shape[1:], b_values.shape[1:]
)
out_values = jnp.zeros(
[out_size, *out_batch_shape], dtype=jnp.float32
)
for index_a, index_b, index_a_r, out_sign, out_index in zip(indices_a, indices_b, indices_a_r, out_signs, out_indices):
out_values = out_values.at[out_index].add(
out_sign * a_values[index_a] * b_values[index_b] * a_values[index_a_r]
)
return out_values
_values_mv_sandwich_jit = jax.jit(_values_mv_sandwich)
return _values_mv_sandwich_jit, tuple(out_blade_indices)
| 12,166
|
def clear():
"""Clear the buffer."""
global _pixel_map
_pixel_map = deepcopy(_empty_map)
| 12,167
|
def ldns_dnssec_create_nsec3(*args):
"""LDNS buffer."""
return _ldns.ldns_dnssec_create_nsec3(*args)
| 12,168
|
def S_difference_values(_data_lista, _data_listb):
"""
Returns the element-wise difference of two equally long data lists
(_data_lista[i] - _data_listb[i]); returns an empty list if the lengths differ.
"""
d_data = []
dsa = len(_data_lista)
dsb = len(_data_listb)
if dsa != dsb:
return []
for i in range(dsa):
d_data.append(_data_lista[i] - _data_listb[i])
return d_data
| 12,169
|
def parseFixedZone(s):
"""Convert a +hhmm or -hhmm zone suffix.
[ s is a string ->
if s is a time zone suffix of the form "+hhmm" or "-hhmm" ->
return that zone information as an instance of a class
that inherits from datetime.tzinfo
else -> raise SyntaxError ]
"""
#-- 1 --
if s.startswith('+'): sign = 1
elif s.startswith('-'): sign = -1
else:
raise SyntaxError("Expecting zone modifier as {0}hhmm: "
"'{1}'".format(s[0], s))
#-- 2 --
# [ if s[1:] matches HHMM_PAT ->
# hours := the HH part as an int
# minutes := the MM part as an int
# else -> raise SyntaxError ]
rawHHMM = s[1:]
m = HHMM_PAT.match(rawHHMM)
if m is None:
raise SyntaxError("Expecting zone modifier as {0}HHMM: "
"'{1}'".format(s[0], s))
else:
hours = int(rawHHMM[:2])
minutes = int(rawHHMM[2:])
#-- 3 --
return FixedZone(sign*hours, sign*minutes, s)
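# A self-contained sketch of the same idea using only the standard library
# (datetime.timezone in place of the module's FixedZone class, which is not
# shown here):
import re
from datetime import timedelta, timezone

def parse_fixed_zone(s):
    m = re.fullmatch(r'([+-])(\d{2})(\d{2})', s)
    if m is None:
        raise SyntaxError("Expecting zone modifier as +hhmm or -hhmm: '{}'".format(s))
    sign = 1 if m.group(1) == '+' else -1
    return timezone(sign * timedelta(hours=int(m.group(2)), minutes=int(m.group(3))), s)

print(parse_fixed_zone('+0530').utcoffset(None))   # 5:30:00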
| 12,170
|
def tica_eigenvalues_plot(tica, num=12, plot_file=None):
"""
Plots the highest eigenvalues over the number of the time-lagged independent components.
Parameters
----------
tica : TICA obj
Time-lagged independent components information.
num : int, default = 12
Number of eigenvalues to plot.
plot_file : str, optional, default = None
Path and name of the file to save the plot.
"""
# Plot eigenvalues over component numbers.
fig,ax = plt.subplots(1, 1, figsize=[4,3], dpi=300)
componentnr = np.arange(num)+1
eigenvalues = tica.eigenvalues[:num]
ax.bar(componentnr, eigenvalues)
ax.set_xlabel('component number')
ax.set_ylabel('eigenvalue')
fig.tight_layout()
# Save the figure to a file.
if plot_file: fig.savefig(plot_file, dpi=300)
return componentnr, eigenvalues
| 12,171
|
def latex2svg(code, params=default_params, working_directory=None):
"""Convert LaTeX to SVG using dvisvgm and scour (or svgo).
Parameters
----------
code : str
LaTeX code to render.
params : dict
Conversion parameters.
working_directory : str or None
Working directory for external commands and place for temporary files.
Returns
-------
dict
Dictionary of SVG output and output information:
* `svg`: SVG data
* `width`: image width in *em*
* `height`: image height in *em*
* `valign`: baseline offset in *em*
"""
if working_directory is None:
with TemporaryDirectory() as tmpdir:
return latex2svg(code, params, working_directory=tmpdir)
# Caution: TeX & dvisvgm work with TeX pt (1/72.27"), but we need DTP pt (1/72")
# so we need a scaling factor for correct output sizes
# dvisvgm will produce a viewBox in DTP pt but SHOW TeX pt in its output.
scaling = 1.00375 # (1/72)/(1/72.27)
fontsize = params['fontsize']
document = (params['template']
.replace('{{ preamble }}', params['preamble'])
.replace('{{ fontsize }}', str(fontsize))
.replace('{{ code }}', code))
with open(os.path.join(working_directory, 'code.tex'), 'w') as f:
f.write(document)
# Run LaTeX and create DVI file
try:
ret = subprocess.run(shlex.split(params['latex_cmd']+' code.tex'),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=working_directory)
ret.check_returncode()
except FileNotFoundError:
raise RuntimeError('latex not found')
# Add LIBGS to environment if supplied
env = os.environ.copy()
if params['libgs']:
env['LIBGS'] = params['libgs']
# Convert DVI to SVG
dvisvgm_cmd = params['dvisvgm_cmd'] + ' --scale=%f' % params['scale']
dvisvgm_cmd += ' code.dvi'
try:
ret = subprocess.run(shlex.split(dvisvgm_cmd),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=working_directory, env=env)
ret.check_returncode()
except FileNotFoundError:
raise RuntimeError('dvisvgm not found')
# Parse dvisvgm output for size and alignment
def get_size(output):
regex = r'\b([0-9.]+)pt x ([0-9.]+)pt'
match = re.search(regex, output)
if match:
return (float(match.group(1)) / fontsize * scaling,
float(match.group(2)) / fontsize * scaling)
else:
return None, None
def get_measure(output, name):
regex = r'\b%s=([0-9.e-]+)pt' % name
match = re.search(regex, output)
if match:
return float(match.group(1)) / fontsize * scaling
else:
return None
output = ret.stderr.decode('utf-8')
width, height = get_size(output)
depth = get_measure(output, 'depth')
# no baseline offset if depth not found
if depth is None:
depth = 0.0
# Modify SVG attributes, to a get a self-contained, scaling SVG
from lxml import etree
# read SVG, discarding all comments ("<!-- Generated by… -->")
parser = etree.XMLParser(remove_comments=True)
xml = etree.parse(os.path.join(working_directory, 'code.svg'), parser)
svg = xml.getroot()
svg.set('width', f'{width:.6f}em')
svg.set('height', f'{height:.6f}em')
svg.set('style', f'vertical-align:{-depth:.6f}em')
xml.write(os.path.join(working_directory, 'code.svg'))
# Run optimizer to get a minified oneliner with (pseudo-)unique Ids
# generate random prefix using ASCII letters (ID may not start with a digit)
import random, string
prefix = ''.join(random.choice(string.ascii_letters) for n in range(3))
svgo_cmd = (params['svgo_cmd']
.replace('{{ infile }}', 'code.svg')
.replace('{{ outfile }}', 'optimized.svg'))
svgo_config = (params['svgo_config']
.replace('{{ prefix }}', prefix))
# with scour, input & output files must be different
scour_cmd = (params['scour_cmd']
.replace('{{ prefix }}', prefix+'_')
.replace('{{ infile }}', 'code.svg')
.replace('{{ outfile }}', 'optimized.svg'))
if params['optimizer'] == 'scour':
# optimize SVG using scour (default)
try:
ret = subprocess.run(shlex.split(scour_cmd),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=working_directory, env=env)
ret.check_returncode()
except FileNotFoundError:
raise RuntimeError('scour not found')
with open(os.path.join(working_directory, 'optimized.svg'), 'r') as f:
svg = f.read()
elif params['optimizer'] == 'svgo':
# optimize SVG using svgo (optional)
# write svgo params file
with open(os.path.join(working_directory, 'svgo.config.js'), 'w') as f:
f.write(svgo_config)
try:
ret = subprocess.run(shlex.split(svgo_cmd),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=working_directory, env=env)
ret.check_returncode()
except FileNotFoundError:
raise RuntimeError('svgo not found')
with open(os.path.join(working_directory, 'optimized.svg'), 'r') as f:
svg = f.read()
else:
# no optimization, just return SVG
with open(os.path.join(working_directory, 'code.svg'), 'r') as f:
svg = f.read()
return {'svg': svg, 'valign': round(-depth,6),
'width': round(width,6), 'height': round(height,6)}
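# A minimal usage sketch (assuming the external latex, dvisvgm and scour
# binaries are installed and that default_params is the module-level parameter
# dict referenced in the signature above):
out = latex2svg(r'\( e^{i\pi} + 1 = 0 \)')
with open('euler.svg', 'w') as f:
    f.write(out['svg'])
print(out['width'], out['height'], out['valign'])   # sizes and baseline offset in em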
| 12,172
|
def _split_train_test(features, labels, train_set, random_seed):
"""Split the dataset into training and test sets.
Parameters
----------
features : pandas.DataFrame
Features of the dataset events.
labels : pandas.DataFrame
Labels of the dataset events.
train_set : {float, list-like}
If float, it is the fraction of objects that will be used as training
set. If list, it is the IDs of the objects to use as training set.
random_seed : {int, RandomState instance}
Random seed or random state instance to use. It allows reproducible
results.
Returns
-------
X_train : pandas.DataFrame
Features of the events with which to train the classifier.
X_test : pandas.DataFrame
Features of the events with which to test the classifier.
y_train : pandas.core.series.Series
Labels of the events with which to train the classifier.
y_test : pandas.core.series.Series
Labels of the events with which to test the classifier.
"""
if np.isscalar(train_set): # `train_set` was the size of training set
X_train, X_test, y_train, y_test = model_selection.train_test_split(
features, labels, train_size=train_set,
random_state=random_seed)
else: # `train_set` was a list of object names
X_train = features.loc[train_set]
y_train = labels.loc[train_set]
is_not_train_set = ~ features.index.isin(train_set)
X_test = features[is_not_train_set]
y_test = labels[is_not_train_set]
return X_train, X_test, y_train, y_test
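# Hypothetical usage sketch with a toy DataFrame (column names are made up;
# assumes this module imports numpy as np and sklearn's model_selection, as
# the function above requires):
import numpy as np
import pandas as pd

features = pd.DataFrame({'a': np.arange(10), 'b': np.arange(10) * 2.0})
labels = pd.Series(['x', 'y'] * 5, name='label')
X_train, X_test, y_train, y_test = _split_train_test(features, labels, 0.7, random_seed=42)
print(len(X_train), len(X_test))   # 7 3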
| 12,173
|
def parse_config(requesting_file_path, is_plugin=True):
"""
Parse the config files for a given sideboard plugin, or sideboard itself.
It's expected that this function is called from one of the files in the
top-level of your module (typically the __init__.py file)
Args:
requesting_file_path (str): The __file__ of the module requesting the
parsed config file. An example value is::
/opt/sideboard/plugins/plugin-package-name/plugin_module_name/__init__.py
the containing directory (here, `plugin_module_name`) is assumed
to be the module name of the plugin that is requesting a parsed
config.
is_plugin (bool): Indicates whether a plugin is making the request or
Sideboard itself is making the request. If True (default) add
plugin-relevant information to the returned config. Also, treat it
as if it's a plugin
Returns:
ConfigObj: The resulting configuration object.
"""
module_dir, root_dir = get_module_and_root_dirs(requesting_file_path, is_plugin)
specfile = os.path.join(module_dir, 'configspec.ini')
spec = configobj.ConfigObj(specfile, interpolation=False, list_values=False, encoding='utf-8', _inspec=True)
# to allow more/better interpolations
root_conf = ['root = "{}"\n'.format(root_dir), 'module_root = "{}"\n'.format(module_dir)]
temp_config = configobj.ConfigObj(root_conf, interpolation=False, encoding='utf-8')
for config_path in get_config_files(requesting_file_path, is_plugin):
# this gracefully handles nonexistent files
temp_config.merge(configobj.ConfigObj(config_path, encoding='utf-8', interpolation=False))
# combining the merge files to one file helps configspecs with interpolation
with NamedTemporaryFile(delete=False) as config_outfile:
temp_config.write(config_outfile)
temp_name = config_outfile.name
config = configobj.ConfigObj(temp_name, encoding='utf-8', configspec=spec)
validation = config.validate(Validator(), preserve_errors=True)
unlink(temp_name)
if validation is not True:
raise ConfigurationError('configuration validation error(s): {!r}'.format(
configobj.flatten_errors(config, validation))
)
if is_plugin:
sideboard_config = globals()['config']
config['plugins'] = deepcopy(sideboard_config['plugins'])
if 'rpc_services' in config:
from sideboard.lib._services import _register_rpc_services
_register_rpc_services(config['rpc_services'])
if 'default_url' in config:
priority = config.get('default_url_priority', 0)
if priority >= sideboard_config['default_url_priority']:
sideboard_config['default_url'] = config['default_url']
return config
| 12,174
|
def generate_enc_keypair():
"""
Generate Curve25519 keypair
:returns tuple: A byte pair containing the encryption key and decryption
key.
"""
private_key = PrivateKey.generate()
return private_key.public_key.encode(), private_key.encode()
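# Quick sanity check (assuming PrivateKey above is nacl.public.PrivateKey from
# PyNaCl): both returned values are 32-byte keys usable with nacl.public.Box.
enc_key, dec_key = generate_enc_keypair()
print(len(enc_key), len(dec_key))   # 32 32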
| 12,175
|
def filter_molecular_components(
components: List[Component],
) -> Tuple[List[Component], List[Component]]:
"""Separate list of components into molecular and non-molecular components.
Args:
components: A list of structure components, generated using
:obj:`pymatgen.analysis.dimensionality.get_structure_components`.
Returns:
The filtered components as a tuple of ``(molecular_components,
other_components)``.
"""
molecular_components = [c for c in components if c["dimensionality"] == 0]
other_components = [c for c in components if c["dimensionality"] != 0]
return molecular_components, other_components
| 12,176
|
async def test_client_proto_packet_received_log(caplog: Any) -> None:
"""
Ensure we properly process received packets
"""
proto = tpt.SNMPClientProtocol(b"SNMP-packet")
with caplog.at_level(logging.DEBUG):
proto.datagram_received(b"fake-packet", ("192.0.2.1", 42))
assert "66 61 6b" in caplog.text, "hexdump not found in logs"
assert "192.0.2.1:42" in caplog.text, "remote endpoint not in logs"
| 12,177
|
def enter_card_details(driver, ccn, exp, cvc):
"""set credit card number, valid till. cvc"""
driver.find_element_by_xpath("//input[@placeholder='Card number']").send_keys(ccn)
driver.find_element_by_xpath("//input[@placeholder='MM / YY']").send_keys(exp)
driver.find_element_by_xpath("//input[@placeholder='CVC']").send_keys(cvc)
| 12,178
|
def argument(name, type):
"""
Set the type of a command argument at runtime. This is useful for more
specific types such as mitmproxy.types.Choice, which we cannot annotate
directly as mypy does not like that.
"""
def decorator(f: types.FunctionType) -> types.FunctionType:
assert name in f.__annotations__
f.__annotations__[name] = type
return f
return decorator
| 12,179
|
def add_menu_item(method, label, **kwargs):
# type: (callable, Union[str, int], Any) -> None
"""wrapper for xbmcplugin.addDirectoryItem"""
args = kwargs.get("args", {})
label = ku.localize(label) if isinstance(label, int) else label
list_item = ListItem(label)
list_item.setArt(kwargs.get("art"))
list_item.setInfo("video", kwargs.get("info"))
if method == search and "q" in args:
list_item.addContextMenuItems([(
ku.localize(32019),
"XBMC.RunPlugin({})".format(plugin.url_for(search, delete=True, q=label))
)])
if method == play_film:
list_item.setProperty("IsPlayable", "true")
xbmcplugin.addDirectoryItem(
plugin.handle,
plugin.url_for(method, **args),
list_item,
kwargs.get("directory", True))
| 12,180
|
def init_logging(human_log_level: str = "info") -> None:
"""Init the `logging.Logger`.
It should be called only once in the app (e.g. in `main`). It sets
the log_level to one of `HUMAN_LOG_LEVELS`. And sets up a handler
for stderr. The logging level is propagated to all subprocesses.
"""
_new_logger(_human_log_level_to_int(human_log_level))
_set_log_formatter()
| 12,181
|
async def async_google_actions_request_sync(cloud):
"""Request a Google Actions sync request."""
return await cloud.websession.post(
f"{cloud.google_actions_report_state_url}/request_sync",
headers={AUTHORIZATION: f"Bearer {cloud.id_token}"},
)
| 12,182
|
def convert_time_string_to_secs(string: str) -> int:
"""
Takes a string in the format '1h30m25s' and converts it to an integer
in seconds. This functions uses the regular expression RE_CONVERT_TIME
above for matching the string.
"""
match = regexp_time.match(string)
if not match:
raise ValueError("String {0} has an invalid representation")
h, m, s, ms, us = match.groups()
h = int(h) if h else 0
m = int(m) if m else 0
s = int(float(s)) if s else 0
total_time_seconds = h*3600 + m*60 + s
return total_time_seconds
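# Quick check (assuming the module-level regexp_time matches the '1h30m25s'
# style shown in the docstring): 1*3600 + 30*60 + 25 = 5425.
print(convert_time_string_to_secs('1h30m25s'))   # 5425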
| 12,183
|
def polar_cube(c, index, n=512, interp='cubic'):
"""VIMS cube polar projected.
Parameters
----------
c: pyvims.VIMS
Cube to interpolate.
index: int, float, str, list, tuple
VIMS band or wavelength to plot.
n: int, optional
Number of pixel for the grid interpolation.
interp: str, optional
Interpolation method
"""
# Pixel data
data = c[index]
# Choose which pole to display
n_pole = c.sc_lat > 0
# Pixel positions in polar projection
pixels = polar_proj(c.ground_lon, c.ground_lat, n_pole=n_pole)
# Contour positions in polar projection
contour = polar_proj(*c.clonlat, n_pole=n_pole)
# Interpolate data (with mask)
z, grid, extent = polar_interp(pixels, data, contour, n=n, method=interp)
return z, grid, extent, pixels, contour, n_pole
| 12,184
|
def _load_surf_files_gifti_gzip(surf_file):
"""Load surface data Gifti files which are gzipped. This
function is used by load_surf_mesh and load_surf_data for
extracting gzipped files.
Part of the code can be removed while bumping nibabel 2.0.2
"""
with gzip.open(surf_file) as f:
as_bytes = f.read()
if LooseVersion(nibabel.__version__) >= LooseVersion('2.1.0'):
parser = gifti.GiftiImage.parser()
parser.parse(as_bytes)
gifti_img = parser.img
else:
from nibabel.gifti.parse_gifti_fast import ParserCreate, Outputter
parser = ParserCreate()
parser.buffer_text = True
out = Outputter()
parser.StartElementHandler = out.StartElementHandler
parser.EndElementHandler = out.EndElementHandler
parser.CharacterDataHandler = out.CharacterDataHandler
parser.Parse(as_bytes)
gifti_img = out.img
return gifti_img
| 12,185
|
def WriteTo(linelist, outfile):
"""writes the matches to a CSV file with name outfile"""
file = open(str(outfile),'w')
i=0
for line in linelist:
file.write(linelist[i].borough+ ',')
file.write(linelist[i].block+ ',')
file.write(linelist[i].lot+ ',')
file.write(linelist[i].numbers+ ',')
file.write(linelist[i].spaced_numbers+ ',')
file.write(linelist[i].letters+ ',')
file.write(linelist[i].BBL+ ',')
file.write(linelist[i].x+ ',')
file.write(linelist[i].y+ ',')
file.write(linelist[i].E_des)
i+=1
file.close()
| 12,186
|
def get_platform_system():
"""return platform.system
platform module has many regexp, so importing it is slow...
import only if required
"""
import platform
return platform.system()
| 12,187
|
def populate_sql(
sql: sqlparse.sql.Statement, example: NLToSQLExample, anonymize_values: bool
) -> bool:
"""
Creates a sequence of output / decoder actions from a raw SQL query.
Args:
sql: The SQL query to convert.
example: The NLToSQLExample object to add output actions.
anonymize_values: Whether to anonymize values by replacing with a placeholder.
Raises:
ParseError: if the SQL query can't be parsed.
Returns:
Boolean indicating whether all actions copying values from the input utterance were successfully completed.
"""
successful_copy = True
for item in sql:
if item.ttype == sqlparse.tokens.Text.Whitespace:
continue
if _is_punctuation(item) and (item.value in ("(", ")")):
_add_simple_step(item, example)
continue
if _is_punctuation(item) and (item.value in (",",)):
_add_simple_step(item, example)
continue
if _is_parenthesis(item):
successful_copy = (
populate_sql(item, example, anonymize_values) and successful_copy
)
continue
if _is_wildcard(item):
_add_simple_step(item, example)
continue
if _is_select(item) or _is_from(item):
_add_simple_step(item, example)
continue
if _is_where(item):
successful_copy = (
_parse_where(item, example, anonymize_values) and successful_copy
)
continue
if _is_function(item):
successful_copy = (
_parse_function(item, example, anonymize_values) and successful_copy
)
continue
if _is_identifier(item):
successful_copy = (
_parse_identifier(item, example, anonymize_values) and successful_copy
)
continue
if _is_identifier_list(item):
successful_copy = (
_parse_identifier_list(item, example, anonymize_values)
and successful_copy
)
continue
if _is_keyword(item) and item.value.lower() in (
"group",
"order",
"by",
"having",
"order by",
"group by",
):
_add_simple_step(item, example)
continue
if _is_keyword(item) and item.value.lower() in (
"count",
"avg",
"min",
"max",
"sum",
):
_add_simple_step(item, example)
continue
if _is_operation(item):
successful_copy = (
_parse_operation(item, example, anonymize_values) and successful_copy
)
continue
if _is_keyword(item) and item.value.lower() in ("between", "and", "or"):
_add_simple_step(item, example)
continue
if _is_order(item):
_add_simple_step(item, example)
continue
if _is_keyword(item) and item.value.lower() in ("is", "not null", "in", "not"):
_add_simple_step(item, example)
continue
if _is_keyword(item) and item.value.lower() in ("distinct",):
_add_simple_step(item, example)
continue
if _is_keyword(item) and item.value.lower() in ("limit",):
_add_simple_step(item, example)
continue
if _is_keyword(item) and item.value.lower() in ("join", "on"):
_add_simple_step(item, example)
continue
if _is_keyword(item) and item.value.lower() in ("intersect", "union"):
_add_simple_step(item, example)
continue
if _is_keyword(item) and item.value.lower() in ("except",):
_add_simple_step(item, example)
continue
if _is_integer(item) and example.gold_sql_query.actions[
len(example.gold_sql_query.actions) - 1
].symbol in ("limit", "between", "and"):
prev_len = len(example.gold_sql_query.actions)
successful_copy = (
_add_simple_value(item, example, anonymize_values) and successful_copy
)
if len(example.gold_sql_query.actions) == prev_len:
raise ValueError(
"Gold query did not change length when adding simple value!"
)
continue
if _is_comparison(item):
successful_copy = (
_parse_comparison(item, example, anonymize_values) and successful_copy
)
continue
_debug_state(item, example)
raise ParseError("Incomplete _parse_sql")
return successful_copy
| 12,188
|
def factorial(n):
"""Stop sympy blindly calculating factorials no matter how large.
If 'n' is a number of some description, ensure that it is smaller than
a cutoff, otherwise sympy will simply evaluate it, no matter how long that
may take to complete!
- 'n' should be a sympy object, that sympy.factorial(...) can use.
"""
if isinstance(n, (Integer, Float, Rational)) and n > 50:
raise ValueError("[Factorial]: Too large integer to compute factorial effectively!")
else:
return sympy.factorial(n)
| 12,189
|
def getSupplier(num):
"""" get the supplier for a card number
Attributes:
@num: card number
"""
supplier = str()
for key, value in suppliers.items():
if bool(re.match(value, num)):
supplier = key
break
if supplier == "":
supplier = "Ukwnow"
return supplier
| 12,190
|
def load_torch_hub_model(repo: str, model: str, *args, **kwargs):
"""Tries to load a torch hub model and handles different exceptions that could be raised.
Args:
repo: The GitHub repository containing the models.
model: The model name to download.
max_retries: The maximum number of tries to download the model.
Returns:
The downloaded torch model.
"""
error: Optional[Exception] = None
for _ in range(TORCH_HUB_DOWNLOAD_MAX_RETRIES + 1):
try:
try:
return torch.hub.load(
repo,
model,
*args,
**kwargs,
)
except RuntimeError:
return torch.hub.load(
repo,
model,
*args,
**kwargs,
force_reload=True,
)
except Exception as e:
error = e
assert error is not None
raise error
| 12,191
|
def report_unmerged(unmerged):
"""
Stores a list of deletable unmerged files in the orphan table
:param list unmerged: A list of tuples,
where each tuple is a name, info dict pair
"""
_report_files('unmerged', unmerged)
| 12,192
|
def compute_medoid(data):
"""
Get medoid of data
Parameters
----------
data: ndarray
Data points
Returns
------
medoid: ndarray
Medoid
"""
dist_mat = pairwise_distances(data)
return data[np.argmin(dist_mat.sum(axis=0))]
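# Small sketch (assuming numpy and sklearn's pairwise_distances are imported
# in this module, as compute_medoid above requires): the medoid is the data
# point with the smallest summed distance to all others.
import numpy as np

pts = np.array([[0.0, 0.0], [0.1, 0.0], [10.0, 10.0]])
print(compute_medoid(pts))   # [0.1 0. ]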
| 12,193
|
def int_sphere(fx, xgrid):
"""
Computes integrals over the sphere defined by the logarithmic
grid provided as input
Parameters
----------
fx : array_like
The function (array) to be integrated
xgrid : ndarray
The logarithmic radial grid
Returns
-------
I_sph : float
The value of the integrand
Notes
-----
The integral formula is given by
.. math:: I = 4 \pi \int \dd{x} e^{3x} f(x)
"""
func_int = 4.0 * pi * np.exp(3.0 * xgrid) * fx
I_sph = np.trapz(func_int, xgrid)
return I_sph
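# Sanity check (assuming numpy is imported as np and pi is available, as the
# function above requires): integrating f(x) = 1 over a log grid reaching
# r = e^x = 1 should give the volume of the unit sphere, 4*pi/3 ~= 4.18879.
import numpy as np

xgrid = np.linspace(np.log(1e-6), 0.0, 2001)   # r from 1e-6 to 1
fx = np.ones_like(xgrid)
print(int_sphere(fx, xgrid), 4.0 * np.pi / 3.0)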
| 12,194
|
def run(
func,
args=[],
kwargs={},
service="lambda",
capture_response=False,
remote_aws_lambda_function_name=None,
remote_aws_region=None,
**task_kwargs
):
"""
Instead of decorating a function with @task, you can just run it directly.
If you were going to do func(*args, **kwargs), then you will call this:
import zappa.asynchronous.run
zappa.asynchronous.run(func, args, kwargs)
If you want to use SNS, then do:
zappa.asynchronous.run(func, args, kwargs, service='sns')
and other arguments are similar to @task
"""
lambda_function_name = remote_aws_lambda_function_name or os.environ.get(
"AWS_LAMBDA_FUNCTION_NAME"
)
aws_region = remote_aws_region or os.environ.get("AWS_REGION")
task_path = get_func_task_path(func)
return ASYNC_CLASSES[service](
lambda_function_name=lambda_function_name,
aws_region=aws_region,
capture_response=capture_response,
**task_kwargs
).send(task_path, args, kwargs)
| 12,195
|
def run_webserver(destination_root_dir):
""" Run a local """
destination_root_dir = destination_root_dir
if destination_root_dir.startswith('/'):
destination_root_dir = destination_root_dir[1:]
if destination_root_dir.endswith('/'):
destination_root_dir = destination_root_dir[:-1]
app = Flask(__name__)
@app.route('/')
@app.route('/<path:filename>')
def serve_static_html(filename='index.html'):
""" Serve static HTML files
:type filename: str
:param filename: Path to the static HTML file
"""
if filename.startswith(destination_root_dir):
filename = filename.replace('{}/'.format(destination_root_dir), '')
return redirect('/{}'.format(filename))
response = make_response(
send_from_directory('/{}'.format(destination_root_dir), filename))
response.cache_control.no_cache = True
return response
app.run()
| 12,196
|
def sym_to_elm(symbols: Union[str, List, np.ndarray],
order: Union[np.ndarray, List[str]]):
"""Transform symbols to elements."""
if not isinstance(order, list):
order = order.tolist()
if not isinstance(symbols, (str, list)):
symbols = symbols.tolist()
if isinstance(symbols, str):
if symbols in order:
return order.index(symbols)
else:
return -1
else:
return np.array([sym_to_elm(s, order) for s in symbols])
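# Example with a hypothetical element ordering (assumes numpy is imported as
# np, as sym_to_elm above requires): unknown symbols map to -1.
order = ['H', 'C', 'N', 'O']
print(sym_to_elm('O', order))            # 3
print(sym_to_elm(['C', 'Xe'], order))    # [ 1 -1]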
| 12,197
|
def set_custom_field(
custom_field_id: str = None,
person_id: str = None,
owner_id: str = None,
term_id: str = None,
value: str = None,
option_index: str = None):
"""
Sets a custom field value for a particular person, organization, or donation.
:param custom_field_id: The numeric ID of the custom field you're interested in.
:param person_id: The numeric ID of the person you're interested in.
:param owner_id: The numeric ID of object you're interested in, if they are not a person.
:param term_id: The numeric ID of the term you're interested in.
:param value: The value for this field.
:param option_index: For RADIOs and SELECTs, you can pass in the index of the selected option.
:returns: String containing xml or an lxml element.
"""
return get_anonymous(
'setCustomField',
custom_field_id=custom_field_id,
person_id=person_id,
owner_id=owner_id,
term_id=term_id,
value=value,
option_index=option_index)
| 12,198
|
def ixn_is_increases_activity(ixn: ChemGeneIxn):
"""Checks if the interaction results in the decrease of the activity of the protein of the gene
:param pyctd.manager.models.ChemGeneIxn ixn: A chemical-gene interaction
:rtype: bool
"""
return _ixn_is_changes_protein(ixn, 'increases^activity')
| 12,199
|