content (string, lengths 22 – 815k) | id (int64, 0 – 4.91M)
|---|---|
def send(instance, client, formatter, format_config, messages, gap, batch):
"""sends data and prints the time it took to send it
:param instance: transport class instance
:param client: client to use to send send
:param string format: formatter to use
:param dict format_config: formatter configuration to use
:param int messages: number of messages to send
:param float gap: gap in seconds between 2 messages
:param int batch: number of messages per batch
"""
message_count = 0
lgr.debug('configuring formatter...')
# get formatter instance
# TODO: (IMPRV) move formatter instance definition to function inside
# TODO: (IMPRV) the current function and add _
if hasattr(forms, formatter):
formatter_instance = getattr(forms, formatter)(format_config)
else:
lgr.error('could not find formatter: {0}. please make sure the '
'formatter you\'re calling exists.'.format(formatter))
raise FeedrError('missing formatter')
# and get the current time
start_time = get_current_time()
lgr.debug('start time is: {0}'.format(start_time))
lgr.info('transporting data... EN GARDE!')
while True:
# generate the data from the formatter
data = [formatter_instance.generate_data() for i in xrange(batch)]
# and send the data through the relevant transport
instance.send(client, data)
message_count += batch
# check if the number of messages sent are less than the desired amount
if message_count < messages or messages == 0:
# and sleep the desired amount of time.. zzz zz zZZ zZZzzzz
sleep(gap)
else:
break
# just to get some feedback during execution
if not message_count % (1 / gap):
lgr.info('{0} data pieces written. NICE!'.format(message_count))
# then get the current time once more
end_time = get_current_time()
lgr.debug('end time is: {0}'.format(end_time))
# and the elapsed time
elapsed_time = end_time - start_time
# meH!
throughput, seconds = calculate_throughput(elapsed_time, messages)
# TODO: (FEAT) add the option to send the throughput as well to benchmark
# TODO: (FEAT) the transport process itself.
lgr.info('DONE! (after {0}h ({1} seconds) with '
'throughput: {2} pieces/sec. now you can go for coffee.)'.format(
elapsed_time, seconds, throughput))
try:
# create a pretty table to write the statistical data to
# TODO: (IMPRV) move this to generator function.
data = instance.get_data()
lgr.info('statistical data:\n {0}'.format(data))
except AttributeError:
lgr.debug(
'statistical data not implemented for chosen transport.')
# TODO: (IMPRV) why is this here?
return
| 5,338,700
|
def str2num(s):
"""Convert string to int or float number.
Parameters
----------
s : string
String representing a number.
Returns
-------
Number (int or float)
Raises
------
TypeError
If `s` is not a string.
ValueError
If the string does not represent a (float or int) number.
"""
try:
x = float(s)
if x.is_integer():
return int(x)
else:
return x
except ValueError:
raise ValueError("'s' does not represent a number (int or float)")
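
# A brief usage sketch for str2num (illustrative values only):
assert str2num("3") == 3 and isinstance(str2num("3"), int)
assert str2num("3.0") == 3      # whole-valued floats collapse to int
assert str2num("2.5") == 2.5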
| 5,338,701
|
def forward_signal(signum, proc):
"""Forward signal to child."""
    # poll() returns None while the child process is still running
    if proc.poll() is None:
proc.send_signal(signum)
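
# A minimal usage sketch for forward_signal (the child command and the signal
# set are hypothetical): register it as a handler so signals received by the
# parent process are passed on to the child.
import signal
import subprocess

proc = subprocess.Popen(["sleep", "60"])
for sig in (signal.SIGINT, signal.SIGTERM):
    signal.signal(sig, lambda signum, frame: forward_signal(signum, proc))
proc.wait()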
| 5,338,702
|
def get_html_templates_path():
"""
Return path to ABlog templates folder.
"""
pkgdir = os.path.abspath(os.path.dirname(__file__))
return os.path.join(pkgdir, "templates")
| 5,338,703
|
def new_log(infile_history=None, extra_notes=None, git_repo=None):
"""Create a new command line log/history.
Kwargs:
infile_history (dict): keys are input file names and values are the logs for those files
extra_notes (list): List containing strings of extra information (output is one list item per line)
git_repo (str): Location of git repository associated with script executed at command line
Returns:
str. Command line log
"""
log = ''
current_entry = get_current_entry(git_repo=git_repo)
log += current_entry + '\n'
if extra_notes:
log += 'Extra notes: \n'
for line in extra_notes:
log += line + '\n'
if infile_history:
assert type(infile_history) == dict
nfiles = len(list(infile_history.keys()))
for fname, history in infile_history.items():
if nfiles > 1:
log += 'History of %s: \n %s \n' %(fname, history)
else:
log += '%s \n' %(history)
return log
| 5,338,704
|
def pre_delete_category(sender, instance, **kwargs):
"""
    Receiver that is called before a category is deleted. Makes the associated
    Air Quality project inactive.
"""
try:
category = AirQualityCategory.objects.get(category=instance)
category.project.status = 'inactive'
category.project.save()
user = category.project.creator
category.delete()
action = 'deleted'
email_user(
'emails/category_not_active.txt',
'Category %s %s' % (instance.name, action),
user,
action,
instance.project.name,
instance.name
)
except AirQualityCategory.DoesNotExist:
pass
| 5,338,705
|
def validate_export_node(node_label):
"""
Raise a ``UserError`` if there is any reason that nodes with the type
    specified by ``node_label`` should not be exported.
Args:
node_label (str): string of the node type
Return:
None
Raises:
UserError: if the node cannot be exported
"""
if node_label not in dictionary.schema:
raise UserError("dictionary does not have node with type {}".format(node_label))
category = get_node_category(node_label)
if category in UNSUPPORTED_EXPORT_NODE_CATEGORIES:
raise UserError("cannot export node with category `internal`")
| 5,338,706
|
def generate_permutation(perm, n):
"""
Let's generate all possible permutations
Now it is a SMART generator which can cut dead ends
"""
if len(perm) == n:
# Congrats! U have found a legit solution. Let me make it in [[x1, y1], [x2, y2], [x3, y3] ... [xn, yn]] format
# queens_pos = []
# for x in range(len(perm)):
# queens_pos.append([x, perm[x]])
# print("Final Queens positions:", queens_pos)
global COUNTER
COUNTER += 1
return
# When the for loop ends its iterations -> current instance of `generate_permutation` func also ends or RETURNS
# That helps us to navigate among recursive tree
for k in range(n):
if k not in perm:
perm.append(k)
if can_be_extended_to_a_solution(perm):
generate_permutation(perm, n)
perm.pop()
| 5,338,707
|
def _get_new_args_dict(func, args, kwargs):
"""Build one dict from args, kwargs and function default args
The function signature is used to build one joint dict from args and kwargs and
additional from the default arguments found in the function signature. The order
of the args in this dict is the order of the args in the function signature and
hence the list of args can be used in cases where we can only supply *args, but
we have to work with a mixture of args, kwargs and default args as in
xarray.apply_ufunc in the xarray wrapper.
"""
new_args_dict = OrderedDict()
for i, (arg, parameter) in enumerate(inspect.signature(func).parameters.items()):
if i < len(args):
new_args_dict[arg] = args[i]
elif arg in kwargs.keys():
new_args_dict[arg] = kwargs[arg]
else:
new_args_dict[arg] = parameter.default
return new_args_dict
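
# A minimal usage sketch (the `resample` stub and its arguments are made up;
# the helper above assumes `inspect` and `OrderedDict` are already imported):
import inspect
from collections import OrderedDict

def resample(da, freq, method="mean", skipna=True):
    pass

merged = _get_new_args_dict(resample, args=([1, 2, 3], "1D"), kwargs={"skipna": False})
# Positional args fill the first parameters, kwargs match by name, and the rest
# fall back to the signature defaults, preserving the signature order:
# OrderedDict([('da', [1, 2, 3]), ('freq', '1D'), ('method', 'mean'), ('skipna', False)])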
| 5,338,708
|
def hellinger_funct(x,P,Q):
"""
    P, Q should be scipy.stats gaussian_kde objects
"""
return np.sqrt(P(x) * Q(x))
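
# A hedged usage sketch: with two scipy.stats.gaussian_kde densities, integrating
# the integrand above gives the Bhattacharyya coefficient BC, and the Hellinger
# distance is sqrt(1 - BC). The sample data below are made up.
import numpy as np
from scipy import stats
from scipy.integrate import quad

rng = np.random.default_rng(0)
P = stats.gaussian_kde(rng.normal(0.0, 1.0, 500))
Q = stats.gaussian_kde(rng.normal(0.5, 1.0, 500))
bc, _ = quad(lambda x: hellinger_funct(x, P, Q).item(), -10.0, 10.0)
hellinger_distance = np.sqrt(max(0.0, 1.0 - bc))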
| 5,338,709
|
def VM_BUILD(*names):
"""build a virtual machine
This task builds a virtual machine image from scratch. This
usually takes some time and may require the original ISO
image and a product key of the operating system.
Run this task without arguments to build images for all
registered VMs.
"""
if names:
vms = [VM.find(name) for name in names]
else:
vms = [vm for vm in VM.list() if vm.missing()]
for vm in vms:
if not vm.missing():
warn("VM is already built: {}", vm.name)
continue
vm.build()
| 5,338,710
|
def main() -> None:
"""Run Program."""
root = tk.Tk()
GUI(root)
print("To begin file upload:\n")
print("1. Load your recipient's public key")
print("2. Select a file or a directory for upload (not both)")
print("3. Write SFTP username, server and port to SFTP Credentials")
print("4. Load your SFTP identity key")
print("5. Click [Encrypt and Upload File(s)] to upload selected file or directory")
print("6. Password for SFTP authentication will be prompted\n")
root.mainloop()
| 5,338,711
|
def print_annotation(name, value, xml):
"""Writes some named bits of information about the current test run."""
if xml:
print escape(name) + " {{{"
print escape(value)
print "}}}"
else:
print name + " {{{"
print value
print "}}}"
| 5,338,712
|
def clean_collection(collection):
"""Iterates through the images in the Collection and remove those that don't exist
on disk anymore
"""
images = collection.images()
number_purged = 0
for image in images:
if not os.path.isfile(image.get_filepath()):
logger.info('Removing Image %s from collection %s', image.get_filepath(), collection)
image.delete()
number_purged = number_purged + 1
return number_purged
| 5,338,713
|
def human_date(date):
""" Return a string containing a nice human readable date/time.
Miss out the year if it's this year
"""
today = datetime.datetime.today()
if today.year == date.year:
return date.strftime("%b %d, %I:%M%P")
return date.strftime("%Y %b %d, %I:%M%P")
| 5,338,714
|
def main():
    """
    List of words where:
    "a" is the string at index 0 and
    "e" is the string at index 4
    """
letters = ["a", "b", "c", "d", "e"]
print(letters)
assert letters[0] == "a"
assert letters[4] == letters[-1] == "e"
"""
Criando uma lista dinamicamente com a função range
"""
lista = list(range(10))
    # Showing the available methods of the list object
print(dir(lista))
assert lista == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
Checando se o número 7 está presente na lista
"""
assert 7 in lista
"""
Iterando pelos valores da lista
"""
for n in lista:
print(n)
| 5,338,715
|
def compute_thickness(wmP, kdTreegm, kdTreewm):
"""
This function..
:param wmP:
:param kdTreegm:
:param kdTreewm:
:return:
"""
# Find the closest point to the gray matter surface point
gmIndex = kdTreegm.FindClosestPoint(wmP)
gmP = kdTreegm.GetDataSet().GetPoint(gmIndex)
# compute the distance
# distance from wm point to gm point
dst1 = distance.euclidean(wmP, gmP)
wmIndex = kdTreewm.FindClosestPoint(gmP)
wmP2 = kdTreegm.GetDataSet().GetPoint(wmIndex)
    # distance from gm to closest wm point
dst2 = distance.euclidean(gmP, wmP2)
# average the two distances
thickness = (dst1 + dst2) / float(2)
return thickness
| 5,338,716
|
def sync(args):
"""Synchronize your local repository with the manifest and the real world.
This includes:
- ensures that all projects are cloned
- ensures that they have the correct remotes set up
- fetches from the remotes
- checks out the correct tracking branches
- if the local branch is not dirty and it is a fast-forward update, merges
the remote branch's changes in
Options:
-f - if you have dirty repositories, will blow away changes rather than
failing. This does *not* reset your branch if you have local
*committed* changes.
Process exit code will be 0 if all projects updated correctly.
"""
force = '-f' in args
man = load_manifest()
for (name, project) in man.projects.items():
if not project.is_cloned():
project.clone()
ensure_remotes([])
fetch([])
checkout_branches(args)
retcode = 0
for project in man.projects.values():
if project.is_uptodate():
continue
repo = project.git_repo
if repo.is_workdir_dirty() or repo.is_index_dirty():
if force:
print("Blowing away changes in %s" % project.name, file=sys.stderr)
repo.check_command(['reset', '--hard', 'HEAD'])
else:
print("Not syncing project %s - it is dirty." % project.name, file=sys.stderr)
retcode = 1
continue
(left, right) = project.tracking_status
if left > 0:
print(("Not syncing project %s - you have %d unpushed changes." %
(project.name, left)), file=sys.stderr)
retcode = 1
continue
elif right > 0:
repo.check_command(["merge", project.tracker.remote_ref])
project.set_uptodate()
else:
print("Project %s needs no update" % project.name, file=sys.stderr)
return retcode
| 5,338,717
|
def cytoband_interval():
"""Create test fixture for Cytoband Interval."""
return CytobandInterval(
start="q13.32", end="q13.32"
)
| 5,338,718
|
def _get_proxy_class(request):
""" Return a class that is a subclass of the requests class. """
cls = request.__class__
if cls not in _proxy_classes:
class RequestProxy(cls):
def __init__(self, request):
self.__dict__ = request.__dict__
self.__request = request
def __eq__(self, other):
return self.__request == other
# since we're overriding __eq__ we must override __hash__:
def __hash__(self):
return hash(self.__request)
def finish(self):
return self.__request.finish()
_proxy_classes[cls] = RequestProxy
return _proxy_classes[cls]
| 5,338,719
|
def test_yaml_representation():
"""
Testing the yaml_representation method
"""
name = 'Donut'
    description = {'This is an old fashioned donut'}
properties = ['eatable', 'pickable']
item = Item(name, description, properties)
assert isinstance(item.yaml_representation(), dict)
| 5,338,720
|
def rebuilt_emoji_dictionaries(filename):
"""
    Rebuilds emoji dictionaries, given a CSV file with labeled emojis.
"""
emoji2unicode_name, emoji2sentiment = {}, {}
with open(filename) as csvin:
for emoji in csv.DictReader(csvin):
for key, value in emoji.items():
if key in ('Occurrences', 'Positive', 'Neutral', 'Negative'):
emoji[key] = int(value)
elif key in ('Position',):
emoji[key] = float(value)
emoji['Sentiment'] = (emoji['Positive'] - emoji['Negative']) / \
max(100, (emoji['Positive'] +
emoji['Neutral'] +
emoji['Negative']))
emoji2unicode_name[emoji['Emoji']] = emoji['Unicode name']
emoji2sentiment[emoji['Emoji']] = emoji['Sentiment']
return emoji2unicode_name, emoji2sentiment
| 5,338,721
|
def send_message(receiver, message):
"""
Send message to receivers using the Twilio account.
    :param receiver: Phone number of the receiver
    :param message: Message to be sent
    :return: The created message object
"""
message = client.messages.create(
from_="whatsapp:+14155238886", body=message, to=f"whatsapp:{receiver}"
)
return message
| 5,338,722
|
def stem_list(tokens: list) -> list:
"""Stems all tokens in a given list
Arguments:
- tokens: List of tokens
Returns:
List of stemmed tokens
"""
stem = PorterStemmer().stem
return [stem(t) for t in tokens]
| 5,338,723
|
def unique(s):
"""Return a list of the elements in s, but without duplicates.
For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3],
unique("abcabc") some permutation of ["a", "b", "c"], and
unique(([1, 2], [2, 3], [1, 2])) some permutation of
[[2, 3], [1, 2]].
For best speed, all sequence elements should be hashable. Then
unique() will usually work in linear time.
If not possible, the sequence elements should enjoy a total
ordering, and if list(s).sort() doesn't raise TypeError it's
assumed that they do enjoy a total ordering. Then unique() will
usually work in O(N*log2(N)) time.
If that's not possible either, the sequence elements must support
equality-testing. Then unique() will usually work in quadratic
time.
"""
n = len(s)
if n == 0:
return []
# Try using a dict first, as that's the fastest and will usually
# work. If it doesn't work, it will usually fail quickly, so it
# usually doesn't cost much to *try* it. It requires that all the
# sequence elements be hashable, and support equality comparison.
u = {}
try:
for x in s:
u[x] = 1
except TypeError:
del u # move on to the next method
else:
return u.keys()
# We can't hash all the elements. Second fastest is to sort,
# which brings the equal elements together; then duplicates are
# easy to weed out in a single pass.
# NOTE: Python's list.sort() was designed to be efficient in the
# presence of many duplicate elements. This isn't true of all
# sort functions in all languages or libraries, so this approach
# is more effective in Python than it may be elsewhere.
try:
t = list(s)
t.sort()
except TypeError:
del t # move on to the next method
else:
assert n > 0
last = t[0]
lasti = i = 1
while i < n:
if t[i] != last:
t[lasti] = last = t[i]
lasti += 1
i += 1
return t[:lasti]
# Brute force is all that's left.
u = []
for x in s:
if x not in u:
u.append(x)
return u
| 5,338,724
|
def execute(command: str, cwd: str = None, env: dict = None) -> str:
"""Executes a command and returns the stdout from it"""
result = subprocess.run(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd,
env=env,
shell=True,
check=False,
)
if result.returncode != 0:
stderr = result.stderr.decode("utf-8").rstrip()
raise RuntimeError(stderr)
return result.stdout.decode("utf-8").rstrip()
| 5,338,725
|
def get_ground_truth_assignments_for_zacharys_karate_club() -> jnp.ndarray:
"""Returns ground truth assignments for Zachary's karate club."""
return jnp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1,
0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
| 5,338,726
|
def actions_loop(once, scope, rses, sleep_time, dark_min_age, dark_threshold_percent,
miss_threshold_percent, force_proceed, scanner_files_path):
"""
Main loop to apply the CC actions
"""
hostname = socket.gethostname()
pid = os.getpid()
current_thread = threading.current_thread()
executable = 'storage-consistency-actions'
heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
# Make an initial heartbeat
# so that all storage-consistency-actions have the correct worker number on the next try
prefix = 'storage-consistency-actions[%i/%i] ' %\
(heartbeat['assign_thread'], heartbeat['nr_threads'])
logger = formatted_logger(logging.log, prefix + '%s')
logger(logging.INFO, 'hostname: %s pid: %d current_thread: %s' %
(hostname, pid, current_thread))
graceful_stop.wait(1)
while not graceful_stop.is_set():
try:
heartbeat = live(executable=executable, hostname=hostname, pid=pid,
thread=current_thread)
logger(logging.INFO, 'heartbeat? %s' % heartbeat)
prefix = 'storage-consistency-actions[%i/%i] ' %\
(heartbeat['assign_thread'], heartbeat['nr_threads'])
logger(logging.INFO, 'prefix: %s' % prefix)
start = time.time()
logger(logging.DEBUG, 'Start time: %f' % start)
deckard_loop(scope, rses, dark_min_age, dark_threshold_percent, miss_threshold_percent,
force_proceed, scanner_files_path)
daemon_sleep(start_time=start, sleep_time=sleep_time, graceful_stop=graceful_stop,
logger=logger)
except Exception as e:
traceback.print_exc()
logger(logging.WARNING, '\n Something went wrong here... %s' % e)
logger(logging.WARNING, '\n Something went wrong here... %s ' % (e.__class__.__name__))
if once:
break
die(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
| 5,338,727
|
def read_frames_from_dir(_dir, _type='png'):
""" Read frames from the dir, and return list """
paths = os.listdir(_dir)
valid_paths = []
for path in paths:
if _type in path: valid_paths.append(path)
valid_paths = ns.natsorted(valid_paths)
frames = []
for path in valid_paths:
frames.append( np.array(Image.open(ospj(_dir,path))) )
return np.array(frames)
| 5,338,728
|
def _append_binary_content(buffer, b):
"""Serializes binary value (without bounds) to the buffer."""
# the content should be padded to full 4-byte word
buffer.write(b.value)
length = len(b.value)
length_rounded = _round_to_4(length)
buffer.write((length_rounded - length) * "\x00")
| 5,338,729
|
def sort_places_versus_distance_from_coordinates(
list_places: List[Place], gps_coord: Tuple[float, float]
) -> List[Place]:
"""Oder list of places according to the distance to a reference coordinates.
Note: this helper is compensating the bad results of the API. Results in the API
are generally sorted, but lot of cases identified where the order is inconsistent
(example: Montréal)
Args:
list_places: List of Place instances to be ordered
gps_coord: Tuple with latitude and longitude in degrees for the reference point
Returns:
List of Place instances ordered by distance to the reference point (nearest
first)
"""
sorted_places = sorted(
list_places,
key=lambda x: haversine((float(x.latitude), float(x.longitude)), gps_coord),
)
return sorted_places
| 5,338,730
|
def solve_part2_coordinate_subdivision(boxes: List[Tuple[str, Box]]) -> int:
""" An alternative method to solve part 2 which uses coordinate subdivisions to make a new grid.
On the puzzle input, this is roughly a 800x800x800 grid, which actually takes some time to compute through (~3 min)
It runs all the examples however, in under 3 seconds.
"""
# The boxes are in [a, b] form. Replace them with coordinate divisions that are [a, b)
x_divisions = sorted({b.x0 for _, b in boxes} | {b.x1 + 1 for _, b in boxes})
y_divisions = sorted({b.y0 for _, b in boxes} | {b.y1 + 1 for _, b in boxes})
z_divisions = sorted({b.z0 for _, b in boxes} | {b.z1 + 1 for _, b in boxes})
# Map of lower corner coordinates to index into the divisions
x_index = {x: i for i, x in enumerate(x_divisions)}
y_index = {y: i for i, y in enumerate(y_divisions)}
z_index = {z: i for i, z in enumerate(z_divisions)}
on = set()
for step, box in boxes:
points = {
(x, y, z)
for x in range(x_index[box.x0], x_index[box.x1 + 1])
for y in range(y_index[box.y0], y_index[box.y1 + 1])
for z in range(z_index[box.z0], z_index[box.z1 + 1])
}
if step == 'on':
on |= points
else:
on -= points
# Calculate the actual area held by all boxes
def area(pos: Tuple[int, int, int]) -> int:
x, y, z = pos
return ((x_divisions[x + 1] - x_divisions[x]) *
(y_divisions[y + 1] - y_divisions[y]) *
(z_divisions[z + 1] - z_divisions[z]))
return sum(map(area, on))
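
# The same coordinate-compression idea, sketched in one dimension as a
# standalone illustration (not the puzzle code): count how many integer
# positions end up "on" after applying inclusive [a, b] on/off steps.
from typing import List, Tuple

def on_length_1d(steps: List[Tuple[str, int, int]]) -> int:
    # The steps are in [a, b] form; the divisions are half-open [a, b + 1)
    xs = sorted({a for _, a, _ in steps} | {b + 1 for _, _, b in steps})
    index = {x: i for i, x in enumerate(xs)}
    on = set()
    for state, a, b in steps:
        cells = set(range(index[a], index[b + 1]))
        on = on | cells if state == 'on' else on - cells
    # Each compressed cell i covers xs[i + 1] - xs[i] original positions
    return sum(xs[i + 1] - xs[i] for i in on)

assert on_length_1d([('on', 0, 9), ('off', 3, 5), ('on', 5, 5)]) == 8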
| 5,338,731
|
def decodeSignal(y, t, fclk, nbits) :
"""
This file reads in digitized voltages outputted from the
QCM Antenna Master Controller Board and outputs time,logic code number
pairs.
The encoding scheme is as follows:
HEADER: 3 clock cycles HIGH, followed by 3 clock cycles LOW
SIGNAL: 1 clock cycle LOW, followed by 1 clock cycle HIGH or LOW depending on logic state of bit, followed by
another clock cycle LOW
CLOSER: 1 clock cycle LOW, followed by 2 clock cycles HIGH.
Ex. USAGE:
...
fclk=4.E6
nbits=7
t,y = decodeSig(y,t, fclk, nbits)
y = array of double-precision numbers giving voltage signal with encoded numbers
t = array of double-precision numbers giving timebase of signal
fclk = Clock speed of output, which is master controller board's clock speed divided by 16, since least significant bit of counter is only toggled on clock positive edges [Hz]
nbits = number of bits encoded in signal.
Begun on Tuesday, 17 January 2012 (my 28th Birthday!), Ted Golfinopoulos
"""
tauc=1./fclk #Period of master controller board clock, [s]
taus=t[1]-t[0] #Sampling time
fs=1.E0/taus #Sampling frequency.
onThresh=1.0E0 #Threshold voltage above which the signal is considered ON.
#Duration of an encoded logic transmission, including header (6 clock cycles),
#bit encoding, and closer (3 clock cycles) [s]
dt = (9.E0+nbits*3.E0)*tauc
tbin = 3.E0*tauc
#Find indice and times times where board output is high
onSamplesInHeader=int(3.E0*tauc/taus) #Number of digitizer samples expected to be high in header.
onSamplesInCloser=int(2.E0*tauc/taus) #Number of digitizer samples expected to be low in closer.
codeLength=int(dt/taus) #Number of samples expected in whole code.
###Nomenclature:
#header = characteristic pattern at the start of an encoded signal.
# Here, it is 3 clock counts HIGH, followed by 3 clock counts LOW
#closer = characteristic pattern at the end of an encoded signal.
# Here, it is 1 clock count LOW, followed by 2 clock counts HIGH
#Find indices at which headers and closers start.
#The algorithm that follows looks for stretches of points where the signal is HIGH for a given
#duration - the header is high for 3 counts, the closer for 2, and encoded signal bits for 1.
#There may be some spread in the actual number of points registering as HIGH; as such, the algorithm
#returns the index of the first point for which the subsequent sequence of points is HIGH for the expected
#time period, then advances the index pointer by (a) if header, the nominal number of time points in the
#encoded stream, less the closer, or (b) if closer, the nominal number of time points in the closer.
#This avoids double-counting.
#The resulting indices delimit the boundaries of encoded numbers.
headInds=[]
closeInds=[]
bufferLength=0;
i=0 # Initialize index pointer
while i < len(y) :
if(y[i]>onThresh) : #First, check if y[i] is on - save computation of comparing series.
if(all(y[(i+bufferLength):(i+onSamplesInHeader-bufferLength)]>onThresh)) : #Header found - store and jump to end of header ON code.
headInds.append(i)
i=i+codeLength-onSamplesInCloser
continue
#Don't start marking closers until a header has been found - this can be important if MCB starts putting outputs before the outputs signal digitization starts.
elif(all(y[(i+bufferLength):(i+onSamplesInCloser-bufferLength)]>onThresh) and len(headInds)>0) :
closeInds.append(i) #Start index of closer found - store. Code is between these two indices.
i=i+onSamplesInCloser
continue
i=i+1 #Increment index
print("Finished finding headers and closers.")
# Takes an array containing a list of bits which are on in a binary number, in any order, with least-significant value corresponding to 0, and returns the decimal number corresponding to this number.
def onBits2num(bitInds) :
if len(bitInds)==0 : return 0
else : return sum([ pow(2,aa) for aa in bitInds ])
#Preallocate arrays.
codeVals=zeros(len(closeInds)) #Array to store encoded numbers
timeVals=zeros(len(closeInds)) #Array to store timepoints at which encoded numbers were sampled
#Loop through all indices containing the start and end times for encoded bit patterns
for i in range( 0, len(closeInds) ) :
#Within each encoded segment, divide up the segment into bins of duration, tbin.
#The state of the bits are contained in each bin. Find and number the bins for which the
#board output was high.
try :
tOnInBin= t[headInds[i]+find( y[headInds[i]:closeInds[i]]>onThresh )] - t[headInds[i]]
codeInds=find([tOnInBin[jj]>2.E0*tbin and tOnInBin[jj]<(2.E0+nbits)*tbin for jj in range(0,len(tOnInBin))])
except :
temp=headInds[i:i+5]
print(i)
print('headInds')
print(len(headInds))
print(temp)
temp=closeInds[i:i+5]
print('closeInds')
print(len(closeInds))
print(temp)
temp=find( y[headInds[i]:closeInds[i]]>onThresh )
print('length of find( y[headInds[i]:closeInds[i]]>onThresh )')
print(len(temp))
print('First value')
print(temp[0])
raise #Re-raise the exception.
#Don't try to index into tOnInBin with array unless array is not empty. If array is empty, the logic code is 0, and the signal is low for the entire code segment.
if(len(codeInds)>0) :
tOnInBin= tOnInBin[ codeInds ]
tOnInBin=tOnInBin-2.E0*tbin #Subtract time equivalent to first two time bins from signal - these are for the header.
else : tOnInBin = []
onBins = unique([ int(aa) for aa in tOnInBin/tbin ])
#The first two bins (i.e t>0 and t < 2*tbin) comprise the header.
#Remove these bins from consideration. The remaining internal bins comprise the logic signal,
#ordered most-to-least significant bit. Turn these numbers into the 2's place to simplify conversion
#into a decimal number.
onBits = (nbits - 1) - onBins
#Convert array showing which places are 1 in the binary number into a decimal number. Store.
codeVals[i] = onBits2num(onBits)
timeVals[i] = t[headInds[i]]-0.5*taus #Store timepoint. On average, time point is halfway between data points around the edge.
print("Finished calculating codes.")
#Return vectors of time points and corresponding code values.
return [timeVals, codeVals]
| 5,338,732
|
def sqrt_price_to_tick(sqrt_price):
"""
TODO: finish documentation
See formula 6.8 in the white paper.
We use the change of base formula to compute the log as numpy doesn't have
a log function with an arbitrary base.
:param sqrt_price:
:return:
"""
base = np.sqrt(1.0001)
return int(np.floor(np.log(sqrt_price) / np.log(base)))
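
# A quick sanity check (assumes numpy imported as np, as in the function
# above): by the change of base, log_{sqrt(1.0001)}(sqrt(price)) equals
# log(price) / log(1.0001).
import numpy as np

price = 2.0
tick = sqrt_price_to_tick(np.sqrt(price))
assert tick == int(np.floor(np.log(price) / np.log(1.0001)))  # 6931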
| 5,338,733
|
def recurring_fraction_repeat_length(nominator, denominator):
"""Takes a fraction like 1/6, and returns 1 as 1/6 = 0.16666666.
Takes 1/7 and returns 6 as 1/7 = 0.148257148257."""
a = highest_common_factor([nominator, denominator])
nominator = int(nominator/a)
denominator = int(denominator/a)
length = 1
while True:
pass
| 5,338,734
|
def getStateName(responseText: str) -> str:
"""Parse state name in title field.
Args:
responseText: response.text object from requests.
Returns: State string name.
"""
soup = Soup(responseText, "html.parser")
logging.debug("Ingesting soup: %s", soup.prettify())
if soup.title:
return soup.title.get_text()
else:
return "***No state found."
| 5,338,735
|
def find_dead_blocks(func, cfg):
"""Find all immediate dead blocks"""
return [block for block in cfg if not cfg.predecessors(block)
if block != func.startblock]
| 5,338,736
|
def load_tsv_to_pickle(filename):
""" Open tsv to dict """
with open(filename, "r", encoding="utf-8") as tsv:
equivalencies = {}
for row in tsv:
if not row:
break
try:
page_title_norm, rd_title_norm = row.strip().lower().replace('_', ' ').split("\t")
except ValueError:
print("Error", row, type(row))
continue
if page_title_norm[-16:] == "(disambiguation)" or rd_title_norm[-16:] == "(disambiguation)":
continue
# print((page_title_norm, rd_title_norm))
equivalencies.setdefault(rd_title_norm, set())
equivalencies[rd_title_norm].add(rd_title_norm)
equivalencies[rd_title_norm].add(page_title_norm)
equivalencies.setdefault(page_title_norm, set())
equivalencies[page_title_norm].add(page_title_norm)
equivalencies[page_title_norm].add(rd_title_norm)
with open('equivalencies.pkl', 'wb') as equiv:
pickle.dump(equivalencies, equiv)
print("equivalencies", len(equivalencies))
| 5,338,737
|
def _mcse_sd(ary):
"""Compute the Markov Chain sd error."""
_numba_flag = Numba.numba_flag
ary = np.asarray(ary)
if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
return np.nan
ess = _ess_sd(ary)
if _numba_flag:
sd = np.float(_sqrt(svar(np.ravel(ary), ddof=1), np.zeros(1)))
else:
sd = np.std(ary, ddof=1)
fac_mcse_sd = np.sqrt(np.exp(1) * (1 - 1 / ess) ** (ess - 1) - 1)
mcse_sd_value = sd * fac_mcse_sd
return mcse_sd_value
| 5,338,738
|
def dfs_predecessors(G, source=None):
"""Return dictionary of predecessors in depth-first-search from source."""
return dict((t,s) for s,t in dfs_edges(G,source=source))
| 5,338,739
|
def convert_batchnorm(net, node, model, builder):
"""Convert a transpose layer from mxnet to coreml.
Parameters
----------
network: net
A mxnet network object.
layer: node
Node to convert.
model: model
        A model for MXNet
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
param = node['attr']
inputs = node['inputs']
outputs = node['outputs']
args = model.arg_params
aux = model.aux_params
gamma = args[_get_node_name(net, inputs[1][0])].asnumpy()
beta = args[_get_node_name(net, inputs[2][0])].asnumpy()
mean = aux[_get_node_name(net, inputs[3][0])].asnumpy()
variance = aux[_get_node_name(net, inputs[4][0])].asnumpy()
nb_channels = gamma.shape[0]
builder.add_batchnorm(
name = name,
channels = nb_channels,
gamma = gamma,
beta = beta,
mean = mean,
variance = variance,
input_name = input_name,
output_name = output_name)
| 5,338,740
|
def main():
"""
    Main function to test the data generator module.
"""
data_set, labels = gen_data_set(n_normals=1000, n_moons=100, n_scurves=100, n_circles=100)
plot_figure(data_set, labels, 'name.eps')
save_data(data_set, labels, './Synthetic_data.txt')
| 5,338,741
|
def merge_all_sections(prnt_sctns, child_sctns, merge_within_sections=False):
""" Merge the doc-sections of the parent's and child's attribute into a single docstring.
Parameters
----------
prnt_sctns: OrderedDict[str, Union[None,str]]
child_sctns: OrderedDict[str, Union[None,str]]
Returns
-------
str
Output docstring of the merged docstrings."""
doc = []
prnt_only_raises = prnt_sctns["Raises"] and not (
prnt_sctns["Returns"] or prnt_sctns["Yields"]
)
if prnt_only_raises and (child_sctns["Returns"] or child_sctns["Yields"]):
prnt_sctns["Raises"] = None
for key in prnt_sctns:
sect = merge_section(
key,
prnt_sctns[key],
child_sctns[key],
merge_within_sections=merge_within_sections
)
if sect is not None:
doc.append(sect)
return "\n\n".join(doc) if doc else None
| 5,338,742
|
def input_fn(is_training, data_dir, batch_size, num_epochs=1):
"""Input_fn using the tf.data input pipeline for CIFAR-10 dataset.
Args:
is_training: A boolean denoting whether the input is for training.
data_dir: The directory containing the input data.
batch_size: The number of samples per batch.
num_epochs: The number of epochs to repeat the dataset.
Returns:
A tuple of images and labels.
"""
dataset = tf.data.Dataset.from_tensor_slices(get_filenames(is_training, data_dir))
dataset = dataset.flat_map(tf.data.TFRecordDataset)
#if is_training:
# When choosing shuffle buffer sizes, larger sizes result in better
# randomness, while smaller sizes have better performance.
    # Since this is a relatively small dataset, we choose to shuffle the full epoch.
#dataset = dataset.shuffle(buffer_size=_NUM_IMAGES['train'])
if is_training:
# When choosing shuffle buffer sizes, larger sizes result in better
# randomness, while smaller sizes have better performance.
        # Since this is a relatively small dataset, we choose to shuffle the full epoch.
dataset = dataset.shuffle(buffer_size=500)
dataset = dataset.map(parse_record)
dataset = dataset.map(lambda image, label: preprocess_image(image, label, is_training))
dataset = dataset.prefetch(batch_size)
# We call repeat after shuffling, rather than before, to prevent separate
# epochs from blending together.
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
images, labels = iterator.get_next()
return images, labels
| 5,338,743
|
def combine_catchments_covered_by_the_same_lake_arcgis(
Routing_Product_Folder
):
"""Define final lake river routing structure
Generate the final lake river routing structure by merging subbasin
polygons that are covered by the same lake.
The input are the catchment polygons and river segements
before merging for lakes. The input files can be output of
any of following functions:
SelectLakes, Select_Routing_product_based_SubId,
Customize_Routing_Topology,RoutingNetworkTopologyUpdateToolset_riv
The result is the final catchment polygon that ready to be used for
hydrological modeling
Parameters
----------
OutputFolder : string
Folder name that stores generated extracted routing product
Path_final_riv_ply : string
Path to the catchment polygon which is the routing product
before merging lakes catchments and need to be processed before
used. It is the input for simplify the routing product based
on lake area or drianage area.
routing product and can be directly used.
Path_final_riv : string
Path to the river polyline which is the routing product
before merging lakes catchments and need to be processed before
used. It is the input for simplify the routing product based
on lake area or drianage area.
Notes
-------
This function has no return values, instead will generate following
files. They are catchment polygons and river polylines that can be
used for hydrological modeling.
os.path.join(OutputFolder,'finalcat_info.shp')
os.path.join(OutputFolder,'finalcat_info_riv.shp')
Returns:
-------
None
Examples
-------
"""
sub_colnm = "SubId"
Path_Catchment_Polygon="#"
Path_River_Polyline="#"
Path_Con_Lake_ply="#"
Path_NonCon_Lake_ply="#"
Path_obs_gauge_point="#"
Path_final_cat_ply="#"
Path_final_cat_riv="#"
    ##define input files from routing product
for file in os.listdir(Routing_Product_Folder):
if file.endswith(".shp"):
if 'catchment_without_merging_lakes' in file:
Path_Catchment_Polygon = os.path.join(Routing_Product_Folder, file)
if 'river_without_merging_lakes' in file:
Path_River_Polyline = os.path.join(Routing_Product_Folder, file)
if 'sl_connected_lake' in file:
Path_Con_Lake_ply = os.path.join(Routing_Product_Folder, file)
if 'sl_non_connected_lake' in file:
Path_NonCon_Lake_ply = os.path.join(Routing_Product_Folder, file)
if 'obs_gauges' in file:
Path_obs_gauge_point = os.path.join(Routing_Product_Folder, file)
if 'finalcat_info' in file:
Path_final_cat_ply = os.path.join(Routing_Product_Folder, file)
if 'finalcat_info_riv' in file:
Path_final_cat_riv = os.path.join(Routing_Product_Folder, file)
if Path_Catchment_Polygon == '#' or Path_River_Polyline =='#':
print("Invalid routing product folder ")
OutputFolder = Routing_Product_Folder
Path_final_rivply = Path_Catchment_Polygon
Path_final_riv = Path_River_Polyline
if not os.path.exists(OutputFolder):
os.makedirs(OutputFolder)
tempfolder = os.path.join(
tempfile.gettempdir(), "basinmaker_" + str(np.random.randint(1, 10000 + 1))
)
if not os.path.exists(tempfolder):
os.makedirs(tempfolder)
arcpy.env.workspace = tempfolder
    ### create a copy of shapefiles in temp folder
Path_Temp_final_rviply = os.path.join(OutputFolder,"temp_finalriv_ply" + str(np.random.randint(1, 10000 + 1)) + ".shp")
Path_Temp_final_rvi = os.path.join(OutputFolder,"temp_finalriv_riv" + str(np.random.randint(1, 10000 + 1)) + ".shp")
### read riv ply info
### read attribute table
finalrivply_info = pd.DataFrame.spatial.from_featureclass(Path_final_rivply)
# change attribute table for lake covered catchments,
finalrivply_info['SubId'] = finalrivply_info['SubId'].astype('int32')
finalrivply_info['DowSubId'] = finalrivply_info['DowSubId'].astype('int32')
finalrivply_info['HyLakeId'] = finalrivply_info['HyLakeId'].astype('int32')
# finalrivply_info['DrainArea'] = finalrivply_info['DrainArea'].astype('float')
mapoldnew_info = change_attribute_values_for_catchments_covered_by_same_lake(
finalrivply_info
)
mapoldnew_info = remove_possible_small_subbasins(mapoldnew_info = mapoldnew_info, area_thresthold = 10*30*30/1000/1000)
# update topology for new attribute table
mapoldnew_info = update_topology(mapoldnew_info, UpdateStreamorder=-1)
mapoldnew_info['DowSubId'] = mapoldnew_info['DowSubId'].astype('int32')
if len(os.path.basename(Path_Catchment_Polygon).split('_')) == 5:
cat_name = "finalcat_info_"+os.path.basename(Path_Catchment_Polygon).split('_')[4]
riv_name = "finalcat_info_riv_"+os.path.basename(Path_Catchment_Polygon).split('_')[4]
else:
cat_name = "finalcat_info.shp"
riv_name = "finalcat_info_riv.shp"
save_modified_attributes_to_outputs(
mapoldnew_info=mapoldnew_info,
tempfolder=tempfolder,
OutputFolder=OutputFolder,
cat_name=cat_name,
riv_name =riv_name,
Path_final_riv = Path_final_riv,
)
return
| 5,338,744
|
def search(term, aur):
"""Search the official repositories
"""
if aur:
secho()
_bar('Arch Core')
try:
remote = get_output('pacman', '-Ss', term)
except CalledProcessError:
remote = ''
def echo_result(name, repo, version, description, installed=False):
secho()
secho(name, fg='yellow', nl=False)
secho(f' ({repo}) ', fg='bright_black', nl=False)
secho(version, nl=not installed)
if installed:
secho('[i]', fg='green')
secho(description)
if remote:
lines = remote.split('\n')
i = 1
while i < len(lines):
title_line = lines[i - 1]
desc_line = lines[i]
repo, parts = title_line.split('/', maxsplit=1)
parts = parts.split(' ', maxsplit=1)
installed = parts[1].endswith('[installed]')
version = parts[1][:-11] if installed else parts[1]
echo_result(parts[0], repo, version, desc_line, installed)
i += 2
else:
secho('Nothing found', fg='red')
if aur:
secho()
_bar('Arch User Repository')
aur_search = json.loads(requests.get(
f'https://aur.archlinux.org/rpc.php?v=5&type=search&arg={term}'
).text)
if len(aur_search['results']):
installed_aur_pkgs = _aur_package_names()
for r in sorted(aur_search['results'], key=lambda x: x['Name']):
name = r['Name']
echo_result(name, 'aur', r['Version'], r['Description'],
name in installed_aur_pkgs)
else:
secho('Nothing found', fg='red')
| 5,338,745
|
def _eye(sys: brax.System, qp: brax.QP) -> List[float]:
"""Determines the camera location for a Brax system."""
d = {}
for joint in sys.config.joints:
if joint.parent not in d:
d[joint.parent] = []
po, co = joint.parent_offset, joint.child_offset
off = onp.array([po.x, po.y, po.z]) - onp.array([co.x, co.y, co.z])
d[joint.parent].append((joint.child, onp.linalg.norm(off)))
def max_dist(parent):
ret = 0
for child, dist in d.get(parent, []):
dist += max_dist(child)
if dist > ret:
ret = dist
return ret
# TODO: improve on this rough approximation of the bounding box
dist = max([max_dist(p) for p in d] + [1]) * 3
off = [dist * .5, -dist, dist * .5]
if sys.config.frozen.position.x:
off = [dist, 0, 0]
elif sys.config.frozen.position.y:
off = [0, -dist, 0]
elif sys.config.frozen.position.z:
off = [0, 0, dist * 2]
return list(qp.pos[0] + onp.array(off))
| 5,338,746
|
def num_list(to_parse):
"""
Creates list from its string representation
Arguments:
to_parse {string} -- String representation of list, can include 'None' or internal lists, represented by separation with '#'
Returns:
list[int] -- List represented in to_parse
"""
if len(to_parse) == 2:
return []
inter = to_parse[1:-1]
inter = [x.strip() for x in inter.split(',')]
result = []
for n in inter:
if n == "None":
result.append(None)
elif "#" in n:
result.append([int(x) for x in n.split("#")])
else:
result.append(int(n))
return result
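
# Example round-trips for num_list (illustrative inputs only):
assert num_list("[]") == []
assert num_list("[1, None, 4]") == [1, None, 4]
assert num_list("[1, 2#3, None]") == [1, [2, 3], None]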
| 5,338,747
|
def secret(secret):
"""Secret validation handler."""
if not secret:
raise ValidationError("Missing a secret to encrypt.")
if len(secret) > 150:
raise ValidationError(
"The secret needs to have less than 150 characters.")
| 5,338,748
|
def main():
"""Main."""
torch.manual_seed(args.seed)
# Experiment Information
print_experiment_info(args)
dataloaders, G, optimizer_g, writer = train_setup(args)
optimizer_g, lr = lr_scheduler_withoutDecay(optimizer_g, lr=args.lr)
# scheduler_g = optim.lr_scheduler.StepLR(optimizer_g, step_size=20, gamma=0.1, verbose=True)
scheduler_g = optim.lr_scheduler.StepLR(optimizer_g, step_size=5, gamma=0.5, verbose=True)
F1 = ResClassifier(num_classes=args.class_num, num_layer=1)
F2 = ResClassifier(num_classes=args.class_num, num_layer=1)
F1.cuda()
F2.cuda()
optimizer_f = optim.SGD(list(F1.parameters()) + list(F2.parameters()), momentum=0.9, lr=0.01, weight_decay=0.0005)
# scheduler_f = optim.lr_scheduler.StepLR(optimizer_f, step_size=20, gamma=0.1, verbose=True)
scheduler_f = optim.lr_scheduler.StepLR(optimizer_f, step_size=5, gamma=0.5, verbose=True)
# G_ckpt= os.path.join(args.out, f'ckpts/MCD_G.pkl')
# if os.path.exists(G_ckpt):
# checkpoint = torch.load (G_ckpt, map_location='cuda')
# G.load_state_dict (checkpoint, strict=False)
# F1_ckpt= os.path.join(args.out, f'ckpts/MCD_F1.pkl')
# if os.path.exists(F1_ckpt):
# checkpoint = torch.load (F1_ckpt, map_location='cuda')
# F1.load_state_dict (checkpoint, strict=False)
# F2_ckpt= os.path.join(args.out, f'ckpts/MCD_F2.pkl')
# if os.path.exists(F2_ckpt):
# checkpoint = torch.load (F2_ckpt, map_location='cuda')
# F2.load_state_dict (checkpoint, strict=False)
if args.show_feat:
G_ckpt= os.path.join(args.out, f'ckpts/MCD_G.pkl')
if os.path.exists(G_ckpt):
checkpoint = torch.load (G_ckpt, map_location='cuda')
G.load_state_dict (checkpoint, strict=False)
F1_ckpt= os.path.join(args.out, f'ckpts/MCD_F1.pkl')
if os.path.exists(F1_ckpt):
checkpoint = torch.load (F1_ckpt, map_location='cuda')
F1.load_state_dict (checkpoint, strict=False)
F2_ckpt= os.path.join(args.out, f'ckpts/MCD_F2.pkl')
if os.path.exists(F2_ckpt):
checkpoint = torch.load (F2_ckpt, map_location='cuda')
F2.load_state_dict (checkpoint, strict=False)
Test_MCD_tsne(args, G, F1, F2, dataloaders, 30, splits=['test_source', 'train_target', 'test_target'])
return
if args.criterion == 'ce':
criterion = nn.CrossEntropyLoss()
elif args.criterion == 'focal':
criterion = FocalLoss(gamma=1)
elif args.criterion == 'weighted_focal':
if args.source == 'RAF_balanced':
cls_num_list= np.array([713, 262, 713, 713, 713, 682, 713])
else: #RAF
cls_num_list= np.array([1259, 262, 713, 4705, 1885, 682, 2465])
beta = 0.9999
effective_num = 1.0 - np.power(beta, cls_num_list)
per_cls_weights = (1.0 - beta) / np.array(effective_num)
per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
#[0.65831665 3.01150101 1.13164193 0.20750166 0.45330163 1.18126904 0.35646808]
per_cls_weights = [1.75, 3.0, 2.0, 1.0, 1.5, 2.0, 1.25]
print(per_cls_weights)
class_weights = torch.FloatTensor(per_cls_weights).cuda()
criterion = FocalLoss(weight=class_weights, gamma=1)
elif args.criterion == 'ldam':
if args.source == 'RAF_balanced':
cls_num_list= np.array([713, 262, 713, 713, 713, 682, 713])
else: #RAF
cls_num_list= np.array([1259, 262, 713, 4705, 1885, 682, 2465])
idx = 0
betas = [0, 0.9999]
effective_num = 1.0 - np.power(betas[idx], cls_num_list)
per_cls_weights = (1.0 - betas[idx]) / np.array(effective_num)
per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
per_cls_weights = [1.75, 3.0, 2.0, 1.0, 1.5, 2.0, 1.25]
per_cls_weights = torch.FloatTensor(per_cls_weights).cuda()
def get_drw_weights(args, epoch, cls_num_list):
if True:
idx = 0 if epoch <= 5 else 1
betas = [0, 0.9999]
effective_num = 1.0 - np.power(betas[idx], cls_num_list)
per_cls_weights = (1.0 - betas[idx]) / np.array(effective_num)
per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
per_cls_weights = torch.FloatTensor(per_cls_weights).cuda()
return per_cls_weights
print(f'Using {args.criterion} loss')
# Running Experiment
print("Run Experiment...")
for epoch in range(1, args.epochs + 1):
# if epoch < 5 and args.criterion == 'weighted_focal': #Try delayed reweighting
# criterion = FocalLoss(gamma=1)
if args.criterion=='ldam':
if epoch >4:
per_cls_weights = [1.75, 3.0, 2.0, 1.0, 1.5, 2.0, 1.25]
else:
per_cls_weights = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
# per_cls_weights = get_drw_weights(args, epoch, cls_num_list)
print(f'Epoch: {epoch}, per cls weights: {per_cls_weights}')
per_cls_weights = torch.FloatTensor(per_cls_weights).cuda()
criterion = LDAMLoss(cls_num_list, weight=per_cls_weights)
print(f'Epoch : {epoch}')
Train_MCD(args, G, F1, F2, dataloaders['train_source'], dataloaders['train_target'], optimizer_g, optimizer_f,
epoch, writer, criterion)
scheduler_g.step()
scheduler_f.step()
print('\nEvaluation ...')
Test_MCD_tsne(args, G, F1, F2, dataloaders, epoch, splits=['test_source', 'train_target', 'test_target'])
# Test_MCD(args, G, F1, F2, dataloaders, epoch, splits=['train_source', 'test_source'])
# Test_MCD(args, G, F1, F2, dataloaders, epoch, splits=['train_source', 'train_target'])
# Test_MCD(args, G, F1, F2, dataloaders, epoch, splits=['train_source', 'test_target'])
# Test_MCD(args, G, F1, F2, dataloaders, epoch, splits=['train_source', 'train_target', 'test_source', 'test_target'])
if args.save_checkpoint and epoch%5:
torch.save(G.state_dict(), os.path.join(args.out, f'ckpts/MCD_G_{epoch}.pkl'))
torch.save(F1.state_dict(), os.path.join(args.out, f'ckpts/MCD_F1_{epoch}.pkl'))
torch.save(F2.state_dict(), os.path.join(args.out, f'ckpts/MCD_F2_{epoch}.pkl'))
writer.close()
| 5,338,749
|
def total_scatter_matrix(data):
"""
    Total sum of squares (TSS): sum of squared distances of points around the barycentre
References : Clustering Indices, Bernard Desgraupes (April 2013)
"""
X = np.array(data.T.copy(), dtype=np.float64)
for feature_i in range(data.shape[1]):
X[feature_i] = X[feature_i] - np.mean(X[feature_i])
T = np.dot(X, X.T)
return T
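
# A small sanity check (assumes numpy imported as np): the trace of T equals
# the total sum of squared distances of the points to the barycentre.
import numpy as np

data = np.array([[0.0, 0.0], [2.0, 0.0], [0.0, 2.0], [2.0, 2.0]])
T = total_scatter_matrix(data)
centred = data - data.mean(axis=0)
assert np.isclose(np.trace(T), (centred ** 2).sum())  # both equal 8.0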
| 5,338,750
|
def save_camera_zip(camera_id, year, month, file_path=None):
"""
Download a camera ZIP archive.
:param camera_id: int, camera ID
:param year: int, year
:param month: int, month
:param file_path: str, optional, path to save file
:return: bool, status of download
"""
# Setup file name
file_name = "{0:04d}.{1:02d}.zip".format(year, month)
if file_path is None:
file_path = "./{0}".format(file_name)
# Download
save_buffer(get_zip_url(camera_id, year, month), file_path)
return True
| 5,338,751
|
def test_cyme_to_json():
"""Test the JSON writer with CYME models as input."""
from ditto.readers.cyme.read import Reader
from ditto.store import Store
from ditto.writers.json.write import Writer
cyme_models = [
f
for f in os.listdir(os.path.join(current_directory, "data/small_cases/cyme/"))
if not f.startswith(".")
]
for model in cyme_models:
m = Store()
r = Reader(
data_folder_path=os.path.join(
current_directory, "data/small_cases/cyme", model
)
)
r.parse(m)
m.set_names()
output_path = tempfile.TemporaryDirectory()
w = Writer(output_path=output_path.name)
w.write(m)
| 5,338,752
|
def read_user(str):
""" str -> dict """
pieces = str.split()
return {
'first': pieces[0],
'last': pieces[1],
'username': pieces[5],
'custID': pieces[3],
'password': pieces[7],
'rank': 0,
'total': 0
}
| 5,338,753
|
def log_page_timing(sender, **kwargs):
"""
Collects and stores page timing information, which the client may send at the end of a page view.
The extra information will be added in to the original VIEWED event representing the page view.
"""
event_id = kwargs['event_id']
times = kwargs['times']
if event_id:
try:
event = Event.objects.get(id=event_id)
logger.debug('Adding page timing to %s: %s', event, times)
loadTime = times.get('loadTime')
duration = times.get('duration')
activeDuration = times.get('activeDuration')
if loadTime:
event.loadTime = timedelta(milliseconds=loadTime)
if duration:
event.duration = timedelta(milliseconds=duration)
if activeDuration:
event.activeDuration = timedelta(milliseconds=activeDuration)
event.save()
except Event.DoesNotExist:
logger.error('Received page timing for a non-existent event %s', event_id)
else:
logger.error('Missing event ID in page timing message')
| 5,338,754
|
def lweekdate(weekday, year, month, nextDay=0):
"""
Usage
lastDate = lweekdate(weekday, year, month, nextDay)
Notes
Date of last occurrence of weekday in month
returns the serial date number for the last occurrence of Weekday in the given
year and month and in a week that also contains NextDay.
Weekday Weekday whose date you seek. Enter as an integer from 1 through 7:
1 Sunday
2 Monday
3 Tuesday
4 Wednesday
5 Thursday
6 Friday
7 Saturday
Year Year. Enter as a four-digit integer.
Month Month. Enter as an integer from 1 through 12.
Not Implemented
NextDay (Optional) Weekday that must occur after Weekday in the same week.
Enter as an integer from 0 through 7, where 0 = ignore (default) and 1 through 7
are the same as for Weekday.
Any input can contain multiple values, but if so, all other inputs must contain
the same number of values or a single value that applies to all.
See Also
Use the function datestr to convert serial date numbers to formatted date strings.
Examples
"""
assert weekday in range(1,8), "weekday must be in range(1,8)"
assert month in range(1,13), "month must be in range(1,13)"
assert year in range(0, 10000), "year must be in range(0,10000)"
    assert nextDay in range(0,8), "nextDay must be in range(0,8)"
day = calendar.monthcalendar(year,month)[-1][weekday-1]
if day == 0:
day = calendar.monthcalendar(year,month)[-2][weekday-1]
return datenum(year, month, day)
| 5,338,755
|
def multi_polygon_gdf(basic_polygon):
"""
A GeoDataFrame containing the basic polygon geometry.
Returns
-------
GeoDataFrame containing the basic_polygon polygon.
"""
poly_a = Polygon([(3, 5), (2, 3.25), (5.25, 6), (2.25, 2), (2, 2)])
gdf = gpd.GeoDataFrame(
[1, 2],
geometry=[poly_a.buffer(0), basic_polygon.buffer(0)],
crs="epsg:4326",
)
multi_gdf = gpd.GeoDataFrame(
geometry=gpd.GeoSeries(gdf.unary_union), crs="epsg:4326"
)
return multi_gdf
| 5,338,756
|
def isort():
"""Run isort on the source and test files"""
subprocess.run(
[
"poetry",
"run",
"isort",
"tmt_carddeck",
"tests",
],
check=True,
)
| 5,338,757
|
def get_or_create_pull(github_repo, title, body, head, base, *, none_if_no_commit=False):
"""Try to create the PR. If the PR exists, try to find it instead. Raises otherwise.
You should always use the complete head syntax "org:branch", since the syntax is required
in case of listing.
if "none_if_no_commit" is set, return None instead of raising exception if the problem
is that head and base are the same.
"""
try: # Try to create or get a PR
return github_repo.create_pull(
title=title,
body=body,
head=head,
base=base
)
except GithubException as err:
err_message = err.data['errors'][0].get('message', '')
if err.status == 422 and err_message.startswith('A pull request already exists'):
_LOGGER.info('PR already exists, get this PR')
return list(github_repo.get_pulls(
head=head,
base=base
))[0]
elif none_if_no_commit and err.status == 422 and err_message.startswith('No commits between'):
_LOGGER.info('No PR possible since head %s and base %s are the same',
head,
base)
return None
else:
_LOGGER.warning("Unable to create PR:\n%s", err.data)
raise
except Exception as err:
response = traceback.format_exc()
_LOGGER.warning("Unable to create PR:\n%s", response)
raise
| 5,338,758
|
def get_board_properties(board, board_path):
"""parses the board file returns the properties of the board specified"""
with open(helper.linux_path(board_path)) as f:
board_data = json.load(f, object_pairs_hook=OrderedDict)
return board_data[board]
| 5,338,759
|
def pytest_sessionstart(session):
""" This should run first as it creates the temporary filder when run on the xdist master."""
if _is_xdist_master(session):
# perform cleanup
session.config.hook.pytest_harvest_xdist_init()
# mark the fixture store as to be reloaded
FIXTURE_STORE.disabled = True
| 5,338,760
|
def extract_tweets_and_labels(filename ):
""" Extract tweets and labels from the downloaded data"""
print ('Step 3: Reading the data as a dataframe')
df=pd.read_csv(filename, header=None, encoding='iso-8859-1')
df.columns=['Label','TweetId','Date','Query','User','Text']
print ('Read {} lines'.format(df.shape[0]))
print ('Discarding neutral tweets')
df=df[df.Label!=2]
print ('No of lines in the data after filtering neutral tweets: {}'.format(df.shape[0]))
print ('Step 4: Shuffling the data')
train_length=int(df.shape[0]*0.8)
df=df.sample(frac=1) # reshuffling the data
df['Text']=df['Text'].astype(str).apply(lambda x:x.strip())#.encode('ascii','ignore')#str.decode('utf8','ignore')#.str.encode('ascii','ignore')
print (df.head())
print ('Step 5: Dividing into test and train datasets')
df_train = df.iloc[:train_length, :]
df_test = df.iloc[train_length:, :]
print ('Step 6: Exporting the train and test datasets')
print ('Exporting training data of rows {}'.format(df_train.shape[0]))
export_prefix='training'
df_train[['Label']].to_csv(export_prefix+'_label.csv', header=False, index=False)
df_train[['Text']].to_csv(export_prefix+'_text.csv', header=False, index=False)
print ('Target distribution in the training data is as follows')
print ('\n',df_train['Label'].value_counts())
    print ('Exporting testing data of rows {}'.format(df_test.shape[0]))
export_prefix='testing'
df_test[['Label']].to_csv(export_prefix+'_label.csv', header=False, index=False)
df_test[['Text']].to_csv(export_prefix+'_text.csv', header=False, index=False)
print ('Target distribution in the testing data is as follows')
print ('\n',df_test['Label'].value_counts())
| 5,338,761
|
def test_noun_chunks_is_parsed_it(it_tokenizer):
"""Test that noun_chunks raises Value Error for 'it' language if Doc is not parsed."""
doc = it_tokenizer("Sei andato a Oxford")
with pytest.raises(ValueError):
list(doc.noun_chunks)
| 5,338,762
|
def test_iconutil_fails(infopl, alfred4, tempdir):
"""`iconutil` throws RuntimeError"""
with FakePrograms("iconutil"):
icns_path = os.path.join(tempdir, "icon.icns")
with pytest.raises(RuntimeError):
notify.png_to_icns(PNG_PATH, icns_path)
| 5,338,763
|
def test_fbeta_score_inconsistancy():
"""Test of `fbeta_score` with input inconsistency."""
y_pred = np.array([0, 1])
y_true = np.array([1])
with pytest.raises(InvalidInput):
fbeta_score(y_pred, y_true, 2)
| 5,338,764
|
def parse_command(command):
"""
Parse the given one-line QF command analogously to parse_file().
"""
m = re.match(r'^\#?([bdpq]|build|dig|place|query)\s+(.+)', command)
if m is None:
raise ParametersError("Invalid command format '%s'." % command)
# set up details dict
details = {
'build_type': buildconfig.get_full_build_type_name(m.group(1)),
'start': (0, 0),
'start_comment': '',
'comment': ''
}
# break apart lines by # and cells by ,
lines = [[cell.strip() for cell in line.split(',')]
for line
in m.group(2).split('#')
]
# break up lines into z-layers separated by #> or #<
# TODO: actually support this properly, right now we are just
# calling this to do conversion to FileLayers for us
filelayers = split_zlayers(lines)
# tidy up the layers
for fl in filelayers:
fl.fixup()
fl.clean_cells()
return filelayers, details
| 5,338,765
|
def clear_screen():
"""
Clears the screen
"""
# Clear the previously drawn text:
if sys.platform == 'win32':
os.system('cls') # Clears Windows terminal.
else:
os.system('clear')
| 5,338,766
|
def _run(cmd: Union[str, List[str]]) -> List[str]:
"""Run a 'cmd', returning stdout as a list of strings."""
cmd_list = shlex.split(cmd) if type(cmd) == str else cmd
result = subprocess.run(cmd_list, capture_output=True)
return result.stdout.decode('utf-8').split("\n")
| 5,338,767
|
def test_check_description_git_url():
""" Does DESCRIPTION file contain an upstream Git repo URL? """
from fontbakery.profiles.googlefonts import (
com_google_fonts_check_description_git_url as check,
description,
descfile)
# TODO: test INFO "url-found"
bad_desc = description(descfile(TEST_FILE("cabin/Cabin-Regular.ttf")))
print('Test FAIL with description file that has no git repo URLs...')
status, message = list(check(bad_desc))[-1]
assert status == FAIL and message.code == "lacks-git-url"
good_desc = ("<a href='https://github.com/uswds/public-sans'>Good URL</a>"
"<a href='https://gitlab.com/smc/fonts/uroob'>Another Good One</a>")
print('Test PASS with description file that has good links...')
status, message = list(check(good_desc))[-1]
assert status == PASS
bad_desc = "<a href='https://v2.designsystem.digital.gov'>Bad URL</a>"
print('Test FAIL with description file that has false git in URL...')
status, message = list(check(bad_desc))[-1]
assert status == FAIL and message.code == "lacks-git-url"
| 5,338,768
|
def get_repository_username(repo_url):
"""
Returns the repository username
:return: (str) Repository owner username
"""
repo_path = _get_repo_path(repo_url)
return repo_path[0]
| 5,338,769
|
def get_logger():
"""Grab the global logger instance.
If a global IPython Application is instantiated, grab its logger.
Otherwise, grab the root logger.
"""
global _logger
if _logger is None:
from IPython.config import Application
if Application.initialized():
_logger = Application.instance().log
else:
logging.basicConfig()
_logger = logging.getLogger()
return _logger
| 5,338,770
|
def process_y(y_train: pd.Series, max_mult=20, large_sampsize=50000):
"""
Drop missing values, downsample the negative class
if sample size is large and there is significant class imbalance
"""
# Remove missing labels
ytr = y_train.dropna()
# The code below assumes the negative class is over-represented.
assert ytr.mean() < 0.5
# If there are too many negative samples, downsample
if len(ytr) > large_sampsize:
label_counts = ytr.value_counts()
max_neg = max(label_counts.loc[1.0] * max_mult, large_sampsize)
y_neg = ytr[ytr == 0.0]
y_pos = ytr[ytr == 1.0]
new_y = pd.concat(
[y_neg.sample(frac=1.0, replace=False).iloc[:max_neg], y_pos]
).sample(frac=1.0, replace=False)
return new_y
else:
return ytr
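# Hedged usage sketch with a deliberately small large_sampsize: the
# over-represented 0-class is cut down to at most max_mult times the number
# of 1-labels (but never below large_sampsize), and every 1-label is kept.
import pandas as pd
_y = pd.Series([1.0] * 10 + [0.0] * 1000)
_balanced = process_y(_y, max_mult=20, large_sampsize=100)
assert _balanced.sum() == 10                 # all positives survive
assert (_balanced == 0.0).sum() == 200       # 10 positives * max_mult negatives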
| 5,338,771
|
def _distance(y1, y2):
"""1D distance calculator"""
inner = (y2 - y1) ** 2
d = np.sqrt(inner)
return d
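# Hedged sketch: for scalars this is just the absolute difference, and it
# broadcasts element-wise over numpy arrays.
import numpy as np
assert _distance(2.0, 5.0) == 3.0
assert np.allclose(_distance(np.array([0.0, 1.0]), np.array([3.0, -1.0])), [3.0, 2.0])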
| 5,338,772
|
def main():
""" This section is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_commands_file=dict(required=True, type='str'),
)
)
message = execute_commands(module, module.params['pn_commands_file'])
module.exit_json(
stdout=message,
error='0',
failed=False,
msg='Operation Completed',
changed=True
)
| 5,338,773
|
def create_container(context, values):
"""Create a new container.
:param context: The security context
:param values: A dict containing several items used to identify
                   and track the container, and several dicts which are
                   passed into the Drivers when managing this container.
                   For example:
::
{
'uuid': uuidutils.generate_uuid(),
'name': 'example',
'type': 'virt'
}
:returns: A container.
"""
return _get_dbdriver_instance().create_container(context, values)
| 5,338,774
|
def build_model_from_pb(name: str, pb_model: Callable):
"""
Build model from protobuf message.
:param name: Name of the model.
:param pb_model: protobuf message.
:return: Model.
"""
from google.protobuf.json_format import MessageToDict
dp = MessageToDict(pb_model(), including_default_value_fields=True)
all_fields = {
k: (name if k in ('chunks', 'matches') else type(v), Field(default=v))
for k, v in dp.items()
}
if pb_model == QueryLangProto:
all_fields['parameters'] = (Dict, Field(default={}))
return create_model(name, **all_fields)
| 5,338,775
|
def create_proof_of_time_pietrzak(discriminant, x, iterations, int_size_bits):
"""
Returns a serialized proof blob.
"""
delta = 8
powers_to_calculate = proof_pietrzak.cache_indeces_for_count(iterations)
powers = iterate_squarings(x, powers_to_calculate)
y = powers[iterations]
proof = proof_pietrzak.generate_proof(x, iterations, delta, y, powers,
x.identity(), generate_r_value, int_size_bits)
return y.serialize(), serialize_proof(proof)
| 5,338,776
|
def resample_dataset ( fname, x_factor, y_factor, method="mean", \
data_min=-1000, data_max=10000 ):
"""This function resamples a GDAL dataset (single band) by a factor of
(``x_factor``, ``y_factor``) in x and y. By default, the only method used
is to calculate the mean. The ``data_min`` and ``data_max`` parameters are
used to mask out pixels in value"""
QA_OK = np.array([0, 1, 4, 12, 8, 64, 512, 2048] )# VI OK
# Table in http://gis.cri.fmach.it/modis-ndvi-evi/
# First open the NDVI file
fname = 'HDF4_EOS:EOS_GRID:"%s":' % fname + \
'MOD_Grid_monthly_CMG_VI:CMG 0.05 Deg Monthly NDVI'
gdal_data = gdal.Open ( fname )
# Get raster sizes
nx = gdal_data.RasterXSize
ny = gdal_data.RasterYSize
# Calculate output raster size
    nnx = nx // x_factor
    nny = ny // y_factor
# Reshape the raster data...
B = np.reshape ( gdal_data.ReadAsArray(), ( nny, y_factor, nnx, x_factor ) )
# Now open QA file
fname = fname.replace ("NDVI", "VI Quality" )
gdal_data = gdal.Open ( fname )
qa = gdal_data.ReadAsArray()
    # Check what goes through QA and mask out everything that does not;
    # the mask has to be reshaped to match B's 4D layout.
    qa_pass = np.logical_or.reduce([qa == x for x in QA_OK])
    B = np.ma.array(B, mask=~qa_pass.reshape(B.shape))
# Re-jiggle the dimensions so we can easily average over then
C = np.transpose ( B, (0, 2, 1, 3 ) )
if method == "mean":
reduced_raster = np.mean ( np.mean ( C, axis=-1), axis=-1 )
else:
        raise NotImplementedError("Only mean reduction is supported for now")
return reduced_raster
| 5,338,777
|
def int2bytes(number, fill_size=None, chunk_size=None, overflow=False):
"""
Convert an unsigned integer to bytes (base-256 representation)::
Does not preserve leading zeros if you don't specify a chunk size or
fill size.
.. NOTE:
You must not specify both fill_size and chunk_size. Only one
of them is allowed.
:param number:
Integer value
:param fill_size:
If the optional fill size is given the length of the resulting
byte string is expected to be the fill size and will be padded
with prefix zero bytes to satisfy that length.
:param chunk_size:
If optional chunk size is given and greater than zero, pad the front of
the byte string with binary zeros so that the length is a multiple of
``chunk_size``.
:param overflow:
``False`` (default). If this is ``True``, no ``OverflowError``
will be raised when the fill_size is shorter than the length
of the generated byte sequence. Instead the byte sequence will
be returned as is.
:returns:
Raw bytes (base-256 representation).
:raises:
``OverflowError`` when fill_size is given and the number takes up more
bytes than fit into the block. This requires the ``overflow``
argument to this function to be set to ``False`` otherwise, no
error will be raised.
"""
if number < 0:
raise ValueError("Number must be an unsigned integer: %d" % number)
if fill_size and chunk_size:
raise ValueError("You can either fill or pad chunks, but not both")
    # Ensure `number` is an integer (the bitwise AND raises TypeError otherwise).
    number & 1
raw_bytes = b('')
# Pack the integer one machine word at a time into bytes.
num = number
word_bits, _, max_uint, pack_type = get_word_alignment(num)
pack_format = ">%s" % pack_type
while num > 0:
raw_bytes = pack(pack_format, num & max_uint) + raw_bytes
num >>= word_bits
# Obtain the index of the first non-zero byte.
zero_leading = bytes_leading(raw_bytes)
if number == 0:
raw_bytes = ZERO_BYTE
# De-padding.
raw_bytes = raw_bytes[zero_leading:]
length = len(raw_bytes)
if fill_size and fill_size > 0:
if not overflow and length > fill_size:
raise OverflowError(
"Need %d bytes for number, but fill size is %d" %
(length, fill_size)
)
raw_bytes = raw_bytes.rjust(fill_size, ZERO_BYTE)
elif chunk_size and chunk_size > 0:
remainder = length % chunk_size
if remainder:
padding_size = chunk_size - remainder
raw_bytes = raw_bytes.rjust(length + padding_size, ZERO_BYTE)
return raw_bytes
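# Hedged usage sketch (example values chosen here, not taken from the
# original test-suite): big-endian base-256 encoding with optional
# zero-byte padding.
assert int2bytes(0x01FF) == b'\x01\xff'
assert int2bytes(0x01FF, fill_size=4) == b'\x00\x00\x01\xff'
assert int2bytes(0) == b'\x00'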
| 5,338,778
|
def compFirstFivePowOf2(iset={0, 1, 2, 3, 4}):
"""
task 0.5.6
a comprehension over the given set whose value is the set consisting
of the first five powers of two, starting with 2**0
"""
return {2**x for x in iset}
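# Hedged usage sketch: the default index set yields the first five powers of two.
assert compFirstFivePowOf2() == {1, 2, 4, 8, 16}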
| 5,338,779
|
def sdc_pandas_dataframe_getitem(self, idx):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.DataFrame.getitem
Get data from a DataFrame by indexer.
Limitations
-----------
Supported ``key`` can be one of the following:
* String literal, e.g. :obj:`df['A']`
* A slice, e.g. :obj:`df[2:5]`
* A tuple of string, e.g. :obj:`df[('A', 'B')]`
* An array of booleans, e.g. :obj:`df[True,False]`
* A series of booleans, e.g. :obj:`df(series([True,False]))`
Supported getting a column through getting attribute.
Examples
--------
.. literalinclude:: ../../../examples/dataframe/getitem/df_getitem_attr.py
:language: python
:lines: 37-
:caption: Getting Pandas DataFrame column through getting attribute.
:name: ex_dataframe_getitem
.. command-output:: python ./dataframe/getitem/df_getitem_attr.py
:cwd: ../../../examples
.. literalinclude:: ../../../examples/dataframe/getitem/df_getitem.py
:language: python
:lines: 37-
:caption: Getting Pandas DataFrame column where key is a string.
:name: ex_dataframe_getitem
.. command-output:: python ./dataframe/getitem/df_getitem.py
:cwd: ../../../examples
.. literalinclude:: ../../../examples/dataframe/getitem/df_getitem_slice.py
:language: python
:lines: 34-
:caption: Getting slice of Pandas DataFrame.
:name: ex_dataframe_getitem
.. command-output:: python ./dataframe/getitem/df_getitem_slice.py
:cwd: ../../../examples
.. literalinclude:: ../../../examples/dataframe/getitem/df_getitem_tuple.py
:language: python
:lines: 37-
:caption: Getting Pandas DataFrame elements where key is a tuple of strings.
:name: ex_dataframe_getitem
.. command-output:: python ./dataframe/getitem/df_getitem_tuple.py
:cwd: ../../../examples
.. literalinclude:: ../../../examples/dataframe/getitem/df_getitem_array.py
:language: python
:lines: 34-
:caption: Getting Pandas DataFrame elements where key is an array of booleans.
:name: ex_dataframe_getitem
.. command-output:: python ./dataframe/getitem/df_getitem_array.py
:cwd: ../../../examples
.. literalinclude:: ../../../examples/dataframe/getitem/df_getitem_series.py
:language: python
:lines: 34-
:caption: Getting Pandas DataFrame elements where key is series of booleans.
:name: ex_dataframe_getitem
.. command-output:: python ./dataframe/getitem/df_getitem_series.py
:cwd: ../../../examples
.. seealso::
:ref:`Series.getitem <pandas.Series.getitem>`
Get value(s) of Series by key.
:ref:`Series.setitem <pandas.Series.setitem>`
Set value to Series by index
:ref:`Series.loc <pandas.Series.loc>`
Access a group of rows and columns by label(s) or a boolean array.
:ref:`Series.iloc <pandas.Series.iloc>`
Purely integer-location based indexing for selection by position.
:ref:`Series.at <pandas.Series.at>`
Access a single value for a row/column label pair.
:ref:`Series.iat <pandas.Series.iat>`
Access a single value for a row/column pair by integer position.
:ref:`DataFrame.setitem <pandas.DataFrame.setitem>`
Set value to DataFrame by index
:ref:`DataFrame.loc <pandas.DataFrame.loc>`
Access a group of rows and columns by label(s) or a boolean array.
:ref:`DataFrame.iloc <pandas.DataFrame.iloc>`
Purely integer-location based indexing for selection by position.
:ref:`DataFrame.at <pandas.DataFrame.at>`
Access a single value for a row/column label pair.
:ref:`DataFrame.iat <pandas.DataFrame.iat>`
Access a single value for a row/column pair by integer position.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas DataFrame method :meth:`pandas.DataFrame.getitem` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_dataframe.TestDataFrame.test_df_getitem*
"""
ty_checker = TypeChecker('Operator getitem().')
if not isinstance(self, DataFrameType):
return None
if isinstance(idx, types.StringLiteral):
col_loc = self.column_loc.get(idx.literal_value)
if col_loc is None:
key_error = True
else:
type_id, col_id = col_loc.type_id, col_loc.col_id
key_error = False
def _df_getitem_str_literal_idx_impl(self, idx):
if key_error == False: # noqa
data = self._data[type_id][col_id]
return pandas.Series(data, index=self._index, name=idx)
else:
raise KeyError('Column is not in the DataFrame')
return _df_getitem_str_literal_idx_impl
if isinstance(idx, types.UnicodeType):
def _df_getitem_unicode_idx_impl(self, idx):
# http://numba.pydata.org/numba-doc/dev/developer/literal.html#specifying-for-literal-typing
# literally raises special exception to call getitem with literal idx value got from unicode
return literally(idx)
return _df_getitem_unicode_idx_impl
if isinstance(idx, types.Tuple):
if all([isinstance(item, types.StringLiteral) for item in idx]):
return gen_df_getitem_tuple_idx_impl(self, idx)
if isinstance(idx, types.SliceType):
return gen_df_getitem_slice_idx_impl(self, idx)
if isinstance(idx, SeriesType) and isinstance(idx.dtype, types.Boolean):
self_index_is_none = isinstance(self.index, types.NoneType)
idx_index_is_none = isinstance(idx.index, types.NoneType)
if self_index_is_none and not idx_index_is_none:
if not check_index_is_numeric(idx):
ty_checker.raise_exc(idx.index.dtype, 'number', 'idx.index.dtype')
if not self_index_is_none and idx_index_is_none:
if not check_index_is_numeric(self):
ty_checker.raise_exc(idx.index.dtype, self.index.dtype, 'idx.index.dtype')
if not self_index_is_none and not idx_index_is_none:
if not check_types_comparable(self.index, idx.index):
ty_checker.raise_exc(idx.index.dtype, self.index.dtype, 'idx.index.dtype')
return gen_df_getitem_bool_series_idx_impl(self, idx)
if isinstance(idx, types.Array) and isinstance(idx.dtype, types.Boolean):
return gen_df_getitem_bool_array_idx_impl(self, idx)
ty_checker = TypeChecker('Operator getitem().')
expected_types = 'str, tuple(str), slice, series(bool), array(bool)'
ty_checker.raise_exc(idx, expected_types, 'idx')
| 5,338,780
|
def display_finds_meta(r):
"""A list of urls in r is displayed as HTML"""
    rows = ["<tr><td><img src='{row}'></td><td><a href='{meta}' target='_'>{meta}</a></td></tr>".format(row=row, meta=row) for row in r]
return HTML("""<html><head></head>
<body>
<table>
{rows}
</table>
</body>
</html>
""".format(rows=' '.join(rows)))
| 5,338,781
|
def __pairwise__(iterable):
""" Converts a list of elements in a list of pairs like:
list -> (list[0], list[1]), (list[2], list[3]), (list[4], list[5]), ...
:param iterable: Input list.
:return: List of pairs of the given list elements.
"""
a = iter(iterable)
return zip(a, a)
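# Hedged usage sketch: consecutive elements are grouped into pairs; a
# trailing unpaired element is silently dropped by zip().
assert list(__pairwise__([1, 2, 3, 4])) == [(1, 2), (3, 4)]
assert list(__pairwise__([1, 2, 3])) == [(1, 2)]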
| 5,338,782
|
def _generateTriangleSequence():
"""
Generates list of elements following sequence of triangle numbers.
Returns:
sequenceElements - List of elements following the sequence.
"""
sequenceElements = []
totalCharactersInNewSequence = 0
total = 1
currentAddend = 2
while totalCharactersInNewSequence <= _MAX_NUMBER_OF_CHARACTERS_TO_PRINT:
currentSequenceMember = str(total)
sequenceElements.append(currentSequenceMember)
totalCharactersInNewSequence += len(currentSequenceMember)
total += currentAddend
currentAddend += 1
return sequenceElements
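# Hedged sketch of the recurrence the loop above implements: starting at 1
# and adding 2, 3, 4, ... in turn walks the triangle numbers. This snippet
# is standalone and does not depend on _MAX_NUMBER_OF_CHARACTERS_TO_PRINT.
_vals, _total, _addend = [], 1, 2
for _ in range(5):
    _vals.append(_total)
    _total += _addend
    _addend += 1
assert _vals == [1, 3, 6, 10, 15]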
| 5,338,783
|
def normalize_neurons_range(neurons, standard_diagonal_line: int or float):
"""
    :param neurons: nested list of regions, each a list of [x, y, z] points, to be rescaled.
    :param standard_diagonal_line: pre-defined standard length of the diagonal of the xoy bounding box.
    :return: neurons, width_scale, [width_span, height_span, z_span]
        width_scale: the width generally differs from the height across volumes, so scaling the width
        turns an elliptical shell into a circular one, which makes counting on the xoy plane convenient.
    """
regions = np.array([re for res in neurons.copy() for re in res], dtype = np.float32) # [x, y, z]
width, height = np.max(regions[:, 0]) - np.min(regions[:, 0]), np.max(regions[:, 1]) - np.min(regions[:, 1])
scale = standard_diagonal_line / math.sqrt(width * width + height * height)
neurons = [[[p[0] * scale, p[1] * scale, p[2] * scale] for p in pp] for pp in neurons] # for knn feature
width_scale = height / width
width_span = width * width_scale * scale
height_span = height * scale
z_span = (np.max(regions[:, 2]) - np.min(regions[:, 2])) * scale
return neurons, width_scale, [width_span, height_span, z_span]
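# Hedged worked example: a single neuron whose two points span a 3 x 4
# rectangle in xoy has a diagonal of 5, so standard_diagonal_line=10.0
# scales every coordinate by 2.
import numpy as np
_neurons = [[[0.0, 0.0, 0.0], [3.0, 4.0, 1.0]]]
_scaled, _width_scale, (_w, _h, _z) = normalize_neurons_range(_neurons, 10.0)
assert np.isclose(_width_scale, 4.0 / 3.0)
assert np.isclose(_h, 8.0) and np.isclose(_z, 2.0)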
| 5,338,784
|
def distort_image(image):
"""Perform random distortions to the given 4D image and return result"""
# Switch to 3D as that's what these operations require
slices = tf.unpack(image)
output = []
# Perform pixel-wise distortions
for image in slices:
image = tf.image.random_flip_left_right(image)
image = tf.image.random_saturation(image, .2, 2.)
image += tf.truncated_normal(image.get_shape(), stddev=.05)
image = tf.image.random_contrast(image, .85, 1.15)
image = tf.image.random_brightness(image, .3)
output.append(image)
# Go back to 4D
image = tf.pack(output)
return image
| 5,338,785
|
def tablebyname(filehandle, header):
"""fast extraction of the table using the header to identify the table
    This function reads only one table from the HTML file. This is in contrast to `results.readhtml.titletable`, which reads all the tables into memory and lets you browse them interactively but can be very slow on large HTML files.
    This function is useful when you know which table you are looking for. It locates the bold title line just before the table; it will not work for tables whose title is not in bold.
Parameters
----------
    filehandle : file-like object
A file handle to the E+ HTML table file
header: str
This is the title of the table you are looking for
Returns
-------
titleandtable : (str, list)
- (title, table)
- title = previous item with a <b> tag
- table = rows -> [[cell1, cell2, ..], [cell1, cell2, ..], ..]
"""
htmlheader = f"<b>{header}</b><br><br>"
    thetable = None
    with filehandle:
        for line in filehandle:
            line = _decodeline(line)
            if line.strip() == htmlheader:
                justtable = getnexttable(filehandle)
                thetable = f"{htmlheader}\n{justtable}"
                break
    if thetable is None:
        # header not found in the file
        return None
    filehandle = StringIO(thetable)
    htables = readhtml.titletable(filehandle)
    try:
        return list(htables[0])
    except IndexError:
        return None
| 5,338,786
|
def BuildDataset():
"""Create the dataset"""
# Get the dataset keeping the first two features
iris = load_iris()
x = iris["data"][:,:2]
y = iris["target"]
# Standardize and keep only classes 0 and 1
x = (x - x.mean(axis=0)) / x.std(axis=0)
i0 = np.where(y == 0)[0]
i1 = np.where(y == 1)[0]
x = np.vstack((x[i0],x[i1]))
# Train and test data
xtrn = np.vstack((x[:35],x[50:85]))
ytrn = np.array([0]*35 + [1]*35)
xtst = np.vstack((x[35:50],x[85:]))
ytst = np.array([0]*15+[1]*15)
idx = np.argsort(np.random.random(70))
xtrn = xtrn[idx]
ytrn = ytrn[idx]
idx = np.argsort(np.random.random(30))
xtst = xtst[idx]
ytst = ytst[idx]
y_train = np.zeros((len(ytrn),2))
for i in range(len(ytrn)):
if (ytrn[i] == 1):
y_train[i,:] = [0,1]
else:
y_train[i,:] = [1,0]
y_test = np.zeros((len(ytst),2))
for i in range(len(ytst)):
if (ytst[i] == 1):
y_test[i,:] = [0,1]
else:
y_test[i,:] = [1,0]
return (xtrn.reshape((xtrn.shape[0],1,2)), y_train,
xtst.reshape((xtst.shape[0],1,2)), y_test)
| 5,338,787
|
def label_train_spaces(ctx: cairo.Context, train: trains.Train, radius_factors=1, broken_spaces:Sequence=None):
"""Draw material name and thickness of the spaces.
Args:
        radius_factors (scalar or sequence): Factor by which the average of the adjacent interface radii is
            multiplied to place the text. If a scalar is given it is broadcast to all spaces.
"""
spaces, brokens = break_spaces(train.spaces, broken_spaces)
radius_factors = np.broadcast_to(radius_factors, (len(spaces),))
radius_last = train.interfaces[0].radius
y_last = 0
h_last = 0
n = train.interfaces[0].n1
for space, next_interface, radius_factor in zip(spaces, train.interfaces + (None,), radius_factors):
if next_interface is None:
radius_next = radius_last
h_next = 0
else:
radius_next = next_interface.radius
h_next = functions.calc_sphere_sag(next_interface.roc, radius_next)
y_next = y_last + space
if space != 0:
string = '%.3f mm %s'%(space*1e3, n.name)
radius = (radius_last + radius_next)/2
x = radius_factor*radius
y = (y_next + h_next + y_last + h_last)/2
ctx.save()
ctx.translate(x, y)
ctx.show_text(string)
ctx.new_path()
ctx.restore()
if radius_factor > 1:
draw_polyline(ctx, ((radius, y), (x, y)))
ctx.stroke()
y_last = y_next
h_last = h_next
if next_interface is not None:
n = next_interface.n2
radius_last = next_interface.radius
| 5,338,788
|
def main(config_dir: Path, default_config: str, results_dir: Path):
"""Run all config files instead of the default one.
Ideally, these runs are parellellized instead of run in sequence."""
for config_file in config_dir.iterdir():
if config_file.name != default_config:
output_path = results_dir / config_file.stem
if not output_path.exists():
output_path.mkdir()
train(config_path=config_file, output_path=output_path)
| 5,338,789
|
def read_tile(file, config):
"""Read a codex-specific 5D image tile"""
# When saving tiles in ImageJ compatible format, any unit length
# dimensions are lost so when reading them back out, it is simplest
# to conform to 5D convention by reshaping if necessary
    slices = tuple(None if dim == 1 else slice(None) for dim in config.tile_dims)
    return imread(file)[slices]
| 5,338,790
|
def rmsd(V, W):
""" Calculate Root-mean-square deviation from two sets of vectors V and W. """
D = len(V[0])
N = len(V)
rmsd = 0.0
for v, w in zip(V, W):
rmsd += sum([(v[i] - w[i]) ** 2.0 for i in range(D)])
return np.sqrt(rmsd / N)
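# Hedged worked example: for a single pair of 3-D points the RMSD reduces
# to the Euclidean distance between them.
import numpy as np
assert np.isclose(rmsd([[0.0, 0.0, 0.0]], [[1.0, 2.0, 2.0]]), 3.0)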
| 5,338,791
|
def partition(f, xs):
"""
partition :: (a -> Bool) -> [a] -> ([a], [a])
The partition function takes a predicate a list and returns the pair of
lists of elements which do and do not satisfy the predicate.
"""
yes, no = [], []
for item in xs:
if f(item):
yes.append(item)
else:
no.append(item)
return L[yes], L[no]
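# Hedged usage sketch (assumes `L` is the Haskell-style list constructor the
# surrounding module already imports): elements satisfying the predicate go
# into the first list, the rest into the second.
#
#   evens, odds = partition(lambda x: x % 2 == 0, [1, 2, 3, 4])
#   # evens wraps [2, 4], odds wraps [1, 3]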
| 5,338,792
|
def workflow(gid, lat, lon):
"""Do Some Work Please"""
res = []
for year in range(2010, 2016):
print("Processing GID: %s year: %s" % (gid, year))
uri = ("http://mesonet.agron.iastate.edu/iemre/multiday/"
"%s-01-01/%s-12-31/%s/%s/json"
) % (year, year, lat, lon)
req = requests.get(uri)
for row in req.json()['data']:
res.append(row)
df = pd.DataFrame(res)
df.to_csv("%s.csv" % (gid,), index=False)
| 5,338,793
|
async def restore(rc: RestClient, fc_entries: List[FCMetadata], dryrun: bool) -> None:
"""Send the fc_entries one-by-one to the FC.
PUT if FC entry already exists. Otherwise, POST with uuid.
"""
for i, fcm in enumerate(fc_entries):
print(f"{i}/{len(fc_entries)}")
logging.debug(fcm)
try:
if await already_in_fc(rc, fcm["uuid"], fcm["logical_name"]):
logging.info(
f"Entry is already in the FC ({fcm['uuid']}); Replacing (PUT)..."
)
if dryrun:
logging.warning("DRYRUN MODE ON: not sending PUT request")
else:
await rc.request("PUT", f'/api/files/{fcm["uuid"]}', fcm)
else:
logging.info(
f"Entry is not already in the FC ({fcm['uuid']}); Posting (POST)..."
)
if dryrun:
logging.warning("DRYRUN MODE ON: not sending POST request")
else:
await rc.request("POST", "/api/files", fcm)
except PathAlreadyExistsException as e:
logging.error(e)
with open("./paths-already-exist.json", "a") as json_f:
print(json.dumps(fcm), file=json_f)
| 5,338,794
|
def bvp2_check():
"""
Using scikits.bvp_solver to solve the bvp
y'' + y' + sin y = 0, y(0) = y(2*pi) = 0
y0 = y, y1 = y'
y0' = y1, y1' = y'' = -sin(y0) - y1
"""
from math import exp, pi, sin
lbc, rbc = .1, .1
    def function1(x, y):
        return np.array([y[1], -sin(y[0]) - y[1]])
    def boundary_conditions(ya, yb):
        # Residuals of the solution against the required values at the
        # left and right boundaries.
        return (np.array([ya[0] - lbc]),
                np.array([yb[0] - rbc]))
problem = bvp_solver.ProblemDefinition(num_ODE = 2,
num_parameters = 0,
num_left_boundary_conditions = 1,
boundary_points = (0, 2.*pi),
function = function1,
boundary_conditions = boundary_conditions)
guess = np.linspace(0.,2.*pi, 10)
guess = np.array([.1-np.sin(2*guess),np.sin(2*guess)])
# plt.plot(guess,np.sin(guess))
# plt.show()
solution = bvp_solver.solve(problem,
solution_guess = guess)
#
A = np.linspace(0.,2.*pi, 200)
T = solution(A)
plt.plot(A, T[0,:],'-k',linewidth=2.0)
plt.show()
plt.clf()
N = 150
x = (2.*np.pi/N)*np.arange(1,N+1).reshape(N,1)
    print(x.shape)
    print(solution(x)[0,:].shape)
plt.plot(x,solution(x)[0,:])
plt.show()
# np.save('sol',solution(x)[0,:])
return
| 5,338,795
|
def test_location_invalid():
"""Test constructing invalid URIs."""
with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
flight.connect("%")
with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
ConstantFlightServer("%")
| 5,338,796
|
def get_zarr_store(file_path):
"""Get the storage type
"""
import zarr
ZARR_STORE_MAP = {"lmdb": zarr.LMDBStore,
"zip": zarr.ZipStore,
"dbm": zarr.DBMStore,
"default": zarr.DirectoryStore}
suffix, subsuffix = get_subsuffix(file_path)
if suffix != 'zarr' or (subsuffix is not None and subsuffix not in ZARR_STORE_MAP):
return ZARR_STORE_MAP['default'](file_path)
else:
return ZARR_STORE_MAP[subsuffix](file_path)
| 5,338,797
|
def test_integration_ref():
"""
GIVEN schema that references another schema and schemas
WHEN column_factory is called with the schema and schemas
THEN SQLAlchemy boolean column is returned in a dictionary with logical name and
the spec.
"""
spec = {"$ref": "#/components/schemas/RefSchema"}
schemas = {"RefSchema": {"type": "boolean"}}
([(logical_name, column)], spec) = column_factory.column_factory(
spec=spec, schemas=schemas, logical_name="column_1", model_schema={}
)
assert logical_name == "column_1"
assert isinstance(column.type, sqlalchemy.Boolean)
assert spec == {"type": "boolean"}
| 5,338,798
|
def image_5d_iterator(file_name, rescale=False):
"""Iterate over all series with (name, data)
Args:
file_name (str): path to file
rescale (bool, optional): rescale to min/max. Defaults to False.
Yields:
(str, numpy.array): Tuple of series name and pixel content as 5D array
"""
JVM().start()
meta_data = metadata(file_name)
n_series = series_count(file_name)
for s in range(n_series):
name = meta_data.image(s).get_Name()
yield name, image_5d(file_name, series=s, rescale=rescale)
| 5,338,799
|