# projects/rpi_led/project.py
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)
led_state = False
try:
while True:
GPIO.output(18, led_state)
if led_state:
print("The LED is on. Press 'enter' to switch it off")
else:
print("The LED is off. Press 'enter' to switch it on")
arg = input("Press 'q' then 'enter' to quit.")
if arg == "q":
exit()
elif led_state:
led_state = False
else:
led_state = True
finally:
GPIO.cleanup()
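
# Illustrative console session (assuming the script runs on a Pi with an LED
# wired to BCM pin 18; output alternates with user input):
#   The LED is off. Press 'enter' to switch it on
#   Press 'q' then 'enter' to quit.
#   The LED is on. Press 'enter' to switch it off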

# grasp.py
# This script implements the GRASP heuristic for the dynamic bin packing
# problem.
# Author: Kristina Yancey Spencer
from __future__ import print_function
import numpy as np
import random
import solutions_dynamic as solmaker
import sys
from copy import deepcopy
from itertools import combinations
from math import ceil, sqrt
from operator import attrgetter
class BPP:
# This class groups the bin packing problem information and performs
# the GRASP operations.
def __init__(self, n, cookies, moop):
self.beta = 5 # Cardinality restriction
self.n = int(n) # Number of cookies to sort
self.cookies = cookies # dictionary of item objects
self.moop = moop # Multiobjective problem class
self.lb = 0 # initialize lower bound
self.calclowerbound()
def generate_newsol(self, index, p_ls1, p_ls2, *args):
# This module creates an instance of a NewSolution class and
# performs the generate_newsol procedure
newbie = NewSolution(self.beta, self.n, self.cookies, self.moop)
newsol = newbie.make_newsol(index, *args)
newsol = self.checkandfit(newsol)
p = index + 1 # ID number for first neighbor
rannum = random.random()
if rannum < p_ls1:
if newsol.getopenbins() > self.lb:
p, neighbors = self.ls1(p, 1, newsol)
else:
p, neighbors = self.bin_mutation(p, 1, newsol)
elif rannum < p_ls2:
p, neighbors = self.ls2(p, 1, newsol)
else:
p, neighbors = self.ls3(p, 1, newsol)
if neighbors:
winner = self.test_domination(newsol, neighbors[0])
return p, winner
return p, newsol
def checkandfit(self, solution):
# This function checks the feasibility of a solution and calculates fitness
# values.
solution = self.moop.calcfeasibility(solution)
checkformismatch(solution.getx(), solution.getvlrep())
fits = self.moop.calcfits(solution)
solution.updatefitvals(fits)
return solution
def test_domination(self, solution, neighbor):
# This function determines if neighbor dominates solution.
u = solution.getfits()
v = neighbor.getfits()
if dom2(v, u):
return neighbor
else:
return solution
def ls_time(self, solution, rcl_t):
# This function seeks to find a better time to fill bins
# Start by finding the dynamic residual matrix for the cooling rack
neighbor = deepcopy(solution)
tfill = neighbor.gettfill()
i_tlowtohigh = list(np.argsort(tfill[:neighbor.openbins], axis=0))
for i in i_tlowtohigh:
neighbor, rcl_t = self.find_new_tfilli(i, neighbor, rcl_t)
# Check if modified solution is nondominated
neighbor = self.checkandfit(neighbor)
winner = self.test_domination(solution, neighbor)
return winner
def find_new_tfilli(self, i, solution, rcl_t):
# This function determines a new time for box i to be filled and updates
# the RCLTime instance
vlrep = solution.getvlrep()
tfill = solution.gettfill()
told = tfill[i]
tmin = self.get_box_tmin(vlrep[i])
kwargs = {'mode': 'hload', 'nmove': len(vlrep[i]), 'told': told}
t, rcl_t = self.get_feasible_tfilli(rcl_t, tmin, **kwargs)
if t:
solution.edit_tfilli(i, t)
# Adapt Greedy Function
rcl_t.adapt_changetime(told, t, len(vlrep[i]))
return solution, rcl_t
def get_feasible_tfilli(self, rcl_t, tmin, **kwargs):
# This function locates a new value for tfill[i] that doesn't violate
# rack or fill limits
# Find new time for box i
t_new, p_t, rcl_t = self.find_new_time_value(rcl_t, tmin, **kwargs)
if not t_new:
return None, rcl_t
kappa = 0 # Counter to exit loop
# Check if possible to fill in period
while rcl_t.res_fill[p_t] < 1:
if kappa == 10:
return None, rcl_t
# If not possible, find new time value
t_new, p_t, rcl_t = self.find_new_time_value(rcl_t, tmin, **kwargs)
if not t_new:
return None, rcl_t
kappa += 1
# If returning t_new to open bin, reduce fill capacity by 1
rcl_t.res_fill[p_t] -= 1
return t_new, rcl_t
def get_box_tmin(self, vlrepi):
# Find minimum time for box i
boxi_contents = {k: v for k, v in self.cookies.items() if k in vlrepi}
maxbatch = max(boxi_contents.values(), key=attrgetter('batch')).batch
tmin = maxbatch * 600
return tmin
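    # e.g. a box whose newest cookie comes from batch 2 cannot be filled
    # before t = 2 * 600 = 1200 s, when that batch leaves the oven.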
def find_new_time_value(self, rcl_t, tmin, **kwargs):
# This module retrieves a new time value and also returns which period
# it belongs to
t_new = rcl_t.get_new_t(tmin, **kwargs)
if not t_new:
return None, None, rcl_t
t_p = self.find_t_in_fill_periods(t_new, rcl_t)
return t_new, t_p, rcl_t
def find_t_in_fill_periods(self, t, rcl_t):
# If the new time value is beyond the current fill periods, extend
while t > rcl_t.t_t[-1]:
rcl_t.extend_fill_periods()
# Find the period containing t_new
tlist = np.where(t >= np.array(rcl_t.t_t))[0]
return tlist[-1]
def ls1(self, p, numls, solution):
# Heuristic to locate a better solution in terms of the first objective:
# minimizing the number of bins in use
k = 0
neighbors = []
searchfrom = solution
while k < numls:
coolneighbor, rcl_t = self.ls1_loading(searchfrom)
if coolneighbor:
k += 1
coolneighbor = self.ls_time(coolneighbor, rcl_t)
coolneighbor.updateid(p)
p += 1
neighbors.append(coolneighbor)
searchfrom = coolneighbor
else:
k = numls
return p, neighbors
def ls2(self, p, numls, solution):
# Heuristic to locate a better solution in terms of the second objective:
# minimizing the weighted average initial heat in a box
# p - current id number for new solution
# numls - number of neighbors to find during local search
# Returns updated p and list of neighbors
k = 0
neighbors = []
searchfrom = solution
while k < numls:
k, coolneighbor, rcl_t = self.ls2_loading(k, searchfrom)
if coolneighbor:
coolneighbor = self.ls_time(coolneighbor, rcl_t)
coolneighbor.updateid(p)
p += 1
neighbors.append(coolneighbor)
searchfrom = coolneighbor
else:
k = numls
return p, neighbors
def ls3(self, p, numls, solution):
# Heuristic to locate a better solution in terms of the third objective:
# minimizing the maximum time to move to store front.
k = 0
neighbors = []
searchfrom = solution
while k < numls:
k, coolneighbor, rcl_t = self.ls3_loading(k, searchfrom)
if coolneighbor:
coolneighbor = self.ls_time(coolneighbor, rcl_t)
coolneighbor.updateid(p)
p += 1
neighbors.append(coolneighbor)
searchfrom = coolneighbor
else:
k = numls
return p, neighbors
def ls1_loading(self, searchfrom):
# This function attempts to empty the least filled bin and move its
# cookies into available boxes.
u = searchfrom.getfits()
vlrep = searchfrom.getvlrep()
r, rcl_t = self.getresiduals(vlrep, searchfrom.gettfill())
copy = deepcopy(searchfrom)
half = len(vlrep) // 2
for iloop in range(half):
# Find the emptiest bin's index number
lengths = [len(i) for i in copy.getvlrep()]
i = np.argmin(np.array(lengths))
copy, r, rcl_t = self.empty_bin(i, copy, r, rcl_t)
# If a nondominated solution wasn't found, return nothing
copy = self.checkandfit(copy)
v = copy.getfits()
if not dom2(u, v):
return copy, rcl_t
return None, rcl_t
def empty_bin(self, i, copy, r, rcl_t):
# This function moves items in box i to other boxes
for j in list(copy.getvlrep()[i]):
# Find rcl_bins
tfill = copy.gettfill()
rcl_bins = self.ls1_makercl(i, j, r, rcl_t, tfill)
if len(rcl_bins) == 0:
return copy, r, rcl_t
# Pick random bin
inew = random.choice(rcl_bins)
# Move cookie to new bin
copy.moveitem(i, j, inew)
r = self.update_spaceresiduals(r, i, inew)
r[i, 1], r[inew, 1] = rcl_t.adapt_movebins(tfill[i], tfill[inew])
return copy, r, rcl_t
def ls1_makercl(self, iold, j, r, rcl_t, tfill):
# This function returns the restricted candidate list for cookie
# j to move into based on the dot product strategy
# Set weights for the dot product array (1/boxcap, 1/coolrackcap)
weights = [1.0 / self.moop.boxcap, 1.0 / self.moop.coolrack]
# The cookie should not move into a box that is filled until after
# it is done baking
tmin = self.cookies.get(j).getbatch() * 600
tmax = rcl_t.get_tmax(tmin, 1)
options_byt = [i for i in range(self.n) if tfill[i] > tmin]
if tfill[iold] != tmin:
options_byt.remove(iold)
# Form dot product array
dparray = np.zeros(self.n)
for i in options_byt:
if tfill[i] <= tmax:
# Make sure there is space available
if r[i, 0] > 1:
tk = rcl_t.find_t_in_timeline(tfill[i])
# Filling early will reduce onrack for all after time[tk]
onrack = np.subtract(self.moop.coolrack, rcl_t.space[tk:])
maxonrack_fromtk = max(onrack)
dparray[i] = weights[0] * r[i, 0] + weights[1] * maxonrack_fromtk
# Max fill
if len(np.nonzero(dparray)[0]) > self.beta:
options = list(np.argsort(-dparray)[:self.beta])
return options
else:
options = list(np.nonzero(dparray)[0])
return options
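    # Illustrative scoring (hypothetical numbers): with boxcap = 24 and
    # coolrack = 96, a candidate bin with r[i, 0] = 6 free slots and a
    # post-move rack peak of 48 cookies scores 6/24 + 48/96 = 0.75; the
    # beta highest-scoring bins form the restricted candidate list.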
def ls2_loading(self, k, searchfrom):
# This function finds the restricted candidate list and tries to move
# cookies toward more favorable configurations to minimize the weighted avg
u = searchfrom.getfits()
r, rcl_t = self.getresiduals(searchfrom.getvlrep(), searchfrom.gettfill())
copy = deepcopy(searchfrom)
hotbins = np.argsort(searchfrom.getq0bins())
for s in range(searchfrom.openbins):
i = hotbins[-s - 1]
vlrep = copy.getvlrep()
# If there is only one item in the box, no point in moving
if len(vlrep[i]) < 2:
return k, None, rcl_t
rcl_j = self.ls2_makercl(i, vlrep)
k, newsol, rcl_t = self.search_rclj(k, i, copy, u, r, rcl_j, rcl_t)
if newsol:
return k, newsol, rcl_t
# If a nondominated solution wasn't found, return nothing
return k, None, rcl_t
def ls2_makercl(self, i, vlrep):
# This function returns the restricted candidate list for local search 2
# Restricted candidate list
binkeys = list(vlrep[i])
avglen = averageLen(vlrep)
nrcl_min = min(len(binkeys) - 1, self.beta)
nrcl = max(len(binkeys) - avglen, nrcl_min)
rcl_j = random.sample(binkeys, nrcl)
return rcl_j
def ls3_loading(self, k, searchfrom):
# This function finds the restricted candidate list for bin i and tries to
# move cookies to find a new nondominated solution. If unsuccessful, moves
# to a new bin
u = searchfrom.getfits()
r, rcl_t = self.getresiduals(searchfrom.getvlrep(), searchfrom.gettfill())
copy = deepcopy(searchfrom)
latebins = np.argsort(searchfrom.gettavail(), axis=0)
for s in range(searchfrom.openbins):
i = latebins[-s - 1]
vlrep = copy.getvlrep()
# If there is only one item in the box, no point in moving
if len(vlrep[i]) < 2:
return k, None, rcl_t
# Restricted candidate list
rcl_j = self.ls3_makercl(i, vlrep)
k, newsol, rcl_t = self.search_rclj(k, i, copy, u, r, rcl_j, rcl_t)
if newsol:
return k, newsol, rcl_t
# If a nondominated solution wasn't found, return nothing
return k, None, rcl_t
def ls3_makercl(self, i, vlrep):
# This function returns the restricted candidate list for local search 3
# Restricted candidate list
binkeys = list(vlrep[i])
n_rclj = int(0.5 * len(binkeys))
rcl_j = binkeys[-n_rclj - 1: -1]
return rcl_j
def search_rclj(self, k, i, solution, u, r, rcl_j, rcl_t):
# This function moves cookies into new boxes until either it finds a new
# nondominated solution or it runs out of candidates from this solution
for m in range(len(rcl_j)):
k += 1
j = random.choice(rcl_j)
rcl_j.remove(j)
r, rcl_t, solution = self.lsmove(i, j, r, rcl_t, solution)
# Check if modified solution is nondominated
solution = self.checkandfit(solution)
v = solution.getfits()
if not dom2(u, v):
return k, solution, rcl_t
return k, None, rcl_t
def lsmove(self, i, j, r, rcl_t, solution):
# This function determines where cookie j should move to
m = solution.getopenbins()
tfill = solution.gettfill()
# Gather bin options and pick new bin for the move
ilist = self.move_options(j, m, r, rcl_t, tfill)
inew = random.choice(ilist)
# Open a new bin or move cookie to a new bin
if inew == m:
tmin = self.get_box_tmin([j])
kwargs = {'mode': 'hload'}
t, rcl_t = self.get_feasible_tfilli(rcl_t, tmin, **kwargs)
if t:
solution.opennewbin(i, j, round(t, 1))
r[inew, 0] = self.moop.boxcap
r[inew, 1] = rcl_t.adapt_greedy_function_newbin(t)
else:
return r, rcl_t, solution
else:
solution.moveitem(i, j, inew)
r[i, 1], r[inew, 1] = rcl_t.adapt_movebins(tfill[i], tfill[inew])
r = self.update_spaceresiduals(r, i, inew)
return r, rcl_t, solution
def move_options(self, j, m, r, rcl_t, tfill):
# This function retrieves a candidate list for moving a cookie.
bcookiej = self.cookies.get(j).getbatch() # cookie batch number
tmax = rcl_t.get_tmax(bcookiej * 600, 1)
i_rlowtohigh = np.argsort(r[:m, 0], axis=0)
for i in range(m):
# Find open bin with max. residual value, moving backward thru i_rlowtohigh
lsi = i_rlowtohigh[-1 - i]
if tfill[lsi] <= tmax:
pack = packable(r[lsi, :], bcookiej, tfill[lsi])
if pack:
return [m, lsi]
# If least loaded bin won't fit item, need to open new bin.
return [m]
def bin_mutation(self, p, numls, solution):
# Heuristic to locate a better solution in terms of the first objective:
# minimizing the number of bins.
k = 0
neighbors = []
searchfrom = solution
while k < numls:
k, coolneighbor, rcl_t = self.select_mutation_operation(k, searchfrom)
if coolneighbor:
coolneighbor.updateid(p)
coolneighbor = self.ls_time(coolneighbor, rcl_t)
p += 1
neighbors.append(coolneighbor)
searchfrom = coolneighbor
else:
k = numls
return p, neighbors
def select_mutation_operation(self, k, searchfrom):
# This function selects the mutation operator
vlrep = searchfrom.getvlrep()
avg_bin_size = averageLen(vlrep)
too_small_lengths = [i for i in vlrep if 2 * len(i) <= avg_bin_size]
if too_small_lengths:
k, coolneighbor, rcl_t = self.move_cookies(k, searchfrom)
else:
rannum = random.random()
if rannum < 0.50:
k, coolneighbor, rcl_t = self.part_swap(k, searchfrom)
else:
k, coolneighbor, rcl_t = self.cookie_swap(k, searchfrom)
return k, coolneighbor, rcl_t
def time_mutation_by_heat(self, solution, rcl_t):
# This function tries a new time value for the initial hottest bin to
# see if that helps
tfill = solution.gettfill()
q0_bybin = solution.getq0bins()[:solution.getopenbins()]
i_hot_list = np.argsort(q0_bybin)
i_hot = i_hot_list[-1]
told = tfill[i_hot]
kwargs = {'mode': 'hload', 'nmove': len(solution.vlrep[i_hot])}
t_new, rcl_t = self.get_feasible_tfilli(rcl_t, told - 5.0, **kwargs)
if t_new:
neighbor = deepcopy(solution)
neighbor.edit_tfilli(i_hot, t_new)
# Adapt Greedy Function
rcl_t.adapt_changetime(told, t_new, len(neighbor.vlrep[i_hot]))
# Check if modified solution is nondominated
neighbor = self.checkandfit(neighbor)
solution = self.test_domination(solution, neighbor)
return solution
def split_bin(self, solution, rcl_t):
# This function splits the highest capacity bin into two boxes.
vlrep = solution.getvlrep()
i = self.getmaxbin(vlrep)
# Get random place to split bin
jsplit = random.randrange(1, len(vlrep[i]))
newbin = list(vlrep[i][jsplit:])
# Open new bin with feasible time value
tmin = self.get_box_tmin(newbin)
kwargs = {'mode': 'hload', 'nmove': len(newbin)}
t_new, rcl_t = self.get_feasible_tfilli(rcl_t, tmin, **kwargs)
if t_new:
tfill = solution.gettfill()
solution.opennewbin(i, newbin[0], round(t_new, 1))
inew = solution.getopenbins() - 1
rcl_t.adapt_greedy_function_newbin(t_new, add=0)
rcl_t.adapt_movebins(tfill[i], t_new)
if len(newbin) > 1:
for j in newbin[1:]:
solution.moveitem(i, j, inew)
rcl_t.adapt_movebins(tfill[i], tfill[inew])
return solution, rcl_t
def cookie_swap(self, k, searchfrom):
# This function selects two random bins and tries to swap cookies between
# them. If unsuccessful, it splits the highest capacity bin.
u = searchfrom.getfits()
r, rcl_t = self.getresiduals(searchfrom.getvlrep(), searchfrom.gettfill())
copy = deepcopy(searchfrom)
for s in range(searchfrom.openbins):
mode = random.choice(['random', 'moveheat', 'movelate'])
i1, i2 = self.select_two_bins(copy, mode)
if not i2:
newsol, rcl_t = self.split_bin(copy, rcl_t)
else:
kwargs = {'i1': i1, 'i2': i2, 'mode': mode}
newsol, rcl_t = self.perform_cookie_swap(copy, rcl_t, **kwargs)
# Will return None if it's dominated by vector u
nondominated = self.check4nondomination(u, newsol)
k += 1
if nondominated:
return k, newsol, rcl_t
# If a nondominated solution wasn't found, return nothing
return k, None, rcl_t
def perform_cookie_swap(self, solution, rcl_t, i1, i2, mode):
# This function performs the part swap between box i1 and i2
tfill = solution.gettfill()
vlrep = solution.getvlrep()
# Get cookies to swap
bini1_options = [j for j in vlrep[i1] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i2]]
bini2_options = [j for j in vlrep[i2] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i1]]
if mode == 'moveheat':
j1 = bini1_options[-1]
j2 = bini2_options[0]
else:
j1 = random.choice(bini1_options)
j2 = random.choice(bini2_options)
solution.moveitem(i1, j1, i2)
solution.moveitem(i2, j2, i1)
return solution, rcl_t
def part_swap(self, k, searchfrom):
# This function selects two random bins and tries to swap cookies between
# them. If unsuccessful, it splits the highest capacity bin.
u = searchfrom.getfits()
r, rcl_t = self.getresiduals(searchfrom.getvlrep(), searchfrom.gettfill())
copy = deepcopy(searchfrom)
for s in range(searchfrom.openbins):
mode = random.choice(['random', 'moveheat', 'movelate'])
i1, i2 = self.select_two_bins(copy, mode)
if not i2:
newsol, rcl_t = self.split_bin(copy, rcl_t)
else:
kwargs = {'i1': i1, 'i2': i2, 'mode': mode}
newsol, rcl_t = self.perform_part_swap(copy, rcl_t, **kwargs)
# Will return None if it's dominated by vector u
nondominated = self.check4nondomination(u, newsol)
k += 1
if nondominated:
return k, newsol, rcl_t
# If a nondominated solution wasn't found, return nothing
return k, None, rcl_t
def perform_part_swap(self, solution, rcl_t, i1, i2, mode):
# This function performs the part swap between box i1 and i2
# Get swap points
if mode == 'moveheat':
movetobin2, movetobin1 = self.get_heat_swap_sets(solution, i1, i2)
else:
movetobin2, movetobin1 = self.get_random_swap_sets(solution, i1, i2)
if movetobin2:
kwargs = {'i1': i1, 'movetobin2': movetobin2,
'i2': i2, 'movetobin1': movetobin1}
solution, rcl_t = \
self.make_swap_happen(solution, rcl_t, **kwargs)
else:
solution, rcl_t = self.split_bin(solution, rcl_t)
return solution, rcl_t
def make_swap_happen(self, solution, rcl_t, i1, movetobin2, i2, movetobin1):
# This function swaps a portion of box i1 with box i2
# potentially fix this: adapt rcl_t all at once instead of cookie by cookie
tfill = solution.gettfill()
for j in movetobin2:
solution.moveitem(i1, j, i2)
rcl_t.adapt_movebins(tfill[i1], tfill[i2])
for j in movetobin1:
solution.moveitem(i2, j, i1)
rcl_t.adapt_movebins(tfill[i2], tfill[i1])
return solution, rcl_t
def get_heat_swap_sets(self, solution, i1, i2):
# This function returns sets of cookies meant to reduce overall heat
# between boxes
vlrep = solution.getvlrep()
tfill = solution.gettfill()
# Determine eligible cookies
bini1_options = [j for j in vlrep[i1] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i2]]
bini2_options = [j for j in vlrep[i2] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i1]]
# Pick random swap sets
min_box_fill = min(len(vlrep[i1]), len(vlrep[i2]))
max_swap = min(len(bini1_options), len(bini2_options), min_box_fill - 1)
swap_number = random.randint(1, max_swap)
movetobin2 = bini1_options[-swap_number:]
movetobin1 = bini2_options[:swap_number]
return movetobin2, movetobin1
def get_random_swap_sets(self, solution, i1, i2):
# This function returns a random set of cookies to swap between boxes.
vlrep = solution.getvlrep()
tfill = solution.gettfill()
# Determine eligible cookies
bini1_options = [j for j in vlrep[i1] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i2]]
bini2_options = [j for j in vlrep[i2] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i1]]
# Pick random swap sets
min_box_fill = min(len(vlrep[i1]), len(vlrep[i2]))
max_swap = min(len(bini1_options), len(bini2_options), min_box_fill - 1)
swap_number = random.randint(1, max_swap)
movetobin2 = random.sample(bini1_options, swap_number)
movetobin1 = random.sample(bini2_options, swap_number)
return movetobin2, movetobin1
def getpoints_4swap(self, binitems1, t1, binitems2, t2):
# This function returns two points to perform the swap on
# Retrieve boolean lists
bool1 = self.moop.packatt(binitems1, t2)
bool2 = self.moop.packatt(binitems2, t1)
p1 = self.get_swap_point(bool1)
p2 = self.get_swap_point(bool2)
# If no swap point, return false
if not p1 or not p2:
return None, None
# Check for capacity violations
newbin1 = binitems1[:p1] + binitems2[p2:]
if len(newbin1) > self.moop.boxcap:
p2 = self.get_new_swap_point(binitems1, p1, binitems2, bool2)
newbin2 = binitems2[:p2] + binitems1[p1:]
if len(newbin2) > self.moop.boxcap:
p1 = self.get_new_swap_point(binitems2, p2, binitems1, bool1)
# Return the lists of cookies to be swapped
movetobin2 = list(binitems1[p1:])
movetobin1 = list(binitems2[p2:])
return movetobin2, movetobin1
def get_swap_point(self, booli):
# This function finds a feasible point to swap with another box
# Find starting point for bin i
starti = self.findstartforswap(booli)
if starti == len(booli):
return False
else:
pi = random.randrange(starti, len(booli))
return pi
def get_new_swap_point(self, bin_into, p1, bin_outta, bool_outta):
# This function finds a swap point that won't violate bin_into's capacity
can_accept = self.moop.boxcap - len(bin_into[:p1])
        p2 = self.get_swap_point(bool_outta)
        kappa = 0
        while len(bin_outta[p2:]) > can_accept:
            # If a feasible point isn't found after 10 tries, only swap one item
            if kappa == 10:
                return len(bin_outta) - 1
            p2 = self.get_swap_point(bool_outta)
            kappa += 1
        return p2
def findstartforswap(self, boollist):
# This function returns the index after which all values are True
start = 1
for k in range(len(boollist) - 1, 0, -1):
            if not boollist[k]:
start = k + 1
return start
return start
def move_cookies(self, k, searchfrom):
# This function selects two random bins and tries to move cookies between
# them. If unsuccessful, it splits the highest capacity bin.
u = searchfrom.getfits()
r, rcl_t = self.getresiduals(searchfrom.getvlrep(), searchfrom.gettfill())
copy = deepcopy(searchfrom)
for s in range(searchfrom.openbins):
mode = random.choice(['moveheat', 'movelate'])
i1, i2 = self.get_hot_empty_bins(copy, mode)
            if i2 is None or len(copy.vlrep[i2]) == self.moop.boxcap:
newsol, rcl_t = self.split_bin(copy, rcl_t)
else:
kwargs = {'i1': i1, 'i2': i2, 'mode': mode}
newsol, rcl_t = self.perform_cookie_move(copy, rcl_t, **kwargs)
# Will return None if it's dominated by vector u
nondominated = self.check4nondomination(u, newsol)
k += 1
if nondominated:
return k, newsol, rcl_t
# If a nondominated solution wasn't found, return nothing
return k, None, rcl_t
def perform_cookie_move(self, solution, rcl_t, i1, i2, mode):
# This function performs the move of one cookie from box i1 to i2
tfill = solution.gettfill()
vlrep = solution.getvlrep()
# Get cookies to swap
bini1_options = [j for j in vlrep[i1] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i2]]
empty_space = self.moop.boxcap - len(vlrep[i2])
max_move = min(empty_space, empty_space // 2 + 1, len(bini1_options))
nmove = random.randint(1, max_move)
for k in range(nmove):
j1 = bini1_options[-1 - k]
solution.moveitem(i1, j1, i2)
return solution, rcl_t
def select_two_bins(self, solution, mode):
# This module selects two bins for swap using specified function
vlrep = solution.getvlrep()
tfill = solution.gettfill()
if mode == 'moveheat':
i1, i2 = self.get_hot_cold_bins(vlrep, tfill, solution.getq0bins())
elif mode == 'movelate':
i1, i2 = self.get_hot_cold_bins(vlrep, tfill, solution.gettavail())
else:
# Pick random bins
i1, i2 = self.get_two_random_bins(vlrep, tfill)
return i1, i2
def get_hot_cold_bins(self, vlrep, tfill, characteristic):
# This function returns the indices of the hottest bin and the coldest
# bin that are compatible
m = len(vlrep) # number of open bins
ilist_hot = np.argsort(characteristic[:m])
for kh in range(m):
i_hot = ilist_hot[-1 - kh]
for kc in range(m - kh):
i_cold = ilist_hot[kc]
if i_hot != i_cold:
compatible = self.good_match(vlrep, tfill, i_hot, i_cold)
if compatible:
return i_hot, i_cold
return None, None
def get_hot_empty_bins(self, solution, mode):
# This function returns the indices of the hottest bin compatible with
# the emptiest bin
m = solution.getopenbins()
vlrep = solution.getvlrep()
tfill = solution.gettfill()
i2 = self.getminbin(vlrep)
if mode == 'moveheat':
ilist_hot = np.argsort(solution.getq0bins()[:m])
else:
ilist_hot = np.argsort(solution.gettavail()[:m])
for k in range(m):
i_hot = ilist_hot[-1 - k]
compatible = self.good_match(vlrep, tfill, i_hot, i2,
ignore_length=True)
if compatible:
return i_hot, i2
return None, None
def get_two_random_bins(self, vlrep, tfill):
# This function returns two individual random bins that can swap cookies
bin_pairs = list(combinations(range(len(vlrep)), 2))
for bp in range(len(bin_pairs)):
i1, i2 = random.choice(bin_pairs)
can_swap = self.good_match(vlrep, tfill, i1, i2)
if can_swap:
return i1, i2
return None, None
def good_match(self, vlrep, tfill, i1, i2, ignore_length=False):
# This function returns True if i1 and i2 are a good match for swapping
# and False if they are a bad match
if i1 == i2:
return False
if not ignore_length:
if len(vlrep[i1]) <= 1 or len(vlrep[i2]) <= 1:
return False
list1 = [j for j in vlrep[i1] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i2]]
if not list1:
return False
list2 = [j for j in vlrep[i2] if self.cookies.get(j).getbatch()
* self.moop.tbatch < tfill[i1]]
if not list2:
return False
# If made it past conditions, return True
return True
def getrandombin(self, vlrep):
# This function returns a random bin with more than one item in it
bins = range(len(vlrep))
bini = random.choice(bins)
while len(vlrep[bini]) <= 1:
bini = random.choice(bins)
return bini
def getrandsecondbin(self, i1, vlrep, tfill):
# This function returns a second random bin that is not
# bin i1 and that items in bin i1 can be moved to
i2 = random.choice(range(len(vlrep)))
kappa = 1
while not self.good_match(vlrep, tfill, i1, i2):
if kappa == len(vlrep):
return None
i2 = random.choice(range(len(vlrep)))
kappa += 1
return i2
def getmaxbin(self, vlrep):
# This function returns the index of the fullest bin.
bincapacity = np.zeros(len(vlrep))
for i in range(len(vlrep)):
bincapacity[i] = len(vlrep[i])
bini = np.argmax(bincapacity)
return bini
def getminbin(self, vlrep):
# This function returns the index of the emptiest bin.
bincapacity = np.zeros(len(vlrep))
for i in range(len(vlrep)):
bincapacity[i] = len(vlrep[i])
minbin = np.argmin(bincapacity)
return minbin
def getresiduals(self, vlrep, tfill):
# This function calculates the residual matrix associated with a given
# dynamic bin packing loading. The first column represents the open box
# capacities, and the second column represents the maximum number of
# cookies that can be added to the cooling rack right before tfill_i
coolrack = self.moop.coolrack
        r = np.zeros((self.n, 2), dtype=int)
# Set box capacity residuals
for i in range(len(vlrep)):
r[i, 0] = self.moop.boxcap - len(vlrep[i])
r[i, 1] = coolrack
# Set cooling rack capacity residuals
n_b = self.n // self.moop.nbatches
rcl_t = RCLtime(coolrack, self.moop.fillcap, n_b,
self.moop.tbatch, self.moop.nbatches)
r[:len(vlrep), 1] = rcl_t.initialize_withtfill(len(vlrep), vlrep, tfill)
return r, rcl_t
def update_spaceresiduals(self, r, i, inew):
# This function updates the space residual r after a cookie moves
# from box i to box inew
# Update r: box capacity
r[i, 0] += 1
r[inew, 0] -= 1
return r
def check4nondomination(self, u, solution):
# Check if modified solution is nondominated
solution = self.checkandfit(solution)
v = solution.getfits()
if not dom2(u, v):
return True
else:
return False
def countonrack(self, t, solution):
# Cookies from boxes filled after t might be on rack
vlrep = solution.getvlrep()
tfill = solution.gettfill()
timecheckindices = np.where(tfill > t)
nrackitems = 0
for i in timecheckindices[0]:
for j in vlrep[i]:
onrack = self.moop.rackij(t, tfill[i], self.cookies.get(j))
nrackitems += onrack
return nrackitems
def calclowerbound(self):
# This function calculates theoretical lower bound for the number of
# bins. It assumes this is the total number of cookies divided by
# the box capacity.
minbins = ceil(float(self.n) / self.moop.boxcap)
self.lb = int(minbins)
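    # e.g. (hypothetical sizes) n = 1000 cookies with boxcap = 24 gives
    # lb = ceil(1000 / 24) = 42 boxes.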
def getub(self):
# Returns the upper bound (bin capacity)
return self.moop.boxcap
def getcookies(self):
# Returns the list of items to pack
return self.cookies
def getlb(self):
# Returns the theoretical lower bound
return self.lb
class NewSolution:
# This class performs the GRASP creation of a new solution.
def __init__(self, beta, n, cookies, moop):
self.beta = beta # Cardinality restriction
self.n = int(n) # Number of cookies to sort
self.cookies = cookies # dictionary of item objects
self.moop = moop # Multiobjective problem class
self.m = 0 # initialize open bins count
self.r = np.zeros((n, 2)) # Residual capacity matrix
        self.x = np.zeros((n, n), dtype=int)
        self.y = np.zeros(n, dtype=int)
        self.vlrep = []
        self.tfill = np.zeros(n, dtype=float)
# Initialize restricted candidate list
n_b = self.n // self.moop.nbatches
self.rcl_t = RCLtime(moop.coolrack, moop.fillcap, n_b,
moop.tbatch, moop.nbatches)
def make_newsol(self, index, *args):
# This function takes the solution from generate_newsol and creates
# a CookieSol instance.
# Possible args: a newgenes list containing a chromosome representation
# and a suggested tfill.
if args:
self.generate_newsol_from_chromosome(args[0], args[1])
else:
self.generate_newsol()
newsol = solmaker.CookieSol(index, self.x, self.y, self.vlrep, self.tfill)
return newsol
def generate_newsol(self):
# This function generates a new solution from scratch using GRASP
modes = ['ss', 'hload'] # Modes for retrieving new tfill time
self.initialize_greedy_tfill()
self.open_new_bin(0, 0)
# Set strategy for the loading
theta_i = random.random()
for j in range(1, self.n):
rcl_i = self.get_rcl_bins(theta_i, j)
i = random.choice(rcl_i)
if self.y[i] == 0:
self.tfill[i] = self.get_feasible_tfilli(j, modes)
self.open_new_bin(i, j)
else:
self.vlrep[i].append(j)
self.r[i, 0] -= 1
self.rcl_t.adapt_greedy_function_addtobin(self.tfill[i])
self.r[:self.m, 1] = \
self.rcl_t.retrieve_space_by_tfill(self.m, self.tfill)
self.constructx()
def generate_newsol_from_chromosome(self, chrom, tfill_suggested):
# This function generates a new solution based on a given chromosome
modes = ['ss', 'hload'] # Modes for retrieving new tfill time
self.initialize_greedy_tfill(*tfill_suggested)
chrom = self.initialize_first_bin(chrom)
# Set strategy for the loading
theta_i = random.random()
for j in chrom:
rcl_i = self.get_rcl_bins(theta_i, j)
i = random.choice(rcl_i)
if self.y[i] == 0:
self.tfill[i] = self.pick_tfilli(j, modes, tfill_suggested)
self.open_new_bin(i, j)
else:
self.vlrep[i].append(j)
self.r[i, 0] -= 1
self.rcl_t.adapt_greedy_function_addtobin(self.tfill[i])
self.r[:self.m, 1] = \
self.rcl_t.retrieve_space_by_tfill(self.m, self.tfill)
self.constructx()
def initialize_greedy_tfill(self, *args):
# This function initializes t_fill
# Calculate tfill_0 using inverse cdf and set residual capacity
if args:
# args = tfill_suggested
self.tfill[0] = self.rcl_t.pick_suggested_t(args, self.moop.tbatch)
else:
self.tfill[0] = self.rcl_t.get_new_t(self.moop.tbatch)
def initialize_first_bin(self, chrom):
# This function finds the first cookie in list chrom that can be packed
# at tfill[0] and opens the first bin with that cookie
for j in chrom:
if self.moop.cookiedonebaking(j, self.tfill[0]):
self.open_new_bin(0, j)
chrom.remove(j)
return chrom
print('Error: NewSolution picked a time that cannot be filled.')
def pick_tfilli(self, j, modes, tfill_maybe):
# This module tries to use one of the time values from tfill
tmin = self.cookies.get(j).getbatch() * self.moop.tbatch
# If tmin when coolrack is overfull, find least worst solution
tk = self.find_t_in_trange(tmin)
if self.rcl_t.space[tk] <= 0:
t_new = self.rcl_t.find_least_worst_newt(tmin)
return t_new
t_possible = self.get_t_from_oldtfill(tmin, tfill_maybe)
if t_possible:
return t_possible
else:
# If nothing in tfill_maybe worked, return new value:
t_new = self.get_feasible_tfilli(j, modes)
return t_new
def get_t_from_oldtfill(self, tmin, tfill_maybe):
# This function returns a feasible time from tfill_maybe
# First establish tmax based on moving 1 cookie from the rack
tmax = self.rcl_t.get_tmax(tmin, 1)
t_options = np.unique(tfill_maybe)
for i in range(len(t_options)):
if t_options[i] < tmax:
# Avoid reusing a value from tfill_maybe
if t_options[i] not in self.tfill:
if self.rcl_t.time_feasible(t_options[i], tmin):
return t_options[i]
return None
def get_feasible_tfilli(self, j, modes):
# This function locates a new value for tfill[i] that doesn't violate
# rack or fill limits
theta_t = random.randint(0, 1)
tmin = self.cookies.get(j).getbatch() * self.moop.tbatch
        # Find fill time for box i
        t_new, p_t = self.find_new_time_value(tmin, modes[theta_t])
        if t_new is None:
            return None
        kappa = 0  # Counter to exit loop
        # Check if possible to fill in period
        while self.rcl_t.res_fill[p_t] < 1:
            if kappa == 10:
                return None
            # If not possible, find new time value
            t_new, p_t = self.find_new_time_value(tmin, modes[theta_t])
            if t_new is None:
                return None
            kappa += 1
        return t_new
    def find_new_time_value(self, tmin, mode):
        # This module retrieves a new time value and also returns which period
        # it belongs to; returns (None, None) if no feasible time was found
        t_new = self.rcl_t.get_new_t(tmin, mode=mode)
        if t_new is None:
            return None, None
        t_t = self.find_t_in_fill_periods(t_new)
        return t_new, t_t
def find_t_in_fill_periods(self, t):
# If the new time value is beyond the current fill periods, extend
while t > self.rcl_t.t_t[-1]:
self.rcl_t.extend_fill_periods()
# Find the period containing t_new
tlist = np.where(t >= np.array(self.rcl_t.t_t))[0]
return tlist[-1]
def find_t_in_trange(self, t):
# If the new time value is beyond the current timeline, extend
while t > self.rcl_t.trange[-1]:
self.rcl_t.extend_timeline()
tklist = np.where(np.array(self.rcl_t.trange) <= t)[0]
return tklist[-1]
def get_rcl_bins(self, theta_i, j):
# This module selects the strategy based on theta_i and returns
# the corresponding restricted candidate list.
if theta_i < 0.33:
# Least loaded strategy
rcl_i = self.llmove(j)
elif theta_i < 0.66:
# Weighted max strategy
rcl_i = self.wmaxmove(j)
else:
# Combo-t strategy
rcl_i = self.combot_move(j)
# Return either a new bin or the list found above
if not rcl_i:
rcl_i = self.find_alternative_bin(j)
return rcl_i
else:
return rcl_i
def llmove(self, j):
# This module performs the sorting for module ll.
# The goal of this strategy is to balance the loading of the boxes.
rcl_i = []
i_rlowtohigh = np.argsort(self.r[:self.m, 0], axis=0)
# Add new bin as an option if others are starting to get full
if self.r[i_rlowtohigh[-1], 0] <= 0.5 * self.moop.boxcap:
rcl_i.append(self.m)
for k in range(self.m):
# Find open bin with max. residual value, moving backward thru i_rlowtohigh
lli = i_rlowtohigh[- 1 - k]
bcookiej = self.cookies.get(j).getbatch()
pack = packable(self.r[lli, :], bcookiej, self.tfill[lli])
if pack:
rcl_i.append(lli)
if len(rcl_i) == self.beta:
return rcl_i
return rcl_i
def wmaxmove(self, j):
# This module determines the restricted candidate list by the weighted
# max strategy. The goal is to keep the number of boxes to a minimum.
rcl_i = []
# Gather weights: space on rack / maximum space over time
maxval = np.max(self.r[:self.m, 1])
weights = np.zeros(self.m)
for k in range(self.m):
weights[k] = self.r[k, 1] / maxval
# Calculate weighted residuals
wresidual = np.multiply(self.r[:self.m, 0], weights)
i_rlowtohigh = np.argsort(wresidual, axis=0)
for k in range(self.m):
# Find open bin with min. weighted residual value
i = i_rlowtohigh[k]
bcookiej = self.cookies.get(j).getbatch()
pack = packable(self.r[i, :], bcookiej, self.tfill[i])
if pack:
rcl_i.append(i)
if len(rcl_i) == self.beta // 2:
return rcl_i
return rcl_i
def combot_move(self, j):
# This module determines the restricted candidate list by the combo-t
# strategy. The goal is to reduce the maximum time until the boxes
# can be moved to the store front.
n_b = self.n // self.moop.nbatches # Number of cookies per batch
jmax = j - (j % n_b) # Max. cookie no. for heat restriction
rcl_i = []
i_rlowtohigh = np.argsort(self.r[:self.m, 0], axis=0)
# Add new bin as an option after all bins meet a minimum level
if self.r[i_rlowtohigh[-1], 0] <= 0.7 * self.moop.boxcap:
rcl_i.append(self.m)
for k in range(self.m):
# Find open bin with max. residual value
lli = i_rlowtohigh[- 1 - k]
otherbatch = [jo for jo in self.vlrep[lli] if jo < jmax]
# Heat restriction
if (self.r[lli, 0] <= 0.5 * self.moop.boxcap) & \
(len(otherbatch) == 0):
pass
else:
bcookiej = self.cookies.get(j).getbatch()
pack = packable(self.r[lli, :], bcookiej, self.tfill[lli])
if pack:
rcl_i.append(lli)
if len(rcl_i) == self.beta:
return rcl_i
return rcl_i
def open_new_bin(self, i, j):
# This module opens a new bin i with cookie j
self.m += 1
self.y[i] = 1
self.vlrep.insert(i, [j])
self.r[i, 0] = self.moop.boxcap - 1
# Adapt Greedy Function (time)
self.rcl_t.adapt_greedy_function_newbin(self.tfill[i])
t_t = self.find_t_in_fill_periods(self.tfill[i])
self.rcl_t.res_fill[t_t] -= 1
self.r[:self.m, 1] = self.rcl_t.retrieve_space_by_tfill(self.m, self.tfill)
def find_alternative_bin(self, j):
# If tmin when coolrack is overfull, find least worst solution
tmin = self.cookies.get(j).getbatch() * self.moop.tbatch
tk = self.find_t_in_trange(tmin)
if self.rcl_t.space[tk] <= 0:
# Find least-worst alternative
options = [i for i in range(self.m)
if tmin < self.tfill[i] and self.r[i, 0] > 0]
if options:
return options
else:
return [self.m]
else:
return [self.m]
def constructx(self):
# This function transforms the variable length representation into
# the x-matrix
for i in range(self.m):
for j in self.vlrep[i]:
self.x[i, j] = 1
checkformismatch(self.x, self.vlrep)
class RCLtime:
# This class maintains and updates the restricted candidate list for a
# unique t_fill
def __init__(self, coolrack, fillcap, n_b, tbatch, nbatches):
self.coolrack = coolrack # Cooling rack capacity
self.fillcap = fillcap # Fill period limit
self.n_b = n_b # Number of cookies in one batch
self.tbatch = tbatch # Time to cook one batch
self.nbatches = nbatches # Number of batches cooked
# Set the time range, extend one cycle past last pull
self.trange = [(b + 1) * self.tbatch for b in range(self.nbatches + 1)]
# Space on the cooling rack as a function of time
self.space = [self.coolrack - (b + 1) * self.n_b
for b in range(self.nbatches)]
self.space.append(self.space[-1])
# Include restrictions for period fill limits
n_period = 2 * (nbatches - 1) + 2
self.t_t = [self.tbatch * (1.0 + t / 2.0) for t in range(n_period)]
self.res_fill = [fillcap for _ in range(n_period)]
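        # Illustrative setup (hypothetical parameters): nbatches = 3, n_b = 100,
        # tbatch = 600 and coolrack = 300 give trange = [600, 1200, 1800, 2400]
        # and space = [200, 100, 0, 0]; box-filling limits are tracked per
        # half-batch period starting at t_t = [600, 900, 1200, 1500, 1800, 2100].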
def initialize_withtfill(self, m, vlrep, tfill):
# This function adds the information from vlrep and tfill
# into the trange and space lists
# First fix the cooling rack related items
        r2 = np.zeros(m, dtype=int)  # Collect residual values
i_lowtohigh = list(np.argsort(tfill[:m], axis=0))
for i in i_lowtohigh:
r2[i] = self.adapt_greedy_function_newbin(tfill[i],
add=len(vlrep[i]))
# Then fix the fill period related items
t_latest = np.amax(tfill)
while t_latest > self.t_t[-1]:
self.extend_fill_periods()
for t in range(len(self.t_t) - 1):
p_t = [i for i in range(m)
if self.t_t[t] <= tfill[i] < self.t_t[t + 1]]
self.res_fill[t] -= len(p_t)
return r2
def pick_suggested_t(self, t_maybe, tmin):
# This function returns a possible starting t-value, first by trying
# the suggested t values in t_maybe, and then by finding a feasible one
for i in range(len(t_maybe)):
if t_maybe[i] < self.trange[-1]:
if self.time_feasible(t_maybe[i], tmin):
return t_maybe[i]
t_new = self.get_new_t(tmin)
return t_new
def time_feasible(self, t, tmin):
# This function checks if time t is feasible to open a new bin
if t < tmin:
return False
while self.trange[-1] < t:
self.extend_timeline()
tk = self.find_t_in_timeline(t)
# To be feasible, the cooling rack cannot be overcrowded
if self.space[tk] > 0:
return self.time_period_feasible(t)
# If overcrowded, return False
return False
def time_period_feasible(self, t):
# This module determines if time value t is valid within period fill
# limit constraints.
if t < self.t_t[0]:
return False
ttlist = np.where(np.array(self.t_t) <= t)[0]
# The number of boxes filled during the period < limit
if self.res_fill[ttlist[-1]] > 0:
return True
else:
return False
def get_new_t(self, tmin, mode='ss', nmove=1, told=None):
# This function returns a random time on the cumulative
# distribution function of space(trange) greater than tmin
t = 0
tmax = self.get_tmax(tmin, nmove)
dist = self.retrieve_pdensityfunction(mode)
c_min = dist.cdf(tmin)
c_max = dist.cdf(tmax)
if c_min == c_max:
return None
k = 0
while round(t) <= tmin or round(t) >= tmax:
rannum = random.uniform(c_min, c_max)
t = dist.ppf(rannum)
k += 1
if k == 10:
return None
return round(t)
def retrieve_pdensityfunction(self, mode):
# This function returns the needed pdf
if mode == 'hload':
dist = PiecewiseLinearPDF(self.trange, self.space)
else:
dist = PiecewisePDF(self.trange, self.space)
return dist
def find_least_worst_newt(self, tmin):
# This function returns the least worst time for a box to be opened
# based on tmin.
tklist = np.where(np.array(self.trange) >= tmin)[0]
max_space = self.space[tklist[0]]
tmax = self.get_tmax(tmin, max_space)
t_new = random.uniform(tmin + 1, tmax)
kappa = 0
while not self.time_period_feasible(t_new):
if kappa == 10:
return tmin + 1.0
t_new = random.uniform(tmin + 1, tmax)
kappa += 1
return round(t_new)
def get_tmax(self, tmin, nmove):
# This function determines if the get_new_t function needs to limit its
# search to a max. value. If not, it returns the last trange value.
tklist = np.where(np.array(self.trange) > tmin)[0]
for tk in tklist:
if self.space[tk] - nmove <= 0:
return self.trange[tk]
# If did not find t_max, and enough space at end of timeline, extend
if self.space[-1] >= nmove:
self.extend_timeline()
return self.trange[-1]
def adapt_greedy_function_newbin(self, t, add=1):
# This function updates the space and trange lists after a new bin is
# opened, add is the space being opened by # of cookies being removed
# If t is larger than the range, add it on to the end
if t > self.trange[-1]:
self.trange.append(t)
self.space.append(self.space[-1])
self.update_space(-1, add=add)
return self.space[-1]
# If the new t is the same as the last t in trange, extend it by some
elif t == self.trange[-1]:
self.update_space(-1, add=add)
self.extend_timeline()
return self.space[-2]
else:
ilist = np.where(np.array(self.trange) >= t)[0]
if t == self.trange[ilist[0]]:
start = ilist[0]
else:
self.trange.insert(ilist[0], t)
self.space.insert(ilist[0], self.space[ilist[0] - 1] + add)
start = ilist[0] + 1
for tk in range(start, len(self.space)):
self.update_space(tk, add=add)
return self.space[ilist[0]]
def adapt_greedy_function_addtobin(self, t):
# This function updates the space and trange lists after a cookie is
# added to a box and removed from the cooling rack at time t
tklist = np.where(np.array(self.trange) >= t)[0]
for tk in tklist:
self.update_space(tk)
return self.space[tklist[0]]
def adapt_movebins(self, t1, t2):
# This function updates the space list after a cookie is moved from
# the box filled at t1 to the one filled at t2
tklist1 = np.where(np.array(self.trange) >= t1)[0]
tklist2 = np.where(np.array(self.trange) >= t2)[0]
tklist = np.setxor1d(tklist1, tklist2)
if t1 == t2:
return self.space[tklist1[0]], self.space[tklist1[0]]
elif t1 < t2:
for tk in tklist:
self.update_space(tk, add=-1)
else:
for tk in tklist:
self.update_space(tk)
return self.space[tklist1[0]], self.space[tklist2[0]]
def adapt_changetime(self, told, tnew, nmove):
# This function updates the trange and space lists to account for a bin
# being filled at tnew instead of told.
# nmove is the size of the box being changed
while tnew > self.trange[-1]:
self.extend_timeline()
tklist1 = np.where(np.array(self.trange) >= told)[0]
tklist2 = np.where(np.array(self.trange) >= tnew)[0]
tklist = np.setxor1d(tklist1, tklist2)
if told < tnew:
for tk in tklist:
self.update_space(tk, add=-nmove)
else:
for tk in tklist:
self.update_space(tk, add=nmove)
self.trange.insert(tklist2[0], tnew)
self.space.insert(tklist2[0], self.space[tklist2[0] - 1] + nmove)
return self.space
def update_space(self, tk, add=1):
# This function updates the space list at time tk, assuming one cookie
# was removed from the cooling rack
self.space[tk] += add
if self.space[tk] > self.coolrack:
self.space[tk] = self.coolrack
def retrieve_space_by_tfill(self, m, tfill):
# This function returns the space residuals matching tfill
        r2 = np.zeros(m, dtype=int)  # Collect residual values
for i in range(m):
ilist = np.where(np.array(self.trange) == tfill[i])[0]
r2[i] = self.space[ilist[0]]
return r2
def find_t_in_timeline(self, t):
tklist = np.where(np.array(self.trange) > t)[0]
tk = tklist[0] - 1
return tk
def extend_timeline(self):
# This function extends trange by one batch time period.
new_tlast = self.trange[-1] + 0.5 * self.tbatch
self.trange.append(new_tlast)
self.space.append(self.space[-1])
def extend_fill_periods(self):
# This function extends t_t by one period
self.t_t.append(self.t_t[-1] + 0.5 * self.tbatch)
self.res_fill.append(self.fillcap)
class PiecewisePDF:
# This class defines a piecewise function along with its pdf and cdf
def __init__(self, trange, space):
self.tchunk = np.ediff1d(trange)
space_array = np.array(space)
for tk in range(len(space_array)):
if space_array[tk] < 0.0:
space_array[tk] = 0.0
area_chunks = np.multiply(self.tchunk, space_array[:-1])
area_total = np.sum(area_chunks)
self.tk = np.array(trange) # time range for distribution
self.pk = space_array / float(area_total) # probability at tk
self.ck = np.cumsum(np.multiply(self.pk[:-1], self.tchunk)) # cumulative probability
self.ck = np.insert(self.ck, 0, 0.0)
def pdf(self, t):
# This function returns the probability at time t
if t < self.tk[0]:
return 0.0
listi = np.where(t < self.tk)
probt = self.pk[listi[0][0] - 1]
return probt
def cdf(self, t):
# This function returns the cumulative probability of quantile t
if t < self.tk[0]:
return 0.0
i = np.where(t == self.tk)[0]
        if i.size:
return self.ck[i[0]]
else:
ilist = np.where(t < self.tk)[0]
i1 = ilist[0] - 1
i2 = ilist[0]
slope = (self.ck[i2] - self.ck[i1]) / (self.tk[i2] - self.tk[i1])
p_c = slope * (t - self.tk[i1]) + self.ck[i1]
return p_c
def ppf(self, p):
# This function returns the time associated with percentile p
# This is the inverse cumulative distribution function.
i = np.where(p == self.ck)[0]
        if i.size:
return self.tk[i[0]]
else:
ilist = np.where(p < self.ck)[0]
# Linear function: t = (t_high - t_low)/(c_high - c_low)* (p - c_low) + t_low
i1 = ilist[0] - 1
i2 = ilist[0]
slope = (self.tk[i2] - self.tk[i1]) / (self.ck[i2] - self.ck[i1])
return slope * (p - self.ck[i1]) + self.tk[i1]
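    # Worked example (hypothetical numbers): with trange = [600, 1200, 1800]
    # and space = [20, 10, 10], the area under space(t) is 20*600 + 10*600
    # = 18000, so cdf(1200) = 12000/18000 = 2/3 and ppf(2/3) returns 1200.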
class PiecewiseLinearPDF:
# This class defines a piecewise function along with its pdf and cdf, with a
# linear increase in probability over each given time range
def __init__(self, trange, space):
self.tk = np.array(trange) # time range for distribution
self.space_array = np.array(space) # space available in each time range
for tk in range(len(self.space_array)):
if self.space_array[tk] < 0.0:
self.space_array[tk] = 0.0
self.tchunk = np.ediff1d(trange) # differences between time values
area_chunks = np.multiply(self.tchunk, self.space_array[:-1])
self.area_total = float(np.sum(area_chunks)) # total area under the space(t) curve
self.ck = np.cumsum(np.divide(area_chunks, self.area_total)) # cumulative probability
self.ck = np.insert(self.ck, 0, 0.0)
def pdf(self, t):
# This function returns the probability at time t
if t < self.tk[0]:
return 0.0
listi = np.where(t < self.tk)[0]
k = listi[0] - 1
# Linear function: probt = [(2 * space(tk) - 0) / (tk+1 - tk) * (t - tk)] / totalarea
slope = 2 * (self.space_array[k]/self.area_total)/self.tchunk[k]
probt = slope * (t - self.tk[k])
return probt
def cdf(self, t):
# This function returns the cumulative probability of quantile t
if t < self.tk[0]:
return 0.0
i = np.where(t == self.tk)[0]
        if i.size:
return self.ck[i[0]]
else:
ilist = np.where(t < self.tk)[0]
k = ilist[0] - 1 # index for lower boundary of chunk
slope = 2 * (self.space_array[k] / self.area_total) / self.tchunk[k]
p_c = slope * (t - self.tk[k]) ** 2 / 2 + self.ck[k]
return p_c
def ppf(self, p):
# This function returns the time associated with percentile p
# This is the inverse cumulative distribution function.
i = np.where(p == self.ck)[0]
        if i.size:
return self.tk[i[0]]
else:
ilist = np.where(p < self.ck)[0]
# Quad function: t = sqrt(2*(p-c_low)/slope) + t_low
k = ilist[0] - 1
slope = 2 * (self.space_array[k]/self.area_total)/self.tchunk[k]
x = sqrt(2 * (p - self.ck[k]) / slope)
return x + self.tk[k]
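    # Worked example (hypothetical numbers): with trange = [0, 600] and
    # space = [10, 10], area_total = 6000 and the density rises linearly from
    # zero, so cdf(t) = (t/600)**2 within the chunk; cdf(300) = 0.25 and
    # ppf(0.25) = 300.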
def dom2(u, v):
# Determines if fitness vector u dominates fitness vector v
# This function assumes a minimization problem.
# For u to dominate v, every fitness value must be either
# equal to or less than the value in v AND one fitness value
# must be less than the one in v
equaltest = np.allclose(u, v)
    if equaltest:
# If u == v then nondominated
return False
# less_equal returns boolean for each element u[i] <= v[i]
domtest = np.less_equal(u, v)
return np.all(domtest)
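# Example: dom2((1, 2, 3), (1, 3, 3)) is True, while equal vectors are
# mutually nondominated, so dom2((1, 2, 3), (1, 2, 3)) is False.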
def packable(ri, batch, tfilli):
# This module checks to see if cookie j can fit inside bin i at time tfilli
# Capacity constraints
r1 = ri[0] - 1
r2 = ri[1] - 1
# Time constraint: tbatch = 10 min = 600 s
t_cook = batch * 600
return r1 >= 0 and r2 >= 0 and t_cook < tfilli
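# e.g. packable(np.array([3, 5]), batch=1, tfilli=900.0) is True: the box and
# the cooling rack both have residual room and the batch finishes baking at
# t = 1 * 600 = 600 s, before the box is filled at t = 900 s.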
def checkformismatch(x, vlrep, out=sys.stdout):
# This function identifies if the given solution does not have an x-matrix
# and a variable length representation that match.
for i in range(len(vlrep)):
for j in vlrep[i]:
if x[i, j] != 1:
                out.write('Error: NewSolution is not coordinated on item %d\n' % j)
def averageLen(lst):
# Calculates the average length of lists inside a list, returns integer value
lengths = [len(i) for i in lst]
return 0 if len(lengths) == 0 else (int(sum(lengths) / len(lengths)))
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
# This function determines if value a and value b are about equal
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
if __name__ == '__main__':
    print('grasp.py needs to be combined with coolcookies.py')

# scripts/copy_bundles.py
import argparse
import csv
from datetime import datetime
import logging
import sys
import time
from typing import Set, Tuple
from urllib.parse import urlparse, urlunparse
from botocore.config import Config
from botocore.exceptions import ClientError
from hca.util import SwaggerAPIException
from azul import config, require
from azul.dss import MiniDSS, shared_dss_credentials
from azul.logging import configure_script_logging
from azul.threads import DeferredTaskExecutor
from azul.types import MutableJSON
logger = logging.getLogger(__name__)
class CopyBundle(DeferredTaskExecutor):
def main(self):
if self.args.shared:
with shared_dss_credentials():
errors = self.run()
else:
errors = self.run()
if errors:
            for e in errors:
                # S3 errors often refer to the key they occurred for, which provides useful context
                key = None
                if isinstance(e, ClientError):
                    key = getattr(e, 'response', {}).get('Error', {}).get('Key')
                if key is None:
                    logger.error('Error in deferred task:\n%s', e)
                else:
                    logger.error('Error in deferred task for key %s:\n%s', key, e)
raise RuntimeError(f'Some bundles or files could not be copied. '
f'The total number of failed tasks is {len(errors)}.', )
@classmethod
def _parse_args(cls, argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--source', '-s', metavar='URL', type=urlparse,
default=config.dss_endpoint,
help='The URL of the DSS REST API from which to copy the bundles (default: %(default)s).')
parser.add_argument('--destination', '-d', metavar='URL', type=urlparse,
default=config.dss_endpoint,
help='The URL of the DSS REST API to which to copy the bundles (default: %(default)s).')
parser.add_argument('--personal', '-P', dest='shared', action='store_false', default=True,
help="Do not use the shared credentials of the Google service account that represents the "
"current deployment, but instead use personal credentials for authenticating to the "
"DSS. When specifying this option you will need to a) run `hca dss login` prior to "
"running this script or b) set GOOGLE_APPLICATION_CREDENTIALS to point to another "
"service account's credentials.")
version = parser.add_mutually_exclusive_group()
version.add_argument('--keep-version', '-K', dest='version', action='store_const', const='keep',
default='keep',
help="This is the default. Use the original version string for each copy of a file or "
"bundle. This mode is idempotent when used together with --keep-uuid or --map-uuid.")
version.add_argument('--set-version', '-S', metavar='VERSION', dest='version', type=cls._validate_version,
help=f'Set the version of bundle and file copies to the given value. This mode is '
f'idempotent but it will lead to conflicts if the input contains multiple versions '
f'of the same bundle or file. The version must be a string like '
f'{cls._new_version()}.')
version.add_argument('--map-version', '-M', metavar='VERSION', dest='version', type=float,
help='Set the version of bundle and file copies to the version of the orginal plus/minus '
'the specified duration in seconds. This mode is idempotent but has a low '
'probability of introducing collisions.')
version.add_argument('--new-version', '-N', dest='version', action='store_const', const='new',
help='Allocate a new version for copies of bundles and files. This is not idempotent '
'because it creates new files and bundles everytime the program is run.')
parser.add_argument('--fix-tags', '-f', action='store_true', default=False,
help="Add checksum tags to the blob objects in the source (!) DSS if necessary.")
input_ = parser.add_mutually_exclusive_group(required=True)
input_.add_argument('--bundle', '-b', metavar='UUID.VERSION', nargs='+', dest='bundles',
help='One or more fully qualified identifiers (FQID) of bundles to be copied')
input_.add_argument('--manifest', '-m', metavar='PATH')
parser.add_argument('--prefix', '-p', type=str, metavar='HEX', default='',
help='Only copy input bundles whose UUID begins with the given string. Applied to both '
'--bundles and --manifest but really only makes sense with the latter where it can '
'be used copy only a deterministic subset of the bundles in the manifest.')
parser.add_argument('--suffix', '-x', metavar='HEX', type=str, default='',
help='Only copy input bundles whose UUID ends in the given string. Applied to both '
'--bundles and --manifest but really only makes sense with the latter where it can '
'be used copy only a deterministic subset of the bundles in the manifest.')
args = parser.parse_args(argv)
return args
num_workers = 32
def __init__(self, argv) -> None:
super().__init__(num_workers=self.num_workers)
self.args = self._parse_args(argv)
self.source = MiniDSS(dss_endpoint=urlunparse(self.args.source),
config=Config(max_pool_connections=self.num_workers))
self.destination = self._new_dss_client()
def _new_dss_client(self):
return config.dss_client(dss_endpoint=urlunparse(self.args.destination),
adapter_args=dict(pool_maxsize=self.num_workers))
def _run(self):
if self.args.bundles:
bundle_fqids = {(uuid, version)
for uuid, _, version in (fqid.partition('.')
for fqid in self.args.bundles)}
else:
with open(self.args.manifest) as f:
manifest = csv.DictReader(f, delimiter='\t')
columns = {'bundle_uuid', 'file_uuid'}
require(columns.issubset(manifest.fieldnames),
f'Expecting TSV with at least these columns: {columns}')
bundle_fqids = {(row['bundle_uuid'], row['bundle_version']) for row in manifest}
self._copy_bundles(bundle_fqids)
def _copy_bundles(self, bundle_fqids: Set[Tuple[str, str]]):
for bundle_fqid in bundle_fqids:
bundle_uuid, bundle_version = bundle_fqid
if bundle_uuid.endswith(self.args.suffix) and bundle_uuid.startswith(self.args.prefix):
self._defer(self._copy_files, bundle_uuid, bundle_version)
def _copy_files(self, bundle_uuid, bundle_version):
logger.info('Getting bundle %s, version %s', bundle_uuid, bundle_version)
manifest = self.source.get_bundle(uuid=bundle_uuid,
version=bundle_version,
replica='aws')
files = manifest['files']
logger.info('Copying %i file(s) from bundle %s, version %s',
len(files), bundle_uuid, bundle_version)
file: MutableJSON
futures = [self._defer(self._copy_file, bundle_uuid, bundle_version, file) for file in files]
self._defer(self._copy_bundle, bundle_uuid, bundle_version, manifest, run_after=futures)
def _copy_file(self, bundle_uuid, bundle_version, file, attempt=0):
attempt += 1
logger.info('Copying file %r from bundle %s, version %s', file, bundle_uuid, bundle_version)
source_url = self.source.get_native_file_url(uuid=(file['uuid']),
version=(file['version']),
replica='aws')
new_file = dict(uuid=file['uuid'],
version=(self._copy_version(file['version'])),
creator_uid=0,
source_url=source_url)
logger.info('Creating file %r', new_file)
try:
# noinspection PyProtectedMember
self.destination.put_file._request(new_file)
except SwaggerAPIException as e:
if e.code == 422 and e.reason == 'missing_checksum' and self.args.fix_tags and attempt < 10:
logger.warning('Target DSS complains that source blob for file %s, version %s lacks checksum tags, '
'retagging in %is.', file['uuid'], file['version'], attempt)
self.source.retag_blob(uuid=(file['uuid']),
version=(file['version']),
replica='aws')
# Object tag updates are eventually consistent so the DSS might not see the tag update
# immediately. Keep trying until it does
self._defer(self._copy_file, bundle_uuid, bundle_version, file, attempt=attempt, delay=attempt)
else:
raise
else:
# Update the source manifest to refer to the new bundle
file['version'] = new_file['version']
def _copy_bundle(self, bundle_uuid, bundle_version, manifest, attempt=0):
attempt += 1
new_bundle_version = self._copy_version(bundle_version)
try:
logger.info('Creating bundle %s, version %s', bundle_uuid, new_bundle_version)
self.destination.put_bundle(uuid=bundle_uuid,
version=new_bundle_version,
replica='aws',
creator_uid=0,
files=manifest['files'])
except SwaggerAPIException as e:
if e.code == 400 and e.reason == 'file_missing' and attempt < 10:
logger.warning('Target DSS complains that a source file in bundle %s, version %s is missing, '
'retrying in %is.', bundle_uuid, bundle_version, attempt)
self._defer(self._copy_bundle, bundle_uuid, bundle_version, manifest, attempt=attempt, delay=attempt)
else:
raise
def _copy_version(self, version: str):
mode = self.args.version
if mode == 'keep':
return version
elif mode == 'new':
return self._new_version()
else:
if isinstance(mode, float):
version = datetime.strptime(version, self.version_format)
version = datetime.fromtimestamp(version.timestamp() + mode)
return version.strftime(self.version_format)
else:
return mode
version_format = '%Y-%m-%dT%H%M%S.%fZ'
@classmethod
def _new_version(cls):
return datetime.utcfromtimestamp(time.time()).strftime(cls.version_format)
@classmethod
def _validate_version(cls, version: str):
"""
>>> # noinspection PyProtectedMember
>>> CopyBundle._validate_version('2018-10-18T150431.370880Z')
'2018-10-18T150431.370880Z'
>>> # noinspection PyProtectedMember
>>> CopyBundle._validate_version('2018-10-18T150431.0Z')
Traceback (most recent call last):
...
ValueError: ('2018-10-18T150431.0Z', '2018-10-18T150431.000000Z')
>>> # noinspection PyProtectedMember
>>> CopyBundle._validate_version(' 2018-10-18T150431.370880Z')
Traceback (most recent call last):
...
ValueError: time data ' 2018-10-18T150431.370880Z' does not match format '%Y-%m-%dT%H%M%S.%fZ'
>>> # noinspection PyProtectedMember
>>> CopyBundle._validate_version('2018-10-18T150431.370880')
Traceback (most recent call last):
...
ValueError: time data '2018-10-18T150431.370880' does not match format '%Y-%m-%dT%H%M%S.%fZ'
>>> # noinspection PyProtectedMember
>>> CopyBundle._validate_version('2018-10-187150431.370880Z')
Traceback (most recent call last):
...
ValueError: time data '2018-10-187150431.370880Z' does not match format '%Y-%m-%dT%H%M%S.%fZ'
"""
reparsed_version = datetime.strptime(version, cls.version_format).strftime(cls.version_format)
if version != reparsed_version:
raise ValueError(version, reparsed_version)
return version
if __name__ == '__main__':
configure_script_logging(logger)
CopyBundle(sys.argv[1:]).main()
| 53.837398 | 119 | 0.586077 | 12,591 | 0.950695 | 0 | 0 | 6,011 | 0.453866 | 0 | 0 | 4,702 | 0.355029 |
d142df79bef452231592066c73c02fa23e4fff32 | 398 | py | Python | firsttest/models/check.py | charlos1204/firsttest | 2c66385ae7149d1403071c2bf6da997873350556 | [
"MIT"
] | null | null | null | firsttest/models/check.py | charlos1204/firsttest | 2c66385ae7149d1403071c2bf6da997873350556 | [
"MIT"
] | null | null | null | firsttest/models/check.py | charlos1204/firsttest | 2c66385ae7149d1403071c2bf6da997873350556 | [
"MIT"
] | null | null | null | #import numpy as np
#import pickle
#import sequence2vector as s2v_tools
#y_data_name = '/data/label_dataset.pkl'#
#Y = pickle.load(open(y_data_name, 'rb'))
#print(Y.shape)
#Y = s2v_tools.seq2vectorize(Y)
#print(Y)
from keras.models import Sequential
import plotresults as pltrslts
import pickle
network = pickle.load(open("/data/history.pkl", 'rb'))
pltrslts.plot_acc_loss(network, 'save')
| 18.090909 | 54 | 0.748744 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.590452 |
d1440a5d8d1674d5c1865a9e8914bba72236e29c | 2,146 | py | Python | tools/build.py | kxf3199/gltf_tool | b060135209dff2127095575b8fc87849b5bfbdf4 | [
"MIT"
] | 1 | 2022-03-04T10:53:42.000Z | 2022-03-04T10:53:42.000Z | tools/build.py | spindro/disney_brdf_for_yocto-gl | aa79c60ec9603240f35a6c70ed20586d3fe5df45 | [
"MIT"
] | null | null | null | tools/build.py | spindro/disney_brdf_for_yocto-gl | aa79c60ec9603240f35a6c70ed20586d3fe5df45 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3 -B
# build utility for easy development
# complete and unreliable hack used for making it easier to develop
import click, os, platform, markdown, glob, textwrap
def build(target, dirname, buildtype, cmakeopts=''):
os.system('mkdir -p build/{dirname}; cd build/{dirname}; cmake ../../ -GNinja -DCMAKE_BUILD_TYPE={buildtype} -DYOCTO_EXPERIMENTAL=ON {cmakeopts}; cmake --build . {target}'.format(target=target, dirname=dirname, buildtype=buildtype, cmakeopts=cmakeopts))
os.system('ln -Ffs {dirname} build/latest'.format(dirname=dirname))
@click.group()
def run():
pass
@run.command()
@click.argument('target', required=False, default='')
def latest(target=''):
os.system('cd build/latest; cmake --build . {target}'.format(target=target))
@run.command()
@click.argument('target', required=False, default='')
def release(target=''):
build(target, 'release', 'Release')
@run.command()
@click.argument('target', required=False, default='')
def nogl(target=''):
build(target, 'nogl', 'Release', '-DYOCTO_OPENGL=OFF')
@run.command()
@click.argument('target', required=False, default='')
def debug(target=''):
build(target, 'debug', 'Debug')
@run.command()
@click.argument('target', required=False, default='')
def gcc(target=''):
build(target, 'gcc', 'Release', '-DCMAKE_C_COMPILER=gcc-7 -DCMAKE_CXX_COMPILER=g++-7')
@run.command()
def xcode():
os.system('mkdir -p build/xcode; cd build/xcode; cmake -G Xcode -DYOCTO_EXPERIMENTAL=ON ../../; open yocto-gl.xcodeproj')
@run.command()
def clean():
os.system('rm -rf bin; rm -rf build')
@run.command()
def format():
for glob in ['yocto/yocto_*.h', 'yocto/yocto_*.cpp', 'apps/y*.cpp']:
os.system('clang-format -i -style=file ' + glob)
@run.command()
def docs():
os.system('./tools/cpp2doc.py')
@run.command()
def doxygen():
os.system('doxygen ./tools/Doxyfile')
@run.command()
@click.argument('msg', required=True, default='')
def commit(msg=''):
os.system('./tools/build.py format')
os.system('./tools/build.py docs')
os.system('git commit -a -m ' + msg)
if __name__ == '__main__':
run()
| 30.225352 | 257 | 0.671482 | 0 | 0 | 0 | 0 | 1,509 | 0.703169 | 0 | 0 | 908 | 0.423113 |
d146f4973440816e4a9c135565077e8cd8ed1f36 | 2,108 | py | Python | server/wrangle/config.py | kdinkla/ProtoMPDA | 08ec7de3a24ea07da19062b009ca81a0f5a9c924 | [
"MIT"
] | 3 | 2017-12-07T19:11:24.000Z | 2020-07-03T07:51:09.000Z | server/wrangle/config.py | kdinkla/Screenit | 08ec7de3a24ea07da19062b009ca81a0f5a9c924 | [
"MIT"
] | null | null | null | server/wrangle/config.py | kdinkla/Screenit | 08ec7de3a24ea07da19062b009ca81a0f5a9c924 | [
"MIT"
] | null | null | null | import sqlite3 as lite
import csv
# Constants.
inputPath = "/Users/kdinkla/Desktop/Novartis/HCS/CellMorph/www.ebi.ac.uk/huber-srv/cellmorph/data/"
outputPath = "/Users/kdinkla/MPDA/git/wrangle/db/"
sqlDotReplacement = '_'
# Screening parameters.
plates = ["HT" + str(i).zfill(2) for i in range(1, 69)]
plateDirectories = [inputPath + d + "/" for d in plates]
columns = [c for c in 'ABCDEFGHIJKLMNOP']
rows = [str(r).zfill(3) for r in range(4, 25)]
imageSpots = range(1, 5)
assignedClasses = {
"AF": "Actin fiber",
"BC": "Big cells",
"C": "Condensed",
"D": "Debris",
"LA": "Lamellipodia",
"M": "Metaphase",
"MB": "Membrane blebbing",
"N": "Normal",
"P": "Protruded",
"Z": "Telophase"
}
# Derived.
dbPath = outputPath + "core.db"
# Connect to SQLite database.
def connect():
return lite.connect(dbPath)
# Format object feature field for SQL.
def formatField(field):
return field.replace(".", sqlDotReplacement)
# Convert plate index (starting at 1) to plate tag.
def plateTag(index):
return plates[index]
def columnTag(index):
return columns[index]
def rowTag(index):
return rows[index]
# Determine object feature fields.
def objectFeatures():
firstFilePath = inputPath + "HT01/HT01A004_ftrs.tab"
with open(firstFilePath, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter='\t')
header = reader.next()
return [formatField(f) for f in header if f != 'spot' and f != 'class']
# Directory of feature file of given plate, column, and row.
def featureDirectory(plate, column, row):
return inputPath + plate + "/" + plate + column + row + "_ftrs.tab"
# Resolves directory for given database column, row, and plate number. Image types: seg and rgb
def wellURL(column, row, plate, type):
plateTag = plates[plate]
wellTag = plateTag + columns[column] + rows[row]
return "http://www.ebi.ac.uk/huber-srv/cellmorph/view/" + plateTag + "/" + wellTag + "/" + wellTag + "_" + type + ".jpeg"
#return "dataset/images/" + plateTag + "/" + wellTag + "/" + wellTag + "_seg.jpeg" | 31.462687 | 125 | 0.652277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 872 | 0.413662 |
d1484a9f3cc1c846f424f96a4602ea6fd126b3cd | 952 | py | Python | src/stationbook/book/tests/test_view_station_borehole_layer_add.py | EIDA/stationbook | 80d36189170328257955b236c9ed6a8657a92853 | [
"MIT"
] | 3 | 2019-02-07T18:03:56.000Z | 2020-06-30T11:09:50.000Z | src/stationbook/book/tests/test_view_station_borehole_layer_add.py | EIDA/stationbook | 80d36189170328257955b236c9ed6a8657a92853 | [
"MIT"
] | 13 | 2019-03-25T08:09:25.000Z | 2022-03-11T23:40:25.000Z | src/stationbook/book/tests/test_view_station_borehole_layer_add.py | EIDA/stationbook | 80d36189170328257955b236c9ed6a8657a92853 | [
"MIT"
] | 1 | 2019-07-26T10:23:37.000Z | 2019-07-26T10:23:37.000Z | from django.urls import resolve, reverse
from .base_classes import NetworkStationTest
from ..views import station_borehole_layer_add
class StationBoreholeLayerAddTests(NetworkStationTest):
def __init__(self, *args):
NetworkStationTest.__init__(
self,
*args,
url="station_borehole_layer_add",
arguments={"network_pk": "1", "station_pk": "1"}
)
def test_station_borehole_layer_add_view_status_code_authenticated(self):
self.login_and_refresh()
self.assertEquals(self.response.status_code, 200)
def test_station_borehole_layer_add_view_status_code_anon(self):
self.logout_and_refresh()
self.assertEquals(self.response.status_code, 302)
def test_station_borehole_layer_add_update_url_resolves_view(self):
view = resolve("/networks/1/station/1/add-borehole-layer/")
self.assertEquals(view.func, station_borehole_layer_add)
| 35.259259 | 77 | 0.726891 | 815 | 0.856092 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.106092 |
d148ad0b74eafb9e28b83f4e64a1d66cc75dbe56 | 590 | py | Python | engfrosh_site/frosh/migrations/0004_alter_team_group.py | engfrosh/engfrosh | 8eed0f3e86ff43de569c280a5f1571c02f2324f2 | [
"MIT"
] | 1 | 2021-05-21T01:01:16.000Z | 2021-05-21T01:01:16.000Z | engfrosh_site/frosh/migrations/0004_alter_team_group.py | engfrosh/engfrosh | 8eed0f3e86ff43de569c280a5f1571c02f2324f2 | [
"MIT"
] | 50 | 2021-05-20T21:00:55.000Z | 2022-03-12T00:59:18.000Z | engfrosh_site/frosh/migrations/0004_alter_team_group.py | engfrosh/engfrosh | 8eed0f3e86ff43de569c280a5f1571c02f2324f2 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.3 on 2021-06-19 00:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
('frosh', '0003_alter_team_coin_amount'),
]
operations = [
migrations.AlterField(
model_name='team',
name='group',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='frosh_team', serialize=False, to='auth.group'),
),
]
| 28.095238 | 163 | 0.662712 | 464 | 0.786441 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.279661 |
d14a47851458aeac04d23e1f6be1cad44af69083 | 905 | py | Python | CMSIS/DSP/Testing/PatternGeneration/Convolutions.py | milos-lazic/CMSIS_5 | 61b74b70bd961af9e2a8bb6bc1632c1014e7c42e | [
"Apache-2.0"
] | 6 | 2019-05-30T21:02:44.000Z | 2022-01-16T23:40:23.000Z | CMSIS/DSP/Testing/PatternGeneration/Convolutions.py | milos-lazic/CMSIS_5 | 61b74b70bd961af9e2a8bb6bc1632c1014e7c42e | [
"Apache-2.0"
] | 2 | 2019-05-23T10:11:45.000Z | 2019-08-28T15:13:56.000Z | CMSIS/DSP/Testing/PatternGeneration/Convolutions.py | milos-lazic/CMSIS_5 | 61b74b70bd961af9e2a8bb6bc1632c1014e7c42e | [
"Apache-2.0"
] | 2 | 2019-11-27T09:56:17.000Z | 2021-11-25T11:02:17.000Z | import os.path
import numpy as np
import itertools
import Tools
# Those patterns are used for tests and benchmarks.
# For tests, there is the need to add tests for saturation
def writeTests(config):
NBSAMPLES=128
inputsA=np.random.randn(NBSAMPLES)
inputsB=np.random.randn(NBSAMPLES)
inputsA = inputsA/max(inputsA)
inputsB = inputsB/max(inputsB)
config.writeInput(1, inputsA,"InputsA")
config.writeInput(1, inputsB,"InputsB")
PATTERNDIR = os.path.join("Patterns","DSP","Filtering","MISC","MISC")
PARAMDIR = os.path.join("Parameters","DSP","Filtering","MISC","MISC")
configf32=Tools.Config(PATTERNDIR,PARAMDIR,"f32")
configq31=Tools.Config(PATTERNDIR,PARAMDIR,"q31")
configq15=Tools.Config(PATTERNDIR,PARAMDIR,"q15")
configq7=Tools.Config(PATTERNDIR,PARAMDIR,"q7")
writeTests(configf32)
writeTests(configq31)
writeTests(configq15)
writeTests(configq7)
| 20.568182 | 69 | 0.741436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 224 | 0.247514 |
d14a8262549a69bf5004e70bf2d22c44d9e1fbdd | 6,594 | py | Python | cesiumpy/entities/tests/test_color.py | cksammons7/cesiumpy | 0ffa7509fdac03644f0e2fb91385106c40284aa1 | [
"Apache-2.0"
] | 62 | 2015-12-30T04:17:25.000Z | 2022-02-09T04:26:24.000Z | cesiumpy/entities/tests/test_color.py | cksammons7/cesiumpy | 0ffa7509fdac03644f0e2fb91385106c40284aa1 | [
"Apache-2.0"
] | 20 | 2016-01-19T10:07:21.000Z | 2021-11-15T18:36:45.000Z | cesiumpy/entities/tests/test_color.py | cksammons7/cesiumpy | 0ffa7509fdac03644f0e2fb91385106c40284aa1 | [
"Apache-2.0"
] | 33 | 2016-02-03T13:28:29.000Z | 2022-02-26T13:14:41.000Z | #!/usr/bin/env python
# coding: utf-8
import nose
import unittest
import traitlets
import cesiumpy
import cesiumpy.testing as tm
class TestColor(unittest.TestCase):
def test_maybe_color(self):
blue = cesiumpy.color.Color.maybe('blue')
self.assertEqual(repr(blue), "Color.BLUE")
self.assertEqual(blue.script, "Cesium.Color.BLUE")
red = cesiumpy.color.Color.maybe('RED')
self.assertEqual(repr(red), "Color.RED")
self.assertEqual(red.script, "Cesium.Color.RED")
msg = "Unable to convert to Color instance: "
with nose.tools.assert_raises_regexp(ValueError, msg):
cesiumpy.color.Color.maybe('NamedColor')
msg = "Unable to convert to Color instance: "
with nose.tools.assert_raises_regexp(ValueError, msg):
cesiumpy.color.Color.maybe('x')
msg = "Unable to convert to Color instance: "
with nose.tools.assert_raises_regexp(ValueError, msg):
cesiumpy.color.Color.maybe(1)
def test_maybe_color_listlike(self):
# tuple
c = cesiumpy.color.Color.maybe((0.5, 0.3, 0.5))
self.assertEqual(repr(c), "Color(0.5, 0.3, 0.5)")
self.assertEqual(c.script, "new Cesium.Color(0.5, 0.3, 0.5)")
c = cesiumpy.color.Color.maybe((0.5, 0.3, 0.5, 0.2))
self.assertEqual(repr(c), "Color(0.5, 0.3, 0.5, 0.2)")
self.assertEqual(c.script, "new Cesium.Color(0.5, 0.3, 0.5, 0.2)")
# do not convert
msg = "Unable to convert to Color instance: "
with nose.tools.assert_raises_regexp(ValueError, msg):
cesiumpy.color.Color.maybe((0.5, 0.3))
msg = "Unable to convert to Color instance: "
with nose.tools.assert_raises_regexp(ValueError, msg):
cesiumpy.color.Color.maybe((0.5, 0.3, 0.2, 0.1, 0.5))
def test_named_colors(self):
aqua = cesiumpy.color.AQUA
exp = "Color.AQUA"
self.assertEqual(repr(aqua), exp)
self.assertEqual(aqua.name, 'AQUA')
exp = "Cesium.Color.AQUA"
self.assertEqual(aqua.script, exp)
aqua = aqua.set_alpha(0.5)
exp = "Color.AQUA.withAlpha(0.5)"
self.assertEqual(repr(aqua), exp)
self.assertEqual(aqua.name, 'AQUA')
exp = "Cesium.Color.AQUA.withAlpha(0.5)"
self.assertEqual(aqua.script, exp)
# confirm set_alpha modifies the constant
aqua = cesiumpy.color.AQUA
exp = "Color.AQUA"
self.assertEqual(repr(aqua), exp)
self.assertEqual(aqua.name, 'AQUA')
exp = "Cesium.Color.AQUA"
self.assertEqual(aqua.script, exp)
blue = cesiumpy.color.BLUE
exp = "Color.BLUE"
self.assertEqual(repr(blue), exp)
self.assertEqual(blue.name, 'BLUE')
exp = "Cesium.Color.BLUE"
self.assertEqual(blue.script, exp)
def test_single_char_color(self):
_m = cesiumpy.color.Color.maybe
self.assertEqual(_m('b'), cesiumpy.color.BLUE)
self.assertEqual(_m('g'), cesiumpy.color.GREEN)
self.assertEqual(_m('r'), cesiumpy.color.RED)
self.assertEqual(_m('c'), cesiumpy.color.CYAN)
self.assertEqual(_m('m'), cesiumpy.color.MAGENTA)
self.assertEqual(_m('y'), cesiumpy.color.YELLOW)
self.assertEqual(_m('k'), cesiumpy.color.BLACK)
self.assertEqual(_m('w'), cesiumpy.color.WHITE)
self.assertEqual(_m('B'), cesiumpy.color.BLUE)
self.assertEqual(_m('G'), cesiumpy.color.GREEN)
self.assertEqual(_m('R'), cesiumpy.color.RED)
self.assertEqual(_m('C'), cesiumpy.color.CYAN)
self.assertEqual(_m('M'), cesiumpy.color.MAGENTA)
self.assertEqual(_m('Y'), cesiumpy.color.YELLOW)
self.assertEqual(_m('K'), cesiumpy.color.BLACK)
self.assertEqual(_m('W'), cesiumpy.color.WHITE)
def test_alpha(self):
aqua = cesiumpy.color.AQUA
res = aqua.set_alpha(0.3)
exp = "Cesium.Color.AQUA.withAlpha(0.3)"
self.assertEqual(res.script, exp)
res = aqua.withAlpha(0.3)
exp = "Cesium.Color.AQUA.withAlpha(0.3)"
self.assertEqual(res.script, exp)
res = aqua.withAlpha(1.0)
exp = "Cesium.Color.AQUA.withAlpha(1.0)"
self.assertEqual(res.script, exp)
res = aqua.withAlpha(0.0)
exp = "Cesium.Color.AQUA.withAlpha(0.0)"
self.assertEqual(res.script, exp)
msg = "The value of the 'alpha' trait of a ColorConstant instance should"
with nose.tools.assert_raises_regexp(traitlets.TraitError, msg):
aqua.withAlpha(1.1)
def test_rgb(self):
c = cesiumpy.color.Color(1, 0, 0)
exp = "new Cesium.Color(1.0, 0.0, 0.0)"
self.assertEqual(c.script, exp)
c = cesiumpy.color.Color(1, 0, 0, 0.5)
exp = "new Cesium.Color(1.0, 0.0, 0.0, 0.5)"
self.assertEqual(c.script, exp)
c = cesiumpy.color.Color.fromBytes(255, 0, 255)
exp = "new Cesium.Color(1.0, 0.0, 1.0)"
self.assertEqual(c.script, exp)
c = cesiumpy.color.Color.fromBytes(255, 0, 255, 255)
exp = "new Cesium.Color(1.0, 0.0, 1.0, 1.0)"
self.assertEqual(c.script, exp)
def test_color_string(self):
c = cesiumpy.color.Color.fromString('#FF0000')
exp = """Cesium.Color.fromCssColorString("#FF0000")"""
self.assertEqual(c.script, exp)
def test_random(self):
c = cesiumpy.color.choice()
self.assertIsInstance(c, cesiumpy.color.Color)
colors = cesiumpy.color.sample(5)
self.assertIsInstance(colors, list)
self.assertEqual(len(colors), 5)
self.assertTrue(all(isinstance(c, cesiumpy.color.Color) for c in colors))
def test_cmap(self):
tm._skip_if_no_matplotlib()
import matplotlib.pyplot as plt
mpl_cmap = plt.get_cmap('winter')
cmap = cesiumpy.color.get_cmap('winter')
exp = """ColorMap("winter")"""
self.assertEqual(repr(cmap), exp)
res = cmap(3)
exp = mpl_cmap(3)
self.assertEqual(res.red, exp[0])
self.assertEqual(res.green, exp[1])
self.assertEqual(res.blue, exp[2])
self.assertEqual(res.alpha, exp[3])
res = cmap([2, 4])
exp = mpl_cmap([2, 4])
for r, e in zip(res, exp):
self.assertEqual(r.red, e[0])
self.assertEqual(r.green, e[1])
self.assertEqual(r.blue, e[2])
self.assertEqual(r.alpha, e[3])
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| 35.074468 | 81 | 0.607067 | 6,323 | 0.958902 | 0 | 0 | 0 | 0 | 0 | 0 | 1,211 | 0.183652 |
d14a9fead3b8a6d1a0d3f6e69ca6d8524429fdaf | 410 | py | Python | todo/migrations/0004_alter_post_title.py | Saup21/Todo-list | f806fee5ba11a2ce6a8242d8675f0984d2c2f0eb | [
"MIT"
] | 14 | 2021-05-14T15:06:38.000Z | 2021-09-10T06:29:23.000Z | todo/migrations/0004_alter_post_title.py | Saup21/Todo-list | f806fee5ba11a2ce6a8242d8675f0984d2c2f0eb | [
"MIT"
] | null | null | null | todo/migrations/0004_alter_post_title.py | Saup21/Todo-list | f806fee5ba11a2ce6a8242d8675f0984d2c2f0eb | [
"MIT"
] | 3 | 2021-05-16T12:39:41.000Z | 2021-05-18T04:13:57.000Z | # Generated by Django 3.2.1 on 2021-05-12 20:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('todo', '0003_auto_20210511_0127'),
]
operations = [
migrations.AlterField(
model_name='post',
name='title',
field=models.CharField(blank=True, max_length=25),
),
]
| 21.578947 | 63 | 0.570732 | 311 | 0.758537 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.22439 |
d14ad9b665a10159104a85fed696e67d8770cb72 | 1,246 | py | Python | vn.trader/strategyMain.py | freeitaly/TT | 9b88ccec3739077649b0f57787d7f02764ad6897 | [
"MIT"
] | null | null | null | vn.trader/strategyMain.py | freeitaly/TT | 9b88ccec3739077649b0f57787d7f02764ad6897 | [
"MIT"
] | null | null | null | vn.trader/strategyMain.py | freeitaly/TT | 9b88ccec3739077649b0f57787d7f02764ad6897 | [
"MIT"
] | null | null | null | # encoding: UTF-8
import sys
import ctypes
import platform
from vtEngine import MainEngine
from ctaAlgo.uiStrategyWindow import *
#----------------------------------------------------------------------
def main():
"""主程序入口"""
# 设置底部任务栏图标,win7以下请注释掉
try:
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID('vn.py demo')
except:
pass
# 重载sys模块,设置默认字符串编码方式为utf8
reload(sys)
sys.setdefaultencoding('utf8')
# # 设置Windows底部任务栏图标
# if 'Windows' in platform.uname() :
# ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID('vn.trader')
# 初始化Qt应用对象
app = QtGui.QApplication(sys.argv)
app.setWindowIcon(QtGui.QIcon('vnpy.ico'))
app.setFont(BASIC_FONT)
# 设置Qt的皮肤
try:
f = file("VT_setting.json")
setting = json.load(f)
if setting['darkStyle']:
import qdarkstyle
app.setStyleSheet(qdarkstyle.load_stylesheet(pyside=False))
except:
pass
# 初始化主引擎和主窗口对象
mainEngine = MainEngine()
mainWindow = MainWindow(mainEngine, mainEngine.eventEngine)
mainWindow.showMaximized()
# 在主线程中启动Qt事件循环
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| 23.961538 | 84 | 0.61557 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 562 | 0.398582 |
d14b01acc806f687e4dbb90d8f0ce282fa3e47ab | 304 | py | Python | notes/code/sfpd/histo.py | skrilladeville/msds692 | 1690846e7299819cd5b1b24a56968bf1074e16bf | [
"MIT"
] | 87 | 2018-08-10T23:27:24.000Z | 2022-03-29T05:07:45.000Z | notes/code/sfpd/histo.py | skrilladeville/msds692 | 1690846e7299819cd5b1b24a56968bf1074e16bf | [
"MIT"
] | 1 | 2019-10-06T15:45:03.000Z | 2019-10-06T15:45:03.000Z | notes/code/sfpd/histo.py | skrilladeville/msds692 | 1690846e7299819cd5b1b24a56968bf1074e16bf | [
"MIT"
] | 171 | 2018-08-20T23:59:43.000Z | 2022-03-31T16:21:52.000Z | import sys
from csvcols import get_column
categories = get_column(sys.argv[1], col=1)
descriptions = get_column(sys.argv[1], col=2)
for c, n in categories.most_common(len(categories)):
print("%6d %s" % (n, c))
for d, n in descriptions.most_common(len(descriptions)):
print("%6d %s" % (n, d))
| 23.384615 | 56 | 0.684211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.052632 |
d14c9b8f7f4b5ed59766d994c17ec61a593de502 | 5,742 | py | Python | autopatch/target_finder.py | Hydrogen-OS-P/tools | 6bf6f5a9f922ca64a22434cd986db5452f7a796b | [
"Apache-2.0"
] | 2 | 2020-05-17T00:33:41.000Z | 2020-05-21T16:08:35.000Z | autopatch/target_finder.py | Hydrogen-OS-P/tools | 6bf6f5a9f922ca64a22434cd986db5452f7a796b | [
"Apache-2.0"
] | null | null | null | autopatch/target_finder.py | Hydrogen-OS-P/tools | 6bf6f5a9f922ca64a22434cd986db5452f7a796b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python2
# Filename: target_finder.py
"""
Fast search the target out.
Usage: target_finder.py TARGET
- TARGET target path relative to current directory.
"""
__author__ = 'duanqz@gmail.com'
import sys
import re
import os
import fnmatch
import commands
class TargetFinder:
# The framework partitions
PARTITIONS = []
def __init__(self):
self.__initPartitions()
# Path Regex to match out useful parts
# Using named group match with "(?P<group_name>)", using minimum match end with "?"
self.pathRegex = re.compile("(?P<part1>.*?)/(?P<part2>smali/.*?)(?P<part3>.*)")
def __initPartitions(self):
""" Parse out the framework partitions.
"""
makefile = None
for filename in os.listdir(os.curdir):
if fnmatch.fnmatch(filename.lower(), "makefile"):
makefile = filename
if makefile == None:
return
fileHandle = open(makefile, "r")
content = fileHandle.read()
modifyJars = re.compile("\n\s*vendor_modify_jars\s*:=\s*(?P<jars>.*)\n")
match = modifyJars.search(content)
if match != None:
TargetFinder.PARTITIONS = match.group("jars").split(" ")
fileHandle.close()
def __findInDexPartitions(self, target):
""" Find the target in dex partition.
On Android 5.0, Files might be split to different dex-partitions
"""
if os.path.exists(target):
return target
(outClass, innerClass) = TargetFinder.__extractInnerClass(target)
match = self.pathRegex.search(outClass)
if match != None:
# Part 1: top directory of framework
# Part 2: smali or smali_classes2 ...
# Part 3: the remains of the path
part1 = match.group("part1")
part2 = match.group("part2")
part3 = match.group("part3")
if not os.path.exists(part1):
return target
for subDir in os.listdir(part1):
if subDir.startswith("smali") and subDir != part2:
newTarget = os.path.join(part1, subDir, part3)
if os.path.exists(newTarget):
return TargetFinder.__concatInnerClass(newTarget, innerClass)
# Not found
return target
def __findInFrwPartitions(self, target):
""" Find the target in the partitions.
Files might be split to different framework-partition
"""
(outClass, innerClass) = TargetFinder.__extractInnerClass(target)
match = self.pathRegex.search(outClass)
if match != None:
# Part 1: top directory of framework
# Part 2: smali or smali_classes2 ...
# Part 3: the remains of the path
part1 = match.group("part1")
part2 = match.group("part2")
part3 = match.group("part3")
for partition in TargetFinder.PARTITIONS:
if not partition.endswith(".jar.out"):
partition += ".jar.out"
newTarget = os.path.join(partition, part2, part3)
if os.path.exists(newTarget):
return TargetFinder.__concatInnerClass(outClass, innerClass)
# Not found
return target
@staticmethod
def __extractInnerClass(target):
""" Extract the inner class file from target
"""
pos = target.find("$")
if pos >= 0:
# Inner class, set outer class as new target to find
outClass = target[:pos] + ".smali"
innerClass = target[pos:]
return (outClass, innerClass)
else:
return (target, None)
@staticmethod
def __concatInnerClass(outClass, innerClass):
if innerClass != None:
return outClass.replace(".smali", innerClass)
else:
return outClass
def __findInAll(self, target):
""" Find the target in all project root
"""
basename = os.path.basename(target)
searchPath = []
for partition in TargetFinder.PARTITIONS:
if not partition.endswith(".jar.out"):
partition += ".jar.out"
searchPath.append(partition)
cmd = "find %s -name %s" % (" ".join(searchPath), commands.mkarg(basename))
(sts, text) = commands.getstatusoutput(cmd)
try:
if sts == 0:
text = text.split("\n")[0]
if len(text) > 0:
return text
except:
pass
return target
def find(self, target, loosely=False):
""" Find the target out in the current directory.
Set loosely to be True to find file base name in all directory
"""
# Firstly, check whether target exists in dex partitions
target = self.__findInDexPartitions(target)
if os.path.exists(target):
return target
# Secondly, check whether target exists in framework partitions
# It is more efficiently than find in all files
target = self.__findInFrwPartitions(target)
if os.path.exists(target):
return target
# Thirdly, still not find the target, search in all sub directories
if loosely:
return self.__findInAll(target)
else:
return target
# End of class TargetFinder
if __name__ == "__main__":
argc = len(sys.argv)
if argc != 2 :
print __doc__
sys.exit()
target = sys.argv[1]
print TargetFinder().find(target)
| 29.147208 | 91 | 0.564089 | 5,259 | 0.915883 | 0 | 0 | 602 | 0.104842 | 0 | 0 | 1,690 | 0.294323 |
d14d43cb5ea0c774889305ac6803d6586c9a4422 | 853 | py | Python | geotrek/flatpages/serializers.py | pierreloicq/Geotrek-admin | 00cd29f29843f2cc25e5a3c7372fcccf14956887 | [
"BSD-2-Clause"
] | 50 | 2016-10-19T23:01:21.000Z | 2022-03-28T08:28:34.000Z | geotrek/flatpages/serializers.py | pierreloicq/Geotrek-admin | 00cd29f29843f2cc25e5a3c7372fcccf14956887 | [
"BSD-2-Clause"
] | 1,422 | 2016-10-27T10:39:40.000Z | 2022-03-31T13:37:10.000Z | geotrek/flatpages/serializers.py | pierreloicq/Geotrek-admin | 00cd29f29843f2cc25e5a3c7372fcccf14956887 | [
"BSD-2-Clause"
] | 46 | 2016-10-27T10:59:10.000Z | 2022-03-22T15:55:56.000Z | from rest_framework import serializers as rest_serializers
from geotrek.flatpages import models as flatpages_models
from geotrek.common.serializers import (
TranslatedModelSerializer, BasePublishableSerializerMixin,
RecordSourceSerializer, TargetPortalSerializer
)
class FlatPageSerializer(BasePublishableSerializerMixin, TranslatedModelSerializer):
last_modified = rest_serializers.ReadOnlyField(source='date_update')
media = rest_serializers.ReadOnlyField(source='parse_media')
source = RecordSourceSerializer(many=True)
portal = TargetPortalSerializer(many=True)
class Meta:
model = flatpages_models.FlatPage
fields = ('id', 'title', 'external_url', 'content', 'target',
'last_modified', 'slug', 'media', 'source', 'portal') + \
BasePublishableSerializerMixin.Meta.fields
| 40.619048 | 84 | 0.757327 | 576 | 0.675264 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.131301 |
d14d5d346317e2adeb5762d4720c2c3c5e7859a8 | 460 | py | Python | examples/docs_snippets/docs_snippets/overview/modes_resources/pipeline_with_modes.py | bitdotioinc/dagster | 4fe395a37b206b1a48b956fa5dd72bf698104cca | [
"Apache-2.0"
] | 2 | 2021-06-21T17:50:26.000Z | 2021-06-21T19:14:23.000Z | examples/docs_snippets/docs_snippets/overview/modes_resources/pipeline_with_modes.py | bitdotioinc/dagster | 4fe395a37b206b1a48b956fa5dd72bf698104cca | [
"Apache-2.0"
] | 7 | 2022-03-16T06:55:04.000Z | 2022-03-18T07:03:25.000Z | examples/docs_snippets/docs_snippets/overview/modes_resources/pipeline_with_modes.py | bitdotioinc/dagster | 4fe395a37b206b1a48b956fa5dd72bf698104cca | [
"Apache-2.0"
] | 1 | 2021-08-18T17:21:57.000Z | 2021-08-18T17:21:57.000Z | from dagster import ModeDefinition, pipeline
from .database_resources import postgres_database, sqlite_database
from .solids_with_resources import generate_table_1, generate_table_2
@pipeline(
mode_defs=[
ModeDefinition("local_dev", resource_defs={"database": sqlite_database}),
ModeDefinition("prod", resource_defs={"database": postgres_database}),
],
)
def generate_tables_pipeline():
generate_table_1()
generate_table_2()
| 28.75 | 81 | 0.767391 | 0 | 0 | 0 | 0 | 274 | 0.595652 | 0 | 0 | 37 | 0.080435 |
d14e2d8867ddf65a2458144b49954f5383e47eb9 | 3,687 | py | Python | tests/robot/test_files/py2.py | tiobe/modernize | d0e0188989bbad610ad35d052753985fab72e989 | [
"BSD-3-Clause"
] | null | null | null | tests/robot/test_files/py2.py | tiobe/modernize | d0e0188989bbad610ad35d052753985fab72e989 | [
"BSD-3-Clause"
] | null | null | null | tests/robot/test_files/py2.py | tiobe/modernize | d0e0188989bbad610ad35d052753985fab72e989 | [
"BSD-3-Clause"
] | null | null | null | # This file is part of krakenex.
#
# krakenex is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# krakenex is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser
# General Public LICENSE along with krakenex. If not, see
# <http://www.gnu.org/licenses/gpl-3.0.txt>.
import json
import urllib
# private query nonce
import time
# private query signing
import hashlib
import hmac
import base64
from krakenex import connection
class API(object):
"""Kraken.com cryptocurrency Exchange API.
Public methods:
load_key
query_public
query_private
"""
def __init__(self, key = '', secret = ''):
"""Create an object with authentication information.
Arguments:
key -- key required to make queries to the API (default: '')
secret -- private key used to sign API messages (default: '')
"""
self.key = key
self.secret = secret
self.uri = 'https://api.kraken.com'
self.apiversion = '0'
def load_key(self, path):
"""Load key and secret from file.
Argument:
path -- path to file (string, no default)
"""
f = open(path, "r")
self.key = f.readline().strip()
self.secret = f.readline().strip()
def _query(self, urlpath, req = {}, conn = None, headers = {}):
"""Low-level query handling.
Arguments:
urlpath -- API URL path sans host (string, no default)
req -- additional API request parameters (default: {})
conn -- kraken.Connection object (default: None)
headers -- HTTPS headers (default: {})
"""
url = self.uri + urlpath
if conn is None:
conn = connection.Connection()
ret = conn._request(url, req, headers)
return json.loads(ret)
def query_public(self, method, req = {}, conn = None):
"""API queries that do not require a valid key/secret pair.
Arguments:
method -- API method name (string, no default)
req -- additional API request parameters (default: {})
conn -- connection object to reuse (default: None)
"""
urlpath = '/' + self.apiversion + '/public/' + method
return self._query(urlpath, req, conn)
def query_private(self, method, req={}, conn = None):
"""API queries that require a valid key/secret pair.
Arguments:
method -- API method name (string, no default)
req -- additional API request parameters (default: {})
conn -- connection object to reuse (default: None)
"""
urlpath = '/' + self.apiversion + '/private/' + method
req['nonce'] = int(1000*time.time())
postdata = urllib.urlencode(req)
message = urlpath + hashlib.sha256(str(req['nonce']) +
postdata).digest()
signature = hmac.new(base64.b64decode(self.secret),
message, hashlib.sha512)
headers = {
'API-Key': self.key,
'API-Sign': base64.b64encode(signature.digest())
}
return self._query(urlpath, req, conn, headers)
| 29.97561 | 71 | 0.590453 | 2,833 | 0.768375 | 0 | 0 | 0 | 0 | 0 | 0 | 2,201 | 0.596962 |
d14f95d4a726b589d4b5ddfbe61823721f991d94 | 1,553 | py | Python | apostello/migrations/0007_auto_20160315_1213.py | LaudateCorpus1/apostello | 1ace89d0d9e1f7a1760f6247d90a60a9787a4f12 | [
"MIT"
] | 69 | 2015-10-03T20:27:53.000Z | 2021-04-06T05:26:18.000Z | apostello/migrations/0007_auto_20160315_1213.py | LaudateCorpus1/apostello | 1ace89d0d9e1f7a1760f6247d90a60a9787a4f12 | [
"MIT"
] | 73 | 2015-10-03T17:53:47.000Z | 2020-10-01T03:08:01.000Z | apostello/migrations/0007_auto_20160315_1213.py | LaudateCorpus1/apostello | 1ace89d0d9e1f7a1760f6247d90a60a9787a4f12 | [
"MIT"
] | 29 | 2015-10-23T22:00:13.000Z | 2021-11-30T04:48:06.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-15 12:13
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("apostello", "0006_userprofile_show_tour")]
operations = [
migrations.AddField(model_name="userprofile", name="can_archive", field=models.BooleanField(default=True)),
migrations.AlterField(
model_name="recipient",
name="first_name",
field=models.CharField(
db_index=True,
max_length=16,
validators=[
django.core.validators.RegexValidator(
"^[\\s\\w@?£!1$\"¥#è?¤é%ù&ì\\ò(Ç)*:Ø+;ÄäøÆ,<LÖlöæ\\-=ÑñÅß.>ÜüåÉ/§à¡¿']+$",
message="You can only use GSM characters.",
)
],
verbose_name="First Name",
),
),
migrations.AlterField(
model_name="recipient",
name="last_name",
field=models.CharField(
db_index=True,
max_length=40,
validators=[
django.core.validators.RegexValidator(
"^[\\s\\w@?£!1$\"¥#è?¤é%ù&ì\\ò(Ç)*:Ø+;ÄäøÆ,<LÖlöæ\\-=ÑñÅß.>ÜüåÉ/§à¡¿']+$",
message="You can only use GSM characters.",
)
],
verbose_name="Last Name",
),
),
]
| 33.76087 | 115 | 0.490663 | 1,424 | 0.883923 | 0 | 0 | 0 | 0 | 0 | 0 | 475 | 0.294848 |
d14f96e0734761eeae0e630ef8c89ddc1d156cc7 | 5,287 | py | Python | bridle/const_expr.py | iguessthislldo/avidly | 0257c22966c297fad1254574cac60bb52b2da6ff | [
"MIT"
] | 1 | 2022-02-16T08:23:35.000Z | 2022-02-16T08:23:35.000Z | bridle/const_expr.py | iguessthislldo/bridle | f7b0228a5d3e1e05e5643c2f787dd175dd243965 | [
"MIT"
] | null | null | null | bridle/const_expr.py | iguessthislldo/bridle | f7b0228a5d3e1e05e5643c2f787dd175dd243965 | [
"MIT"
] | null | null | null | import enum
import operator as pyop
from abc import ABC, abstractmethod
from collections.abc import Callable
from typing import Any, Optional
import inspect
import string
from dataclasses import dataclass
from typing import TYPE_CHECKING
from .errors import ConstExprError, InternalError
if TYPE_CHECKING:
from .tree import PrimitiveKind
class ConstAbc(ABC):
def uncasted_kind(self):
return None
@abstractmethod
def can_eval(self):
pass
@abstractmethod
def eval(self, to: 'PrimitiveKind'):
pass
@abstractmethod
def __str__(self):
pass
def __repr__(self):
return '<{}: {}>'.format(self.__class__.__name__, str(self))
class ConstValue(ConstAbc):
def __init__(self, value: Any, kind: 'PrimitiveKind'):
if kind is not None:
kind.check_value(value)
self.value = value
self.kind = kind
def uncasted_kind(self):
return self.kind
def can_eval(self):
return self.value is not None
def eval(self, to: 'PrimitiveKind') -> Any:
if to != self.kind:
to.check_value(self.value)
return self.value
def __str__(self):
return str(self.value)
@dataclass(frozen=True)
class OpTraits:
fmt: str
# TODO: Fix for Python 3.8
# impl: Optional[Callable[..., Any]]
# type_impl: Optional[Callable[PrimitiveKind, ..., Any]] = None
impl: Optional[Any]
type_impl: Optional[Any] = None
accepts_floats: bool = True
@property
def impl_details(self):
if self.impl is None:
return (True, self.type_impl)
return (False, self.impl)
@property
def operand_count(self):
subtract_one, impl = self.impl_details
impl_count = len(inspect.getfullargspec(impl).args)
if subtract_one:
impl_count -= 1
fmt_count = len(list(string.Formatter().parse(self.fmt)))
if impl_count != fmt_count:
InternalError('impl_count ({}) and fmt_count ({}) are different for {}',
impl_count, fmt_count, repr(self.fmt))
return impl_count
def divide_impl(to: 'PrimitiveKind', a, b) -> Any:
return (pyop.truediv if to.value.is_float else pyop.floordiv)(a, b)
def invert_impl(to: 'PrimitiveKind', value) -> Any:
if to.value.is_signed_int:
return -(value + 1)
return to.value.max_number_like - value
class Op(enum.Enum):
OR = OpTraits(fmt='{} | {}', impl=pyop.or_, accepts_floats=False)
XOR = OpTraits(fmt='{} ^ {}', impl=pyop.xor, accepts_floats=False)
AND = OpTraits(fmt='{} & {}', impl=pyop.and_, accepts_floats=False)
RSHIFT = OpTraits(fmt='{} >> {}', impl=pyop.rshift, accepts_floats=False)
LSHIFT = OpTraits(fmt='{} << {}', impl=pyop.lshift, accepts_floats=False)
ADD = OpTraits(fmt='{} + {}', impl=pyop.add)
SUBTRACT = OpTraits(fmt='{} - {}', impl=pyop.sub)
MULTIPLY = OpTraits(fmt='{} * {}', impl=pyop.mul)
DIVIDE = OpTraits(fmt='{} / {}', impl=None, type_impl=divide_impl)
MODULO = OpTraits(fmt='{} % {}', impl=pyop.mod, accepts_floats=False)
POSITIVE = OpTraits(fmt='+{}', impl=pyop.pos)
NEGATIVE = OpTraits(fmt='-{}', impl=pyop.neg)
INVERT = OpTraits(fmt='~{}', impl=None, type_impl=invert_impl, accepts_floats=False)
PRIORITIZE = OpTraits(fmt='({})', impl=lambda a: a)
def impl(self, to: 'PrimitiveKind', operands) -> Callable:
add_to, impl = self.value.impl_details
if add_to:
return impl(to, *operands)
else:
return impl(*operands)
@property
def operand_count(self):
return self.value.operand_count
def check_operand(self, operand: ConstAbc):
kind = operand.uncasted_kind()
if kind is not None:
if kind.value.is_float and not self.value.accepts_floats:
raise ConstExprError(
'{} operation doesn\'t accept floating point values', self.name)
if not kind.value.can_op:
raise ConstExprError('Not possible to do operations on {}', kind.name)
def fmt_operands(self, operands):
return self.value.fmt.format(*[str(i) for i in operands])
class ConstExpr(ConstAbc):
def __init__(self, op: Op, *operands):
expected_count = op.operand_count
if len(operands) != expected_count:
raise InternalError('{} expects {} operands, got {}',
op.name, expected_count, len(operands))
self.op = op
self.operands = operands
def can_eval(self):
for operand in self.operands:
if not operand.can_eval():
return False
return True
def eval(self, to: 'PrimitiveKind'):
if not to.value.can_op:
raise ConstExprError('Not possible to do operations to get to {}', to)
operand_values = []
for operand in self.operands:
self.op.check_operand(operand)
operand_values.append(operand.eval(to))
try:
value = self.op.impl(to, operand_values)
to.check_value(value)
except Exception as e:
raise ConstExprError('Eval failed: ' + str(e)) from e
return value
def __str__(self):
return self.op.fmt_operands(self.operands)
| 31.470238 | 88 | 0.621714 | 4,623 | 0.874409 | 0 | 0 | 1,155 | 0.21846 | 0 | 0 | 590 | 0.111594 |
d15217dd90e5162f260518d778ebbec59b2a77fc | 706 | py | Python | pysmock/models/Info.py | pysmock/pysmock-codegen | e95384756f9a80b49b7e015a408a29889e2a1b68 | [
"MIT"
] | null | null | null | pysmock/models/Info.py | pysmock/pysmock-codegen | e95384756f9a80b49b7e015a408a29889e2a1b68 | [
"MIT"
] | null | null | null | pysmock/models/Info.py | pysmock/pysmock-codegen | e95384756f9a80b49b7e015a408a29889e2a1b68 | [
"MIT"
] | null | null | null | from yaml import YAMLObject
from . import Contact, License
class Info(YAMLObject):
yaml_tag = u'info'
def __init__(self,name: str ="",title: str = "", description: str = "",termsOfService: str = "",
contact: Contact = None, license: License = None, version: str = "1.0.0"):
self.name = name
self.title = title
self.description = description
self.termsOfService = termsOfService
self.contact = contact
self.license = license
self.version = version
def __repr__(self):
return str({'name': self.name, 'title':self.title, 'description': self.description, 'termsOfService': self.termsOfService, 'contact':self.contact ,'license':self.license ,'version':self.version})
| 41.529412 | 199 | 0.694051 | 646 | 0.915014 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.128895 |
d152ae39f545ea4c14b90863158e7dfdebe61a2c | 2,356 | py | Python | sites/de/zdf.py | eminga/simplEPG | c38994b6bbba618d85528aa8ea426d936447c4e6 | [
"MIT"
] | 2 | 2019-10-14T05:48:23.000Z | 2021-07-29T04:32:07.000Z | sites/de/zdf.py | eminga/simplEPG | c38994b6bbba618d85528aa8ea426d936447c4e6 | [
"MIT"
] | null | null | null | sites/de/zdf.py | eminga/simplEPG | c38994b6bbba618d85528aa8ea426d936447c4e6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2018 eminga
# Licensed under MIT License
import datetime, pytz, re, helper
def grab(channel, timespan):
tz = pytz.timezone("Europe/Berlin")
now = datetime.datetime.now(tz)
shows = []
a = 0
if now.time().hour < 7:
a = -1
for i in range(a, 14):
date = now + datetime.timedelta(days=i)
text = helper.download("http://www.zdf.de/live-tv?airtimeDate=" + date.strftime("%Y-%m-%d"))
if text is None:
continue
text = helper.cut(text, "<section class=\"b-epg-timeline timeline-" + channel, "</section>")
sections = helper.split(text, "<li", "</li>")
laststart = datetime.datetime.min.replace(tzinfo=tz)
for section in sections:
show = {}
temp = helper.cut(section, "<span class=\"time\">", "</span>")
temp = re.search("(\d\d):(\d\d) - (\d\d):(\d\d)", temp)
show["start"] = date.replace(hour=int(temp.group(1)), minute=int(temp.group(2)), second=0, microsecond=0)
if show["start"] < laststart:
date += datetime.timedelta(days=1)
show["start"] += datetime.timedelta(days=1)
if (show["start"] - now).total_seconds() / 3600 > timespan:
return shows
laststart = show["start"]
show["stop"] = date.replace(hour=int(temp.group(3)), minute=int(temp.group(4)), second=0, microsecond=0)
if show["stop"] < show["start"]:
show["stop"] += datetime.timedelta(days=1)
temp = re.search("<span class=\"overlay-link-category\">(.*?)<span class=\"visuallyhidden\">:</span></span>\s*(?:<.*>)*\s*(.*?)\s*?</a>", section)
if temp.group(1):
show["title"] = helper.cleanup(temp.group(1) + " - " + temp.group(2))
else:
show["title"] = helper.cleanup(temp.group(2))
temp = re.search("contentUrl\": \"(.*)\"", section)
if temp is not None:
show["details-url"] = "http://www.zdf.de" + temp.group(1)
shows.append(show)
return shows
def grabdetails(url):
text = helper.download(url)
if text is None:
return None
show = {}
subtitle = helper.cut(text, "<h3 class=\"overlay-subtitle\">", "</h3>")
if subtitle is not None and subtitle:
show["sub-title"] = helper.cleanup(subtitle)
description = helper.cut(text, "<p class=\"overlay-text\">", "</p>")
if description is not None and description:
show["desc"] = helper.cleanup(description)
if text.find("Untertitel für Hörgeschädigte") != -1:
show["subtitles"] = True
return show
| 32.273973 | 149 | 0.630306 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 664 | 0.281475 |
d153802f775ba942a3655fb99bae349c7849f9df | 361 | py | Python | algorithms/warmup/compare-the-triplets.py | PingHuskar/hackerrank | 1bfdbc63de5d0f94cd9e6ae250476b4a267662f2 | [
"Unlicense"
] | 41 | 2018-05-11T07:54:34.000Z | 2022-03-29T19:02:32.000Z | algorithms/warmup/compare-the-triplets.py | PingHuskar/hackerrank | 1bfdbc63de5d0f94cd9e6ae250476b4a267662f2 | [
"Unlicense"
] | 2 | 2021-09-13T10:03:26.000Z | 2021-10-04T10:21:05.000Z | algorithms/warmup/compare-the-triplets.py | PingHuskar/hackerrank | 1bfdbc63de5d0f94cd9e6ae250476b4a267662f2 | [
"Unlicense"
] | 21 | 2019-01-23T19:06:59.000Z | 2021-12-23T16:03:47.000Z | # Algorithms > Warmup > Compare the Triplets
# Compare the elements in two triplets.
#
# https://www.hackerrank.com/challenges/compare-the-triplets/problem
#
a = map(int, input().split())
b = map(int, input().split())
alice, bob = 0, 0
for i, j in zip(a, b):
if i > j:
alice += 1
elif i < j:
bob += 1
print(alice, bob)
| 22.5625 | 69 | 0.581717 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 160 | 0.443213 |
d154ae1217c3ae34783bb85b3da68ecf82e62291 | 223 | py | Python | app/__init__.py | cca/libraries_syllabus_notifications | 0d42c96ca6fd777e501024bb986418e8897b3dbc | [
"ECL-2.0"
] | null | null | null | app/__init__.py | cca/libraries_syllabus_notifications | 0d42c96ca6fd777e501024bb986418e8897b3dbc | [
"ECL-2.0"
] | 5 | 2016-01-02T20:12:21.000Z | 2022-01-21T20:31:39.000Z | app/__init__.py | cca/libraries_syllabus_notifications | 0d42c96ca6fd777e501024bb986418e8897b3dbc | [
"ECL-2.0"
] | null | null | null | # @TODO we want to "from .app import main" so the test suite can import the
# main() function but if we do that then app.py throws errors when importing
# from config.py & its other dependencies
from .has_syllabus import *
| 44.6 | 76 | 0.753363 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 192 | 0.860987 |
d155115bca693ea9440a64b39244f42d954a8b6e | 5,153 | py | Python | sdk/notificationhubs/azure-mgmt-notificationhubs/azure/mgmt/notificationhubs/models/__init__.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/notificationhubs/azure-mgmt-notificationhubs/azure/mgmt/notificationhubs/models/__init__.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/notificationhubs/azure-mgmt-notificationhubs/azure/mgmt/notificationhubs/models/__init__.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AdmCredential
from ._models_py3 import ApnsCredential
from ._models_py3 import BaiduCredential
from ._models_py3 import CheckAvailabilityParameters
from ._models_py3 import CheckAvailabilityResult
from ._models_py3 import DebugSendResponse
from ._models_py3 import ErrorResponse
from ._models_py3 import GcmCredential
from ._models_py3 import MpnsCredential
from ._models_py3 import NamespaceCreateOrUpdateParameters
from ._models_py3 import NamespaceListResult
from ._models_py3 import NamespacePatchParameters
from ._models_py3 import NamespaceResource
from ._models_py3 import NotificationHubCreateOrUpdateParameters
from ._models_py3 import NotificationHubListResult
from ._models_py3 import NotificationHubPatchParameters
from ._models_py3 import NotificationHubResource
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import PnsCredentialsResource
from ._models_py3 import PolicykeyResource
from ._models_py3 import Resource
from ._models_py3 import ResourceListKeys
from ._models_py3 import SharedAccessAuthorizationRuleCreateOrUpdateParameters
from ._models_py3 import SharedAccessAuthorizationRuleListResult
from ._models_py3 import SharedAccessAuthorizationRuleProperties
from ._models_py3 import SharedAccessAuthorizationRuleResource
from ._models_py3 import Sku
from ._models_py3 import SubResource
from ._models_py3 import WnsCredential
except (SyntaxError, ImportError):
from ._models import AdmCredential # type: ignore
from ._models import ApnsCredential # type: ignore
from ._models import BaiduCredential # type: ignore
from ._models import CheckAvailabilityParameters # type: ignore
from ._models import CheckAvailabilityResult # type: ignore
from ._models import DebugSendResponse # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import GcmCredential # type: ignore
from ._models import MpnsCredential # type: ignore
from ._models import NamespaceCreateOrUpdateParameters # type: ignore
from ._models import NamespaceListResult # type: ignore
from ._models import NamespacePatchParameters # type: ignore
from ._models import NamespaceResource # type: ignore
from ._models import NotificationHubCreateOrUpdateParameters # type: ignore
from ._models import NotificationHubListResult # type: ignore
from ._models import NotificationHubPatchParameters # type: ignore
from ._models import NotificationHubResource # type: ignore
from ._models import Operation # type: ignore
from ._models import OperationDisplay # type: ignore
from ._models import OperationListResult # type: ignore
from ._models import PnsCredentialsResource # type: ignore
from ._models import PolicykeyResource # type: ignore
from ._models import Resource # type: ignore
from ._models import ResourceListKeys # type: ignore
from ._models import SharedAccessAuthorizationRuleCreateOrUpdateParameters # type: ignore
from ._models import SharedAccessAuthorizationRuleListResult # type: ignore
from ._models import SharedAccessAuthorizationRuleProperties # type: ignore
from ._models import SharedAccessAuthorizationRuleResource # type: ignore
from ._models import Sku # type: ignore
from ._models import SubResource # type: ignore
from ._models import WnsCredential # type: ignore
from ._notification_hubs_management_client_enums import (
AccessRights,
NamespaceType,
SkuName,
)
__all__ = [
'AdmCredential',
'ApnsCredential',
'BaiduCredential',
'CheckAvailabilityParameters',
'CheckAvailabilityResult',
'DebugSendResponse',
'ErrorResponse',
'GcmCredential',
'MpnsCredential',
'NamespaceCreateOrUpdateParameters',
'NamespaceListResult',
'NamespacePatchParameters',
'NamespaceResource',
'NotificationHubCreateOrUpdateParameters',
'NotificationHubListResult',
'NotificationHubPatchParameters',
'NotificationHubResource',
'Operation',
'OperationDisplay',
'OperationListResult',
'PnsCredentialsResource',
'PolicykeyResource',
'Resource',
'ResourceListKeys',
'SharedAccessAuthorizationRuleCreateOrUpdateParameters',
'SharedAccessAuthorizationRuleListResult',
'SharedAccessAuthorizationRuleProperties',
'SharedAccessAuthorizationRuleResource',
'Sku',
'SubResource',
'WnsCredential',
'AccessRights',
'NamespaceType',
'SkuName',
]
| 44.422414 | 94 | 0.751213 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,655 | 0.321172 |
d15558fb9295192842b6c51525c356994ca03dbe | 6,334 | py | Python | utils/util.py | gmshashank/Deep_Flow_Prediction | 9b4c388b70a458cddac20258242a6a36965524bc | [
"MIT"
] | null | null | null | utils/util.py | gmshashank/Deep_Flow_Prediction | 9b4c388b70a458cddac20258242a6a36965524bc | [
"MIT"
] | null | null | null | utils/util.py | gmshashank/Deep_Flow_Prediction | 9b4c388b70a458cddac20258242a6a36965524bc | [
"MIT"
] | null | null | null | import math
import numpy as np
import os
import re
from PIL import Image
import matplotlib.pyplot as plt
from matplotlib import cm
# append line to log file
def log(file, line, doPrint=True):
f = open(file, "a+")
    f.write(line + "\n")
f.close()
if doPrint:
print(line)
# reset log file
def resetLog(file):
f = open(file, "w")
f.close()
def plot_loss(history_L1, history_L1val):
l1train = np.asarray(history_L1)
l1vali = np.asarray(history_L1val)
plt.figure()
plt.plot(np.arange(l1train.shape[0]), l1train, "b", label="Training loss")
plt.plot(np.arange(l1vali.shape[0]), l1vali, "g", label="Validation loss")
plt.legend()
plt.show()
def computeLR(i, epochs, minLR, maxLR):
if i < epochs * 0.5:
return maxLR
e = (i / float(epochs) - 0.5) * 2.0
fmin = 0.0
fmax = 6.0
e = fmin + e * (fmax - fmin)
f = math.pow(0.5, e)
return minLR + (maxLR - minLR) * f
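# Illustrative usage sketch (not part of the original file): computeLR keeps
# the learning rate at maxLR for the first half of training, then decays it
# exponentially toward minLR over the second half.
def _computeLR_demo():
    return [computeLR(epoch, 100, 1e-5, 1e-4) for epoch in (0, 49, 50, 75, 99)]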
def makeDirs(directoryList):
for directory in directoryList:
if not os.path.exists(directory):
os.makedirs(directory)
def imageOut(filename, _outputs, _targets, saveTargets=False, normalize=False, saveMontage=True):
outputs = np.copy(_outputs)
targets = np.copy(_targets)
s = outputs.shape[1]
if saveMontage:
new_img = Image.new("RGB", ((s + 10) * 3, s * 2), color=(255, 255, 255))
BW_img = Image.new("RGB", ((s + 10) * 3, s * 3), color=(255, 255, 255))
for i in range(3):
outputs[i] = np.flipud(outputs[i].transpose())
targets[i] = np.flipud(targets[i].transpose())
min_value = min(np.min(outputs[i]), np.min(targets[i]))
max_value = max(np.max(outputs[i]), np.max(targets[i]))
if normalize:
outputs[i] -= min_value
targets[i] -= min_value
max_value -= min_value
outputs[i] /= max_value
targets[i] /= max_value
else:
outputs[i] -= -1.0
targets[i] -= -1.0
outputs[i] /= 2.0
targets[i] /= 2.0
if not saveMontage:
suffix = ""
if i == 0:
suffix = "_pressure"
elif i == 1:
suffix = "_velX"
else:
suffix = "_velY"
im = Image.fromarray(cm.magma(outputs[i], bytes=True))
im = im.resize((512, 512))
im.save(filename + suffix + "_pred.png")
im = Image.fromarray(cm.magma(targets[i], bytes=True))
if saveTargets:
im = im.resize((512, 512))
im.save(filename + suffix + "_target.png")
else:
im = Image.fromarray(cm.magma(targets[i], bytes=True))
new_img.paste(im, ((s + 10) * i, s * 0))
im = Image.fromarray(cm.magma(outputs[i], bytes=True))
new_img.paste(im, ((s + 10) * i, s * 1))
im = Image.fromarray(targets[i] * 256.0)
BW_img.paste(im, ((s + 10) * i, s * 0))
im = Image.fromarray(outputs[i] * 256.0)
BW_img.paste(im, ((s + 10) * i, s * 1))
im = Image.fromarray(np.abs(targets[i] - outputs[i]) * 10.0 * 256.0)
BW_img.paste(im, ((s + 10) * i, s * 2))
if saveMontage:
new_img.save(filename + ".png")
BW_img.save(filename + "_bw.png")
# prediction-only variant (no targets, no montage); given a distinct name so
# it does not shadow the montage imageOut above, which saveOutput still calls
def imageOutSingle(filename, _outputs, saveTargets=True, normalize=False):
outputs = np.copy(_outputs)
for i in range(3):
outputs[i] = np.flipud(outputs[i].transpose())
min_value = np.min(outputs[i])
max_value = np.max(outputs[i])
if normalize:
outputs[i] -= min_value
max_value -= min_value
outputs[i] /= max_value
else: # from -1,1 to 0,1
outputs[i] -= -1.0
outputs[i] /= 2.0
suffix = ""
if i == 0:
suffix = "_pressure"
elif i == 1:
suffix = "_velX"
else:
suffix = "_velY"
im = Image.fromarray(cm.magma(outputs[i], bytes=True))
im = im.resize((128, 128))
im.save(filename + suffix + "_pred.png")
def saveOutput(output_arr, target_arr):
if target_arr is None:
imageOut("./results/result", output_arr)
else:
imageOut(
"./results/result", output_arr, target_arr, normalize=False, saveMontage=True
) # write normalized with error
class InputData:
def __init__(self, npz_arr, removePOffset=True, makeDimLess=True):
self.input = None
self.target = None
self.max_inputs_0 = 100.0
self.max_inputs_1 = 38.12
self.max_inputs_2 = 1.0
self.max_targets_0 = 4.65
self.max_targets_1 = 2.04
self.max_targets_2 = 2.37
if npz_arr.shape[0] >= 3:
self.input = npz_arr[0:3]
if npz_arr.shape[0] == 6:
self.target = npz_arr[3:6]
self.removePOffset = removePOffset
self.makeDimLess = makeDimLess
self.normalize()
def normalize(self):
if self.target is not None:
if self.removePOffset:
self.target[0, :, :] -= np.mean(self.target[0, :, :]) # remove offset
                self.target[0, :, :] -= self.target[0, :, :] * self.input[2, :, :] # zero the pressure inside the masked (obstacle) region
if self.makeDimLess:
v_norm = (np.max(np.abs(self.input[0, :, :])) ** 2 + np.max(np.abs(self.input[1, :, :])) ** 2) ** 0.5
self.target[0, :, :] /= v_norm ** 2
self.target[1, :, :] /= v_norm
self.target[2, :, :] /= v_norm
self.target[0, :, :] *= 1.0 / self.max_targets_0
self.target[1, :, :] *= 1.0 / self.max_targets_1
self.target[2, :, :] *= 1.0 / self.max_targets_2
if self.input is not None:
self.input[0, :, :] *= 1 / self.max_inputs_0
self.input[1, :, :] *= 1 / self.max_inputs_1
def denormalize(self, data, v_norm):
a = data.copy()
a[0, :, :] /= 1.0 / self.max_targets_0
a[1, :, :] /= 1.0 / self.max_targets_1
a[2, :, :] /= 1.0 / self.max_targets_2
if self.makeDimLess:
a[0, :, :] *= v_norm ** 2
a[1, :, :] *= v_norm
a[2, :, :] *= v_norm
return a
| 30.747573 | 117 | 0.526208 | 1,875 | 0.296021 | 0 | 0 | 0 | 0 | 0 | 0 | 319 | 0.050363 |
d155acb201ce5a145d858874d493c8fbc320df36 | 612 | py | Python | tests/data/pdf_download_data.py | dbbabcock/BOE_tabulator | 4d0a6176a2393610377d24a05536f8c1fe159932 | [
"MIT"
] | 1 | 2021-02-17T02:26:38.000Z | 2021-02-17T02:26:38.000Z | tests/data/pdf_download_data.py | dbbabcock/BOE_tabulator | 4d0a6176a2393610377d24a05536f8c1fe159932 | [
"MIT"
] | null | null | null | tests/data/pdf_download_data.py | dbbabcock/BOE_tabulator | 4d0a6176a2393610377d24a05536f8c1fe159932 | [
"MIT"
] | null | null | null | from .sample_boe_page import HTML_TEXT
# static sample of html text used for testing, scraped from
# https://comptroller.baltimorecity.gov/boe/meetings/minutes
SAMPLE_HTML = HTML_TEXT
# Example set of expected year links pulled from HTML_TEXT
YEAR_LINKS = {
"2020": "/minutes-2020",
"2019": "/2019",
"2018": "/minutes-2018",
"2017": "/boe/meetings/minutes",
"2016": "/minutes-2016-0",
"2015": "/minutes-2015",
"2014": "/minutes-2014",
"2013": "/minutes-2013",
"2012": "/minutes-2012",
"2011": "/minutes-2011",
"2010": "/minutes-2010",
"2009": "/minutes-2009",
}
| 27.818182 | 60 | 0.637255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 431 | 0.704248 |
d15614a481b75b0930ec4f17c90504a24aa1a1b1 | 1,325 | py | Python | Fib Surname.py | Gerrydh/C-S-Exercises | b4b8e6c4142e8f0c69910f5e9dd353e706671618 | [
"Apache-2.0"
] | null | null | null | Fib Surname.py | Gerrydh/C-S-Exercises | b4b8e6c4142e8f0c69910f5e9dd353e706671618 | [
"Apache-2.0"
] | null | null | null | Fib Surname.py | Gerrydh/C-S-Exercises | b4b8e6c4142e8f0c69910f5e9dd353e706671618 | [
"Apache-2.0"
] | null | null | null | # Gerard Hanlon, 30.01.2018
# A program that displays Fibonacci numbers.
def fib(n):
"""This function returns the nth Fibonacci numbers."""
i = 0 # variable i = the first fibonacci number
j = 1 # variable j = the second fibonacci number
n = n - 1 # variable n = n - 1
while n >= 0: # while n is greater than 0
i, j = j, i + j # 0, 1 = 1, 0 + 1
n = n - 1 # we want the script to add the number preceeding it
return i # return the new value of i
name = "Hanlon" # My surname
first = name[0] # The first letter of my Surname- H
last = name [-1] # The last letter of my surname- N
firstno = ord (first) # The fibonacci number for 8- H is the 8th letter in the alphabet
lastno = ord(last) # The fibonacci for number 14- N is the 14th letter in the alphabet
x = firstno + lastno # x = the final fibonacci number we are looking for- The fibonacci numbers of the first and last letters of my surname added together
ans = fib(x) # ans = the fibonacci of x
print("My surname is", name) # prints my surname
print("The first letter", first, "is number", firstno) # returns the fibonacci of the first letter of my surname
print("The last letter", last, "is number", lastno) # returns the fibonacci number of the last letter of my surname
print("Fibonacci number", x, "is", ans) # x = the total fibonacci number
| 47.321429 | 154 | 0.691321 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 996 | 0.751698 |
d1573555106525d0958ee59e807692fbb800871e | 19,560 | py | Python | tools/rest_integration_test.py | osrf/cloudsim-legacy | 01ea7dd2708ed9797a860ac839028ec62fd96a23 | [
"Apache-2.0"
] | null | null | null | tools/rest_integration_test.py | osrf/cloudsim-legacy | 01ea7dd2708ed9797a860ac839028ec62fd96a23 | [
"Apache-2.0"
] | null | null | null | tools/rest_integration_test.py | osrf/cloudsim-legacy | 01ea7dd2708ed9797a860ac839028ec62fd96a23 | [
"Apache-2.0"
] | 1 | 2021-03-16T15:00:51.000Z | 2021-03-16T15:00:51.000Z | #!/usr/bin/env python
from __future__ import print_function
import os
import sys
import unittest
import time
import datetime
import logging
from cloudsim_rest_api import CloudSimRestApi
import traceback
# add cloudsim directory to system path
basepath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, basepath)
print (sys.path)
import cloudsimd.launchers.cloudsim as cloudsim
from cloudsimd.launchers.launch_utils.launch_db import ConstellationState
from cloudsimd.launchers.launch_utils.launch_db import get_unique_short_name
from cloudsimd.launchers.launch_utils.testing import get_test_runner
from cloudsimd.launchers.launch_utils.testing import get_boto_path
from cloudsimd.launchers.launch_utils.testing import get_test_path
CLOUDSIM_CONFIG = "CloudSim-stable (m1.small)"
SIM_CONFIG = "Simulator-stable (g2.2xlarge)" # Simulator-stable (cg1.4xlarge)
CLOUD_CREDS = "aws"
CLOUD_REGION = "us-east-1"
try:
logging.basicConfig(filename='/tmp/rest_integration_test.log',
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
level=logging.DEBUG)
except Exception as e:
print("Can't enable logging: %s" % e)
def create_task_dict(title, launch_file='vrc_task_1.launch'):
"""
Generates a simple task for testing purposes
"""
def _get_now_str(days_offset=0):
"""
Returns a utc string date time format of now, with optional
offset.
"""
dt = datetime.timedelta(days=days_offset)
now = datetime.datetime.utcnow()
t = now - dt
s = t.isoformat()
return s
task = {}
task['task_title'] = title
task['ros_package'] = 'drcsim_gazebo'
task['ros_launch'] = launch_file
task['launch_args'] = ''
task['timeout'] = '3600'
task['latency'] = '0'
task['uplink_data_cap'] = '0'
task['downlink_data_cap'] = '0'
task['local_start'] = _get_now_str(-1) # yesterday
task['local_stop'] = _get_now_str(1) # tomorrow
task['bash_src'] = "/home/ubuntu/cloudsim/sim_setup.bash"
task['vrc_id'] = 1
task['vrc_num'] = 1
return task
class RestException(Exception):
pass
def _diff_list(a, b):
"""
Compares 2 lists and returns the elements in list a only
"""
b = set(b)
return [aa for aa in a if aa not in b]
def launch_constellation_and_wait(api, config, max_count=100):
"""
Launch a new constellation, waits for it to appear, and
returns the new constellation name
"""
# we're about to create a new constellation... this may not
# be the first
previous_constellations = [x['constellation_name'] \
for x in api.get_constellations()]
api.launch_constellation(CLOUD_CREDS, CLOUD_REGION, config)
print("waiting 10 secs")
time.sleep(10)
found = False
count = 0
constellation_name = None
while not found:
count += 1
if count > max_count:
raise RestException("Timeout in Launch %s" % config)
constellation_list = api.get_constellations()
current_names = [x['constellation_name'] \
for x in constellation_list]
new_constellations = _diff_list(current_names, previous_constellations)
print ("%s/%s) new constellations: %s" % (count,
max_count,
new_constellations))
if len(new_constellations) > 0:
found = True
constellation_name = new_constellations[0]
return constellation_name
def terminate_constellation(api,
constellation_name,
sleep_secs=2,
max_count=100):
"""
Terminates a constellation and waits until the process is done.
"""
def exists(api, constellation_name):
constellation_list = api.get_constellations()
current_names = [x['constellation_name'] \
for x in constellation_list]
return constellation_name in current_names
constellation_exists = exists(api, constellation_name)
if not constellation_exists:
raise RestException("terminate_constellation: "
"Constellation '%s' not found" % constellation_name)
# send the termination signal
api.terminate_constellation(constellation_name)
count = 0
while constellation_exists:
time.sleep(sleep_secs)
count += 1
if count > max_count:
raise RestException("Timeout in terminate_constellation %s" % (
constellation_name))
constellation_exists = exists(api, constellation_name)
print("%s/%s %s exists: %s" % (count,
max_count,
constellation_name,
constellation_exists))
def wait_for_constellation_state(api,
constellation_name,
key="constellation_state",
value="running",
max_count=100,
sleep_secs=5):
"""
Polls constellation state key until its value matches value. This is used
to wait until a constellation is ready to run simulations
"""
count = 0
while True:
time.sleep(sleep_secs)
count += 1
if count > max_count:
raise RestException("Timeout in wait for %s = %s "
" for %s" % (key, value, constellation_name))
const_data = api.get_constellation_data(constellation_name)
state = const_data[key]
print("%s/%s) %s [%s] = %s" % (count,
max_count,
constellation_name,
key,
state))
if state == value:
return const_data
def create_task(cloudsim_api, constellation_name, task_dict):
"""
Creates a new task and retrieves the id of the new task. This
requires comparing task names before and after creation
"""
def task_names():
const_data = cloudsim_api.get_constellation_data(constellation_name)
task_names = [x['task_id'] for x in const_data['tasks']]
return task_names
previous_tasks = task_names()
cloudsim_api.create_task(constellation_name, task_dict)
new_tasks = task_names()
delta_tasks = _diff_list(new_tasks, previous_tasks)
new_task_id = delta_tasks[0]
return new_task_id
def wait_for_task_state(cloudsim_api,
constellation_name,
task_id,
target_state,
max_count=100,
sleep_secs=1):
"""
Wait until the task is in a target state (ex "running", or "stopped")
"""
count = 0
while True:
time.sleep(sleep_secs)
count += 1
if count > max_count:
raise RestException("Timeout in start_task"
"%s for %s" % (task_id, constellation_name))
task_dict = cloudsim_api.read_task(constellation_name, task_id)
current_state = task_dict['task_state']
print("%s/%s Task %s: %s" % (count, max_count,
task_id,
current_state))
if current_state == target_state:
return
def run_task(cloudsim_api, constellation_name, task_id,
max_count=100,
sleep_secs=1):
"""
Starts a task and waits for its status to be "running"
"""
# check task
task_dict = cloudsim_api.read_task(constellation_name, task_id)
state = task_dict['task_state']
if state != "ready":
raise RestException("Can't start task in state '%s'" % state)
# run task
cloudsim_api.start_task(constellation_name, task_id)
wait_for_task_state(cloudsim_api,
constellation_name,
task_id,
'running',
max_count,
sleep_secs)
def run_notebook(cloudsim_api, constellation_name):
"""
Starts the notebook service and waits for its status to be "running"
"""
cloudsim_api.start_notebook(constellation_name)
count=100
while count > 0:
time.sleep(5)
count -= 1
r = cloudsim_api.ping_notebook(constellation_name)
print("%s/100 notebook state: %s" % (count, r))
if r == "running":
return
raise RestException("Can't start notebook on %s" % constellation_name)
def stop_notebook(cloudsim_api, constellation_name):
"""
Stops the notebook service and waits for its status to "stopped"
"""
cloudsim_api.stop_notebook(constellation_name)
count=100
while count > 0:
print("count %s/100" % count)
time.sleep(5)
count -= 1
r = cloudsim_api.ping_notebook(constellation_name)
print("%s/100 notebook state: %s" % (count, r))
if r == "":
return
raise RestException("Can't start notebook on %s" % constellation_name)
def run_gzweb(cloudsim_api, constellation_name):
"""
Starts the gzweb service and waits for its status to be "running"
"""
cloudsim_api.start_gzweb(constellation_name)
count=100
while count > 0:
time.sleep(5)
count -= 1
r = cloudsim_api.ping_gzweb(constellation_name)
print("%s/100 gzweb state: %s" % (count, r))
if r == "running":
return
raise RestException("Can't start gzweb on %s" % constellation_name)
def stop_gzweb(cloudsim_api, constellation_name):
"""
Stops the gzweb service and waits for its status to "stopped"
"""
cloudsim_api.stop_gzweb(constellation_name)
count=100
while count > 0:
print("count %s/100" % count)
time.sleep(5)
count -= 1
r = cloudsim_api.ping_gzweb(constellation_name)
print("%s/100 gzweb state: %s" % (count, r))
if r == "":
return
raise RestException("Can't start notebook on %s" % constellation_name)
def stop_task(cloudsim_api, constellation_name, task_id, max_count=100,
sleep_secs=1):
"""
Stops a task and waits for its status to go from "running" to "stopped"
"""
# check task
task_dict = cloudsim_api.read_task(constellation_name, task_id)
state = task_dict['task_state']
if state != "running":
raise RestException("Can't stop task in state '%s'" % state)
# run task
cloudsim_api.stop_task(constellation_name)
wait_for_task_state(cloudsim_api,
constellation_name,
task_id,
'stopped',
max_count,
sleep_secs)
def flush():
"""
Fake method to avoid crashes, because flush is not present on Delegate_io
class used by XMLTestRunner.
"""
pass
class RestTest(unittest.TestCase):
"""
Test that Creates a CloudSim on AWS. A simulator is then launched
from that CloudSim and a simulation task is run.
This test is run by Jenkins when CloudSim code is modified.
"""
def title(self, text):
print("")
print("#######################################")
print("#")
print("# %s" % text)
print("#")
print("#######################################")
def setUp(self):
self.title("setUp")
try:
# provide no op flush to avoid crashes when sys.stdout and stderr
# are overriden to write xml files (when running with Jenkins)
sys.stdout.flush = flush
sys.stderr.flush = flush
except:
print("Using normal sys.stdout and sys.stderr")
self.cloudsim_api = None
self.simulator_name = None
self.papa_cloudsim_name = None
self.baby_cloudsim_name = None
self.user = 'admin'
self.password = 'test123'
self.papa_cloudsim_name = get_unique_short_name('rst')
self.data_dir = get_test_path("rest_test")
self.creds_fname = get_boto_path()
self.ip = None
print("data dir: %s" % self.data_dir)
print("cloudsim constellation: %s" % self.papa_cloudsim_name)
print("user: %s, password: %s" % (self.user, self.password))
def test(self):
self.title("create_cloudsim")
self.ip = cloudsim.create_cloudsim(username=self.user,
credentials_fname=self.creds_fname,
region=CLOUD_REGION,
configuration=CLOUDSIM_CONFIG,
authentication_type="Basic",
password=self.password,
data_dir=self.data_dir,
constellation_name=self.papa_cloudsim_name)
self.assertTrue(True, "cloudsim not created")
print("papa cloudsim %s created in %s" % (self.ip, self.data_dir))
print("\n\n")
print('api = CloudSimRestApi("%s", "%s", "%s")' % (self.ip,
self.user,
self.password))
self.cloudsim_api = CloudSimRestApi(self.ip, self.user, self.password)
cfgs = self.cloudsim_api.get_machine_configs()
try:
print(cfgs.keys())
print(cfgs)
cfgs_creds = cfgs[CLOUD_CREDS]['regions']
cfgs_region = cfgs_creds[CLOUD_REGION]['configurations']
cfgs_names = [x['name'] for x in cfgs_region]
print("configs: %s" % cfgs_names)
        except Exception as e:
import traceback
tb = traceback.format_exc()
print("traceback: %s" % tb)
self.title("launch baby cloudsim")
self.baby_cloudsim_name = launch_constellation_and_wait(
self.cloudsim_api,
config=CLOUDSIM_CONFIG)
print("# baby cloudsim %s launched" % (self.baby_cloudsim_name))
self.assertTrue(True, "baby cloudsim not created")
self.title("launch simulator")
self.simulator_name = launch_constellation_and_wait(self.cloudsim_api,
config=SIM_CONFIG)
print("# Simulator %s launched" % (self.simulator_name))
self.assertTrue(True, "simulator not created")
self.title("Wait for baby cloudsim readyness")
print("api.get_constellation_data('%s')" % self.baby_cloudsim_name)
wait_for_constellation_state(self.cloudsim_api,
self.baby_cloudsim_name,
key="constellation_state",
value="running",
max_count=100)
self.assertTrue(True, "baby cloudsim not ready")
print("# baby cloudsim machine ready")
self.title("Update baby cloudsim")
self.cloudsim_api.update_constellation(self.baby_cloudsim_name)
wait_for_constellation_state(self.cloudsim_api,
self.baby_cloudsim_name,
key="constellation_state",
value="running",
max_count=100)
print("# baby cloudsim machine updated")
self.title("Wait for simulator readyness")
print("api.get_constellation_data('%s')" % self.simulator_name)
wait_for_constellation_state(self.cloudsim_api,
self.simulator_name,
key="launch_stage",
value="running",
max_count=100)
self.assertTrue(True, "simulator not ready")
print("# Simulator machine ready")
self.title("Test notebook")
run_notebook(self.cloudsim_api,self.simulator_name)
stop_notebook(self.cloudsim_api,self.simulator_name)
# the simulator is ready!
self.title("# create task")
print('tid = create_task(api, "%s", '
'create_task_dict("test 0"))' % self.simulator_name)
print("\n\n")
task_dict = create_task_dict("test task 1")
print("%s" % task_dict)
self.task_id = create_task(self.cloudsim_api,
self.simulator_name,
task_dict)
self.assertTrue(True, "task not created")
run_task(self.cloudsim_api,self.simulator_name, self.task_id)
self.title("Test gzweb")
run_gzweb(self.cloudsim_api,self.simulator_name)
stop_gzweb(self.cloudsim_api,self.simulator_name)
self.assertTrue(True, "task not run")
self.title("# stop task")
stop_task(self.cloudsim_api,self.simulator_name, self.task_id)
self.assertTrue(True, "task not stopped")
def tearDown(self):
self.title("tearDown")
self.title("terminate baby cloudsim")
try:
if self.cloudsim_api and self.baby_cloudsim_name:
terminate_constellation(self.cloudsim_api,
self.baby_cloudsim_name)
else:
print("No baby cloudsim created")
        except Exception as e:
print("Error terminating baby cloudsim constellation %s: %s" % (
self.baby_cloudsim_name,
e))
self.title("terminate simulator")
try:
if self.cloudsim_api and self.simulator_name:
terminate_constellation(self.cloudsim_api, self.simulator_name)
else:
print("No simulator created")
        except Exception as e:
print("Error terminating simulator constellation %s: %s" % (
self.simulator_name,
e))
tb = traceback.format_exc()
print("traceback: %s" % tb)
self.title("terminate papa cloudsim")
try:
if self.papa_cloudsim_name and self.ip:
print("terminate cloudsim '%s' %s" % (self.papa_cloudsim_name,
self.ip))
cloudsim.terminate(self.papa_cloudsim_name)
# remove from Redis
constellation = ConstellationState(self.papa_cloudsim_name)
constellation.expire(1)
        except Exception as e:
print("Error terminating papa cloudsim '%s' : %s" % (
self.papa_cloudsim_name,
e))
tb = traceback.format_exc()
print("traceback: %s" % tb)
if __name__ == "__main__":
xmlTestRunner = get_test_runner()
unittest.main(testRunner=xmlTestRunner)
| 36.155268 | 80 | 0.561145 | 8,204 | 0.419427 | 0 | 0 | 0 | 0 | 0 | 0 | 4,741 | 0.242382 |
d1586b82c5202f4d705413de1f0ddf437f02a38a | 5,562 | py | Python | test/test_p4lib_delete.py | Tech-pandit/python-p4lib | 6b5602321c3c79151a1e603c4ef7eac4a405fb68 | [
"MIT"
] | null | null | null | test/test_p4lib_delete.py | Tech-pandit/python-p4lib | 6b5602321c3c79151a1e603c4ef7eac4a405fb68 | [
"MIT"
] | null | null | null | test/test_p4lib_delete.py | Tech-pandit/python-p4lib | 6b5602321c3c79151a1e603c4ef7eac4a405fb68 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2002-2005 ActiveState Corp.
# See LICENSE.txt for license details.
# Author:
# Trent Mick (TrentM@ActiveState.com)
# Home:
# http://trentm.com/projects/px/
"""Test p4lib.py's interface to 'p4 delete'."""
import os
import sys
import unittest
import types
import pprint
import testsupport
from p4lib import P4, P4LibError
class DeleteTestCase(unittest.TestCase):
def test_delete(self):
p4 = P4()
top = os.getcwd()
andrew = testsupport.users['andrew']
try:
os.chdir(andrew['home'])
# First add and submit a file.
fname = 'test_delete.txt'
fout = open(fname, 'w')
fout.write('Hello there.\n')
fout.close()
p4.add(fname)
p4.submit(fname, 'add this file to be deleted')
# Now delete the file.
result = p4.delete(fname)
self.failUnless(result[0]['comment'] == 'opened for delete')
self.failUnless(result[0]['depotFile']\
== p4.where(fname)[0]['depotFile'])
self.failUnless(type(result[0]['rev']) == types.IntType)
opened = p4.opened(fname)
self.failUnless(opened[0]['action'] == 'delete')
self.failUnless(opened[0]['depotFile'] == result[0]['depotFile'])
# cleanup
p4.revert(fname)
finally:
os.chdir(top)
def test_delete_multiple_files(self):
p4 = P4()
top = os.getcwd()
andrew = testsupport.users['andrew']
try:
os.chdir(andrew['home'])
# First add and submit some files.
fname1 = 'test_delete_multiple_files_1.txt'
fname2 = 'test_delete_multiple_files_2.txt'
open(fname1, 'w').write('Hello there 1.\n')
open(fname2, 'w').write('Hello there 2.\n')
p4.add([fname1, fname2])
p4.submit([fname1, fname2], 'add files to be deleted')
# Now delete the files.
results = p4.delete([fname1, fname2])
for result in results:
self.failUnless(result['comment'] == 'opened for delete')
self.failUnless(type(result['rev']) == types.IntType)
# cleanup
p4.revert([fname1, fname2])
finally:
os.chdir(top)
def test_delete_already_opened(self):
p4 = P4()
top = os.getcwd()
andrew = testsupport.users['andrew']
try:
os.chdir(andrew['home'])
# First add and submit a file.
fname = 'test_delete_already_opened.txt'
fout = open(fname, 'w')
fout.write('Hello there.\n')
fout.close()
p4.add(fname)
p4.submit(fname, 'add this file to be deleted')
# Now open it and then try to delete it.
p4.edit(fname)
result = p4.delete(fname)
self.failUnless(result[0]['comment'] != 'opened for delete')
self.failUnless(result[0]['rev'] is None)
# cleanup
p4.revert(fname)
finally:
os.chdir(top)
def test_delete_specify_change(self):
p4 = P4()
top = os.getcwd()
andrew = testsupport.users['andrew']
try:
os.chdir(andrew['home'])
# First add and submit a file.
fname = 'test_delete_specify_change.txt'
fout = open(fname, 'w')
fout.write('Hello there.\n')
fout.close()
p4.add(fname)
p4.submit(fname, 'add this file to be deleted')
# Now delete the file (specifying an existing pending
# change).
c = p4.change([], 'empty pending change for deleted files')
cnum = c['change']
result = p4.delete(fname, change=cnum)
self.failUnless(result[0]['depotFile']\
== p4.where(fname)[0]['depotFile'])
self.failUnless(type(result[0]['rev']) == types.IntType)
c = p4.change(change=cnum)
self.failUnless(c['files'][0]['depotFile']\
== result[0]['depotFile'])
self.failUnless(c['files'][0]['action'] == 'delete')
# cleanup
p4.change(files=[], change=cnum)
p4.change(change=cnum, delete=1)
p4.revert(fname)
finally:
os.chdir(top)
def test_delete_specify_bogus_change(self):
p4 = P4()
top = os.getcwd()
andrew = testsupport.users['andrew']
try:
os.chdir(andrew['home'])
# First add and submit a file.
fname = 'test_delete_specify_bogus_change.txt'
fout = open(fname, 'w')
fout.write('Hello there.\n')
fout.close()
p4.add(fname)
p4.submit(fname, 'add this file to be deleted')
latestCnum = p4.changes(max=1)[0]['change']
# Specify an already submitted change.
self.failUnlessRaises(P4LibError, p4.delete, fname,
change=latestCnum)
# Specify a non-existant change.
self.failUnlessRaises(P4LibError, p4.delete, fname,
change=latestCnum+1)
# cleanup
p4.revert(fname)
finally:
os.chdir(top)
def suite():
"""Return a unittest.TestSuite to be used by test.py."""
return unittest.makeSuite(DeleteTestCase)
| 33.506024 | 77 | 0.530025 | 5,072 | 0.911902 | 0 | 0 | 0 | 0 | 0 | 0 | 1,521 | 0.273463 |
d158b7f689bacb09ad2436e6c7462164b316215a | 1,261 | py | Python | src/infra/repo/base.py | gntzh/fastapi-tmpl | 83cb815c4fb5ced0f87286e485a4089bb0097b8f | [
"MIT"
] | null | null | null | src/infra/repo/base.py | gntzh/fastapi-tmpl | 83cb815c4fb5ced0f87286e485a4089bb0097b8f | [
"MIT"
] | null | null | null | src/infra/repo/base.py | gntzh/fastapi-tmpl | 83cb815c4fb5ced0f87286e485a4089bb0097b8f | [
"MIT"
] | null | null | null | from typing import Any, Generic, Protocol, Type, TypeVar
from loguru import logger
from sqlalchemy import select, func
from sqlalchemy.ext.asyncio import AsyncSession
class ModelBase(Protocol):
id: Any
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        ...
T = TypeVar("T")
ModelT = TypeVar("ModelT", bound=ModelBase)
class FactoryMixin:
def __call__(self: T, session: AsyncSession) -> T:
logger.debug("装填Item session")
self._session = session
return self
class RepoBase(Generic[ModelT], FactoryMixin):
model: Type[ModelT]
_session: AsyncSession
async def get(self, /, id: Any) -> ModelT | None:
return (
await self._session.execute(select(self.model).where(self.model.id == id))
).scalar()
async def get_multi(self, /, offset: int = 0, limit: int = 100) -> list[ModelT]:
return (
(
await self._session.execute(
select(self.model).offset(offset).limit(limit)
)
)
.scalars()
.all()
)
async def count(self) -> int:
return (
(await self._session.execute(select(func.count(self.model.id))))
.scalars()
.one()
)
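# Illustrative usage sketch (not part of the original file; `_User` is a
# minimal hypothetical mapped class): a concrete repository only binds
# `model`, and FactoryMixin injects the AsyncSession per call.
from sqlalchemy import Column, Integer
from sqlalchemy.orm import declarative_base

_Base = declarative_base()


class _User(_Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)


class _UserRepo(RepoBase[_User]):
    model = _User

# usage (inside an async context):
#     repo = _UserRepo()(session)   # session: AsyncSession
#     user = await repo.get(1)
#     total = await repo.count()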
| 23.792453 | 86 | 0.572561 | 1,024 | 0.809486 | 0 | 0 | 0 | 0 | 654 | 0.516996 | 31 | 0.024506 |
d1592de01ccfcfaa6800db9a077337ed4875fae8 | 1,723 | py | Python | shaping.py | kotikkonstantin/convasr | 3d4d7f3627269372ae1eb7ff7423b29838f47ac0 | [
"MIT"
] | 17 | 2019-08-01T07:45:46.000Z | 2022-03-25T05:15:13.000Z | shaping.py | kotikkonstantin/convasr | 3d4d7f3627269372ae1eb7ff7423b29838f47ac0 | [
"MIT"
] | 14 | 2020-05-30T16:18:28.000Z | 2021-06-24T08:08:19.000Z | shaping.py | kotikkonstantin/convasr | 3d4d7f3627269372ae1eb7ff7423b29838f47ac0 | [
"MIT"
] | 6 | 2020-07-10T14:43:02.000Z | 2021-04-08T19:28:53.000Z | import functools
import typing
import torch
# equal to 1T
class _T(torch.Tensor):
pass
class BY(torch.Tensor):
pass
class T(torch.Tensor):
pass
class B(torch.Tensor):
pass
class S(torch.Tensor):
pass
class BCT(torch.Tensor):
pass
class CT(torch.Tensor):
pass
class BCt(torch.Tensor):
pass
class Bt(torch.Tensor):
pass
class TBC(torch.Tensor):
pass
class BT(torch.Tensor):
pass
class BLY(torch.Tensor):
pass
class BS(torch.Tensor):
pass
def is_tensor_hint(cls):
return issubclass(cls, torch.Tensor)
def unbind_tensor_hint(cls):
dims = cls.__name__.split('.')[-1]
return dims
def shapecheck(hints = None, auto = None, **kwargs):
if auto is not None:
def decorator(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
shapecheck.hints = typing.get_type_hints(fn)
if auto:
shapecheck(hints = {}, **kwargs)
res = fn(*args, **kwargs)
if auto:
shapecheck(hints = {}, **kwargs, **{'return' : res})
shapecheck.hints = {}
return res
return wrapper
return decorator
else:
hints = hints or shapecheck.hints
dims = {}
for k, v in kwargs.items():
h = hints.get(k)
if h is not None:
if is_tensor_hint(h):
tensor_dims = unbind_tensor_hint(h)
					assert v.ndim == len(tensor_dims), f'Tensor [{k}] should be typed [{tensor_dims}] and should have rank {len(tensor_dims)} but has rank [{v.ndim}]'
for i, d in enumerate(tensor_dims):
s = v.shape[i]
if d in dims:
							assert dims[d] == s, f'Tensor [{k}] should be typed [{tensor_dims}]; dim [{d}] should have size [{dims[d]}] but has size [{s}]'
dims[d] = s
else:
assert isinstance(v, h), f'Arg [{k}] should be typed [{h}] but is typed [{type(v)}]'
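# Illustrative usage sketch (not part of the original file): annotate a
# function with the tensor hints above and wrap it with shapecheck(auto=True);
# tensors must be passed as keyword arguments for the kwargs-based check.
def _shapecheck_demo():
	@shapecheck(auto=True)
	def collapse_channels(x: BCT) -> BT:
		return x.sum(dim=1)  # (B, C, T) -> (B, T)
	return collapse_channels(x=torch.zeros(2, 3, 5))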
| 20.270588 | 149 | 0.647707 | 378 | 0.219385 | 0 | 0 | 295 | 0.171213 | 0 | 0 | 298 | 0.172954 |
d15966aff54460eabbd17a44b8dbeb7ba2af747c | 305 | py | Python | snmp/simple_snmp.py | gahlberg/pynet_class_work | 2389e7e5717d4b479ee002ada3b45694b7566756 | [
"Apache-2.0"
] | null | null | null | snmp/simple_snmp.py | gahlberg/pynet_class_work | 2389e7e5717d4b479ee002ada3b45694b7566756 | [
"Apache-2.0"
] | null | null | null | snmp/simple_snmp.py | gahlberg/pynet_class_work | 2389e7e5717d4b479ee002ada3b45694b7566756 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from snmp_helper import snmp_get_oid,snmp_extract
COMMUNITY_STRING = 'galileo'
SNMP_PORT = 7961
IP = '50.76.53.27'
a_device = (IP, COMMUNITY_STRING, SNMP_PORT)
OID = '1.3.6.1.2.1.1.1.0'
snmp_data = snmp_get_oid(a_device, oid=OID)
output = snmp_extract(snmp_data)
print(output)
| 16.944444 | 49 | 0.740984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.203279 |
d15b50d016c594fe47cebba986b4e8896fb93412 | 8,037 | py | Python | titanic2.py | kyzoon/kaggle_titanic | 9aad72932343d3387b744688cb1cd7edbfd4ef41 | [
"MIT"
] | null | null | null | titanic2.py | kyzoon/kaggle_titanic | 9aad72932343d3387b744688cb1cd7edbfd4ef41 | [
"MIT"
] | null | null | null | titanic2.py | kyzoon/kaggle_titanic | 9aad72932343d3387b744688cb1cd7edbfd4ef41 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'preston.zhu'
import numpy as np
import pandas as pd
import re
import operator
from sklearn.ensemble import RandomForestClassifier, ExtraTreesRegressor
import pdb
def get_title(name):
	# regex search for a title pattern like ' Mr.'
title_search = re.search(' ([A-Za-z]+)\.', name)
	# the match object is non-None when a title was found
if title_search:
		# group(0) is the full match (including the leading space), so take capture group(1)
return title_search.group(1)
return ""
family_id_mapping = {}
def get_family_id(row):
last_name = row['Name'].split(',')[0]
family_id = "{0}{1}".format(last_name, row['FamilySize'])
if family_id not in family_id_mapping:
if len(family_id_mapping) == 0:
current_id = 1
else:
			# operator.itemgetter(1) extracts the id from each (family_id, id) pair
			# dict.items() exposes the mapping entries as tuples
			# max(..., key=...) picks the entry with the largest id assigned so far
current_id = (max(family_id_mapping.items(), key=operator.itemgetter(1))[1] + 1)
		# map the surname-based family key to a numeric family id
family_id_mapping[family_id] = current_id
return family_id_mapping[family_id]
# classify a passenger into one of three groups by age and sex
def get_person(passenger):
age, sex = passenger
	if age < 14: # children are defined as under age 14
return 'child'
elif sex == 'female':
return 'female_adult'
else:
return 'male_adult'
# extract the surname from the full name
def process_surname(nm):
return nm.split(',')[0].lower()
perishing_female_surnames = []
def perishing_mother_wife(passenger):
surname, Pclass, person = passenger
	# return 1 for adult females who perished and travelled with family, else 0
return 1.0 if (surname in perishing_female_surnames) else 0.0
surviving_male_surnames = []
def surviving_father_husband(passenger):
surname, Pclass, person = passenger
return 1.0 if (surname in surviving_male_surnames) else 0.0
"""
特征工程:
采用将训练集与测试集的数据拼接在一起,然后进行回归,再补充'Age'的缺失值,这是一个很好的方法;
移除掉'Ticket'特征
'Embarked'特征采用众数'S'补充缺失值
'Fare'采用中间值补充缺件值
增加'TitleCat'特征:从名称中抽取表示个人身份地位的称为来表示
增加'CabinCat'特征:先将缺件值补充字符'0',然后提取第一个字符做为其分类。缺失值太多,另作为一个分类
增加'EmbarkedCat'特征:由'Cabin'特征转换成数值分类表示
增加'Sex_male'和'Sex_female'两个特征:由'Sex'特征仿拟(dummy)
增加'FamilySize'特征:由'SibSp'和'Parch'两个特征之各,表示同行家人数量
增加'NameLength'特征:由名称字符数量表示。
增加'FamilyId'特征:先提取'Name'特征中的姓氏,并按字母排序编号得出。另外,同行家人少于3人的,'FamilyId'统一
归于-1类
增加'person'特征:由'Age'和'Sex'特征,小于14岁定义为儿童'child',大于14岁的女性定义为成年女性
'female_adult',大于14岁的男性定义为成年男性'male_adult'
增加'persion_child', 'person_female_adult', 'person_male_adult'三个特征:由'person'特征仿拟(dummy)
增加'surname'特征:由'Name'特征提取出姓氏部分
增加'perishing_mother_wife'特征:过逝的母亲或妻子,对家人的存活影响会比较大
增加‘surviving_father_husband'特征:存活的父亲或丈夫,对家人的存活影响也会比较大
最后选择进行训练的特征为:
'Age', 'Fare', 'Parch', 'Pclass', 'SibSp','male_adult', 'female_adult', 'child',
'perishing_mother_wife', 'surviving_father_husband', 'TitleCat', 'CabinCat',
'Sex_female', 'Sex_male', 'EmbarkedCat', 'FamilySize', 'NameLength', 'FamilyId'
由于经过拼接,所以需要对训练集与测试集进行拆分,前891个实例为训练集,后418个实例为测试集
"""
def features():
train_data = pd.read_csv("input/train.csv", dtype={"Age": np.float64})
test_data = pd.read_csv("input/test.csv", dtype={"Age": np.float64})
	# stack the two DataFrames vertically (axis=0), test_data after train_data
combined2 = pd.concat([train_data, test_data], axis=0)
	# drop the 'Ticket' column; axis=1 selects columns, inplace=True modifies combined2 itself
combined2.drop(['Ticket'], axis=1, inplace=True)
	# fill missing 'Embarked' values with the mode 'S'
	# with inplace=True the fill is applied to the Series itself
combined2.Embarked.fillna('S', inplace=True)
	# fill missing 'Fare' values with the median
combined2.Fare.fillna(combined2.Fare[combined2.Fare.notnull()].median(), inplace=True)
	# create the 'Title' feature by extracting the title from 'Name'
	# Series.apply(func) calls func once for every element of the Series
combined2['Title'] = combined2["Name"].apply(get_title)
title_mapping = {
"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Dr": 5, "Rev": 6, "Major": 7,
"Col": 7, "Mlle": 8, "Mme": 8, "Don": 7, "Dona": 10, "Lady": 10,
"Countess": 10, "Jonkheer": 10, "Sir": 7, "Capt": 7, "Ms": 2
}
	# create 'TitleCat' by mapping each 'Title' to a number
	# map() looks every element of 'Title' up in the dict
combined2["TitleCat"] = combined2.loc[:, 'Title'].map(title_mapping)
	# create 'CabinCat': fill missing values with '0', otherwise take the first letter
	# Categorical collects the distinct items in ascending order and encodes each as an integer
combined2["CabinCat"] = pd.Categorical(combined2.Cabin.fillna('0').apply(lambda x: x[0])).codes
	# fill missing 'Cabin' values with '0'
combined2.Cabin.fillna('0', inplace=True)
	# encode the 'Embarked' categories as integers
combined2['EmbarkedCat'] = pd.Categorical(combined2.Embarked).codes
	# concatenate along columns into a new DataFrame: add the two sex dummy
	# columns and move 'Survived' to the last column
	# pandas.get_dummies() rebuilds 'Sex' as 'Sex_male' and 'Sex_female';
	# in 'Sex_male', male is 1 and female is 0, and 'Sex_female' is the opposite
full_data = pd.concat([
combined2.drop(['Survived'], axis=1),
pd.get_dummies(combined2.Sex, prefix='Sex'),
combined2.Survived
], axis=1)
	# create 'FamilySize' as the sum of the 'SibSp' and 'Parch' counts of accompanying family
full_data['FamilySize'] = full_data['SibSp'] + full_data['Parch']
	# create 'NameLength' as the character length of the name
full_data['NameLength'] = full_data.Name.apply(lambda x: len(x))
family_ids = full_data.apply(get_family_id, axis=1)
	# lump all families with fewer than 3 members into the single class -1
family_ids[full_data['FamilySize'] < 3] = -1
	# create the 'FamilyId' feature
full_data['FamilyId'] = family_ids
	# append the 'person' column: child, female_adult or male_adult, from age and sex
full_data = pd.concat([
full_data,
pd.DataFrame(full_data[['Age', 'Sex']].apply(get_person, axis=1), columns=['person'])
], axis=1)
# dummies person
dummies = pd.get_dummies(full_data['person'])
	# append the 'child', 'female_adult' and 'male_adult' dummy columns
full_data = pd.concat([full_data, dummies], axis=1)
	# create the 'surname' feature from the family-name part of 'Name'
full_data['surname'] = full_data['Name'].apply(process_surname)
	# select adult females who perished and had accompanying family, dropping duplicates;
	# the module-level surname lists must be declared global here, so that the
	# apply() helpers defined above see the filled lists instead of the empty defaults
	global perishing_female_surnames, surviving_male_surnames
perishing_female_surnames = list(set(full_data[
(full_data.female_adult == 1.0)
& (full_data.Survived == 0.0)
& ((full_data.Parch > 0) | (full_data.SibSp > 0))]['surname'].values))
	# create 'perishing_mother_wife': 1 for a perished adult female with accompanying family, else 0
full_data['perishing_mother_wife'] \
= full_data[['surname', 'Pclass', 'person']].apply(perishing_mother_wife, axis=1)
	# select adult males who survived and had accompanying family, dropping duplicates
surviving_male_surnames = list(set(full_data[
(full_data.male_adult == 1.0)
& (full_data.Survived == 1.0)
& ((full_data.Parch > 0) | (full_data.SibSp > 0))]['surname']))
full_data['surviving_father_husband'] \
= full_data[['surname', 'Pclass', 'person']].apply(surviving_father_husband, axis=1)
	# define the feature list used to impute 'Age'
classers = [
'Fare', 'Parch', 'Pclass', 'SibSp', 'TitleCat', 'CabinCat', 'Sex_female', 'Sex_male',
'EmbarkedCat', 'FamilySize', 'NameLength', 'FamilyId'
]
	# ExtraTreesRegressor: regress the missing 'Age' values from rows where 'Age' is known
age_et = ExtraTreesRegressor(n_estimators=200)
	# rows with a non-null 'Age' form the regression training set
X_train = full_data.loc[full_data.Age.notnull(), classers]
	# their 'Age' values are the training labels
Y_train = full_data.loc[full_data.Age.notnull(), ['Age']]
	# rows with a null 'Age' are the rows to predict
X_test = full_data.loc[full_data.Age.isnull(), classers]
	# np.ravel() flattens the label frame to a 1-D array
age_et.fit(X_train, np.ravel(Y_train))
age_preds = age_et.predict(X_test)
	# write the predicted ages back into the dataset
full_data.loc[full_data.Age.isnull(), ['Age']] = age_preds
	# define the feature list used by the survival model
model_dummys = [
'Age', 'Fare', 'Parch', 'Pclass', 'SibSp','male_adult', 'female_adult', 'child',
'perishing_mother_wife', 'surviving_father_husband', 'TitleCat', 'CabinCat',
'Sex_female', 'Sex_male', 'EmbarkedCat', 'FamilySize', 'NameLength', 'FamilyId'
]
	# split back into the original training (first 891 rows) and test (remaining 418 rows) sets
X_data = full_data.iloc[:891, :]
X_train = X_data.loc[:, model_dummys]
Y_data = full_data.iloc[:891, :]
y_train = Y_data.loc[:, ['Survived']]
X_t_data = full_data.iloc[891:, :]
X_test = X_t_data.loc[:, model_dummys]
test_PassengerId = X_t_data.PassengerId.as_matrix()
return X_train, y_train, X_test, test_PassengerId
def titanic():
print('Preparing Data...')
X_train, y_train, X_test, test_PassengerId = features()
print('Train RandomForestClassifier Model...')
	# random forest model
model_rf = RandomForestClassifier(n_estimators=300,
min_samples_leaf=4,
class_weight={0:0.745,1:0.255})
	# train
model_rf.fit(X_train, np.ravel(y_train))
	print('Predicting...')
model_results = model_rf.predict(X_test)
print('Generate Submission File...')
submission = pd.DataFrame({
'PassengerId': test_PassengerId,
'Survived': model_results.astype(np.int32)
})
submission.to_csv('prediction7.csv', index=False)
print('Done.')
if __name__ == '__main__':
titanic() | 32.938525 | 96 | 0.725395 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,865 | 0.57147 |
d15c49fa119b79934420f79c9fd8d2957dc1d3c8 | 397 | py | Python | genericCode/sortingAlgorithms/CountingSort.py | tejasnikumbh/Algorithms | 2a2983a522be295ce95bd970a0ee8a617866992f | [
"BSD-2-Clause"
] | 8 | 2015-04-16T03:43:49.000Z | 2018-08-14T22:47:03.000Z | genericCode/sortingAlgorithms/CountingSort.py | tejasnikumbh/Algorithms | 2a2983a522be295ce95bd970a0ee8a617866992f | [
"BSD-2-Clause"
] | null | null | null | genericCode/sortingAlgorithms/CountingSort.py | tejasnikumbh/Algorithms | 2a2983a522be295ce95bd970a0ee8a617866992f | [
"BSD-2-Clause"
] | 7 | 2016-03-22T20:29:27.000Z | 2018-09-29T18:55:47.000Z | '''
Plain counting sort of non-negative integer keys, without a companion
array to carry satellite data (it sorts the keys only)
Time Complexity = O(n + k), where k = max(a)
Space Complexity = O(n + k)
Auxiliary Space = O(k)
'''
def countingSort(a):
b = [0]*(max(a) + 1)
c = []
for i in range(len(a)):
b[a[i]] += 1
for i in range(len(b)):
        if b[i] != 0:
for j in range(b[i]):
c.append(i)
return c
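# Illustrative usage sketch (not part of the original file); the function
# assumes non-negative integer keys, since max(a) sizes the count array.
def demoCountingSort():
    return countingSort([4, 2, 2, 8, 3, 3, 1])  # -> [1, 2, 2, 3, 3, 4, 8]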
| 23.352941 | 70 | 0.493703 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.410579 |
d15cc02ff61cc22a23fc2bcad4c8987b71be6858 | 2,768 | py | Python | src/td/auth.py | annihilatorrrr/opentele | ff90c36a867cf7902e80f480a35041c5e7902e4c | [
"MIT"
] | 30 | 2022-01-17T20:46:02.000Z | 2022-03-31T18:49:07.000Z | src/td/auth.py | studasd/opentele | ff90c36a867cf7902e80f480a35041c5e7902e4c | [
"MIT"
] | 4 | 2022-02-13T10:21:12.000Z | 2022-03-28T16:05:51.000Z | src/td/auth.py | studasd/opentele | ff90c36a867cf7902e80f480a35041c5e7902e4c | [
"MIT"
] | 9 | 2022-01-24T18:02:08.000Z | 2022-03-24T14:23:16.000Z | from __future__ import annotations
from .configs import *
from . import shared as td
import hashlib
# if TYPE_CHECKING:
# from ..opentele import *
class AuthKeyType(IntEnum):
"""
Type of `AuthKey`
### Attributes:
Generated (`IntEnum`):
Generated key
Temporary (`IntEnum`):
Temporary key
ReadFromFile (`IntEnum`):
            Key read from file
Local (`IntEnum`):
Local key
"""
Generated = 0
Temporary = 1
ReadFromFile = 2
Local = 3
class AuthKey(BaseObject):
"""
Authorization key used for [MTProto](https://core.telegram.org/mtproto)
It's also used to encrypt and decrypt local tdata
### Attributes:
DcId (DcId):
Data Center ID (from 1 to 5).
type (AuthKeyType):
Type of the key.
key (bytes):
The actual key, 256 `bytes` in length.
"""
kSize = 256
def __init__(self, key: bytes = bytes(), type: AuthKeyType = AuthKeyType.Generated, dcId: DcId = DcId.Invalid) -> None: # type: ignore
self.__type = type
self.__dcId = dcId
self.__key = key
# if (type == self.Type.Generated) or (type == self.Type.Temporary):
# self.__creationtime = ...
self.__countKeyId()
@property
def dcId(self) -> DcId:
return self.__dcId
@property
def type(self) -> AuthKeyType:
return self.__type
@property
def key(self) -> bytes:
return self.__key
def write(self, to: QDataStream) -> None:
to.writeRawData(self.key)
def __countKeyId(self) -> None:
hash = hashlib.sha1(self.__key).digest()
self.__keyId = int.from_bytes(hash[12 : 12 + 8], "little")
def prepareAES_oldmtp(
self, msgKey: bytes, send: bool
) -> typing.Tuple[bytes, bytes]:
x = 0 if send else 8
sha1_a = hashlib.sha1(msgKey[:16] + self.__key[x : x + 32]).digest()
sha1_b = hashlib.sha1(
self.__key[x + 32 : x + 32 + 16]
+ msgKey[:16]
+ self.__key[x + 48 : x + 48 + 16]
).digest()
sha1_c = hashlib.sha1(self.__key[x + 64 : x + 64 + 32] + msgKey[:16]).digest()
sha1_d = hashlib.sha1(msgKey[:16] + self.__key[x + 96 : x + 96 + 32]).digest()
aesKey = sha1_a[:8] + sha1_b[8 : 8 + 12] + sha1_c[4 : 4 + 12]
aesIv = sha1_a[8 : 8 + 12] + sha1_b[:8] + sha1_c[16 : 16 + 4] + sha1_d[:8]
return aesKey, aesIv
@staticmethod
def FromStream(
stream: QDataStream,
type: AuthKeyType = AuthKeyType.ReadFromFile,
dcId: DcId = DcId(0),
) -> AuthKey:
keyData = stream.readRawData(AuthKey.kSize)
return AuthKey(keyData, type, dcId)
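# Illustrative usage sketch (not part of the original library): build an
# AuthKey from 256 random bytes and derive the oldmtp AES key/iv pair for an
# outgoing message; both derived values are 32 bytes long.
def _auth_key_demo():
    import os

    key = AuthKey(os.urandom(AuthKey.kSize))
    aes_key, aes_iv = key.prepareAES_oldmtp(os.urandom(16), send=True)
    return len(aes_key), len(aes_iv)  # (32, 32)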
| 25.394495 | 139 | 0.561055 | 2,609 | 0.942558 | 0 | 0 | 458 | 0.165462 | 0 | 0 | 807 | 0.291546 |
d15d785d728aebc40b0768e439bd949eef225e9d | 1,867 | py | Python | qingmi/utils/crypto.py | xiongxianzhu/qingmi | ae5a446abec3982ebf2c5dde8546ef72f9453137 | [
"BSD-3-Clause"
] | 20 | 2018-05-22T09:29:40.000Z | 2020-12-11T04:53:15.000Z | qingmi/utils/crypto.py | xiongxianzhu/qingmi | ae5a446abec3982ebf2c5dde8546ef72f9453137 | [
"BSD-3-Clause"
] | 65 | 2019-03-07T02:43:06.000Z | 2021-01-07T03:43:52.000Z | qingmi/utils/crypto.py | xiongxianzhu/qingmi | ae5a446abec3982ebf2c5dde8546ef72f9453137 | [
"BSD-3-Clause"
] | 6 | 2019-03-08T06:39:47.000Z | 2021-07-01T11:02:56.000Z | # coding: utf-8
"""
Qingmi's standard crypto functions and utilities.
"""
import hashlib
import hmac
import random
import time
import base64
def get_random_string(length=12,
allowed_chars='abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
""" 生成随机的字符串, 默认长度12个字符 """
return ''.join(random.choice(allowed_chars) for i in range(length))
def get_random_secret_key():
""" 生成一个50个字符组成的随机字符串作为SECRET_KEY的setting值 """
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
return get_random_string(50, chars)
def get_phone_verify_code(length=4):
""" 生成手机短信验证码 """
chars = '0123456789'
return get_random_string(length, chars)
def get_email_verify_code(length=4):
""" 生成邮箱验证码 """
chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' \
+ '0123456789'
return get_random_string(length, chars)
def get_session_id(length=48):
""" 生成session id字符串 """
chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' \
+ '0123456789-_'
return get_random_string(length, chars)
def get_invite_code(length=6):
""" 生成邀请码 """
chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
return get_random_string(length, chars)
def md5(data):
""" md5算法加密字符串 """
""" type(data): str """
m = hashlib.md5()
m.update(data.encode('utf-8'))
return m.hexdigest()
def b64(data):
""" base64 encode """
""" type(data): str """
base64_encrypt = base64.b64encode(data.encode('utf-8'))
return str(base64_encrypt, 'utf-8')
def b64decode(data):
""" base64 decode """
base64_decrypt = base64.b64decode(data.encode('utf-8'))
return str(base64_decrypt, 'utf-8')
def base64_md5(data):
""" 进行MD5加密,然后Base64编码 """
""" type(data): str """
return b64(md5(data))
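# Illustrative usage sketch (not part of the original module):
def _crypto_demo():
    token = b64('hello')                  # 'aGVsbG8='
    assert b64decode(token) == 'hello'    # round trip
    return md5('hello')                   # '5d41402abc4b2a76b9719d911017c592'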
| 24.246753 | 76 | 0.658811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 863 | 0.426594 |
d15dbe35e2489fb154babfaa98a14ea5839eeee9 | 1,286 | py | Python | chap3-3.py | mikidake/Ex3 | 0dcbc3c673d3311914e90febf26b578499658535 | [
"MIT"
] | null | null | null | chap3-3.py | mikidake/Ex3 | 0dcbc3c673d3311914e90febf26b578499658535 | [
"MIT"
] | null | null | null | chap3-3.py | mikidake/Ex3 | 0dcbc3c673d3311914e90febf26b578499658535 | [
"MIT"
] | null | null | null | # Chapter3 Ex.3)
# Modification of 'Guess My Number'
# Guess My Number
#
# The computer picks a random number between 1 and 100
# The player tries to guess it and the computer lets
# the player know if the guess is too high, too low
# or right on the money
# The player can try to guess up to three times
import random
print("\tWelcome to 'Guess My Number'!")
print("\nI'm thinking of a number between 1 and 100.")
print("Try to guess it in as few attempts as possible.")
print("You have three chances to try!\n")
# set the initial values
the_number = random.randint(1, 100)
tries = 1
maxTries = 3
# guessing loop
while tries <= maxTries:
guess = int(input("Take a guess: "))
if guess == the_number:
print("\nYou guessed it! The number was", the_number)
if tries == 1:
print("And it only took you", tries, "try!\n")
break
else:
print("And it only took you", tries, "tries!\n")
break
elif guess > the_number:
print("Guess Lower..")
else:
print("Guess Higher..")
tries += 1
if tries > maxTries:
print("\nSorry you ran out of tries! The random number was ", the_number)
input("\n\nPress the enter key to exit.") | 29.906977 | 82 | 0.615086 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 740 | 0.575428 |
d15f9a96fb68b6e5f7144ec6a07111b116feb832 | 702 | py | Python | setup.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 3 | 2016-12-09T06:05:18.000Z | 2018-03-01T13:00:29.000Z | setup.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 1 | 2020-12-02T00:51:32.000Z | 2020-12-02T08:48:55.000Z | setup.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup, find_packages
version = '0.1.2'
setup(
name = 'etsproxy',
version = version,
description = 'proxy modules for backwards compatibility',
long_description = open('README.rst').read(),
packages = find_packages(),
author = 'Enthought, Inc.',
author_email = 'info@enthought.com',
download_url = ('http://www.enthought.com/repo/ets/etsproxy-%s.tar.gz' %
version),
license = 'BSD',
maintainer = 'ETS Developers',
maintainer_email = 'enthought-dev@enthought.com',
namespace_packages = ['enthought'],
entry_points = dict(console_scripts=[
'ets3to4 = enthought.ets3to4:main',
]),
)
| 28.08 | 76 | 0.632479 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 258 | 0.367521 |
d15fa493017a48b72a5d8296a6eda7a3577ea9b0 | 618 | py | Python | VSCode_work/chapter5/chapter5_5_5.py | yangyahu-1994/Python-Crash-Course | 6f8ef7fe8466d88931a0d3cc423ba5d966663b9d | [
"MIT"
] | 12 | 2020-10-22T14:03:27.000Z | 2022-03-28T08:14:22.000Z | VSCode_work/chapter5/chapter5_5_5.py | syncccc/Python-Crash-Course | 51fe429dd606583a790f3c1603bb3439382c09e0 | [
"MIT"
] | null | null | null | VSCode_work/chapter5/chapter5_5_5.py | syncccc/Python-Crash-Course | 51fe429dd606583a790f3c1603bb3439382c09e0 | [
"MIT"
] | 9 | 2020-12-22T10:22:12.000Z | 2022-03-28T08:14:53.000Z | # create a variable: the alien is green
alien_color = 'green'
# conditional checks
if alien_color == 'green':
print("You get 5 points.")
elif alien_color == 'yellow':
print("\nYou get 10 points.")
else:
print("\nYou get 15 points.")
# create a variable: the alien is yellow
alien_color = 'yellow'
# conditional checks
if alien_color == 'green':
print("You get 5 points.")
elif alien_color == 'yellow':
print("\nYou get 10 points.")
else:
print("\nYou get 15 points.")
# create a variable: the alien is red
alien_color = 'red'
# conditional checks
if alien_color == 'green':
print("You get 5 points.")
elif alien_color == 'yellow':
print("\nYou get 10 points.")
else:
print("\nYou get 15 points.") | 19.3125 | 33 | 0.634304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 395 | 0.562678 |
d160b7200db507198e23acc84368e5834d8ad4b2 | 11,630 | py | Python | openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py | Tilix4/OpenPype | 8909bd890170880aa7ec8b673abaa25a9bdf40f2 | [
"MIT"
] | 1 | 2022-03-23T06:24:24.000Z | 2022-03-23T06:24:24.000Z | openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py | Tilix4/OpenPype | 8909bd890170880aa7ec8b673abaa25a9bdf40f2 | [
"MIT"
] | null | null | null | openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py | Tilix4/OpenPype | 8909bd890170880aa7ec8b673abaa25a9bdf40f2 | [
"MIT"
] | null | null | null | import copy
import json
import collections
import ftrack_api
from openpype_modules.ftrack.lib import (
ServerAction,
statics_icon,
)
from openpype_modules.ftrack.lib.avalon_sync import create_chunks
class TransferHierarchicalValues(ServerAction):
"""Transfer values across hierarhcical attributes.
Aalso gives ability to convert types meanwhile. That is limited to
conversions between numbers and strings
- int <-> float
- in, float -> string
"""
identifier = "transfer.hierarchical.values"
label = "OpenPype Admin"
variant = "- Transfer values between 2 custom attributes"
description = (
"Move values from a hierarchical attribute to"
" second hierarchical attribute."
)
icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg")
all_project_entities_query = (
"select id, name, parent_id, link"
" from TypedContext where project_id is \"{}\""
)
cust_attr_query = (
"select value, entity_id from CustomAttributeValue"
" where entity_id in ({}) and configuration_id is \"{}\""
)
settings_key = "transfer_values_of_hierarchical_attributes"
def discover(self, session, entities, event):
"""Show anywhere."""
return self.valid_roles(session, entities, event)
def _selection_interface(self, session, event_values=None):
title = "Transfer hierarchical values"
attr_confs = session.query(
(
"select id, key from CustomAttributeConfiguration"
" where is_hierarchical is true"
)
).all()
attr_items = []
for attr_conf in attr_confs:
attr_items.append({
"value": attr_conf["id"],
"label": attr_conf["key"]
})
if len(attr_items) < 2:
return {
"title": title,
"items": [{
"type": "label",
"value": (
"Didn't found custom attributes"
" that can be transfered."
)
}]
}
attr_items = sorted(attr_items, key=lambda item: item["label"])
items = []
item_splitter = {"type": "label", "value": "---"}
items.append({
"type": "label",
"value": (
"<h2>Please select source and destination"
" Custom attribute</h2>"
)
})
items.append({
"type": "label",
"value": (
"<b>WARNING:</b> This will take affect for all projects!"
)
})
if event_values:
items.append({
"type": "label",
"value": (
"<b>Note:</b> Please select 2 different custom attributes."
)
})
items.append(item_splitter)
src_item = {
"type": "enumerator",
"label": "Source",
"name": "src_attr_id",
"data": copy.deepcopy(attr_items)
}
dst_item = {
"type": "enumerator",
"label": "Destination",
"name": "dst_attr_id",
"data": copy.deepcopy(attr_items)
}
delete_item = {
"type": "boolean",
"name": "delete_dst_attr_first",
"label": "Delete first",
"value": False
}
if event_values:
src_item["value"] = event_values["src_attr_id"]
dst_item["value"] = event_values["dst_attr_id"]
delete_item["value"] = event_values["delete_dst_attr_first"]
items.append(src_item)
items.append(dst_item)
items.append(item_splitter)
items.append({
"type": "label",
"value": (
"<b>WARNING:</b> All values from destination"
" Custom Attribute will be removed if this is enabled."
)
})
items.append(delete_item)
return {
"title": title,
"items": items
}
def interface(self, session, entities, event):
if event["data"].get("values", {}):
return None
return self._selection_interface(session)
def launch(self, session, entities, event):
values = event["data"].get("values", {})
if not values:
return None
src_attr_id = values["src_attr_id"]
dst_attr_id = values["dst_attr_id"]
delete_dst_values = values["delete_dst_attr_first"]
if not src_attr_id or not dst_attr_id:
self.log.info("Attributes were not filled. Nothing to do.")
return {
"success": True,
"message": "Nothing to do"
}
if src_attr_id == dst_attr_id:
self.log.info((
"Same attributes were selected {}, {}."
" Showing interface again."
).format(src_attr_id, dst_attr_id))
return self._selection_interface(session, values)
        # Query custom attributes
src_conf = session.query((
"select id from CustomAttributeConfiguration where id is {}"
).format(src_attr_id)).one()
dst_conf = session.query((
"select id from CustomAttributeConfiguration where id is {}"
).format(dst_attr_id)).one()
src_type_name = src_conf["type"]["name"]
dst_type_name = dst_conf["type"]["name"]
# Limit conversion to
# - same type -> same type (there is no need to do conversion)
# - number <Any> -> number <Any> (int to float and back)
# - number <Any> -> str (any number can be converted to str)
src_type = None
dst_type = None
if src_type_name == "number" or src_type_name != dst_type_name:
            src_type = self._get_attr_type(src_conf)
dst_type = self._get_attr_type(dst_conf)
valid = False
# Can convert numbers
if src_type in (int, float) and dst_type in (int, float):
valid = True
# Can convert numbers to string
elif dst_type is str:
valid = True
if not valid:
self.log.info((
"Don't know how to properly convert"
" custom attribute types {} > {}"
).format(src_type_name, dst_type_name))
return {
"message": (
"Don't know how to properly convert"
" custom attribute types {} > {}"
).format(src_type_name, dst_type_name),
"success": False
}
# Query source values
src_attr_values = session.query(
(
"select value, entity_id"
" from CustomAttributeValue"
" where configuration_id is {}"
).format(src_attr_id)
).all()
self.log.debug("Queried source values.")
failed_entity_ids = []
if dst_type is not None:
self.log.debug("Converting source values to desctination type")
value_by_id = {}
for attr_value in src_attr_values:
entity_id = attr_value["entity_id"]
value = attr_value["value"]
if value is not None:
try:
if dst_type is not None:
value = dst_type(value)
value_by_id[entity_id] = value
except Exception:
failed_entity_ids.append(entity_id)
if failed_entity_ids:
self.log.info(
"Couldn't convert some values to destination attribute"
)
return {
"success": False,
"message": (
"Couldn't convert some values to destination attribute"
)
}
# Delete destination custom attributes first
if delete_dst_values:
self.log.info("Deleting destination custom attribute values first")
self._delete_custom_attribute_values(session, dst_attr_id)
self.log.info("Applying source values on destination custom attribute")
self._apply_values(session, value_by_id, dst_attr_id)
return True
def _delete_custom_attribute_values(self, session, dst_attr_id):
dst_attr_values = session.query(
(
"select configuration_id, entity_id"
" from CustomAttributeValue"
" where configuration_id is {}"
).format(dst_attr_id)
).all()
delete_operations = []
for attr_value in dst_attr_values:
entity_id = attr_value["entity_id"]
configuration_id = attr_value["configuration_id"]
entity_key = collections.OrderedDict((
("configuration_id", configuration_id),
("entity_id", entity_id)
))
delete_operations.append(
ftrack_api.operation.DeleteEntityOperation(
"CustomAttributeValue",
entity_key
)
)
if not delete_operations:
return
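        # Commit deletes in chunks of 500 so each ftrack request stays small
        # (the chunk size is this action's heuristic, not a documented limit)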
for chunk in create_chunks(delete_operations, 500):
for operation in chunk:
session.recorded_operations.push(operation)
session.commit()
def _apply_values(self, session, value_by_id, dst_attr_id):
dst_attr_values = session.query(
(
"select configuration_id, entity_id"
" from CustomAttributeValue"
" where configuration_id is {}"
).format(dst_attr_id)
).all()
dst_entity_ids_with_value = {
item["entity_id"]
for item in dst_attr_values
}
operations = []
for entity_id, value in value_by_id.items():
entity_key = collections.OrderedDict((
("configuration_id", dst_attr_id),
("entity_id", entity_id)
))
if entity_id in dst_entity_ids_with_value:
operations.append(
ftrack_api.operation.UpdateEntityOperation(
"CustomAttributeValue",
entity_key,
"value",
ftrack_api.symbol.NOT_SET,
value
)
)
else:
operations.append(
ftrack_api.operation.CreateEntityOperation(
"CustomAttributeValue",
entity_key,
{"value": value}
)
)
if not operations:
return
for chunk in create_chunks(operations, 500):
for operation in chunk:
session.recorded_operations.push(operation)
session.commit()
def _get_attr_type(self, conf_def):
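        # Illustrative mapping (the config payloads shown are assumptions):
        #   text                     -> str
        #   number, isdecimal=true   -> float
        #   number, isdecimal=false  -> int
        #   any other type           -> None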
type_name = conf_def["type"]["name"]
if type_name == "text":
return str
if type_name == "number":
config = json.loads(conf_def["config"])
if config["isdecimal"]:
return float
return int
return None
def register(session):
    '''Register plugin. Called when used as a plugin.'''
TransferHierarchicalValues(session).register()
| 33.51585 | 79 | 0.524678 | 11,283 | 0.970163 | 0 | 0 | 0 | 0 | 0 | 0 | 3,327 | 0.286071 |
d161ec660784d01b878001017831664382622e75 | 382 | py | Python | cyan/util/_enum.py | huajitech/cyan | 6809f7b738b2b4c458d08346f533167c7e7c0a83 | [
"MIT"
] | 5 | 2022-01-23T11:57:55.000Z | 2022-01-25T07:03:09.000Z | cyan/util/_enum.py | huajitech/cyan | 6809f7b738b2b4c458d08346f533167c7e7c0a83 | [
"MIT"
] | null | null | null | cyan/util/_enum.py | huajitech/cyan | 6809f7b738b2b4c458d08346f533167c7e7c0a83 | [
"MIT"
] | 2 | 2022-01-25T03:04:43.000Z | 2022-01-25T07:03:17.000Z | from enum import EnumMeta
from typing import Any
def get_enum_key(enum: EnumMeta, value: Any, default: Any = ...) -> Any:
"""
    Get the key (member) of an `Enum` corresponding to the given value.
    Parameters:
    - enum: the Enum type
    - value: the value whose corresponding key is looked up
    - default: the default returned when no matching key exists
      (by default, the passed-in `value` itself is returned)
"""
return enum._value2member_map_.get(
value,
value if default == ... else default
)
| 20.105263 | 72 | 0.581152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 238 | 0.50211 |
d16267f20a44ebfcce9ce2338b818e8f62ab0d51 | 2,809 | py | Python | test/test_ean.py | blazaid/pycodes | e263fad64ad7d056feb7ac2056e1d27aec52a6d9 | [
"Apache-2.0"
] | null | null | null | test/test_ean.py | blazaid/pycodes | e263fad64ad7d056feb7ac2056e1d27aec52a6d9 | [
"Apache-2.0"
] | null | null | null | test/test_ean.py | blazaid/pycodes | e263fad64ad7d056feb7ac2056e1d27aec52a6d9 | [
"Apache-2.0"
] | null | null | null | from unittest import TestCase
from pycodes.ean import Ean13
from pycodes.exceptions import EmptyCode, CharacterNotAllowed, BadCodeLength, \
WrongChecksum
class Ean13TestCase(TestCase):
def setUp(self):
self.valid_codes = (
'0000000000000',
'1111111111116',
'2222222222222',
'3333333333338',
'4444444444444',
'5555555555550',
'6666666666666',
'7777777777772',
'8888888888888',
'9999999999994',
)
def test_null_or_empty_code_raises_error(self):
""" Checks if it's pickeable by writing it into a temporary file. """
for code in (None, ''):
for checksum in (True, False):
with self.assertRaises(EmptyCode):
Ean13(code, checksum)
def test_at_least_one_no_digit_raises_error(self):
no_digits = 'agz_¿?`.<´ñ*'
for item in no_digits:
for checksum in (True, False):
for code in self.valid_codes:
for i in range(1, len(code)):
                        wrong_code = code[:i] + item + code[i + 1:]
with self.assertRaises(CharacterNotAllowed):
Ean13(wrong_code, checksum)
def test_wrong_length_raises_error(self):
# Smaller
for i in range(1, 12):
for checksum in (True, False):
with self.assertRaises(BadCodeLength):
Ean13('0' * i, checksum)
# Smaller when checksum is True
with self.assertRaises(BadCodeLength):
Ean13('0' * 12, True)
# Greater when checksum is False
with self.assertRaises(BadCodeLength):
Ean13('0' * 13, False)
# Greater
for i in range(14, 100, 10):
for checksum in (True, False):
with self.assertRaises(BadCodeLength):
Ean13('0' * i, checksum)
def test_wrong_checksum_raises_error(self):
for valid_code in self.valid_codes:
code_12, checksum = valid_code[:-1], valid_code[-1:]
for i in range(10):
if str(i) != checksum:
with self.assertRaises(WrongChecksum):
Ean13(code_12 + str(i))
def test_checksums_are_computed_correctly(self):
for valid_code in self.valid_codes:
code_12, valid_checksum = valid_code[:-1], valid_code[-1:]
computed_code = Ean13(code_12, checksum=False)
self.assertEqual(str(computed_code), valid_code)
def test_valid_codes_create_a_correct_object(self):
for valid_code in self.valid_codes:
computed_code = Ean13(valid_code)
self.assertEqual(str(computed_code), valid_code)
| 36.960526 | 79 | 0.57209 | 2,650 | 0.94239 | 0 | 0 | 0 | 0 | 0 | 0 | 331 | 0.11771 |
d164f315ded6b300d4bc413ca69bcb22b80fe89a | 1,479 | py | Python | storage_cost_analysis.py | akosfenyvesi/FobSimApp | 27e48dfbc5176a8f91cf30b2a1fdf7a181b56968 | [
"CC0-1.0"
] | null | null | null | storage_cost_analysis.py | akosfenyvesi/FobSimApp | 27e48dfbc5176a8f91cf30b2a1fdf7a181b56968 | [
"CC0-1.0"
] | null | null | null | storage_cost_analysis.py | akosfenyvesi/FobSimApp | 27e48dfbc5176a8f91cf30b2a1fdf7a181b56968 | [
"CC0-1.0"
] | null | null | null | import os
from pandas import DataFrame
import time
times = [0]
sizes = [0]
run_time_seconds = 200
def run_storage_analysis():
path = 'temporary'
    # initialize the size
total_size = 0
# use the walk() method to navigate through directory tree
for dirpath, dirnames, filenames in os.walk(path):
for name in filenames:
while True:
try:
# use join to concatenate all the components of path
f = os.path.join(dirpath, name)
# use getsize to generate size in bytes and add it to the total size
total_size += os.path.getsize(f)
break
except Exception as e:
time.sleep(0.01)
return total_size
def upload_analysis():
df = DataFrame({'Time': times, 'Size (bytes)': sizes})
df.to_excel('Storage_analysis.xlsx', sheet_name='sheet1', index=False)
past_run_file_size = run_storage_analysis()
sizes[-1] = past_run_file_size
print("Storage analysis started.")
while True:
current_file_size = run_storage_analysis()
if current_file_size == sizes[-1]:
time.sleep(1)
else:
for i in range(run_time_seconds):
times.append(times[-1] + 1)
sizes.append(current_file_size)
time.sleep(1)
current_file_size = run_storage_analysis()
upload_analysis()
break
| 30.183673 | 89 | 0.586207 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 293 | 0.198107 |
d1658eb7471fa12b2945fd99872da777877398b1 | 10,163 | py | Python | conda_forge_tick/mamba_solver.py | Prodyte/cf-scripts | 1800432178f158d824b17faef9ed70d557a06c8d | [
"MIT"
] | null | null | null | conda_forge_tick/mamba_solver.py | Prodyte/cf-scripts | 1800432178f158d824b17faef9ed70d557a06c8d | [
"MIT"
] | null | null | null | conda_forge_tick/mamba_solver.py | Prodyte/cf-scripts | 1800432178f158d824b17faef9ed70d557a06c8d | [
"MIT"
] | null | null | null | """This module has code to use mamba to test if a given package can be solved.
The basic workflow, for each yaml file in .ci_support, is:
1. run the conda_build api to render the recipe
2. pull out the host/build and run requirements, possibly for more than one output.
3. send them to mamba to check if they can be solved.
Most of the code here is due to @wolfv in this gist,
https://gist.github.com/wolfv/cd12bd4a448c77ff02368e97ffdf495a.
"""
import os
import logging
import glob
import functools
import pprint
from ruamel.yaml import YAML
from conda.models.match_spec import MatchSpec
from conda.models.channel import Channel
from conda.core.index import calculate_channel_urls, check_whitelist
from conda.core.subdir_data import cache_fn_url, create_cache_dir
import conda_build.api
from mamba import mamba_api as api
logger = logging.getLogger("conda_forge_tick.mamba_solver")
# specs that start with one of these operators (or that already contain a
# star) are left as-is; bare versions like 1.1 get munged to 1.1.*
REQ_START = ["!=", "==", ">", "<", ">=", "<="]
def _munge_req_star(req):
reqs = []
# now we split on ',' and '|'
# once we have all of the parts, we then munge the star
csplit = req.split(",")
ncs = len(csplit)
for ic, p in enumerate(csplit):
psplit = p.split("|")
nps = len(psplit)
for ip, pp in enumerate(psplit):
# clear white space
pp = pp.strip()
# finally add the star if we need it
if any(pp.startswith(__v) for __v in REQ_START) or "*" in pp:
reqs.append(pp)
else:
if pp.startswith("="):
pp = pp[1:]
reqs.append(pp + ".*")
# add | back on the way out
if ip != nps - 1:
reqs.append("|")
# add , back on the way out
if ic != ncs - 1:
reqs.append(",")
# put it all together
return "".join(reqs)
def _norm_spec(myspec):
m = MatchSpec(myspec)
# this code looks like MatchSpec.conda_build_form() but munges stars in the
# middle
parts = [m.get_exact_value("name")]
version = m.get_raw_value("version")
build = m.get_raw_value("build")
if build and not version:
raise RuntimeError("spec '%s' has build but not version!" % myspec)
if version:
parts.append(_munge_req_star(m.version.spec_str))
if build:
parts.append(build)
return " ".join(parts)
def get_index(
channel_urls=(),
prepend=True,
platform=None,
use_local=False,
use_cache=False,
unknown=None,
prefix=None,
repodata_fn="repodata.json",
):
"""Get an index?
Function from @wolfv here:
https://gist.github.com/wolfv/cd12bd4a448c77ff02368e97ffdf495a.
"""
real_urls = calculate_channel_urls(channel_urls, prepend, platform, use_local)
check_whitelist(real_urls)
dlist = api.DownloadTargetList()
index = []
for idx, url in enumerate(real_urls):
channel = Channel(url)
full_url = channel.url(with_credentials=True) + "/" + repodata_fn
full_path_cache = os.path.join(
create_cache_dir(), cache_fn_url(full_url, repodata_fn),
)
sd = api.SubdirData(
channel.name + "/" + channel.subdir, full_url, full_path_cache,
)
sd.load()
index.append((sd, channel))
dlist.add(sd)
is_downloaded = dlist.download(True)
if not is_downloaded:
raise RuntimeError("Error downloading repodata.")
return index
class MambaSolver:
"""Run the mamba solver.
Parameters
----------
channels : list of str
A list of the channels (e.g., `[conda-forge/linux-64]`, etc.)
Example
-------
>>> solver = MambaSolver(['conda-forge/linux-64', 'conda-forge/noarch'])
>>> solver.solve(["xtensor 0.18"])
"""
def __init__(self, channels, platform):
self.channels = channels
self.platform = platform
index = get_index(channels, platform=platform)
self.pool = api.Pool()
self.repos = []
priority = 0
subpriority = 0 # wrong! :)
for subdir, channel in index:
repo = api.Repo(
self.pool,
str(channel),
subdir.cache_path(),
channel.url(with_credentials=True),
)
repo.set_priority(priority, subpriority)
self.repos.append(repo)
def solve(self, specs):
"""Solve given a set of specs.
Parameters
----------
specs : list of str
A list of package specs. You can use `conda.models.match_spec.MatchSpec`
to get them to the right form by calling
`MatchSpec(mypec).conda_build_form()`
Returns
-------
solvable : bool
True if the set of specs has a solution, False otherwise.
"""
solver_options = [(api.SOLVER_FLAG_ALLOW_DOWNGRADE, 1)]
solver = api.Solver(self.pool, solver_options)
_specs = [_norm_spec(s) for s in specs]
solver.add_jobs(_specs, api.SOLVER_INSTALL)
success = solver.solve()
if not success:
logger.warning(
"MAMBA failed to solve specs \n\n%s\n\nfor channels "
"\n\n%s\n\nThe reported errors are:\n\n%s",
pprint.pformat(_specs),
pprint.pformat(self.channels),
solver.problems_to_str(),
)
return success
@functools.lru_cache(maxsize=32)
def _mamba_factory(channels, platform):
return MambaSolver(list(channels), platform)
def is_recipe_solvable(feedstock_dir):
"""Compute if a recipe is solvable.
We look through each of the conda build configs in the feedstock
.ci_support dir and test each ones host and run requirements.
The final result is a logical AND of all of the results for each CI
support config.
Parameters
----------
feedstock_dir : str
The directory of the feedstock.
Returns
-------
solvable : bool
The logical AND of the solvability of the recipe on all platforms
in the CI scripts.
"""
cbcs = sorted(glob.glob(os.path.join(feedstock_dir, ".ci_support", "*.yaml")))
if len(cbcs) == 0:
logger.warning(
"No `.ci_support/*.yaml` files found! This can happen when a rerender "
"results in no builds for a recipe (e.g., a recipe is python 2.7 only). "
"This attempted migration is being reported as not solvable.",
)
return False
if not os.path.exists(os.path.join(feedstock_dir, "recipe", "meta.yaml")):
logger.warning(
"No `recipe/meta.yaml` file found! This issue is quite weird and "
"someone should investigate!",
)
return False
solvable = True
for cbc_fname in cbcs:
        # we need to extract the platform (e.g., osx, linux) and arch (e.g., 64, aarch64)
# conda smithy forms a string that is
#
# {{ platform }} if arch == 64
# {{ platform }}_{{ arch }} if arch != 64
#
# Thus we undo that munging here.
_parts = os.path.basename(cbc_fname).split("_")
platform = _parts[0]
arch = _parts[1]
if arch not in ["32", "aarch64", "ppc64le", "armv7l"]:
arch = "64"
solvable &= _is_recipe_solvable_on_platform(
os.path.join(feedstock_dir, "recipe"), cbc_fname, platform, arch,
)
return solvable
def _clean_reqs(reqs, names):
return [r for r in reqs if not any(r.split(" ")[0] == nm for nm in names)]
def _is_recipe_solvable_on_platform(recipe_dir, cbc_path, platform, arch):
# parse the channel sources from the CBC
parser = YAML(typ="jinja2")
parser.indent(mapping=2, sequence=4, offset=2)
parser.width = 320
with open(cbc_path, "r") as fp:
cbc_cfg = parser.load(fp.read())
if "channel_sources" in cbc_cfg:
channel_sources = cbc_cfg["channel_sources"][0].split(",")
else:
channel_sources = ["conda-forge", "defaults", "msys2"]
if "msys2" not in channel_sources:
channel_sources.append("msys2")
logger.debug(
"MAMBA: using channels %s on platform-arch %s-%s",
channel_sources,
platform,
arch,
)
# here we extract the conda build config in roughly the same way that
# it would be used in a real build
config = conda_build.config.get_or_merge_config(
None, platform=platform, arch=arch, variant_config_files=[cbc_path],
)
cbc, _ = conda_build.variants.get_package_combined_spec(recipe_dir, config=config)
# now we render the meta.yaml into an actual recipe
metas = conda_build.api.render(
recipe_dir,
platform=platform,
arch=arch,
ignore_system_variants=True,
variants=cbc,
permit_undefined_jinja=True,
finalize=False,
bypass_env_check=True,
channel_urls=channel_sources,
)
# now we loop through each one and check if we can solve it
# we check run and host and ignore the rest
mamba_solver = _mamba_factory(tuple(channel_sources), f"{platform}-{arch}")
solvable = True
outnames = [m.name() for m, _, _ in metas]
for m, _, _ in metas:
build_req = m.get_value("requirements/build", [])
if build_req:
build_req = _clean_reqs(build_req, outnames)
solvable &= mamba_solver.solve(build_req)
host_req = m.get_value("requirements/host", [])
if host_req:
host_req = _clean_reqs(host_req, outnames)
solvable &= mamba_solver.solve(host_req)
run_req = m.get_value("requirements/run", [])
run_req = _clean_reqs(run_req, outnames)
solvable &= mamba_solver.solve(run_req)
tst_req = (
m.get_value("test/requires", [])
+ m.get_value("test/requirements", [])
+ run_req
)
tst_req = _clean_reqs(tst_req, outnames)
solvable &= mamba_solver.solve(tst_req)
return solvable
| 29.372832 | 88 | 0.60858 | 1,987 | 0.195513 | 0 | 0 | 121 | 0.011906 | 0 | 0 | 3,644 | 0.358556 |
d16648848c84b4c377e73350d46f1aaa0e9b2444 | 649 | py | Python | student_core/urls.py | michaelchen-lab/LMS_Backend | f8727398c66b94926e625ebd194e8330481727eb | [
"MIT"
] | null | null | null | student_core/urls.py | michaelchen-lab/LMS_Backend | f8727398c66b94926e625ebd194e8330481727eb | [
"MIT"
] | null | null | null | student_core/urls.py | michaelchen-lab/LMS_Backend | f8727398c66b94926e625ebd194e8330481727eb | [
"MIT"
] | null | null | null | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from student_core.views import *
# Create a router and register our viewsets with it.
router = DefaultRouter()
router.register(r'initial', StudentInitialViewSet, basename="student_initial")
router.register(r'submission', StudentSubmissionViewSet, basename="student_submission")
router.register(r'submission_status', StudentSubmissionStatusViewSet, basename="student_submission_status")
# The API URLs are now determined automatically by the router.
urlpatterns = [
path('leaderboard', Leaderboard, name="leaderboard"),
path('', include(router.urls))
]
| 40.5625 | 107 | 0.802773 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 249 | 0.383667 |
d1680b983b55af635dd1b1c4efc3a00f490e8be1 | 10,276 | py | Python | core/database/generator.py | xorond/l0l | bb0c2bb23fc49997b695cf27d2b2b25169395521 | [
"WTFPL"
] | 6 | 2018-10-29T19:46:49.000Z | 2022-03-10T15:39:47.000Z | core/database/generator.py | xorond/l0l | bb0c2bb23fc49997b695cf27d2b2b25169395521 | [
"WTFPL"
] | null | null | null | core/database/generator.py | xorond/l0l | bb0c2bb23fc49997b695cf27d2b2b25169395521 | [
"WTFPL"
] | 4 | 2018-10-16T13:28:27.000Z | 2022-02-05T18:43:57.000Z | #------------------Bombermans Team---------------------------------#
#Author : B3mB4m
#Contact : b3mb4m@protonmail.com
#Project : https://github.com/b3mb4m/Shellsploit
#LICENSE : https://github.com/b3mb4m/Shellsploit/blob/master/LICENSE
#------------------------------------------------------------------#
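# Usage sketch (illustrative values): generator("linux_x86", "reverse_tcp",
# "192.168.1.10", "4444") returns the shellcode as an escaped-byte string.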
def generator( choose, shellcode, argv="None", argv2="None"):
if choose == "linux_x86":
if shellcode == "bin_sh":
from Linux86.bin_shx86 import bin_shx86
return bin_shx86()
elif shellcode == "exec":
from Linux86.execc import execc
return execc( argv)
elif shellcode == "read":
from Linux86.readfilex86 import readx86
from stackconvert import stackconvertSTR
return readx86( stackconvertSTR(argv))
elif shellcode == "download&exec":
from Linux86.download import downloadANDexecute
from stackconvert import stackconvertSTR
filename = argv.split("/")[-1]
return downloadANDexecute( stackconvertSTR(argv), stackconvertSTR(filename))
elif shellcode == "chmod":
from Linux86.chmod import ch
from stackconvert import stackconvertSTR
return ch( stackconvertSTR(argv))
elif shellcode == "tcp_bind":
from Linux86.tcp_bindx86 import tcp_bindx86
from stackconvert import PORT
return tcp_bindx86( PORT(argv))
elif shellcode == "reverse_tcp":
from Linux86.reverse_tcpx86 import reverse_tcpx86
from stackconvert import IP
from stackconvert import PORT
return reverse_tcpx86( IP(argv), PORT(argv2))
elif shellcode == "cd_eject":
from Linux86.cd_eject import cd_eject
return cd_eject()
elif choose == "linux_x64":
if shellcode == "bin_sh":
from Linux64.bin_shx64 import bin_shx64
return bin_shx64()
elif shellcode == "tcp_bind":
from Linux64.tcp_bindx64 import tcp_bindx64
from stackconvert import PORT
return tcp_bindx64( PORT(argv))
elif shellcode == "reverse_tcp":
from Linux64.reverse_tcpx64 import reverse_tcpx64
from stackconvert import IP
from stackconvert import PORT
return reverse_tcpx64( IP(argv), PORT(argv2))
elif shellcode == "read":
from Linux64.readfilex64 import readx64
from stackconvert import plaintext
return readx64( plaintext(argv))
elif choose == "linux":
from Linux.magic import merlin
if shellcode == "bin_sh":
from Linux86.bin_shx86 import bin_shx86
from Linux64.bin_shx64 import bin_shx64
value = hex(len(bin_shx86().split("\\x"))-1)[2:]
value = "\\x{0}".format(value)
return merlin( value)+bin_shx86()+bin_shx64()
elif shellcode == "read":
from Linux86.readfilex86 import readx86
from Linux64.readfilex64 import readx64
from stackconvert import stackconvertSTR
from stackconvert import plaintext
value = hex(len(readx86( stackconvertSTR(argv)).split("\\x"))-1)[2:]
value = "\\x{0}".format(value)
return merlin( value)+readx86( stackconvertSTR(argv))+readx64( plaintext(argv))
elif shellcode == "reverse_tcp":
from Linux64.reverse_tcpx64 import reverse_tcpx64
from Linux86.reverse_tcpx86 import reverse_tcpx86
from stackconvert import IP
from stackconvert import PORT
value = hex(len(reverse_tcpx86( IP(argv), PORT(argv2)).split("\\x"))-1)[2:]
value = "\\x{0}".format(value)
return merlin( value)+reverse_tcpx86( IP(argv), PORT(argv2))+reverse_tcpx64( IP(argv), PORT(argv2))
elif shellcode == "tcp_bind":
from Linux64.tcp_bindx64 import tcp_bindx64
from Linux86.tcp_bindx86 import tcp_bindx86
from stackconvert import PORT
value = hex(len(tcp_bindx86( PORT(argv)).split("\\x"))-1)[2:]
value = "\\x{0}".format(value)
return merlin( value)+tcp_bindx86( PORT(argv))+tcp_bindx64( PORT(argv))
elif choose == "osx86":
if shellcode == "tcp_bind":
from OSX86.tcp_bind import tcp_bind
from stackconvert import PORT
return tcp_bind( PORT(argv))
elif shellcode == "bin_sh":
from OSX86.bin_sh import bin_sh
return bin_sh()
elif shellcode == "reverse_tcp":
from OSX86.reverse_tcp import reverse_tcp
from stackconvert import IP
from stackconvert import PORT
return reverse_tcp( IP(argv), PORT(argv2))
elif choose == "osx64":
if shellcode == "bin_sh":
from OSX64.bin_sh import bin_sh
return bin_sh()
elif shellcode == "reverse_tcp":
from OSX64.reverse_tcp import reverse_tcp
from stackconvert import IP
from stackconvert import PORT
return reverse_tcp( IP(argv), PORT(argv2))
elif shellcode == "tcp_bind":
from OSX64.tcp_bind import tcp_bind
from stackconvert import PORT
return tcp_bind( PORT(argv))
elif choose == "freebsd_x86":
if shellcode == "bin_sh":
from FreeBSDx86.bin_sh import bin_sh
return bin_sh()
elif shellcode == "read":
from FreeBSDx86.read import read
from stackconvert import plaintext
return read(plaintext(argv))
elif shellcode == "reverse_tcp":
from FreeBSDx86.reverse_tcp import reverse_tcp
from stackconvert import IP
from stackconvert import PORT
return reverse_tcp( IP(argv2), PORT(argv))
elif shellcode == "reverse_tcp2":
from FreeBSDx86.reverse_tcp2 import reverse_tcp2
from stackconvert import IP
from stackconvert import PORT
return reverse_tcp2( IP(argv2), PORT(argv))
elif shellcode == "tcp_bind":
from FreeBSDx86.tcp_bind import tcp_bind
if len(str(argv)) == 5:
PORT = "\\x{0}\\x{1}".format(*(hex(int(argv))[2:][0:2],hex(int(argv))[2:][2:]))
else:
PORT = "\\x{0}\\x{1}".format(*("0"+hex(int(argv))[2:][0],hex(int(argv))[2:][1:]))
return tcp_bind( PORT)
elif shellcode == "exec":
from FreeBSDx86.execc import execc
from stackconvert import plaintext
command = '/bin/sh -c {0}'.format(argv)
			return execc(plaintext(command))
elif choose == "freebsd_x64":
if shellcode == "bin_sh":
from FreeBSDx64.bin_sh import bin_sh
return bin_sh()
elif shellcode == "exec":
from FreeBSDx64.execc import execc
from stackconvert import plaintext
command = '/bin/sh -c {0}'.format(argv)
			return execc(plaintext(command))
elif shellcode == "tcp_bind":
from stackconvert import plaintext
from stackconvert import PORT
from FreeBSDx64.tcp_bind import tcp_bind
return tcp_bind( PORT(argv), plaintext(argv2))
elif shellcode == "reverse_tcp":
from FreeBSDx64.reverse_tcp import reverse_tcp
from stackconvert import IP
from stackconvert import PORT
return reverse_tcp( IP(argv), PORT(argv2))
elif choose == "linux_arm":
if shellcode == "chmod":
from LinuxARM.chmod import chmod
from stackconvert import plaintext
if argv == "None":
return "FILE PATH must be declared."
else:
return chmod( plaintext(argv))
elif shellcode == "bin_sh":
from LinuxARM.bin_sh import bin_sh
return bin_sh()
elif shellcode == "exec":
from LinuxARM.execc import execc
return execc( argv)
elif shellcode == "reverse_tcp":
from LinuxARM.reverse_tcp import reverse_tcp
from stackconvert import IP
from stackconvert import PORT
return reverse_tcp( IP(argv2), PORT(argv))
elif choose == "linux_mips":
if shellcode == "reverse_tcp":
from LinuxMIPS.reverse_tcp import reverse_tcp
from stackconvert import IP
from stackconvert import PORT
return reverse_tcp( IP(argv), PORT(argv2))
elif shellcode == "bin_sh":
from LinuxMIPS.bin_sh import bin_sh
return bin_sh()
elif shellcode == "chmod":
from LinuxMIPS.chmod import chmod
from stackconvert import plaintext
return chmod( plaintext(argv))
elif shellcode == "tcp_bind":
from LinuxMIPS.tcp_bind import tcp_bind
from stackconvert import PORT
return tcp_bind( PORT(argv))
elif choose == "windows":
if shellcode == "messagebox":
from Windows import messagebox
from stackconvert import stackconvertSTR
if argv == "None":
return messagebox.messagebox( False)
else:
return messagebox.messagebox( stackconvertSTR(argv, True))
elif shellcode == "downloadandexecute":
from Windows.downloadandexecute import downANDexecute
from stackconvert import rawSTR
from stackconvert import stackconvertSTR
if argv2 == "None":
argv2 = argv.split("/")[-1]
powershell = '''powershell -command "& { (New-Object Net.WebClient).DownloadFile('%s', '%s') ;(New-Object -com Shell.Application).ShellExecute('%s');}"''' % (argv, argv2, argv2)
return downANDexecute(payload=stackconvertSTR(powershell))
elif shellcode == "exec":
from Windows.execc import WinExec
return WinExec(argv)
elif shellcode == "tcp_bind":
from Windows.bind_tcp import PayloadModule
return PayloadModule( argv).gen_shellcode()
elif shellcode == "reverse_tcp":
from Windows.rev_tcp import PayloadModule
return PayloadModule( argv, argv2).gen_shellcode()
elif choose == "solarisx86":
if shellcode == "read":
from Solarisx86.read import read
from stackconvert import plaintext
return read( plaintext(argv))
elif shellcode == "reverse_tcp":
from Solarisx86.reverse_tcp import reverse_tcp
from stackconvert import IP
from stackconvert import PORT
			host = IP(argv)
			port = PORT(argv2)
			return reverse_tcp(host=host, port=port)
elif shellcode == "bin_sh":
from Solarisx86.bin_sh import bin_sh
return bin_sh()
elif shellcode == "tcp_bind":
from Solarisx86.tcp_bind import tcp_bind
from stackconvert import PORT
return tcp_bind( PORT(argv))
| 34.139535 | 201 | 0.647042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,262 | 0.12281 |
d168739d8cc490f771d23c7a1b691bf5116d7173 | 430 | py | Python | bugtests/test286c.py | doom38/jython_v2.2.1 | 0803a0c953c294e6d14f9fc7d08edf6a3e630a15 | [
"CNRI-Jython"
] | null | null | null | bugtests/test286c.py | doom38/jython_v2.2.1 | 0803a0c953c294e6d14f9fc7d08edf6a3e630a15 | [
"CNRI-Jython"
] | null | null | null | bugtests/test286c.py | doom38/jython_v2.2.1 | 0803a0c953c294e6d14f9fc7d08edf6a3e630a15 | [
"CNRI-Jython"
] | null | null | null | """
Test multilevel overriding of java methods in jythonc.
"""
from java.util import Date
class SubDate(Date):
def toString(self):
s = Date.toString(self)
return 'SubDate -> Date'
class SubSubDate(SubDate):
def toString(self):
return 'SubSubDate -> ' + SubDate.toString(self)
assert SubDate().toString() == 'SubDate -> Date'
assert SubSubDate().toString() == 'SubSubDate -> SubDate -> Date'
| 23.888889 | 65 | 0.655814 | 217 | 0.504651 | 0 | 0 | 0 | 0 | 0 | 0 | 143 | 0.332558 |
d168c674a46ffa65b77a20654f78a65b574815fc | 2,195 | py | Python | segmentation/utils/converter.py | enjoy-the-science/brain-texts | 2f90cff6b7efd610791b278579c62ba802eb0f02 | [
"MIT"
] | null | null | null | segmentation/utils/converter.py | enjoy-the-science/brain-texts | 2f90cff6b7efd610791b278579c62ba802eb0f02 | [
"MIT"
] | null | null | null | segmentation/utils/converter.py | enjoy-the-science/brain-texts | 2f90cff6b7efd610791b278579c62ba802eb0f02 | [
"MIT"
] | null | null | null | import warnings
import sys
if not sys.warnoptions:
warnings.simplefilter("ignore")
from segmentation.utils.DataReader import DataReader
import argparse
import glob
import os
def arguments():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", type=str,
help="Path to folder with patients")
parser.add_argument("-o", "--output", type=str,
help="Path to output folder")
parser.add_argument("-a", "--aug", type=int, default=3,
help="Count augmented images per slice. Default: 2")
parser.add_argument("-he", "--height", type=int, default=224,
help="Height of output slices. Default: 224")
parser.add_argument("-wi", "--width", type=int, default=224,
help="Width of output slices. Default: 224")
return parser.parse_args()
def get_orig_mask_filenames_from_patient_directory(patient_path):
mask_filename = ""
orig_filename = ""
for i in glob.glob1(patient_path, "*.mhd"):
lower_i = i.lower()
if "label" in lower_i:
mask_filename = os.path.join(patient_path, i)
if "flair" in lower_i:
orig_filename = os.path.join(patient_path, i)
return orig_filename, mask_filename
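# e.g. a patient folder holding "AR-1_Flair.mhd" and "AR-1_Label.mhd"
# (hypothetical names) yields those two paths as (orig, mask)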
if __name__ == '__main__':
opt = arguments()
datapath = r"/data/brain/rs-mhd-dataset" # opt.input
path_save = r"/data/brain/rs-mhd-dataset-augmented" # opt.output
patients = glob.glob1(datapath, "**")
template_orig = 'sub-%s_ses-NFB3_T1w.nii.gz'
template_mask = 'sub-%s_ses-NFB3_T1w_brainmask.nii.gz'
height = 224 #opt.height
width = 224 #opt.width
aug_size = 3 # opt.aug
reader = DataReader((height, width), False)
for patient in patients:
if patient == "AR-5":
continue
patient_path = os.path.join(datapath, patient)
if not os.path.isdir(patient_path):
continue
print("Patient: ", patient)
orig_filename, mask_filename = get_orig_mask_filenames_from_patient_directory(patient_path)
reader.save_to_npy(path_save, patient, orig_filename, mask_filename, aug_size)
| 30.486111 | 99 | 0.638269 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 494 | 0.225057 |
d16a18b5e5a64b815eb735cb177e88912486769f | 1,020 | py | Python | bomber/views.py | acdh-oeaw/DAAC-DB | e1332db708bb6f5bfe5f202e6ae7e04bf4b593b3 | [
"MIT"
] | null | null | null | bomber/views.py | acdh-oeaw/DAAC-DB | e1332db708bb6f5bfe5f202e6ae7e04bf4b593b3 | [
"MIT"
] | null | null | null | bomber/views.py | acdh-oeaw/DAAC-DB | e1332db708bb6f5bfe5f202e6ae7e04bf4b593b3 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django_tables2 import RequestConfig
from django.views.generic.detail import DetailView
from django.db.models import Count
from crew.models import Person
from .models import Bomber
from .tables import BomberTable
def bomber(request):
table = BomberTable(Bomber.objects.all())
RequestConfig(request).configure(table)
object_list = Bomber.objects.all()
return render(request, 'bomber/list_bomber.html', {'table': table, 'object_list': object_list})
class BomberDetailView(DetailView):
model = Bomber
def get_context_data(self, **kwargs):
context = super(BomberDetailView, self).get_context_data(**kwargs)
current_object = self.object
context['destiny'] = Person.objects.filter(bomber=current_object.id).values('destiny_checked').annotate(total=Count('destiny_checked')).order_by('destiny_checked')
context['crew_list'] = Person.objects.filter(bomber=current_object.id).order_by('destiny_checked')
return context
| 37.777778 | 171 | 0.755882 | 511 | 0.50098 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.130392 |
d16ceb896d9da20d75ec5f24a103e4ea1f377294 | 187 | py | Python | BOJ/14000~14999/14471.py | shinkeonkim/today-ps | f3e5e38c5215f19579bb0422f303a9c18c626afa | [
"Apache-2.0"
] | 2 | 2020-01-29T06:54:41.000Z | 2021-11-07T13:23:27.000Z | BOJ/14000~14999/14471.py | shinkeonkim/Today_PS | bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44 | [
"Apache-2.0"
] | null | null | null | BOJ/14000~14999/14471.py | shinkeonkim/Today_PS | bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44 | [
"Apache-2.0"
] | null | null | null | n,m=map(int,input().split())
L = [list(map(int,input().split())) for i in range(m)]
ans = 0
L.sort(key = lambda t:t[0],reverse = True)
for i in L[:-1]:
ans += max(0,n-i[0])
print(ans) | 26.714286 | 54 | 0.582888 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d16d1cb99b4e9f346c6042010a46e200ea4ee6ee | 343 | py | Python | libra/discovery_set.py | Xing-Huang/libra-client | bf74bc66b98a279476d751b637b1f84da84a51fe | [
"MIT"
] | null | null | null | libra/discovery_set.py | Xing-Huang/libra-client | bf74bc66b98a279476d751b637b1f84da84a51fe | [
"MIT"
] | null | null | null | libra/discovery_set.py | Xing-Huang/libra-client | bf74bc66b98a279476d751b637b1f84da84a51fe | [
"MIT"
] | null | null | null | from canoser import Struct
from libra.account_config import AccountConfig
from libra.event import EventKey
class DiscoverySet(Struct):
_fields = []
DISCOVERY_SET_STRUCT_NAME = "DiscoverySet"
@classmethod
def change_event_key(cls):
return EventKey.new_from_address(AccountConfig.discovery_set_address(), 2) | 28.583333 | 82 | 0.752187 | 231 | 0.673469 | 0 | 0 | 128 | 0.373178 | 0 | 0 | 14 | 0.040816 |
d16e384cf387a664a33b991f18c0766cbc5a4c0d | 4,294 | py | Python | dev_tools/scan_inclusions.py | frannuca/quantlib | 63e66f5f767397e5b7c79fa78eaed4e3e0a6b7c6 | [
"BSD-3-Clause"
] | null | null | null | dev_tools/scan_inclusions.py | frannuca/quantlib | 63e66f5f767397e5b7c79fa78eaed4e3e0a6b7c6 | [
"BSD-3-Clause"
] | null | null | null | dev_tools/scan_inclusions.py | frannuca/quantlib | 63e66f5f767397e5b7c79fa78eaed4e3e0a6b7c6 | [
"BSD-3-Clause"
] | 1 | 2022-02-24T04:54:18.000Z | 2022-02-24T04:54:18.000Z | import os, sys, re, string
import xml.dom.minidom
import xml.dom.ext
QL_ROOT = "C:/Projects/QuantLibSVN/trunk/"
VC8 = "C:/Program Files/Microsoft Visual Studio 8/"
BOOST = "C:/Boost/boost_1_33_1/"
QL = QL_ROOT +"QuantLib/"
QL_ADDIN = QL_ROOT + "QuantLibAddin/"
OBJECT_HANDLER = QL_ROOT + "ObjectHandler/"
QL_XL = QL_ROOT + "QuantLibXL/"
STD = VC8 + "VC/include/"
SDK = VC8 + "VC/PlatformSDK/Include"
INCLUDE_PATH = [QL, QL_ADDIN, OBJECT_HANDLER, QL_XL, BOOST, STD, SDK]
PREFIX_PATH = ["ql", "qlo", "oh", "boost", "qlxl", "ohxl", "xlsdk"]
class MyError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def searchAndParseHeaderFile(fileName):
for includePath in INCLUDE_PATH:
filePath = includePath + fileName[0].lower() + fileName[1:]
if os.path.isfile(filePath):
return parseHeaderFile(filePath)
filePath = includePath + fileName[0].upper() + fileName[1:]
if os.path.isfile(filePath):
return parseHeaderFile(filePath)
raise MyError("searchAndParseHeaderFile: " + fileName + " not found")
def getFilePrefix(include):
for prefix in PREFIX_PATH:
if re.match(prefix + '/.*',include):
return prefix
return "std"
def parseHeaderFile(filePath):
includes = []
nbLines = 0
f=open(filePath)
for line in f:
nbLines +=1
if not re.match("//", line):
includesLines = re.findall('^#include.*<.*>', line)
if includesLines:
includeName = re.findall('<.*>', includesLines[0])[0][1:-1]
includes.append(includeName)
f.close()
return includes, nbLines
def walkThroughIncludesFiles(fileName, files, filesCounters, node, document):
new = document.createElement('header')
node.appendChild(new)
parsingResults = searchAndParseHeaderFile(fileName)
includes = parsingResults[0]
attribute = "%i" % parsingResults[1]
new.setAttribute('nbLines', attribute)
nbLines = parsingResults[1]
for include in includes:
#if the son is not recorded yet we explore it
include = "%s" % include
if not files.count(include) > 0:
files.append(include)
try:
prefix = getFilePrefix(include)
filesCounters[prefix][0] +=1
result = walkThroughIncludesFiles(include, files, filesCounters, new, document)
nbLines += result[0]
filesCounters[prefix][1] += result[1]
except MyError, e:
print e.value, " in : " + fileName
attribute = "%i" % nbLines
new.setAttribute('total', attribute)
new.setAttribute('name', fileName)
return int(nbLines), parsingResults[1]
def trackDependencies(fileName):
document = xml.dom.minidom.Document()
filesCounters = {}
filesCounters["boost"] = [0,0]
filesCounters["ql"] = [0,0]
filesCounters["qlo"] = [0,0]
filesCounters["qlxl"] = [0,0]
filesCounters["oh"] = [0,0]
filesCounters["ohxl"] = [0,0]
filesCounters["xlsdk"] = [0,0]
filesCounters["std"] = [0,0]
files = []
files.append(fileName)
nbLines = walkThroughIncludesFiles(fileName, files, filesCounters, document, document)
return filesCounters, document, nbLines, files
if __name__ == '__main__':
if len(sys.argv) != 2:
        print 'Give the path of the file you want to scan, relative to one of the include folders'
sys.exit()
args = sys.argv[1:]
fileName = args[0]
result = trackDependencies(fileName)
nbLinesParsed = result[2][0]
print "number of files parsed ", len(result[3])
print "number of lines parsed ", nbLinesParsed
namespaces = result[0]
for namespace in namespaces:
print namespace, ":\tnb Files ", namespaces[namespace][0]
print "\tnb lines ", namespaces[namespace][1]
print "\t%(nbLines)02d" % {'nbLines': float(namespaces[namespace][1])/nbLinesParsed * 100}, "%"
outputName = fileName.replace("/", "-") + ".xml"
output = "./" + outputName
f=open(output, 'w')
xml.dom.ext.PrettyPrint(result[1], f)
f.close()
print "result saved in ", outputName
| 33.811024 | 98 | 0.617839 | 144 | 0.033535 | 0 | 0 | 0 | 0 | 0 | 0 | 686 | 0.159758 |
d16e58dab4d8d43ee2c7010a1953ed764c83accd | 531 | py | Python | python/15_bsearch/bsearch_recursion.py | shipan3452/algo | 0494cc0d8f5daf108daf4358c4531a29279dd380 | [
"Apache-2.0"
] | 22,028 | 2018-09-27T05:55:19.000Z | 2022-03-30T10:44:46.000Z | python/15_bsearch/bsearch_recursion.py | wangjing013/algo | b2c1228ff915287ad7ebeae4355fa26854ea1557 | [
"Apache-2.0"
] | 164 | 2018-10-06T15:11:08.000Z | 2022-03-28T10:04:34.000Z | python/15_bsearch/bsearch_recursion.py | wangjing013/algo | b2c1228ff915287ad7ebeae4355fa26854ea1557 | [
"Apache-2.0"
] | 7,250 | 2018-09-30T00:45:25.000Z | 2022-03-31T20:15:33.000Z | """
Author: dreamkong
"""
from typing import List
def bsearch(nums: List[int], target: int) -> int:
return bsearch_internally(nums, 0, len(nums)-1, target)
def bsearch_internally(nums: List[int], low: int, high: int, target: int) -> int:
if low > high:
return -1
    mid = low + ((high - low) >> 1)
if nums[mid] == target:
return mid
elif nums[mid] < target:
return bsearch_internally(nums, mid+1, high, target)
else:
return bsearch_internally(nums, low, mid-1, target)
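# Sanity check (illustrative):
#   bsearch([1, 3, 5, 7], 5) -> 2
#   bsearch([1, 3, 5, 7], 4) -> -1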
| 23.086957 | 81 | 0.619586 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.054614 |
d16e9d0e26c5e30db9fe137457ef9304c8e4a910 | 5,779 | py | Python | ports/esp32/boards/METERBOARD32/modules/mbus/device.py | henriknelson/micropython | eb6c2bd0f4ac133bcb8edb81fb29aa21ade5211b | [
"MIT"
] | 1 | 2020-01-21T01:49:20.000Z | 2020-01-21T01:49:20.000Z | ports/esp32/boards/METERBOARD32/modules/mbus/device.py | henriknelson/micropython | eb6c2bd0f4ac133bcb8edb81fb29aa21ade5211b | [
"MIT"
] | null | null | null | ports/esp32/boards/METERBOARD32/modules/mbus/device.py | henriknelson/micropython | eb6c2bd0f4ac133bcb8edb81fb29aa21ade5211b | [
"MIT"
] | null | null | null | from mbus.record import ValueRecord
from machine import RTC
import ubinascii
import random
import time
import re
class MBusDevice:
"""Class that encapulates/emulates a single MBus device"""
def __init__(self, primary_address, secondary_address, manufacturer, meter_type):
self._primary_address = primary_address
self._secondary_address = secondary_address
self._manufacturer = manufacturer
self._type = meter_type
self._access_number = random.randint(0,255)
self._records = []
self._rsp_ud2 = []
self._selected = False
self.rtc = RTC()
def get_time(self):
"""Returns the current time, as known by this MBus device"""
return "%02u:%02u:%02u (%d)" % self.rtc.datetime()[4:8]
def select(self):
"""Puts this MBus device in the 'selected' state"""
if not self._selected:
self._selected = True
self.log("device {} is now selected".format(self._secondary_address))
def deselect(self):
"""Puts this MBus device in an 'unselected' state"""
if self._selected:
self._selected = False
self.log("device {} is now deselected".format(self._secondary_address))
def is_selected(self):
"""Returns the current selection state for this MBus device"""
return self._selected
def log(self, message):
print("[{}][debug ] {}".format(self.get_time(),message))
def update(self):
for record in self._records:
record.update()
self.log("Device with ID {} has updated its data".format(self._secondary_address))
self.seal()
def add_record(self,record):
self._records.append(record)
def seal(self):
self._rsp_ud2 = self.get_rsp_ud2()
def get_primary_address(self):
"""Returns the primary address for this MBus device"""
return self._primary_address
def get_secondary_address(self):
"""Returns the secondary address for this MBus device"""
return self._secondary_address
def matches_secondary_address(self,search_string):
"""Returns true if the secondary address of this MBus device matches the provided search string"""
pattern = re.compile(search_string.replace('f','[0-9]'))
if pattern.match(self._secondary_address):
return True
return False
def get_manufacturer_id(self):
"""Returns the manufacturer id for this MBus device"""
return self._manufacturer
def get_type(self):
"""Returns the MBus attribute 'type' for this MBus device"""
return self._type
def get_address_bytes(self):
"""Returns the secondary address for this MBus device, as a byte array"""
resp_bytes = []
resp_bytes.append(self._secondary_address[6])
resp_bytes.append(self._secondary_address[7])
resp_bytes.append(self._secondary_address[4])
resp_bytes.append(self._secondary_address[5])
resp_bytes.append(self._secondary_address[2])
resp_bytes.append(self._secondary_address[3])
resp_bytes.append(self._secondary_address[0])
resp_bytes.append(self._secondary_address[1])
resp_str = []
resp_str.append(resp_bytes[0] + resp_bytes[1])
resp_str.append(resp_bytes[2] + resp_bytes[3])
resp_str.append(resp_bytes[4] + resp_bytes[5])
resp_str.append(resp_bytes[6] + resp_bytes[7])
ret = [x for x in resp_str]
return ret
def get_manufacturer_bytes(self):
"""Returns the manufacturer id for this MBus device, as a byte array"""
manufacturer = self._manufacturer.upper()
id = ((ord(manufacturer[0]) - 64) * 32 * 32 +
(ord(manufacturer[1]) - 64) * 32 +
(ord(manufacturer[2]) - 64))
if 0x0421 <= id <= 0x6b5a:
return self.manufacturer_encode(id, 2)
return False
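    # e.g. "ELS" maps to id 0x1593 and encodes to [0x93, 0x15]
    # (least-significant byte first; illustrative example)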
def manufacturer_encode(self, value, size):
"""Converts a manufacturer id to its byte equivalent"""
if value is None or value == False:
return None
data = []
for i in range(0, size):
data.append((value >> (i * 8)) & 0xFF)
return data
def calculate_checksum(self, message):
"""Calculates the checksum of the provided data"""
return sum([int(x, 16) if type(x) == str else x for x in message]) & 0xFF
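    # e.g. calculate_checksum([0x68, "ff"]) -> 0x67 (sum truncated to one byte)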
def get_latest_values(self):
return self._rsp_ud2
def get_rsp_ud2(self):
"""Generates a RSP_UD2 response message"""
resp_bytes = []
resp_bytes.append(0x68) # start
resp_bytes.append(0xFF) # length
resp_bytes.append(0xFF) # length
resp_bytes.append(0x68) # start
resp_bytes.append(0x08) # C
resp_bytes.append(self._primary_address) # A
resp_bytes.append(0x72) # CI
resp_bytes.extend(self.get_address_bytes())
resp_bytes.extend(self.get_manufacturer_bytes())
resp_bytes.append(0x01) # version
resp_bytes.append(self._type) # medium (heat)
resp_bytes.append(self._access_number) # access no
resp_bytes.append(0x00) # status
resp_bytes.append(0x00) # configuration 1
resp_bytes.append(0x00) # configuration 2
for record in self._records:
resp_bytes.extend(record.get_bytes())
resp_bytes.append(self.calculate_checksum(resp_bytes[4:]))
resp_bytes.append(0x16) # stop
length = len(resp_bytes) - 9 + 3
resp_bytes[1] = length
resp_bytes[2] = length
ret = ["{:>2}".format(hex(x)[2:]).replace(' ', '0') if type(x) == int else x for x in resp_bytes]
if self._access_number < 255:
self._access_number = self._access_number + 1
else:
self._access_number = 1
return ''.join(ret).upper()
| 37.283871 | 106 | 0.643191 | 5,664 | 0.9801 | 0 | 0 | 0 | 0 | 0 | 0 | 1,180 | 0.204188 |
d170c6d1ba1ad41f7fcd3c9b748bcfe597baaf93 | 20,442 | py | Python | alphamind/model/data_preparing.py | rongliang-tech/alpha-mind | 39f720974c637d17e185e445dc05c9fc4863a241 | [
"MIT"
] | 186 | 2017-11-27T01:26:44.000Z | 2022-03-28T16:11:33.000Z | alphamind/model/data_preparing.py | rongliang-tech/alpha-mind | 39f720974c637d17e185e445dc05c9fc4863a241 | [
"MIT"
] | 2 | 2017-12-19T02:47:36.000Z | 2021-01-09T05:25:18.000Z | alphamind/model/data_preparing.py | vishalbelsare/alpha-mind | 9b7a23bc3354103f16e46ea31fd1ba6c7b69e0ae | [
"MIT"
] | 65 | 2017-11-27T01:26:47.000Z | 2022-03-17T10:50:52.000Z | # -*- coding: utf-8 -*-
"""
Created on 2017-8-24
@author: cheng.li
"""
import bisect
import datetime as dt
from typing import Iterable
from typing import Union
import numpy as np
import pandas as pd
from simpleutils.asserts import require
from PyFin.DateUtilities import Period
from PyFin.api import BizDayConventions
from PyFin.api import DateGeneration
from PyFin.api import advanceDateByCalendar
from PyFin.api import makeSchedule
from alphamind.data.engines.sqlengine import SqlEngine
from alphamind.data.engines.sqlengine import total_risk_factors
from alphamind.data.engines.universe import Universe
from alphamind.data.processing import factor_processing
from alphamind.data.transformer import Transformer
from alphamind.utilities import alpha_logger
from alphamind.utilities import map_freq
def _merge_df(engine, names, factor_df, target_df, universe, dates, risk_model, neutralized_risk):
risk_df = engine.fetch_risk_model_range(universe, dates=dates, risk_model=risk_model)[1]
used_neutralized_risk = list(set(total_risk_factors).difference(names))
risk_df = risk_df[['trade_date', 'code'] + used_neutralized_risk].dropna()
target_df = pd.merge(target_df, risk_df, on=['trade_date', 'code']).dropna()
if neutralized_risk:
train_x = pd.merge(factor_df, risk_df, on=['trade_date', 'code'])
train_y = target_df.copy()
risk_exp = train_x[neutralized_risk].values.astype(float)
x_values = train_x[names].values.astype(float)
y_values = train_y[['dx']].values
else:
risk_exp = None
train_x = factor_df.copy()
train_y = target_df.copy()
x_values = train_x[names].values.astype(float)
y_values = train_y[['dx']].values
codes = train_x['code'].values
date_label = pd.DatetimeIndex(factor_df.trade_date).to_pydatetime()
dates = np.unique(date_label)
return target_df, dates, date_label, risk_exp, x_values, y_values, train_x, train_y, codes
def prepare_data(engine: SqlEngine,
factors: Union[Transformer, Iterable[object]],
start_date: str,
end_date: str,
frequency: str,
universe: Universe,
benchmark: int,
warm_start: int = 0,
fit_target: Union[Transformer, object] = None):
if warm_start > 0:
p = Period(frequency)
p = Period(length=-warm_start * p.length(), units=p.units())
start_date = advanceDateByCalendar('china.sse', start_date, p).strftime('%Y-%m-%d')
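    # e.g. warm_start=2 with frequency '1w' moves start_date two weeks back
    # so the earliest batches still have history to train on (illustrative)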
dates = makeSchedule(start_date,
end_date,
frequency,
calendar='china.sse',
dateRule=BizDayConventions.Following,
dateGenerationRule=DateGeneration.Forward)
dates = [d.strftime('%Y-%m-%d') for d in dates]
horizon = map_freq(frequency)
if isinstance(factors, Transformer):
transformer = factors
else:
transformer = Transformer(factors)
factor_df = engine.fetch_factor_range(universe,
factors=transformer,
dates=dates).sort_values(['trade_date', 'code'])
alpha_logger.info("factor data loading finished")
if fit_target is None:
target_df = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
else:
one_more_date = advanceDateByCalendar('china.sse', dates[-1], frequency)
target_df = engine.fetch_factor_range_forward(universe, factors=fit_target,
dates=dates + [one_more_date])
target_df = target_df[target_df.trade_date.isin(dates)]
target_df = target_df.groupby('code').apply(lambda x: x.fillna(method='pad'))
alpha_logger.info("fit target data loading finished")
industry_df = engine.fetch_industry_range(universe, dates=dates)
alpha_logger.info("industry data loading finished")
benchmark_df = engine.fetch_benchmark_range(benchmark, dates=dates)
alpha_logger.info("benchmark data loading finished")
df = pd.merge(factor_df, target_df, on=['trade_date', 'code']).dropna()
df = pd.merge(df, benchmark_df, on=['trade_date', 'code'], how='left')
df = pd.merge(df, industry_df, on=['trade_date', 'code'])
df['weight'] = df['weight'].fillna(0.)
df.dropna(inplace=True)
return dates, df[['trade_date', 'code', 'dx']], df[
['trade_date', 'code', 'weight', 'industry_code', 'industry'] + transformer.names]
def batch_processing(names,
x_values,
y_values,
groups,
group_label,
batch,
risk_exp,
pre_process,
post_process,
codes):
train_x_buckets = {}
train_y_buckets = {}
train_risk_buckets = {}
predict_x_buckets = {}
predict_y_buckets = {}
predict_risk_buckets = {}
predict_codes_bucket = {}
for i, start in enumerate(groups[:-batch]):
end = groups[i + batch]
left_index = bisect.bisect_left(group_label, start)
right_index = bisect.bisect_left(group_label, end)
this_raw_x = x_values[left_index:right_index]
this_raw_y = y_values[left_index:right_index]
if risk_exp is not None:
this_risk_exp = risk_exp[left_index:right_index]
else:
this_risk_exp = None
train_x_buckets[end] = pd.DataFrame(factor_processing(this_raw_x,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process),
columns=names)
train_y_buckets[end] = factor_processing(this_raw_y,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
train_risk_buckets[end] = this_risk_exp
left_index = bisect.bisect_right(group_label, start)
right_index = bisect.bisect_right(group_label, end)
sub_dates = group_label[left_index:right_index]
this_raw_x = x_values[left_index:right_index]
this_codes = codes[left_index:right_index]
if risk_exp is not None:
this_risk_exp = risk_exp[left_index:right_index]
else:
this_risk_exp = None
ne_x = factor_processing(this_raw_x,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
inner_left_index = bisect.bisect_left(sub_dates, end)
inner_right_index = bisect.bisect_right(sub_dates, end)
predict_x_buckets[end] = pd.DataFrame(ne_x[inner_left_index:inner_right_index],
columns=names)
if risk_exp is not None:
predict_risk_buckets[end] = this_risk_exp[inner_left_index:inner_right_index]
else:
predict_risk_buckets = None
predict_codes_bucket[end] = this_codes[inner_left_index:inner_right_index]
this_raw_y = y_values[left_index:right_index]
if len(this_raw_y) > 0:
ne_y = factor_processing(this_raw_y,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
predict_y_buckets[end] = ne_y[inner_left_index:inner_right_index]
return train_x_buckets, \
train_y_buckets, \
train_risk_buckets, \
predict_x_buckets, \
predict_y_buckets, \
predict_risk_buckets, \
predict_codes_bucket
def fetch_data_package(engine: SqlEngine,
alpha_factors: Iterable[object],
start_date: str,
end_date: str,
frequency: str,
universe: Universe,
benchmark: int,
warm_start: int = 0,
batch: int = 1,
neutralized_risk: Iterable[str] = None,
risk_model: str = 'short',
pre_process: Iterable[object] = None,
post_process: Iterable[object] = None,
fit_target: Union[Transformer, object] = None) -> dict:
alpha_logger.info("Starting data package fetching ...")
transformer = Transformer(alpha_factors)
names = transformer.names
dates, target_df, factor_df = prepare_data(engine,
transformer,
start_date,
end_date,
frequency,
universe,
benchmark,
warm_start + batch,
fit_target=fit_target)
target_df, dates, date_label, risk_exp, x_values, y_values, train_x, train_y, codes = \
_merge_df(engine, names, factor_df, target_df, universe, dates, risk_model,
neutralized_risk)
alpha_logger.info("data merging finished")
target_df['weight'] = train_x['weight']
target_df['industry'] = train_x['industry']
target_df['industry_code'] = train_x['industry_code']
if neutralized_risk:
for i, name in enumerate(neutralized_risk):
target_df.loc[:, name] = risk_exp[:, i]
alpha_logger.info("Loading data is finished")
train_x_buckets, train_y_buckets, train_risk_buckets, predict_x_buckets, predict_y_buckets, predict_risk_buckets, predict_codes_bucket \
= batch_processing(names,
x_values,
y_values,
dates,
date_label,
batch,
risk_exp,
pre_process,
post_process,
codes)
alpha_logger.info("Data processing is finished")
ret = dict()
ret['x_names'] = names
ret['settlement'] = target_df[target_df.trade_date >= start_date]
train_x_buckets = {k: train_x_buckets[k] for k in train_x_buckets if
k.strftime('%Y-%m-%d') >= start_date}
train_y_buckets = {k: train_y_buckets[k] for k in train_y_buckets if
k.strftime('%Y-%m-%d') >= start_date}
train_risk_buckets = {k: train_risk_buckets[k] for k in train_risk_buckets if
k.strftime('%Y-%m-%d') >= start_date}
predict_x_buckets = {k: predict_x_buckets[k] for k in predict_x_buckets if
k.strftime('%Y-%m-%d') >= start_date}
predict_y_buckets = {k: predict_y_buckets[k] for k in predict_y_buckets if
k.strftime('%Y-%m-%d') >= start_date}
if neutralized_risk:
predict_risk_buckets = {k: predict_risk_buckets[k] for k in predict_risk_buckets if
k.strftime('%Y-%m-%d') >= start_date}
else:
predict_risk_buckets = None
predict_codes_bucket = {k: predict_codes_bucket[k] for k in predict_codes_bucket if
k.strftime('%Y-%m-%d') >= start_date}
ret['train'] = {'x': train_x_buckets, 'y': train_y_buckets, 'risk': train_risk_buckets}
ret['predict'] = {'x': predict_x_buckets, 'y': predict_y_buckets, 'risk': predict_risk_buckets,
'code': predict_codes_bucket}
return ret
def fetch_train_phase(engine,
alpha_factors: Union[Transformer, Iterable[object]],
ref_date,
frequency,
universe,
batch=1,
neutralized_risk: Iterable[str] = None,
risk_model: str = 'short',
pre_process: Iterable[object] = None,
post_process: Iterable[object] = None,
warm_start: int = 0,
fit_target: Union[Transformer, object] = None) -> dict:
if isinstance(alpha_factors, Transformer):
transformer = alpha_factors
else:
transformer = Transformer(alpha_factors)
p = Period(frequency)
p = Period(length=-(warm_start + batch) * p.length(), units=p.units())
start_date = advanceDateByCalendar('china.sse', ref_date, p, BizDayConventions.Following)
dates = makeSchedule(start_date,
ref_date,
frequency,
calendar='china.sse',
dateRule=BizDayConventions.Following,
dateGenerationRule=DateGeneration.Backward)
horizon = map_freq(frequency)
factor_df = engine.fetch_factor_range(universe, factors=transformer, dates=dates)
if fit_target is None:
target_df = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
else:
one_more_date = advanceDateByCalendar('china.sse', dates[-1], frequency)
target_df = engine.fetch_factor_range_forward(universe, factors=fit_target,
dates=dates + [one_more_date])
target_df = target_df[target_df.trade_date.isin(dates)]
target_df = target_df.groupby('code').apply(lambda x: x.fillna(method='pad'))
df = pd.merge(factor_df, target_df, on=['trade_date', 'code']).dropna()
target_df, factor_df = df[['trade_date', 'code', 'dx']], df[
['trade_date', 'code'] + transformer.names]
target_df, dates, date_label, risk_exp, x_values, y_values, _, _, codes = \
_merge_df(engine, transformer.names, factor_df, target_df, universe, dates, risk_model,
neutralized_risk)
if dates[-1] == dt.datetime.strptime(ref_date, '%Y-%m-%d'):
require(len(dates) >= 2, ValueError,
"No previous data for training for the date {0}".format(ref_date))
end = dates[-2]
start = dates[-batch - 1] if batch <= len(dates) - 1 else dates[0]
else:
end = dates[-1]
start = dates[-batch] if batch <= len(dates) else dates[0]
index = (date_label >= start) & (date_label <= end)
this_raw_x = x_values[index]
this_raw_y = y_values[index]
this_code = codes[index]
if risk_exp is not None:
this_risk_exp = risk_exp[index]
else:
this_risk_exp = None
ne_x = factor_processing(this_raw_x,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
ne_y = factor_processing(this_raw_y,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
ret = dict()
ret['x_names'] = transformer.names
ret['train'] = {'x': pd.DataFrame(ne_x, columns=transformer.names), 'y': ne_y,
'code': this_code}
return ret
def fetch_predict_phase(engine,
alpha_factors: Union[Transformer, Iterable[object]],
ref_date,
frequency,
universe,
batch=1,
neutralized_risk: Iterable[str] = None,
risk_model: str = 'short',
pre_process: Iterable[object] = None,
post_process: Iterable[object] = None,
warm_start: int = 0,
fillna: str = None,
fit_target: Union[Transformer, object] = None):
if isinstance(alpha_factors, Transformer):
transformer = alpha_factors
else:
transformer = Transformer(alpha_factors)
p = Period(frequency)
p = Period(length=-(warm_start + batch - 1) * p.length(), units=p.units())
start_date = advanceDateByCalendar('china.sse', ref_date, p, BizDayConventions.Following)
dates = makeSchedule(start_date,
ref_date,
frequency,
calendar='china.sse',
dateRule=BizDayConventions.Following,
dateGenerationRule=DateGeneration.Backward)
horizon = map_freq(frequency)
factor_df = engine.fetch_factor_range(universe, factors=transformer, dates=dates)
if fillna:
factor_df = factor_df.groupby('trade_date').apply(
lambda x: x.fillna(x.median())).reset_index(
drop=True).dropna()
else:
factor_df = factor_df.dropna()
if fit_target is None:
target_df = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
else:
one_more_date = advanceDateByCalendar('china.sse', dates[-1], frequency)
target_df = engine.fetch_factor_range_forward(universe, factors=fit_target,
dates=dates + [one_more_date])
target_df = target_df[target_df.trade_date.isin(dates)]
target_df = target_df.groupby('code').apply(lambda x: x.fillna(method='pad'))
names = transformer.names
if neutralized_risk:
risk_df = engine.fetch_risk_model_range(universe, dates=dates, risk_model=risk_model)[1]
used_neutralized_risk = list(set(neutralized_risk).difference(names))
risk_df = risk_df[['trade_date', 'code'] + used_neutralized_risk].dropna()
train_x = pd.merge(factor_df, risk_df, on=['trade_date', 'code'])
train_x = pd.merge(train_x, target_df, on=['trade_date', 'code'], how='left')
risk_exp = train_x[neutralized_risk].values.astype(float)
else:
train_x = pd.merge(factor_df, target_df, on=['trade_date', 'code'], how='left')
risk_exp = None
train_x.dropna(inplace=True, subset=train_x.columns[:-1])
x_values = train_x[names].values.astype(float)
y_values = train_x[['dx']].values.astype(float)
date_label = pd.DatetimeIndex(train_x.trade_date).to_pydatetime()
dates = np.unique(date_label)
if dates[-1] == dt.datetime.strptime(ref_date, '%Y-%m-%d'):
end = dates[-1]
start = dates[-batch] if batch <= len(dates) else dates[0]
left_index = bisect.bisect_left(date_label, start)
right_index = bisect.bisect_right(date_label, end)
this_raw_x = x_values[left_index:right_index]
this_raw_y = y_values[left_index:right_index]
sub_dates = date_label[left_index:right_index]
if risk_exp is not None:
this_risk_exp = risk_exp[left_index:right_index]
else:
this_risk_exp = None
ne_x = factor_processing(this_raw_x,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
ne_y = factor_processing(this_raw_y,
pre_process=pre_process,
risk_factors=this_risk_exp,
post_process=post_process)
inner_left_index = bisect.bisect_left(sub_dates, end)
inner_right_index = bisect.bisect_right(sub_dates, end)
ne_x = ne_x[inner_left_index:inner_right_index]
ne_y = ne_y[inner_left_index:inner_right_index]
left_index = bisect.bisect_left(date_label, end)
right_index = bisect.bisect_right(date_label, end)
codes = train_x.code.values[left_index:right_index]
else:
ne_x = None
ne_y = None
codes = None
ret = dict()
ret['x_names'] = transformer.names
    ret['predict'] = {'x': pd.DataFrame(ne_x, columns=transformer.names, index=codes), 'code': codes,
                      'y': ne_y.flatten() if ne_y is not None else None}
return ret
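# Usage sketch (illustrative only; the engine, universe object, factor names
# and `model` below are assumptions, not part of this module):
# engine = SqlEngine(db_uri)                # hypothetical data engine
# universe = Universe('hs300')              # hypothetical universe helper
# train = fetch_train_phase(engine, ['EPS', 'ROE'], '2018-01-05', '1w',
#                           universe, batch=8, neutralized_risk=['SIZE'])
# model.fit(train['train']['x'], train['train']['y'])
# pred = fetch_predict_phase(engine, ['EPS', 'ROE'], '2018-01-05', '1w',
#                            universe, batch=8, neutralized_risk=['SIZE'])
# scores = model.predict(pred['predict']['x'])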
| 41.54878 | 140 | 0.57964 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,208 | 0.059094 |
d170d5f0af719c619aa67b12d13b02a0402cfc23 | 666 | py | Python | AppleFluenza/__main__.py | fxrcha/AppleFluenza | 707f5bd991d11c523e7f363a501deec17b5ef6c3 | [
"MIT"
] | 6 | 2021-02-18T06:45:28.000Z | 2021-02-24T14:59:43.000Z | AppleFluenza/__main__.py | fxrcha/AppleFluenza | 707f5bd991d11c523e7f363a501deec17b5ef6c3 | [
"MIT"
] | null | null | null | AppleFluenza/__main__.py | fxrcha/AppleFluenza | 707f5bd991d11c523e7f363a501deec17b5ef6c3 | [
"MIT"
] | null | null | null | import logging
import sys
from AppleFluenza.bot import auto_load_cogs, bot
from utils.getenv import getenv
from utils.cli import header, option_parser
if __name__ == "__main__":
header()
auto_load_cogs(bot)
optparser = option_parser()
    (options, args) = optparser.parse_args(sys.argv[1:])  # optparse expects argv without the program name
token = getenv("TOKEN")
if options.debug is not None:
logging.getLogger().setLevel(logging.DEBUG)
bot.logger.info("WARNING: AppleFluenza is now in debug mode.")
token = getenv("TEST_TOKEN")
if options.override is not None:
bot.logger.info("Overriding token.")
token = options.override
bot.run(token)
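# Invocation sketch (the exact flag names are defined by option_parser in
# utils.cli, which is not shown here, so treat these as assumptions):
#   python -m AppleFluenza                  # run with TOKEN from the environment
#   python -m AppleFluenza --debug 1        # debug logging, uses TEST_TOKEN
#   python -m AppleFluenza --override XYZ   # run with an explicit token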
| 22.2 | 70 | 0.686186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 0.13964 |
d172045e2991b3f71ea33d94ff2354816c13eea0 | 3,017 | py | Python | tests/utils.py | data4help/shiny-bassoon | a240f4b5ec3ad8642e206b582266dc79125eba58 | [
"MIT"
] | null | null | null | tests/utils.py | data4help/shiny-bassoon | a240f4b5ec3ad8642e206b582266dc79125eba58 | [
"MIT"
] | null | null | null | tests/utils.py | data4help/shiny-bassoon | a240f4b5ec3ad8642e206b582266dc79125eba58 | [
"MIT"
] | null | null | null | # %% Packages
import os
import pickle
from pyhocon import ConfigTree
# %% Functions
def load_pickle(loading_path: str):
"""This method loads the file at the specified path
:param loading_path: Path at which object is saved
:type loading_path: str
:return: Desired file
:rtype: Could be basically anything
"""
file = open(f"{loading_path}.pickle", "rb")
return pickle.load(file)
def check_scrapping_task(task, config: ConfigTree) -> None:
"""This method tests the scrapping task. It is checked whether
the task can scrape the images and whether the result are actually
image-filled folders.
:param task: The task we would like to do
:type task: self-written class
:param config: Configuration file for the class
:type config: ConfigTree
"""
# Initiate task and run it
task = task(config=config, re_scrape_data=False)
task.run()
    # Checking whether every number in the dataframe has a corresponding image
path_config = config.get_config("paths").get_config(task.name)
path_output = path_config.get_config("path_output")
image_path = path_output.get_string("image_data")
meta_df_path = path_output.get_string("processed_meta_information")
meta_df = load_pickle(meta_df_path)
image_number_list = meta_df.loc[:, "number"].tolist()
meta_df_images = sorted([f"athlete_{x}.png" for x in image_number_list])
sorted_images = sorted(os.listdir(image_path))
assert (
meta_df_images == sorted_images
), "We have a mismatch between meta information and images"
# Checking that we do not have any missing values
assert meta_df.isna().sum().sum() == 0, "We have missing observations"
# Checking age for sensibility
age_min = meta_df.loc[:, "age"].min()
age_max = meta_df.loc[:, "age"].max()
assert age_min >= 0 and age_max <= 100, "The age range seems questionable"
def check_preprocessing(task, config: ConfigTree) -> None:
"""This method checks the image preprocessing task
:param task: Image classification task
:type task: self-written class
:param config: Corresponding Configuration file
:type config: ConfigTree
"""
# Initiate task and run it
task = task(config=config)
task.run()
# Getting testing paths ready
path_config = config.get_config("paths").get_config(task.name)
path_input = path_config.get_config("path_input")
path_output = path_config.get_config("path_output")
def check_image_classifer(task, config: ConfigTree) -> None:
"""This method checks the image classification task
:param task: Image classification task
:type task: self-written class
:param config: Corresponding Configuration file
:type config: ConfigTree
"""
# Initiate task and run it
task = task(config=config)
task.run()
# Getting testing paths ready
path_config = config.get_config("paths").get_config(task.name)
path_output = path_config.get_config("path_output")
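# Usage sketch (illustrative; ScrapingTask and the config file path are
# assumptions about the surrounding project, not defined in this module):
# from pyhocon import ConfigFactory
# config = ConfigFactory.parse_file("config/main.conf")
# check_scrapping_task(ScrapingTask, config)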
| 31.103093 | 82 | 0.706331 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,592 | 0.527676 |
d1720a3f200947c6d598c557c5d06099c334bc22 | 11,905 | py | Python | chatbrick/brick/icn.py | BluehackRano/cb-wh | ecf11100ad83df71eac9d56f6abbd59ceeda9d83 | [
"MIT"
] | null | null | null | chatbrick/brick/icn.py | BluehackRano/cb-wh | ecf11100ad83df71eac9d56f6abbd59ceeda9d83 | [
"MIT"
] | null | null | null | chatbrick/brick/icn.py | BluehackRano/cb-wh | ecf11100ad83df71eac9d56f6abbd59ceeda9d83 | [
"MIT"
] | 1 | 2019-03-05T06:50:11.000Z | 2019-03-05T06:50:11.000Z | import logging
import blueforge.apis.telegram as tg
import requests
from blueforge.apis.facebook import Message, ImageAttachment, QuickReply, QuickReplyTextItem, TemplateAttachment, \
GenericTemplate, Element, PostBackButton
from chatbrick.util import get_items_from_xml, UNKNOWN_ERROR_MSG
import time
logger = logging.getLogger(__name__)
BRICK_DEFAULT_IMAGE = 'https://www.chatbrick.io/api/static/brick/img_brick_13_001.png'
GATE_INFO = {
'0': '원할',
'1': '보통',
'2': '혼잡',
'3': '매우혼잡',
'9': '종료'
}
class Icn(object):
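    """Chatbrick brick for Incheon International Airport departure-gate
    congestion lookups. `facebook` and `telegram` render the same open-API
    data for each messenger; commands are 'get_started', '1' and '2'."""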
def __init__(self, fb, brick_db):
self.brick_db = brick_db
self.fb = fb
async def facebook(self, command):
if command == 'get_started':
# send_message = [
# Message(
# attachment=ImageAttachment(
# url=BRICK_DEFAULT_IMAGE
# )
# ),
# Message(
# text='인천국제공항공사에서 제공하는 "출국장 대기인원 조회 서비스"에요.'
# ),
# Message(
# attachment=TemplateAttachment(
# payload=GenericTemplate(
# elements=[
# Element(
# image_url='https://www.chatbrick.io/api/static/brick/img_brick_13_002.png',
# title='제 1여객터미널',
# subtitle='제 1여객터미널의 게이트별 대기인원을 알려드려요.',
# buttons=[
# PostBackButton(
# title='1여객터미널 조회',
# payload='brick|icn|1'
# )
# ]
# ),
# Element(
# image_url='https://www.chatbrick.io/api/static/brick/img_brick_13_002.png',
# title='제 2여객터미널',
# subtitle='제 2여객터미널의 게이트별 대기인원을 알려드려요.',
# buttons=[
# PostBackButton(
# title='2여객터미널 조회',
# payload='brick|icn|2'
# )
# ]
# )
# ]
# )
# )
# )
# ]
send_message = [
Message(
attachment=TemplateAttachment(
payload=GenericTemplate(
elements=[
Element(image_url=BRICK_DEFAULT_IMAGE,
title='출국장 대기인원 조회 서비스',
subtitle='인천국제공항공사에서 제공하는 "출국장 대기인원 조회 서비스"에요.')
]
)
)
),
Message(
attachment=TemplateAttachment(
payload=GenericTemplate(
elements=[
Element(
image_url='https://www.chatbrick.io/api/static/brick/img_brick_13_002.png',
title='제 1여객터미널',
subtitle='제 1여객터미널의 게이트별 대기인원을 알려드려요.',
buttons=[
PostBackButton(
title='1여객터미널 조회',
payload='brick|icn|1'
)
]
),
Element(
image_url='https://www.chatbrick.io/api/static/brick/img_brick_13_002.png',
title='제 2여객터미널',
subtitle='제 2여객터미널의 게이트별 대기인원을 알려드려요.',
buttons=[
PostBackButton(
title='2여객터미널 조회',
payload='brick|icn|2'
)
]
)
]
)
)
)
]
await self.fb.send_messages(send_message)
await self.brick_db.save()
elif command == '1' or command == '2':
input_data = await self.brick_db.get()
res = requests.get(
url='http://openapi.airport.kr/openapi/service/StatusOfDepartures/getDeparturesCongestion?serviceKey=%s&terno=%s' % (
input_data['data']['api_key'], command), headers={
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'})
items = get_items_from_xml(res)
if type(items) is dict:
if items.get('code', '00') == '99' or items.get('code', '00') == '30':
send_message = [
Message(
text='chatbrick 홈페이지에 올바르지 않은 API key를 입력했어요. 다시 한번 확인해주세요.',
)
]
else:
send_message = [
Message(
text=UNKNOWN_ERROR_MSG
)
]
else:
if command == '1':
the_other = '2'
else:
the_other = '1'
raw_data = items[0]
sending_message = '제 {terno} 여객터미널\n조회날짜 : {cgtdt}\n조회시간 : {cgthm}'.format(**raw_data)
if command == '1':
sending_message += '\n2번 출국장: %s명 (%s)' % (raw_data['gateinfo1'], GATE_INFO[raw_data['gate1']])
sending_message += '\n3번 출국장: %s명 (%s)' % (raw_data['gateinfo2'], GATE_INFO[raw_data['gate2']])
sending_message += '\n4번 출국장: %s명 (%s)' % (raw_data['gateinfo3'], GATE_INFO[raw_data['gate3']])
sending_message += '\n5번 출국장: %s명 (%s)' % (raw_data['gateinfo4'], GATE_INFO[raw_data['gate4']])
elif command == '2':
sending_message += '\n1번 출국장: %s명 (%s)' % (raw_data['gateinfo1'], GATE_INFO[raw_data['gate1']])
sending_message += '\n2번 출국장: %s명 (%s)' % (raw_data['gateinfo2'], GATE_INFO[raw_data['gate2']])
send_message = [
Message(
text=sending_message,
quick_replies=QuickReply(
quick_reply_items=[
QuickReplyTextItem(
title='새로고침',
payload='brick|icn|%s' % command
),
QuickReplyTextItem(
title='제%s여객터미널 조회' % the_other,
payload='brick|icn|%s' % the_other
)
]
)
)
]
await self.fb.send_messages(send_message)
return None
async def telegram(self, command):
if command == 'get_started':
send_message = [
tg.SendPhoto(
photo=BRICK_DEFAULT_IMAGE
),
tg.SendMessage(
text='인천국제공항공사에서 제공하는 "출국장 대기인원 조회 서비스"에요.',
reply_markup=tg.MarkUpContainer(
inline_keyboard=[
[
tg.CallbackButton(
text='제1여객터미널',
callback_data='BRICK|icn|1'
),
tg.CallbackButton(
text='제2여객터미널',
callback_data='BRICK|icn|2'
)
]
]
)
)
]
await self.fb.send_messages(send_message)
await self.brick_db.save()
elif command == '1' or command == '2':
input_data = await self.brick_db.get()
res = requests.get(
url='http://openapi.airport.kr/openapi/service/StatusOfDepartures/getDeparturesCongestion?serviceKey=%s&terno=%s' % (
input_data['data']['api_key'], command), headers={
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'})
items = get_items_from_xml(res)
if type(items) is dict:
if items.get('code', '00') == '99' or items.get('code', '00') == '30':
send_message = [
tg.SendMessage(
text='chatbrick 홈페이지에 올바르지 않은 API key를 입력했어요. 다시 한번 확인해주세요.',
)
]
else:
send_message = [
tg.SendMessage(
text=UNKNOWN_ERROR_MSG
)
]
else:
if command == '1':
the_other = '2'
else:
the_other = '1'
raw_data = items[0]
sending_message = '*제 {terno} 여객터미널*\n조회날짜 : {cgtdt}\n조회시간 : {cgthm}'.format(**raw_data)
if command == '1':
sending_message += '\n2번 출국장: %s명 (%s)' % (raw_data['gateinfo1'], GATE_INFO[raw_data['gate1']])
sending_message += '\n3번 출국장: %s명 (%s)' % (raw_data['gateinfo2'], GATE_INFO[raw_data['gate2']])
sending_message += '\n4번 출국장: %s명 (%s)' % (raw_data['gateinfo3'], GATE_INFO[raw_data['gate3']])
sending_message += '\n5번 출국장: %s명 (%s)' % (raw_data['gateinfo4'], GATE_INFO[raw_data['gate4']])
elif command == '2':
sending_message += '\n1번 출국장: %s명 (%s)' % (raw_data['gateinfo1'], GATE_INFO[raw_data['gate1']])
sending_message += '\n2번 출국장: %s명 (%s)' % (raw_data['gateinfo2'], GATE_INFO[raw_data['gate2']])
send_message = [
tg.SendMessage(
text=sending_message,
parse_mode='Markdown',
reply_markup=tg.MarkUpContainer(
inline_keyboard=[
[
tg.CallbackButton(
text='새로고침',
callback_data='BRICK|icn|%s' % command
)
],
[
tg.CallbackButton(
text='제%s여객터미널 조회' % the_other,
callback_data='BRICK|icn|%s' % the_other
)
]
]
)
)
]
await self.fb.send_messages(send_message)
return None
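# Usage sketch (illustrative; `fb` and `brick_db` are the messenger client and
# per-user store injected by the chatbrick runtime -- assumptions here):
# brick = Icn(fb, brick_db)
# await brick.facebook('get_started')  # show the two-terminal menu
# await brick.facebook('1')            # report terminal 1 gate congestion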
| 44.256506 | 159 | 0.374549 | 12,196 | 0.956624 | 0 | 0 | 0 | 0 | 12,074 | 0.947055 | 4,298 | 0.337124 |
66f44f9766c4d040eac6704c6c2ae8556c45fffa | 342 | py | Python | Monitoring/dht22_monitor.py | jpradass/Raspberry-Utils | b14c25e7dc9bedbea62d19240db3fb202372ea2c | [
"MIT"
] | null | null | null | Monitoring/dht22_monitor.py | jpradass/Raspberry-Utils | b14c25e7dc9bedbea62d19240db3fb202372ea2c | [
"MIT"
] | null | null | null | Monitoring/dht22_monitor.py | jpradass/Raspberry-Utils | b14c25e7dc9bedbea62d19240db3fb202372ea2c | [
"MIT"
] | null | null | null | import time
import requests
INFLUX_URL = 'http://localhost:8086/write?db=DHT22'
def sendDataToGrafana(humidity, temp, pressure):
requests.post(INFLUX_URL, data='temperature value=' + str(temp))
requests.post(INFLUX_URL, data='humidity value=' + str(humidity))
requests.post(INFLUX_URL, data='pressure value=' + str(pressure))
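# Despite the name, this posts InfluxDB line-protocol points that Grafana then
# reads. Usage sketch (example values; a real monitor would sample a DHT22/BME
# sensor in a loop, e.g. via a sensor library, and sleep between readings):
# sendDataToGrafana(humidity=45.2, temp=22.5, pressure=1013.25)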
| 28.5 | 69 | 0.736842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.269006 |
66f4c17269c6170a21fe09050ef187d175632d22 | 3,384 | py | Python | compiler/parser/expression_models/comparison.py | Fire-Script/FireScript | 8103b9bafe68163c8018aae2e760b6ad50310595 | [
"MIT"
] | 2 | 2021-12-31T02:23:13.000Z | 2022-01-13T09:59:52.000Z | compiler/parser/expression_models/comparison.py | classPythonAddike/FireScript | 8103b9bafe68163c8018aae2e760b6ad50310595 | [
"MIT"
] | 1 | 2021-12-31T13:24:07.000Z | 2021-12-31T13:24:07.000Z | compiler/parser/expression_models/comparison.py | classPythonAddike/FireScript | 8103b9bafe68163c8018aae2e760b6ad50310595 | [
"MIT"
] | 3 | 2021-12-31T12:08:23.000Z | 2022-01-02T12:00:57.000Z | from compiler.bytecode.opcodes import OpCodes
from compiler.errors.errors import FTypeError
from compiler.parser.expressions import Expression
from typing import List, Dict
class EqualToExp(Expression):
"""
Syntax: (= arg1 arg2)
Argument Types: Any
Return Type: Bool
Check if two objects are equal
"""
def __init__(self, line: int, *args: "Expression"):
self.line = line
self.lval = args[0]
self.rval = args[1]
def eval(self, variables: Dict[str, int]) -> List[List[str]]:
return self.rval.eval(variables) + self.lval.eval(variables) + [[OpCodes.COMPARE, "0"]]
def load_type(self, variables: Dict[str, str]) -> Dict[str, str]:
variables = self.lval.load_type(variables)
variables = self.rval.load_type(variables)
if self.lval.value_type != self.rval.value_type:
FTypeError(
self.line,
f"Cannot compare objects of type {self.lval.value_type} and {self.rval.value_type}!"
).raise_error()
self._value_type = "Bool"
return variables
@classmethod
def keyword(cls) -> str:
return "="
@classmethod
def num_args(cls) -> int:
return 2
class GreaterThanExp(EqualToExp):
"""
Syntax: (> arg1 arg2)
Argument Types: Integer | Float
Return Type: Bool
Check if arg1 > arg2
"""
def load_type(self, variables: Dict[str, str]) -> Dict[str, str]:
variables = self.lval.load_type(variables)
variables = self.rval.load_type(variables)
if self.lval.value_type != self.rval.value_type:
FTypeError(
self.line,
f"Cannot compare objects of type {self.lval.value_type} and {self.rval.value_type}!"
).raise_error()
if self.lval.value_type not in ["Integer", "Float"]:
FTypeError(
self.line,
f"Cannot compare objects of type {self.lval.value_type}!"
).raise_error()
self._value_type = "Bool"
return variables
def eval(self, variables: Dict[str, int]) -> List[List[str]]:
return self.rval.eval(variables) + self.lval.eval(variables) + [[OpCodes.COMPARE, "1"]]
@classmethod
def keyword(cls) -> str:
return ">"
class LessThanExp(GreaterThanExp):
"""
Syntax: (< arg1 arg2)
Argument Types: Integer | Float
Return Type: Bool
Check if arg1 < arg2
"""
def __init__(self, line: int, *args: "Expression"):
self.line = line
self.lval = args[1]
self.rval = args[0]
@classmethod
def keyword(cls) -> str:
return "<"
class GreaterThanOrEqualExp(GreaterThanExp):
"""
Syntax: (>= arg1 arg2)
Argument Types: Integer | Float
Return Type: Bool
Check if arg1 >= arg2
"""
def eval(self, variables: Dict[str, int]) -> List[List[str]]:
return self.rval.eval(variables) + self.lval.eval(variables) + [[OpCodes.COMPARE, "2"]]
@classmethod
def keyword(cls) -> str:
return ">" # First identifier will be `>`
@classmethod
def num_args(cls) -> int:
return 3 # 1 argument for the `=`
class LessThanOrEqualExp(GreaterThanOrEqualExp):
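    """
    Syntax: (<= arg1 arg2)
    Argument Types: Integer | Float
    Return Type: Bool
    Check if arg1 <= arg2
    """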
    def __init__(self, line: int, *args: "Expression"):
        self.line = line
        self.lval = args[1]
        self.rval = args[0]
    @classmethod
    def keyword(cls) -> str:
        # Without this override the class would inherit ">" from
        # GreaterThanOrEqualExp and collide with it, mirroring how
        # LessThanExp overrides the keyword of GreaterThanExp.
        return "<"  # First identifier will be `<`
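# Compilation sketch (read off the eval() bodies above): for sub-expressions
# x and y, each comparison emits the operands in reverse order followed by a
# COMPARE opcode, e.g.
#   (= x y)   ->  y.eval() + x.eval() + [[OpCodes.COMPARE, "0"]]
#   (> x y)   ->  y.eval() + x.eval() + [[OpCodes.COMPARE, "1"]]
#   (>= x y)  ->  y.eval() + x.eval() + [[OpCodes.COMPARE, "2"]]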
| 27.737705 | 100 | 0.60195 | 3,195 | 0.944149 | 0 | 0 | 414 | 0.12234 | 0 | 0 | 844 | 0.249409 |
66f5b0cbb8ef944f7945f54d1777a667ec6dbe6b | 3,024 | py | Python | Echoes/Filezilla.py | xeddmc/BrainDamage | 855f696883d495e2f1b1b55ced31a54f3426c50e | [
"Apache-2.0"
] | 1,520 | 2020-10-23T06:22:06.000Z | 2022-03-26T09:17:47.000Z | Echoes/Filezilla.py | 1612480331/BrainDamage | ac412e32583436cab3e836713008c207229c9cf2 | [
"Apache-2.0"
] | 12 | 2017-03-25T16:31:20.000Z | 2021-12-28T05:04:52.000Z | Echoes/Filezilla.py | 1612480331/BrainDamage | ac412e32583436cab3e836713008c207229c9cf2 | [
"Apache-2.0"
] | 661 | 2020-10-23T06:23:53.000Z | 2021-09-06T23:05:30.000Z | # Based on the work of https://github.com/AlessandroZ/LaZagne/blob/master/Windows/lazagne/
import xml.etree.cElementTree as ET
import os, base64
class Filezilla():
def __init__(self):
options = {'command': '-f', 'action': 'store_true', 'dest': 'filezilla', 'help': 'filezilla'}
def run(self):
if 'APPDATA' in os.environ:
            directory = os.path.join(os.environ['APPDATA'], 'FileZilla')
else:
return
interesting_xml_file = []
info_xml_file = []
if os.path.exists(os.path.join(directory, 'sitemanager.xml')):
interesting_xml_file.append('sitemanager.xml')
info_xml_file.append('Stores all saved sites server info including password in plaintext')
if os.path.exists(os.path.join(directory, 'recentservers.xml')):
interesting_xml_file.append('recentservers.xml')
info_xml_file.append('Stores all recent server info including password in plaintext')
if os.path.exists(os.path.join(directory, 'filezilla.xml')):
interesting_xml_file.append('filezilla.xml')
info_xml_file.append('Stores most recent server info including password in plaintext')
if interesting_xml_file != []:
pwdFound = []
for i in range(len(interesting_xml_file)):
xml_file = os.path.expanduser(directory + os.sep + interesting_xml_file[i])
tree = ET.ElementTree(file=xml_file)
root = tree.getroot()
servers = root.getchildren()
for ss in servers:
server = ss.getchildren()
jump_line = 0
for s in server:
s1 = s.getchildren()
values = {}
for s11 in s1:
if s11.tag == 'Host':
values[s11.tag] = s11.text
if s11.tag == 'Port':
values[s11.tag] = s11.text
if s11.tag == 'User':
values['Login'] = s11.text
if s11.tag == 'Pass':
try:
# if base64 encoding
if 'encoding' in s11.attrib:
if s11.attrib['encoding'] == 'base64':
values['Password'] = base64.b64decode(s11.text)
else:
values['Password'] = s11.text
except:
values['Password'] = s11.text
# password found
if len(values) != 0:
pwdFound.append(values)
# print the results
return pwdFound
else:
pass
#tem = Filezilla()
#a = tem.run()
#print(a)
| 37.8 | 102 | 0.472553 | 2,833 | 0.936839 | 0 | 0 | 0 | 0 | 0 | 0 | 668 | 0.220899 |
66f69535e1d0a44683902c8b5bdee87b710b453d | 1,519 | py | Python | flights/models.py | solnsubuga/flightapp | 2da79cb4edef51507152a1d27388292a15b67815 | [
"Apache-2.0"
] | null | null | null | flights/models.py | solnsubuga/flightapp | 2da79cb4edef51507152a1d27388292a15b67815 | [
"Apache-2.0"
] | 8 | 2020-02-12T00:24:07.000Z | 2021-09-08T01:11:22.000Z | flights/models.py | solnsubuga/flightapp | 2da79cb4edef51507152a1d27388292a15b67815 | [
"Apache-2.0"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
class Flight(models.Model):
STATUSES = (
('SCHEDULED', 'SCHEDULED'),
('DELAYED', 'DELAYED'),
('ON_TIME', 'ON TIME'),
('ARRIVED', 'ARRIVED'),
('LATE', 'LATE')
)
number = models.CharField(max_length=10)
departure_time = models.DateTimeField()
arrival_time = models.DateTimeField()
origin = models.CharField(max_length=150)
destination = models.CharField(max_length=150)
status = models.CharField(choices=STATUSES, max_length=100)
@property
def duration(self):
timespan = self.arrival_time - self.departure_time
days, seconds = timespan.days, timespan.seconds
return days * 24 + seconds // 3600 # return hours
    @property
    def available_seats(self):
        # only seats still marked available (the name promises a filter,
        # not every seat on the flight)
        return self.seats.filter(is_available=True)
def __str__(self):
return self.number
class Seat(models.Model):
flight = models.ForeignKey(
Flight, on_delete=models.CASCADE, related_name='seats')
number = models.CharField(max_length=50)
is_available = models.BooleanField(default=True)
def __str__(self):
return self.number
class Reservation(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
flight = models.ForeignKey(Flight, on_delete=models.CASCADE)
seat = models.ForeignKey(Seat, on_delete=models.CASCADE)
is_notified = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
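# Usage sketch (illustrative; assumes migrations are applied and `user`,
# `dep`, `arr` are an existing User and two placeholder datetimes):
# flight = Flight.objects.create(number='BA117', departure_time=dep,
#                                arrival_time=arr, origin='LHR',
#                                destination='JFK', status='SCHEDULED')
# seat = Seat.objects.create(flight=flight, number='12A')
# Reservation.objects.create(user=user, flight=flight, seat=seat)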
| 30.38 | 64 | 0.681369 | 1,437 | 0.946017 | 0 | 0 | 279 | 0.183673 | 0 | 0 | 109 | 0.071758 |
66f7a9674e5203f57287eed9d7aa1d82b181de7d | 709 | py | Python | look_w.py | kakkarja/english-words | aa6a7c20044d95b6209f06189c8feb424a9a3c2a | [
"Unlicense"
] | 1 | 2020-10-08T00:30:06.000Z | 2020-10-08T00:30:06.000Z | look_w.py | kakkarja/english-words | aa6a7c20044d95b6209f06189c8feb424a9a3c2a | [
"Unlicense"
] | 1 | 2018-05-01T14:01:14.000Z | 2018-05-01T14:01:14.000Z | look_w.py | kakkarja/english-words | aa6a7c20044d95b6209f06189c8feb424a9a3c2a | [
"Unlicense"
] | 1 | 2018-04-29T18:38:40.000Z | 2018-04-29T18:38:40.000Z | import os
from pathlib import Path
# Getting the user's home directory
h_path = str(Path.home())  # + any additional path to a folder that contains words.txt
# Change directory to h_path where words.txt is located
os.chdir(h_path)
# Open words.txt.
words = open('words.txt').read().split()
def look_w(word, num):
    # Look up words in the words list that have exactly `num` letters
    # and only use letters available (with multiplicity) in `word`
if num <= len(word) and num != 0:
return [w for w in words if len(w) == num and
all(w.lower().count(c) <= word.lower().count(c) for c in w.lower())]
else:
return "⚔ Exceeding total letters ⚔".upper()
# Usage
print(look_w('insane', 6)) # prints ['inanes', 'insane', 'sienna']
| 29.541667 | 85 | 0.64598 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 329 | 0.461431 |
66f8eeacb2ab5fc5e4c283571992585729baa9ec | 6,284 | py | Python | libcity/model/trajectory_loc_prediction/SERM.py | moghadas76/test_bigcity | 607b9602c5b1113b23e1830455e174b0901d7558 | [
"Apache-2.0"
] | 221 | 2021-09-06T03:33:31.000Z | 2022-03-28T05:36:49.000Z | libcity/model/trajectory_loc_prediction/SERM.py | moghadas76/test_bigcity | 607b9602c5b1113b23e1830455e174b0901d7558 | [
"Apache-2.0"
] | 43 | 2021-09-19T16:12:28.000Z | 2022-03-31T16:29:03.000Z | libcity/model/trajectory_loc_prediction/SERM.py | moghadas76/test_bigcity | 607b9602c5b1113b23e1830455e174b0901d7558 | [
"Apache-2.0"
] | 64 | 2021-09-06T07:56:10.000Z | 2022-03-25T08:48:35.000Z | import torch
import torch.nn as nn
import numpy as np
from libcity.model.abstract_model import AbstractModel
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
class EmbeddingMatrix(nn.Module):  # text_embedding
def __init__(self, input_size, output_size, word_vec):
super(EmbeddingMatrix, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.layer = nn.Linear(in_features=self.input_size, out_features=self.output_size, bias=False)
self.init_weight(word_vec)
def init_weight(self, word_vec):
        # word_vec is the initial weight matrix for the text embedding, passed in via data_feature.
        # self.weight is output_size*input_size, namely length_of_wordvect_glove_pretrained(50)
        # *text_size(the size of dictionary)
        # Following the paper's source code, word_vec = text_size(the size of dictionary)*length_of_wordvect_glove_pretrained
        word_vec = torch.Tensor(word_vec).t()  # transpose
self.layer.weight = nn.Parameter(word_vec)
def forward(self, x): # x:batch*seq*input_size
# return torch.matmul(x, self.weights) #batch*seq*text_size * text_size*output_size = batch*seq*output_size
return self.layer(x) # batch*seq*output_size
class SERM(AbstractModel):
def __init__(self, config, data_feature):
super(SERM, self).__init__(config, data_feature)
# initialize parameters
# print(config['dataset_class'])
self.loc_size = data_feature['loc_size']
self.loc_emb_size = config['loc_emb_size']
self.tim_size = data_feature['tim_size']
self.tim_emb_size = config['tim_emb_size']
self.user_size = data_feature['uid_size']
        self.user_emb_size = data_feature['loc_size']  # as in the paper
self.text_size = data_feature['text_size']
        self.text_emb_size = len(data_feature['word_vec'][0])  # constrained by the width of word_vec
self.hidden_size = config['hidden_size']
self.word_one_hot_matrix = np.eye(self.text_size)
self.device = config['device']
# Embedding layer
self.emb_loc = nn.Embedding(num_embeddings=self.loc_size, embedding_dim=self.loc_emb_size,
padding_idx=data_feature['loc_pad'])
self.emb_tim = nn.Embedding(num_embeddings=self.tim_size, embedding_dim=self.tim_emb_size,
padding_idx=data_feature['tim_pad'])
self.emb_user = nn.Embedding(num_embeddings=self.user_size, embedding_dim=self.user_emb_size)
self.emb_text = EmbeddingMatrix(self.text_size, self.text_emb_size, data_feature['word_vec'])
# lstm layer
self.lstm = nn.LSTM(input_size=self.loc_emb_size + self.tim_emb_size + self.text_emb_size,
hidden_size=self.hidden_size)
# self.lstm = nn.LSTM(input_size=self.loc_emb_size + self.tim_emb_size, hidden_size=self.hidden_size)
# dense layer
self.dense = nn.Linear(in_features=self.hidden_size, out_features=self.loc_size)
# init weight
self.apply(self._init_weight)
def _init_weight(self, module):
if isinstance(module, nn.Embedding):
nn.init.xavier_normal_(module.weight)
elif isinstance(module, nn.Linear):
nn.init.xavier_uniform_(module.weight)
elif isinstance(module, nn.LSTM):
for name, param in module.named_parameters():
if 'weight_ih' in name:
nn.init.xavier_uniform_(param.data)
elif 'weight_hh' in name:
nn.init.orthogonal_(param.data)
elif 'bias' in name:
nn.init.constant_(param.data, 0)
def forward(self, batch):
loc = batch['current_loc']
tim = batch['current_tim']
user = batch['uid']
text = batch['text']
max_len = batch['current_loc'].shape[1]
text_pad = np.zeros((self.text_size))
        # text is still in word-index form here, so one-hot encode it first
one_hot_text = []
for word_index in text:
one_hot_text_a_slice = []
for words in word_index:
if len(words) == 0:
one_hot_text_a_slice.append(np.zeros((self.text_size)))
else:
one_hot_text_a_slice.append(np.sum(self.word_one_hot_matrix[words], axis=0) /
len(words))
# pad
one_hot_text_a_slice += [text_pad] * (max_len - len(one_hot_text_a_slice))
one_hot_text.append(np.array(one_hot_text_a_slice)) # batch_size * seq_len * text_size
one_hot_text = torch.FloatTensor(one_hot_text).to(self.device)
loc_emb = self.emb_loc(loc)
tim_emb = self.emb_tim(tim)
user_emb = self.emb_user(user)
text_emb = self.emb_text(one_hot_text)
# change batch*seq*emb_size to seq*batch*emb_size
x = torch.cat([loc_emb, tim_emb, text_emb], dim=2).permute(1, 0, 2)
# attrs_latent = torch.cat([loc_emb, tim_emb], dim=2).permute(1, 0, 2)
# print(attrs_latent.size())
# pack attrs_latent
seq_len = batch.get_origin_len('current_loc')
pack_x = pack_padded_sequence(x, lengths=seq_len, enforce_sorted=False)
lstm_out, (h_n, c_n) = self.lstm(pack_x) # seq*batch*hidden_size
# print(lstm_out.size())
# unpack
lstm_out, out_len = pad_packed_sequence(lstm_out, batch_first=True)
# user_emb is batch*loc_size, so we need get the final lstm_out
for i in range(lstm_out.shape[0]):
if i == 0:
                out = lstm_out[0][seq_len[i] - 1].reshape(1, -1)  # reshape(1, -1) flattens to a single row
else:
out = torch.cat((out, lstm_out[i][seq_len[i] - 1].reshape(1, -1)), 0)
dense = self.dense(out) # batch * loc_size
out_vec = torch.add(dense, user_emb) # batch * loc_size
pred = nn.LogSoftmax(dim=1)(out_vec) # result
# print(pred.size())
return pred # batch*loc_size
def predict(self, batch):
return self.forward(batch)
def calculate_loss(self, batch):
criterion = nn.NLLLoss()
scores = self.forward(batch) # batch*loc_size
return criterion(scores, batch['target'])
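# Smoke-test sketch (illustrative; every size below is a made-up placeholder --
# real values come from the dataset's data_feature and the experiment config):
# data_feature = {'loc_size': 100, 'tim_size': 48, 'uid_size': 50,
#                 'text_size': 300, 'word_vec': np.random.rand(300, 50),
#                 'loc_pad': 0, 'tim_pad': 0}
# config = {'loc_emb_size': 64, 'tim_emb_size': 16, 'hidden_size': 128,
#           'device': 'cpu'}
# model = SERM(config, data_feature)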
| 45.868613 | 117 | 0.635901 | 6,166 | 0.965852 | 0 | 0 | 0 | 0 | 0 | 0 | 1,498 | 0.234649 |
66fccecdd57f544efb11cfb6e000ed732e25712c | 826 | py | Python | not_used/jobs_to_csv.py | oaklandanalytics/parcel_cutting_board | c134ab3c239090e7acb04d1257186763bf437640 | [
"BSD-3-Clause"
] | null | null | null | not_used/jobs_to_csv.py | oaklandanalytics/parcel_cutting_board | c134ab3c239090e7acb04d1257186763bf437640 | [
"BSD-3-Clause"
] | null | null | null | not_used/jobs_to_csv.py | oaklandanalytics/parcel_cutting_board | c134ab3c239090e7acb04d1257186763bf437640 | [
"BSD-3-Clause"
] | 1 | 2019-12-27T15:28:17.000Z | 2019-12-27T15:28:17.000Z | import geopandas as gpd
# not used anymore - converts esri jobs shapefile to a csv
# see assign_jobs_lat_lng.py
gdf = gpd.GeoDataFrame.from_file("est10_esri_gt1.shp")
gdf = gdf.to_crs(epsg=4326)
fname_map = {
'Duns_Numbe': 'duns_number',
'Business_N': 'business_name',
'Emp_Total': 'total_employment',
'Emp_Here': 'local_employment',
'Year_Start': 'start_year',
'sixcat': 'PBA_category',
'remi70': 'REMI_category',
'steelhead': 'steelhead_category',
'naics2': 'NAICS'
}
out_gdf = gdf[['Duns_Numbe', 'Business_N', 'geometry', 'Emp_Total', 'Emp_Here',
'Year_Start', 'sixcat', 'remi70', 'steelhead', 'naics2']].\
rename(columns=fname_map)
# see the bigger establishments
print(out_gdf.sort_values('total_employment', ascending=False).head())
out_gdf.to_csv("jobs.csv", index=False)
| 29.5 | 79 | 0.687651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 491 | 0.594431 |
66fcf10eec6db42f209edd690e099b600c633815 | 3,968 | py | Python | sentence_generator.py | gabrielilharco/sentence-generator | 5d75dca74363bd9ddcd54f9559b94bb6185667e3 | [
"MIT"
] | 5 | 2018-05-12T13:54:07.000Z | 2019-06-16T09:56:52.000Z | sentence_generator.py | gabrielilharco/sentence-generator | 5d75dca74363bd9ddcd54f9559b94bb6185667e3 | [
"MIT"
] | null | null | null | sentence_generator.py | gabrielilharco/sentence-generator | 5d75dca74363bd9ddcd54f9559b94bb6185667e3 | [
"MIT"
] | null | null | null | import argparse
import random
import operator
import os
def parse_grammar(file_path):
"""
Generate a grammar from a file describing the production rules.
Note that the symbols are inferred from the production rules.
    For more information on the format of the file, please refer to
    the README.md or the sample grammars provided in this repository.
:param file_path: Path to the file containing the description of the grammar.
:returns: the grammar object and the starting symbol.
"""
with open(file_path) as f:
content = f.read().splitlines()
if len(content) <= 1:
raise Exception('Grammar should have at least one production rule and a starting symbol')
# First line should be the starting symbol
start_symbol = content[0]
grammar = {}
for line in content[1:]:
# Each line should be in the format:
# X -> A B ... C
symbols = line.split()
if len(symbols) <= 2 or symbols[1] != '->':
raise Exception('Each production line should be in the format: X -> A B ... C')
if symbols[0] not in grammar:
grammar[symbols[0]] = []
grammar[symbols[0]].append(symbols[2:])
if start_symbol not in grammar:
        raise Exception('Grammar should have at least one production rule with the start_symbol.')
return grammar, start_symbol
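# Example grammar file accepted by parse_grammar (first line is the start
# symbol, then one `X -> A B ... C` production per line):
#   S
#   S -> NP VP
#   NP -> the dog
#   NP -> the cat
#   VP -> sleeps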
def find_terminals(grammar):
"""
For a given grammar, return a set of the terminal symbols.
    :param grammar: The grammar (set of production rules).
:return: set of terminal symbols.
"""
terminals = set()
for key, val in grammar.items():
for word_list in val:
for word in word_list:
if word not in grammar:
terminals.add(word)
return terminals
def analyze_stats(sentences):
"""
For a given set of sentences, print how many times each symbol appears,
    printing statistics sorted by occurrence.
:param sentences: List of sentences.
"""
counts = {}
for sentence in sentences:
for element in sentence.split():
if element not in counts:
counts[element] = 1
else:
counts[element] += 1
# print stats
sorted_counts = sorted(counts.items(), key = operator.itemgetter(1))
for key, val in sorted_counts:
print("%5d %s" % (val, key))
def generate_random_sentence(grammar, start_symbol, print_sentence = True):
"""
For a given grammar (set of production rules) and a starting symbol,
randomly generate a sentence using the production rules.
    :param grammar: The grammar (set of production rules).
    :param start_symbol: The starting symbol.
    :param print_sentence: Whether to print the generated sentence. Defaults to True.
:returns: A randomly generated sentence.
"""
    # Starting symbol must be a part of the grammar
    assert start_symbol in grammar
    # Compute the terminal set locally so the function does not depend on a
    # module-level `terminals` that only exists when run as a script
    terminals = find_terminals(grammar)
    sentence = [start_symbol]
idx = 0
while idx < len(sentence):
if sentence[idx] in terminals:
idx += 1
else:
choices = grammar[sentence[idx]]
choice = random.choice(choices)
sentence = sentence[:idx] + choice + sentence[idx+1:]
sentence = " ".join([word.upper() for word in sentence])
if print_sentence:
print(sentence)
return sentence
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Grammar utils')
parser.add_argument('--grammar', type=str, default='simple_grammar.txt',
help='Path to grammar file.')
parser.add_argument('--print_terminal_symbols', type=bool, default=False,
help='Print the terminal symbols of the grammar.')
parser.add_argument('--num_sentences', type=int, default=0,
help='The number of random sentences to generate.')
args = parser.parse_args()
grammar, start_symbol = parse_grammar(args.grammar)
terminals = find_terminals(grammar)
if args.print_terminal_symbols:
for terminal in sorted(terminals):
print(terminal)
print('-----------------')
print('There are', len(terminals), 'terminals')
sentences = []
for i in range(args.num_sentences):
sentences.append(generate_random_sentence(grammar, start_symbol, False))
for i in range(len(sentences)):
print("%d. %s" % (i, sentences[i])) | 29.834586 | 92 | 0.717742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,732 | 0.436492 |
66fe1b2c78a96ea5174f4741a76ed0ec07d633c4 | 6,639 | py | Python | student_paulo.py | IgaoGuru/Csgo-NeuralNetwork | d161548cb5b61cf4f515e3e0c845daf4cfcaa8ba | [
"MIT"
] | 2 | 2020-10-18T19:20:16.000Z | 2021-11-15T14:11:39.000Z | student_paulo.py | IgaoGuru/Csgo-NeuralNetwork | d161548cb5b61cf4f515e3e0c845daf4cfcaa8ba | [
"MIT"
] | 3 | 2021-06-08T21:51:43.000Z | 2022-01-13T02:54:14.000Z | student_paulo.py | IgaoGuru/Csgo-NeuralNetwork | d161548cb5b61cf4f515e3e0c845daf4cfcaa8ba | [
"MIT"
] | null | null | null | #TODO: use only one (RGB) channel
import numpy as np
import pandas as pd
import os
from torch.utils import data
from torch.utils.data.dataloader import DataLoader as DataLoader
import torch
from torchvision import transforms
from natsort import natsorted, ns
import cv2
from PIL import Image
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
dataset_path = "C:\\Users\\User\\Documents\\GitHub\\Csgo-NeuralNetwork\\output\\"
#train_split and test_split: each between 0.1 and 0.9, and they must add up to 1
train_split = 0.7
test_split = 0.3
num_epochs = 10
batch_size = 100
if torch.cuda.is_available():
device = torch.device("cuda:0")
print("Running on: %s"%(torch.cuda.get_device_name(device)))
else:
device = torch.device("cpu")
print('running on: CPU')
class CsgoPersonNoPersonDataset(data.Dataset):
"""pretty description."""
length = -1
def __init__(self, root_dir, transform=None):
"""
Args:
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
                on a sample.
"""
self.root_dir = root_dir
self.transform = transform
self.length = 0
# dictionary that marks what the last frame of each folder is
# ie. number of examples in specific folder
self.folder_system = {2426: 'CSGOraw2'}
for folder_index in self.folder_system:
self.length += folder_index
# returns name of folder that contains specific frame
def find_folder(self, idx):
for num_frames in self.folder_system:
if num_frames >= idx:
return str(self.folder_system[num_frames])
def __len__(self):
return self.length
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# sets path and gets txt/jpg files
img_path = self.find_folder(idx)
img_name = "%sframe#%s" % (img_path, idx)
img_path = os.path.join(self.root_dir,
img_path, img_name)
img_path_ext = img_path + '.jpg'
        img = Image.open(img_path_ext)
# img = np.array(img)
label_path = str(img_path) + '.txt'
label = 0
# loads label from disk, converts csv to tensor
label = torch.as_tensor(os.stat(label_path).st_size != 0, dtype=torch.float).reshape((1,))
sample = {'image': img, 'label': label}
# apply transforms
        # TODO: messy hack here
if self.transform:
img = self.transform(sample['image'])
# img = img.reshape(172800)
sample['image'] = img
return sample
#defining NN layers
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool1 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.pool2 = nn.MaxPool2d(2, 2)
self.fc1 = nn.Linear(16 * 61 * 33, 120)
self.fc2 = nn.Linear(120, 60)
self.fc3 = nn.Linear(60, 1)
self.fc4 = nn.Linear(30, 15)
self.fc5 = nn.Linear(15, 7)
self.fc6 = nn.Linear(7, 1)
def forward(self, x):
x = self.pool1(F.relu(self.conv1(x)))
x = self.pool2(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 61 * 33)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
#x = F.relu(self.fc4(x))
#x = F.relu(self.fc5(x))
#x = F.relu(self.fc6(x))
return x
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1 or classname.find('Linear') != -1:
torch.nn.init.xavier_uniform_(m.weight.data)
#runs NN in training mode
def train_run(train_loader, criterion, optimizer, device):
losses = []
print(len(train_loader.dataset))
for epoch in range(num_epochs): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(train_loader):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data['image'], data['label']
#if labels[0].item() == -1:
# continue
#sends batch to gpu
inputs, labels = inputs.to(device), labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
#print(f"{epoch}, {i}")
outputs = net(inputs)
#print(f"Labels: {labels.shape}, {labels.dtype}")
#print(f"Outputs: {outputs.shape}, {outputs.dtype}")
loss = criterion(outputs, labels)
losses.append(loss.item())
running_loss += loss.item()
if (i + 1) % 10 == 0: # print every 10 mini-batches
print(f"Labels: {torch.transpose(labels, 0, 1)}")
print(f"Outputs: {torch.transpose(outputs, 0, 1)}")
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 10))
running_loss = 0.0
print("-------------------------------------")
loss.backward()
optimizer.step()
print('Finished Training')
return losses
net = Net().to(device)
net.apply(weights_init)
transform = transforms.Compose([
transforms.Resize([256, 144]),
# transforms.Resize([57600, 1]),
transforms.ToTensor(),
])
dataset = CsgoPersonNoPersonDataset(dataset_path, transform)
dataset_len = len(dataset)
train_split = int(np.floor(dataset_len * train_split))
test_split = int(np.floor(dataset_len * test_split))
while train_split + test_split != dataset_len:
train_split += 1
train_set, test_set = torch.utils.data.random_split(\
dataset, [train_split, test_split])
train_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=False, drop_last=True)
test_loader = DataLoader(dataset=test_set, batch_size=batch_size, shuffle=True, drop_last=True)
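# Evaluation sketch (illustrative): test_loader is built above but never used;
# one plausible way to score the trained net on it, given the
# BCEWithLogitsLoss training below, is a 0.5 sigmoid threshold:
# net.eval()
# correct = total = 0
# with torch.no_grad():
#     for batch in test_loader:
#         imgs, lbls = batch['image'].to(device), batch['label'].to(device)
#         preds = (torch.sigmoid(net(imgs)) > 0.5).float()
#         correct += (preds == lbls).sum().item()
#         total += lbls.numel()
# print('test accuracy: %.3f' % (correct / total))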
def my_binary_loss(output, target):
    # NOTE: unused below; the original body `(output and target).mean` is not
    # valid tensor code. The mean of the elementwise product is one runnable
    # guess at the intended overlap measure.
    return (output * target).mean()
# criterion = nn.MSELoss()  # superseded by the binary cross-entropy loss below
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(net.parameters())
# for i in range(500):
# image, label = dataset[i]['image'], dataset[i]['label']
# print(label)
losses = train_run(train_loader, criterion, optimizer, device)
print("------------------------------------------------------------")
print("Losses")
for loss in losses:
print(loss)
print("------------------------------------------------------------") | 32.072464 | 98 | 0.595722 | 2,769 | 0.417081 | 0 | 0 | 0 | 0 | 0 | 0 | 1,760 | 0.2651 |
66ff808193f491101625deb10d4b03096229d8fa | 204 | py | Python | tests/deeply/test_exception.py | achillesrasquinha/deeply | fd1ce32da130591fc92df8df89e07f1497b2b902 | [
"MIT"
] | 2 | 2021-10-05T16:37:30.000Z | 2021-10-11T21:31:43.000Z | tests/deeply/test_exception.py | achillesrasquinha/deeply | fd1ce32da130591fc92df8df89e07f1497b2b902 | [
"MIT"
] | null | null | null | tests/deeply/test_exception.py | achillesrasquinha/deeply | fd1ce32da130591fc92df8df89e07f1497b2b902 | [
"MIT"
] | 1 | 2021-07-16T02:23:37.000Z | 2021-07-16T02:23:37.000Z | # imports - module imports
from deeply.exception import (
DeeplyError
)
# imports - test imports
import pytest
def test_deeply_error():
with pytest.raises(DeeplyError):
raise DeeplyError | 18.545455 | 36 | 0.730392 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.245098 |
66ffbb10c3681dba5f743ff2d041228d5ccdc263 | 1,153 | py | Python | test/common.py | philippe-goetz/python-jwt | 8dff3e43023f344642af55ad82f3cfb28b00f8d5 | [
"MIT"
] | null | null | null | test/common.py | philippe-goetz/python-jwt | 8dff3e43023f344642af55ad82f3cfb28b00f8d5 | [
"MIT"
] | null | null | null | test/common.py | philippe-goetz/python-jwt | 8dff3e43023f344642af55ad82f3cfb28b00f8d5 | [
"MIT"
] | null | null | null | """ Common setup and patching for tests """
#pylint: disable=wrong-import-order
from datetime import datetime as orig_datetime, timedelta
from mock import patch
import threading
#pylint: disable=W0401,W0614
from test.fixtures import *
_thread_state = threading.local()
def _new_utcnow():
""" Return last set datetime, or set it to current datetime if not set """
if not hasattr(_thread_state, 'utcnow'):
_thread_state.utcnow = orig_datetime.utcnow()
return _thread_state.utcnow
def _new_now():
""" Work out current local datetime """
return _new_utcnow() + (orig_datetime.now() - orig_datetime.utcnow())
def clock_load(utcnow):
""" Set datetime """
_thread_state.utcnow = utcnow
return _thread_state.utcnow
def clock_tick(delta=timedelta()):
""" Tick clock """
return clock_load(_new_utcnow() + delta)
def clock_reset():
""" Forget set datetime """
if hasattr(_thread_state, 'utcnow'):
delattr(_thread_state, 'utcnow')
_config = {'utcnow.side_effect': _new_utcnow,
'now.side_effect': _new_now}
_patcher = patch('datetime.datetime', **_config)
_mocker = _patcher.start()
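# Usage sketch in a test (illustrative):
# clock_load(orig_datetime(2020, 1, 1))  # pin the mocked utcnow
# clock_tick(timedelta(hours=2))         # advance the mocked clock
# clock_reset()                          # fall back to real time on next call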
| 28.825 | 78 | 0.70425 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 364 | 0.315698 |
0f0041c3812a531ebd6ee9156ded1a3d042762da | 11,015 | py | Python | modules/bot_oc.py | opencitations/telegrambot | 7a74893bb0a7a85d5db04045832d7b2c676bff48 | [
"MIT"
] | 1 | 2021-07-18T02:48:40.000Z | 2021-07-18T02:48:40.000Z | modules/bot_oc.py | opencitations/telegrambot | 7a74893bb0a7a85d5db04045832d7b2c676bff48 | [
"MIT"
] | null | null | null | modules/bot_oc.py | opencitations/telegrambot | 7a74893bb0a7a85d5db04045832d7b2c676bff48 | [
"MIT"
] | null | null | null |
my_commands = {
"/contact" : {"notes":" Retrieve all the accounts (email, Twitter, GitHub, etc.) to contact the OpenCitations folks", 'parse_mode':'Markdown'},
"/ask" : {"notes":"Params: <DOI>. Retrieve information about the entity identified by the input DOI (source: COCI)", 'parse_mode':'Markdown'},
"/citations" : {"notes":"Params: <DOI>. Retrieve all the entities that cite the one identified by the input DOI (source: COCI)", 'parse_mode':'Markdown'},
"/references" : {"notes":"Params: <DOI>. Retrieve all the entities that are cited by the one identified by the input DOI (source: COCI)", 'parse_mode':'Markdown'},
}
def get_my_commands():
return my_commands
def exec_my_commands(command,param):
if command == "/contact":
return how_to_contact_you(param)
if command == "/ask":
return ask_coci(param)
if command == "/citations":
return who_cite_me_in_coci(param)
if command == "/references":
return what_are_my_ref_in_coci(param)
#ADD all the methods you want to use
####################################
import csv
import urllib.request
import json
import re
contact = {
'Website':'http://opencitations.net/',
'Email':'contact@opencitations.net',
'Twitter':'https://twitter.com/opencitations',
'Github': 'https://github.com/essepuntato/opencitations',
'Wordpress': 'https://opencitations.wordpress.com/'
}
def how_to_contact_you(a_text):
str_to_return = ""
for c in contact:
str_to_return = str_to_return + "\n*"+c+"*: "+contact[c]
return str_to_return
#'http://opencitations.net/index/coci/api/v1/metadata/10.1108/jd-12-2013-0166'
def ask_coci(a_text):
str_to_return = ""
try:
a_text = a_text[0]
except:
return "You must text me a *DOI* !"
find_list = re.findall(r"(10.\d{4,9}\/\S*)",a_text)
if len(find_list) == 0:
return "Please, text me a correct *DOI format*"
res = find_list[0]
api_call = 'http://opencitations.net/index/coci/api/v1/metadata/'
input = res
api_call = api_call+input+'?json=array(";%20",author).dict(",%20",author,fn,gn,orcid)'
#call API
try:
contents = urllib.request.urlopen(api_call).read().decode('utf-8')
json_output = json.loads(contents)
if len(json_output) == 0:
return "No data found for: *"+ input+"*"
else:
rc_data = json_output[0]
#Title
str_title = "\n\n*Title:* "+rc_data['title']
if str_title != "\n\n*Title:* ":
str_to_return = str_to_return + str_title
#Authors
str_authors = "\n\n*Author(s):* "
for an_author in rc_data['author']:
an_author_str = ""
if 'fn' in an_author:
an_author_str = an_author_str + str(an_author['fn'])
if 'gn' in an_author:
an_author_str = an_author_str+", "+str(an_author['gn'])
if 'orcid' in an_author:
an_author_str = an_author_str + " "+"https://orcid.org/"+str(an_author['orcid'])
if an_author_str != "":
str_authors = str_authors + '\n' + an_author_str
if str_authors != "\n\n*Author(s):* ":
str_to_return = str_to_return + str_authors
#list_authors = rc_data['author'].split('; ')
#for an_author in list_authors:
# str_authors = str_authors + "\n" + str(an_author)
#if str_authors != "\n\nAuthor(s): ":
# str_to_return = str_to_return + str_authors
#Publication year
str_year = "\n\n*Publication year:* " + rc_data['year']
if str_year != "\n\n*Publication year:* ":
str_to_return = str_to_return + str_year
#DOI
str_to_return = str_to_return + "\n\n*DOI:* "+'https://www.doi.org/'+input
#OA URL
str_cit = "\n\n*OA URL:* "+rc_data['oa_link']
if str_cit != "\n\n*OA URL:* ":
str_to_return = str_to_return + str_cit
#Citations
str_cit = "\n\n*Cited by:* "+rc_data['citation_count']
if str_cit != "\n\n*Cited by:* ":
str_to_return = str_to_return + str_cit
except:
return "Sorry, the connection went wrong!"
return str_to_return
def who_cite_me_in_coci(a_text):
str_to_return = ""
try:
a_text = a_text[0]
except:
return "You must text me a *DOI* !"
find_list = re.findall(r"(10.\d{4,9}\/\S*)",a_text)
if len(find_list) == 0:
return "Please, text me a correct *DOI format*"
res = find_list[0]
api_call = 'http://opencitations.net/index/coci/api/v1/citations/'
input = res
api_call = api_call+input
#call API
try:
contents = urllib.request.urlopen(api_call).read().decode('utf-8')
json_output = json.loads(contents)
if len(json_output) == 0:
return "No citations found for: *"+ input+"*"
else:
str_to_return = str_to_return + "\n- *Cited by:* "+str(len(json_output))+ "\n\n"
for c_elem in json_output:
#OCI
#str_to_return = str_to_return + "\n- *OCI:* "+"["+str(c_elem['oci'])+"]"+"(http://opencitations.net/index/coci/browser/ci/"+str(c_elem['oci'])+")"
#DOI
#str_to_return = str_to_return + "\n- *Citing DOI:* "+'https://www.doi.org/'+c_elem['citing']
#WITH tinyurl for OCI
str_to_return = str_to_return + '\n['+c_elem['citing']+'](https://www.doi.org/'+c_elem['citing']+')'
#lucinda_link = 'http://opencitations.net/index/coci/browser/ci/'+str(c_elem['oci'])
#tiny_url = urllib.request.urlopen('http://tinyurl.com/api-create.php?url='+lucinda_link).read().decode('utf-8')
#str_to_return = str_to_return + "\n["+c_elem['citing']+"]("+str(tiny_url)+")"
#Citation Creation date
#creation_str = ""
#list_date = c_elem['creation'].split("-")
#if len(list_date) > 0:
# creation_str = str(list_date[0])
# if len(list_date) > 1:
# creation_str = get_month_name(str(list_date[1])) +" "+ creation_str
# if len(list_date) > 2:
# creation_str = str(int(list_date[2])) + " "+ creation_str
#if creation_str != "":
# str_to_return = str_to_return + "\n- *Citation creation date:* "+creation_str
#Timespan
#tspan_str = ""
#result_y = re.search(r"(\d{1,})Y",c_elem['timespan'])
#if result_y:
# tspan_str += str(result_y.groups(0)[0]) + " Years"
# result_y = re.search(r"(\d{1,})M",c_elem['timespan'])
# if result_y:
# tspan_str += ", "+str(result_y.groups(0)[0]) + " Months"
# result_y = re.search(r"(\d{1,})D",c_elem['timespan'])
# if result_y:
# tspan_str += ", "+str(result_y.groups(0)[0]) + " Days"
#if tspan_str != "":
# str_to_return = str_to_return + "\n- *Timespan:* "+tspan_str
##New item
#str_to_return = str_to_return + "\n\n"
#str_to_return = str_to_return + "\n"
except:
return "Sorry, the connection went wrong!"
return str_to_return
def what_are_my_ref_in_coci(a_text):
str_to_return = ""
try:
a_text = a_text[0]
except:
return "You must text me a *DOI* !"
find_list = re.findall(r"(10.\d{4,9}\/\S*)",a_text)
if len(find_list) == 0:
return "Please, text me a correct *DOI format*"
res = find_list[0]
api_call = 'http://opencitations.net/index/coci/api/v1/references/'
input = res
api_call = api_call+input
#call API
try:
contents = urllib.request.urlopen(api_call).read().decode('utf-8')
json_output = json.loads(contents)
if len(json_output) == 0:
return "No references found for: *"+ input + "*"
else:
str_to_return = str_to_return + "\n- *References:* "+str(len(json_output))+ "\n\n"
for c_elem in json_output:
#OCI
#str_to_return = str_to_return + "\n- *OCI:* "+"["+str(c_elem['oci'])+"]"+"(http://opencitations.net/index/coci/browser/ci/"+str(c_elem['oci'])+")"
#DOI
#str_to_return = str_to_return + "\n- *Cited DOI:* "+'https://www.doi.org/'+c_elem['cited']
str_to_return = str_to_return + '\n['+c_elem['cited']+'](https://www.doi.org/'+c_elem['cited']+')'
#WITH tinyurl for OCI
#lucinda_link = 'http://opencitations.net/index/coci/browser/ci/'+str(c_elem['oci'])
#tiny_url = urllib.request.urlopen('http://tinyurl.com/api-create.php?url='+lucinda_link).read().decode('utf-8')
#str_to_return = str_to_return + "\n["+c_elem['cited']+"]("+str(tiny_url)+")"
#Citation Creation date
#creation_str = ""
#list_date = c_elem['creation'].split("-")
#if len(list_date) > 0:
# creation_str = str(list_date[0])
# if len(list_date) > 1:
# creation_str = get_month_name(str(list_date[1])) +" "+ creation_str
# if len(list_date) > 2:
# creation_str = str(int(list_date[2])) + " "+ creation_str
#if creation_str != "":
# str_to_return = str_to_return + "\n- *Citation creation date:* "+creation_str
#Timespan
#tspan_str = ""
#result_y = re.search(r"(\d{1,})Y",c_elem['timespan'])
#if result_y:
# tspan_str += str(result_y.groups(0)[0]) + " Years"
# result_y = re.search(r"(\d{1,})M",c_elem['timespan'])
# if result_y:
# tspan_str += ", "+str(result_y.groups(0)[0]) + " Months"
# result_y = re.search(r"(\d{1,})D",c_elem['timespan'])
# if result_y:
# tspan_str += ", "+str(result_y.groups(0)[0]) + " Days"
#if tspan_str != "":
# str_to_return = str_to_return + "\n- *Timespan:* "+tspan_str
##New item
#str_to_return = str_to_return + "\n\n"
except:
return "Sorry, the connection went wrong!"
return str_to_return
def get_month_name(month_num):
monthDict={'01':'Jan', '02':'Feb', '03':'Mar', '04':'Apr', '05':'May', '06':'Jun', '07':'Jul', '08':'Aug', '09':'Sep', '10':'Oct', '11':'Nov', '12':'Dec'}
return monthDict[month_num]
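# Usage sketch (illustrative; the bot framework normally supplies the parsed
# command and parameter list -- the DOI is the example from the URL comment
# earlier in this file):
# print(exec_my_commands("/ask", ["10.1108/jd-12-2013-0166"]))
# print(exec_my_commands("/citations", ["10.1108/jd-12-2013-0166"]))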
| 39.90942 | 167 | 0.535361 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,733 | 0.520472 |
0f0047c195a44a1e7096ffa7a8721ac9af656c82 | 225 | py | Python | WebFrameDocs/src/demo/fileStorage/script/genScaleMap.py | Bean-jun/LearnGuide | 30a8567b222d18b15d3e9027a435b5bfe640a046 | [
"MIT"
] | 1 | 2022-02-23T13:42:01.000Z | 2022-02-23T13:42:01.000Z | WebFrameDocs/src/demo/fileStorage/script/genScaleMap.py | Bean-jun/LearnGuide | 30a8567b222d18b15d3e9027a435b5bfe640a046 | [
"MIT"
] | null | null | null | WebFrameDocs/src/demo/fileStorage/script/genScaleMap.py | Bean-jun/LearnGuide | 30a8567b222d18b15d3e9027a435b5bfe640a046 | [
"MIT"
] | null | null | null | """
A-Z: 65-90
a-z: 97-122
"""
dic = {}
n = 0
for i in range(10):
dic[n] = str(i)
n += 1
for i in range(65, 91):
dic[n] = chr(i)
n += 1
for i in range(97, 123):
dic[n] = chr(i)
n += 1
print(dic)
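# Equivalent construction (sketch; uses the standard `string` module and yields
# the same 62-entry map 0-9, A-Z, a-z):
# import string
# dic = dict(enumerate(string.digits + string.ascii_uppercase + string.ascii_lowercase))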
| 9.782609 | 24 | 0.444444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.133333 |
0f00a88c0e78055394352e113ece75275cfd78f2 | 6,894 | py | Python | src/dagian/tests/lifetime_feature_generator.py | ianlini/dagian | 2ab5b574ba7bbccb204bd285b3d8e1a6200972ce | [
"MIT"
] | 11 | 2018-06-20T16:30:01.000Z | 2021-08-16T14:14:40.000Z | src/dagian/tests/lifetime_feature_generator.py | ianlini/dagian | 2ab5b574ba7bbccb204bd285b3d8e1a6200972ce | [
"MIT"
] | 29 | 2018-06-09T10:32:57.000Z | 2019-02-24T13:06:53.000Z | src/dagian/tests/lifetime_feature_generator.py | ianlini/dagian | 2ab5b574ba7bbccb204bd285b3d8e1a6200972ce | [
"MIT"
] | 3 | 2018-06-23T05:12:33.000Z | 2021-10-11T02:51:49.000Z | from __future__ import print_function, division, absolute_import, unicode_literals
from io import StringIO
import dagian
from dagian import Argument as A
from dagian.decorators import (
require,
will_generate,
)
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from sklearn.model_selection import train_test_split
class LifetimeFeatureGenerator(dagian.FeatureGenerator):
@will_generate('memory', 'data_df')
def gen_data_df(self, context):
csv = StringIO("""\
id,lifetime,tested_age,weight,height,gender,income
0, 68, 50, 60.1, 170.5, f, 22000
1, 59, 41, 90.4, 168.9, m, 19000
2, 52, 39, 46.2, 173.6, m, 70000
3, 68, 25, 93.9, 180.0, m, 1000000
4, 99, 68, 65.7, 157.6, f, 46000
5, 90, 81, 56.3, 170.2, f, 17000
""")
return {'data_df': pd.read_csv(csv, index_col='id')}
@require('data_df')
@will_generate('h5py', 'lifetime')
def gen_lifetime(self, context):
data_df = context['upstream_data']['data_df']
return {'lifetime': data_df['lifetime']}
@require('data_df')
@will_generate('h5py', ['weight', 'height', 'income'])
def gen_raw_data_features(self, context):
data_df = context['upstream_data']['data_df']
return data_df[['weight', 'height', 'income']]
@require('data_df')
@will_generate('memory', 'mem_raw_data')
def gen_mem_raw_data(self, context):
data_df = context['upstream_data']['data_df']
return {'mem_raw_data': data_df[['weight', 'height']].values}
@require('data_df')
@will_generate('h5py', 'man_raw_data', create_dataset_context='create_dataset_functions')
def gen_man_raw_data(self, context):
data_df = context['upstream_data']['data_df']
dset = context['create_dataset_functions']['man_raw_data'](shape=(data_df.shape[0], 2))
dset[...] = data_df[['weight', 'height']].values
@require('data_df')
@will_generate(
'h5py', 'man_sparse_raw_data', create_dataset_context='create_dataset_functions')
def gen_man_sparse_raw_data(self, context):
data_df = context['upstream_data']['data_df']
context['create_dataset_functions']['man_sparse_raw_data'](
data=csr_matrix(data_df[['weight', 'height']].values))
@require('data_df')
@will_generate('pandas_hdf', ['pd_weight', 'pd_height'])
def gen_raw_data_table(self, context):
data_df = context['upstream_data']['data_df']
result_df = data_df.loc[:, ['weight', 'height']]
result_df.rename(columns={'weight': 'pd_weight', 'height': 'pd_height'},
inplace=True)
return result_df
@require('data_df')
@will_generate('pandas_hdf', 'pd_raw_data', data_columns=True)
def gen_raw_data_df(self, context):
data_df = context['upstream_data']['data_df']
return {'pd_raw_data': data_df[['weight', 'height']]}
@require('pd_raw_data')
@will_generate('pandas_hdf', 'pd_raw_data_append', append_context='append_functions')
def gen_raw_data_append_df(self, context):
df = context['upstream_data']['pd_raw_data'][()]
context['append_functions']['pd_raw_data_append'](df.iloc[:3])
context['append_functions']['pd_raw_data_append'](df.iloc[3:])
@require('data_df')
@will_generate('h5py', 'BMI')
def gen_bmi(self, context):
data_df = context['upstream_data']['data_df']
bmi = data_df['weight'] / ((data_df['height'] / 100) ** 2)
return {'BMI': bmi}
@require('{dividend}')
@require('{divisor}')
@will_generate('h5py', 'division')
def gen_division(self, context, dividend, divisor='height'):
upstream_data = context['upstream_data']
division_result = upstream_data['{dividend}'][()] / upstream_data['{divisor}'][()]
return {'division': division_result}
@require('division', 'partial_division', dividend=A('dividend'), divisor=A('divisor1'))
@require('{divisor2}', 'divisor2')
@will_generate('h5py', 'division_2_divisor')
def gen_division_2_divisor(self, context, dividend, divisor1, divisor2):
upstream_data = context['upstream_data']
division_result = upstream_data['partial_division'][()] / upstream_data['divisor2'][()]
return {'division_2_divisor': division_result}
@require('division', dividend=A('dividend', lambda x: 'pd_' + x), divisor=A('divisor1'))
@require(A('divisor2'))
@will_generate('h5py', 'division_pd_2_divisor')
def gen_division_pd_2_divisor(self, context, dividend, divisor1, divisor2):
upstream_data = context['upstream_data']
division_result = upstream_data['division'][()] / upstream_data['divisor2'][()]
return {'division_pd_2_divisor': division_result}
@require(A('dividend'))
@require(A('divisor'))
@will_generate('h5py', 'recursive_division')
def gen_recursive_division(self, context, dividend, divisor):
upstream_data = context['upstream_data']
division_result = upstream_data['dividend'][()] / upstream_data['divisor'][()]
return {'recursive_division': division_result}
@require(A('sequence'))
@will_generate('h5py', 'sequential_division')
def gen_sequential_division(self, context, sequence):
assert sequence
upstream_data = context['upstream_data']
division_result = upstream_data['sequence'][0][()]
for data in upstream_data['sequence'][1:]:
division_result /= data[()]
return {'sequential_division': division_result}
@require('data_df')
@will_generate('pickle', 'train_test_split')
def gen_train_test_split(self, context):
data_df = context['upstream_data']['data_df']
train_id, test_id = train_test_split(
data_df.index, test_size=0.5, random_state=0)
return {'train_test_split': (train_id, test_id)}
@require('data_df')
@require('train_test_split')
@will_generate('h5py', 'is_in_test_set')
def gen_is_in_test_set(self, context):
upstream_data = context['upstream_data']
data_df = upstream_data['data_df']
_, test_id = upstream_data['train_test_split']
is_in_test_set = data_df.index.isin(test_id)
sparse_is_in_test_set = csr_matrix(is_in_test_set[:, np.newaxis])
return {'is_in_test_set': sparse_is_in_test_set}
@require('data_df')
@will_generate('h5py', 'nan', allow_nan=True)
def gen_nan(self, context):
nan = np.full(
context['upstream_data']['data_df'].shape[0],
fill_value=np.nan, dtype=np.float32)
return {'nan': nan}
@require('pd_raw_data')
@will_generate('h5py', 'light_weight')
def gen_light_weight(self, context):
raw_data = context['upstream_data']['pd_raw_data']
light_weight = raw_data.select(columns=['weight'], where="weight < 60")
return {'light_weight': light_weight.values}
| 41.035714 | 95 | 0.66333 | 6,542 | 0.948941 | 0 | 0 | 6,372 | 0.924282 | 0 | 0 | 2,161 | 0.313461 |
0f0216b386fed492d52639a4b739d1af569a3fae | 2,198 | py | Python | spyre/spyre/spyrelets/peaktrack_spyrelet.py | zhong-lab/code | 068ca3df58c3804fcc858f26ac5b26106e1d0cb0 | [
"BSD-2-Clause"
] | 1 | 2022-03-27T07:47:19.000Z | 2022-03-27T07:47:19.000Z | peaktrack_spyrelet.py | zhong-lab/code | 068ca3df58c3804fcc858f26ac5b26106e1d0cb0 | [
"BSD-2-Clause"
] | null | null | null | peaktrack_spyrelet.py | zhong-lab/code | 068ca3df58c3804fcc858f26ac5b26106e1d0cb0 | [
"BSD-2-Clause"
] | 4 | 2019-11-08T22:39:04.000Z | 2021-11-05T02:39:37.000Z | import numpy as np
import pyqtgraph as pg
import matplotlib.pyplot as plt
import csv
import sys
from PyQt5.Qsci import QsciScintilla, QsciLexerPython
from PyQt5.QtWidgets import QPushButton, QTextEdit, QVBoxLayout
import time
import random
import os
from spyre import Spyrelet, Task, Element
from spyre.widgets.task import TaskWidget
from spyre.plotting import HeatmapPlotWidget,LinePlotWidget
from spyre.widgets.rangespace import Rangespace
from spyre.widgets.param_widget import ParamWidget
from spyre.widgets.repository_widget import RepositoryWidget
from lantz.drivers.keysight import Arbseq_Class
from lantz.drivers.keysight.seqbuild import SeqBuild
from lantz import Q_
from lantz.drivers.ando.aq6317b import AQ6317B
from lantz.drivers.artisan.ldt5910b import LDT5910B
class filter(Spyrelet):
	# required instruments: optical spectrum analyzer and temperature controller
requires = {
'osa':AQ6317B,
'tc':LDT5910B
}
@Task()
def track(self):
# unpack the parameters
params=self.parameters.widget.get()
filename=params['Filename']
tracktime=params['Track time'].magnitude #s
sleep=params['Sleep Interval'].magnitude #s
# read peak position for a while
start=time.time()
t=start
with open(filename+'.csv','w',newline='') as csvfile:
writer=csv.writer(
csvfile,
delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL)
while (t-start)<tracktime:
pk,pwr=self.osa.read_marker
temp=self.tc.display('T')
t=time.time()
print('t: '+str(t-start)+', '+str(pk)+', '+str(temp))
writer.writerow([t-start,pk,temp])
time.sleep(sleep)
return
@Element(name='Params')
def parameters(self):
params = [
('Sleep Interval',{'type':int,'default':10,'units':'s'}),
('Track time', {'type': int, 'default': 1200,'units':'s'}),
('Filename', {'type': str, 'default':'Q:\\06.02.21_ff\\track1'})
]
w = ParamWidget(params)
return w
| 32.323529 | 73 | 0.61465 | 1,392 | 0.633303 | 0 | 0 | 1,242 | 0.565059 | 0 | 0 | 313 | 0.142402 |
0f02200fde2e322e2f135a475e57f32165299b7f | 4,592 | py | Python | cdpybio/general.py | cdeboever3/cdpybio | 893010dc42e4c324af6cdd1c93ca415466fab0cf | [
"MIT"
] | 2 | 2016-09-09T11:54:03.000Z | 2021-12-09T16:12:23.000Z | cdpybio/general.py | cdeboever3/cdpybio | 893010dc42e4c324af6cdd1c93ca415466fab0cf | [
"MIT"
] | null | null | null | cdpybio/general.py | cdeboever3/cdpybio | 893010dc42e4c324af6cdd1c93ca415466fab0cf | [
"MIT"
] | 2 | 2022-01-28T20:05:05.000Z | 2022-02-01T18:04:43.000Z | import re
import numpy as np
import pandas as pd
import scipy.stats as stats
R_REGEX = re.compile('(.*):(.*)-(.*)')
R_REGEX_STRAND = re.compile('(.*):(.*)-(.*):(.*)')
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
# https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
for i in range(0, len(l), n):
yield l[i:i + n]
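# e.g. list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]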
def estimate_allele_frequency(ac, an, a=1, b=100):
"""
    Estimate allele frequencies from counts using a beta(a, b) prior.
Parameters:
-----------
ac : array-like
Array-like object with the observed allele counts for each variant. If
ac is a pandas Series, the output dataframe will have the same index as
ac.
an : array-like
Array-like object with the number of haplotypes that were genotyped.
a : float
Parameter for prior distribution beta(a, b).
b : float
Parameter for prior distribution beta(a, b).
Returns
-------
out : pandas.DataFrame
Pandas dataframe with allele frequency estimate
"""
# Credible interval is 95% highest posterior density
td = dict(zip(['ci_lower', 'ci_upper'],
stats.beta(a + ac, b + an - ac).interval(0.95)))
td['af'] = (a + ac) / (a + b + an)
td['af_mle'] = np.array(ac).astype(float) / np.array(an)
out = pd.DataFrame(td)[['af_mle', 'af', 'ci_lower', 'ci_upper']]
if type(ac) == pd.Series:
out.index = ac.index
    return out
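# Example (illustrative values): per-variant frequency estimates with a
# 95% credible interval.
#   af = estimate_allele_frequency(pd.Series([5, 40]), pd.Series([100, 100]))
#   af.columns -> ['af_mle', 'af', 'ci_lower', 'ci_upper']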
def transform_standard_normal(df):
"""Transform a series or the rows of a dataframe to the values of a standard
normal based on rank."""
if type(df) == pd.core.frame.DataFrame:
gc_ranks = df.rank(axis=1)
gc_ranks = gc_ranks / (gc_ranks.shape[1] + 1)
std_norm = stats.norm.ppf(gc_ranks)
std_norm = pd.DataFrame(std_norm, index=gc_ranks.index,
columns=gc_ranks.columns)
elif type(df) == pd.core.series.Series:
gc_ranks = df.rank()
gc_ranks = gc_ranks / (gc_ranks.shape[0] + 1)
std_norm = stats.norm.ppf(gc_ranks)
std_norm = pd.Series(std_norm, index=df.index)
return std_norm
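# e.g. transform_standard_normal(pd.Series([3.0, 1.0, 2.0])) maps the ranks
# 3, 1, 2 to the standard-normal quantiles at 0.75, 0.25, and 0.5.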
def read_gzipped_text_url(url):
"""Read a gzipped text file from a URL and return
contents as a string."""
import urllib2
import zlib
from StringIO import StringIO
opener = urllib2.build_opener()
request = urllib2.Request(url)
request.add_header('Accept-encoding', 'gzip')
respond = opener.open(request)
compressedData = respond.read()
respond.close()
opener.close()
compressedDataBuf = StringIO(compressedData)
d = zlib.decompressobj(16+zlib.MAX_WBITS)
buffer = compressedDataBuf.read(1024)
#saveFile = open('/tmp/test.txt', "wb")
s = []
while buffer:
s.append(d.decompress(buffer))
buffer = compressedDataBuf.read(1024)
s = ''.join(s)
return s
def parse_region(region):
"""
Parse region of type chr1:10-20 or chr1:10-20:+
Parameters:
-----------
region : str
Region of type chr1:10-20 or chr1:10-20:+.
Returns
-------
groups : tuple
Tuple of groups from regex e.g. (chr1, 10, 20) or (chr1, 10, 20, +).
"""
m = R_REGEX_STRAND.search(region)
if not m:
m = R_REGEX.search(region)
if m:
groups = m.groups()
return groups
else:
return None
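# e.g. parse_region('chr1:10-20:+') -> ('chr1', '10', '20', '+')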
def _sample_names(files, kwargs):
"""
Make sample (or other) names.
Parameters:
-----------
files : list of string
Typically a list of file paths although could be any list of strings
that you want to make names for. If neither names nor define_sample_name
are provided, then files is returned as is.
kwargs : dict
kwargs from another function. Can include the following keys with
appropriate arguments.
names : list of strings
Names to use. Overrides define_sample_name if provided.
define_sample_name : function that takes string as input
Function mapping string to name. For instance, you may have a sample
name in a file path and use a regex to extract it.
"""
if 'define_sample_name' not in kwargs.keys():
define_sample_name = lambda x: x
else:
define_sample_name = kwargs['define_sample_name']
if 'names' in kwargs.keys():
names = kwargs['names']
else:
names = [define_sample_name(f) for f in files]
assert len(names) == len(files)
return names
| 29.248408 | 97 | 0.617814 | 0 | 0 | 224 | 0.04878 | 0 | 0 | 0 | 0 | 2,225 | 0.484538 |
0f026f96d7b8eeca00baed4c3da006c965de1fd0 | 4,351 | py | Python | docs/doxygen/log.py | tkrupa-intel/openvino | 8c0ff5d9065486d23901a9c27debd303661f465f | [
"Apache-2.0"
] | 1 | 2022-01-19T15:36:45.000Z | 2022-01-19T15:36:45.000Z | docs/doxygen/log.py | tkrupa-intel/openvino | 8c0ff5d9065486d23901a9c27debd303661f465f | [
"Apache-2.0"
] | 22 | 2021-02-03T12:41:51.000Z | 2022-02-21T13:04:48.000Z | docs/doxygen/log.py | tkrupa-intel/openvino | 8c0ff5d9065486d23901a9c27debd303661f465f | [
"Apache-2.0"
] | null | null | null | # ******************************************************************************
# Copyright 2017-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import argparse
import os
import re
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--log', type=str, required=True, default=None, help='Path to doxygen log file')
parser.add_argument('--ignore-list', type=str, required=False,
default=os.path.join(os.path.abspath(os.path.dirname(__file__)),'doxygen-ignore.txt'),
help='Path to doxygen ignore list')
parser.add_argument('--strip', type=str, required=False, default=os.path.abspath('../../'),
help='Strip from warning paths')
    parser.add_argument('--include_omz', action='store_true',
                        help='Include link check for omz docs')
    parser.add_argument('--include_wb', action='store_true',
                        help='Include link check for workbench docs')
    parser.add_argument('--include_pot', action='store_true',
                        help='Include link check for pot docs')
    parser.add_argument('--include_gst', action='store_true',
                        help='Include link check for gst docs')
return parser.parse_args()
def strip_path(path, strip):
"""Strip `path` components ends on `strip`
"""
path = path.replace('\\', '/')
if path.endswith('.md') or path.endswith('.tag'):
strip = os.path.join(strip, 'build/docs').replace('\\', '/') + '/'
else:
strip = strip.replace('\\', '/') + '/'
return path.split(strip)[-1]
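# e.g. strip_path('/repo/openvino/docs/foo.cpp', '/repo/openvino') -> 'docs/foo.cpp'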
def is_excluded_link(warning, exclude_links):
if 'unable to resolve reference to' in warning:
ref = re.findall(r"'(.*?)'", warning)
if ref:
ref = ref[0]
for link in exclude_links:
reg = re.compile(link)
if re.match(reg, ref):
return True
return False
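# e.g. is_excluded_link("unable to resolve reference to 'omz_demo'",
#                       [r'.*?omz_.*?']) -> True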
def parse(log, ignore_list, strip, include_omz=False, include_wb=False, include_pot=False, include_gst=False):
found_errors = []
exclude_links = {'omz': r'.*?omz_.*?', 'wb': r'.*?workbench_.*?',
'pot': r'.*?pot_.*?', 'gst': r'.*?gst_.*?'}
if include_omz:
del exclude_links['omz']
if include_wb:
del exclude_links['wb']
if include_pot:
del exclude_links['pot']
if include_gst:
del exclude_links['gst']
exclude_links = exclude_links.values()
with open(ignore_list, 'r') as f:
ignore_list = f.read().splitlines()
with open(log, 'r') as f:
log = f.read().splitlines()
for line in log:
if 'warning:' in line:
path, warning = list(map(str.strip, line.split('warning:')))
path, line_num = path[:-1].rsplit(':', 1)
path = strip_path(path, strip)
if path in ignore_list or is_excluded_link(warning, exclude_links):
continue
else:
found_errors.append('{path} {warning} line: {line_num}'.format(path=path,
warning=warning,
line_num=line_num))
if found_errors:
print('\n'.join(found_errors))
exit(1)
def main():
args = parse_arguments()
parse(args.log,
args.ignore_list,
args.strip,
include_omz=args.include_omz,
include_wb=args.include_wb,
include_pot=args.include_pot,
include_gst=args.include_gst)
if __name__ == '__main__':
main()
| 39.198198 | 110 | 0.568605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,386 | 0.318547 |
0f0332fe549cc56074e3fec02ab90c04fd4ee657 | 4,735 | py | Python | rec_to_nwb/processing/nwb/components/electrodes/extension/fl_electrode_extension_manager.py | asilvaalex4/rec_to_nwb | 8f7d9535fa25002bf821d4f04aacf1d722ab9601 | [
"Apache-2.0"
] | 1 | 2021-01-20T00:26:30.000Z | 2021-01-20T00:26:30.000Z | rec_to_nwb/processing/nwb/components/electrodes/extension/fl_electrode_extension_manager.py | asilvaalex4/rec_to_nwb | 8f7d9535fa25002bf821d4f04aacf1d722ab9601 | [
"Apache-2.0"
] | 12 | 2020-11-13T01:36:32.000Z | 2022-01-23T20:35:55.000Z | rec_to_nwb/processing/nwb/components/electrodes/extension/fl_electrode_extension_manager.py | asilvaalex4/rec_to_nwb | 8f7d9535fa25002bf821d4f04aacf1d722ab9601 | [
"Apache-2.0"
] | 3 | 2020-10-20T06:52:45.000Z | 2021-07-06T23:00:53.000Z | import copy
import logging.config
import os
from rec_to_nwb.processing.exceptions.not_compatible_metadata import NotCompatibleMetadata
from rec_to_nwb.processing.header.module.header import Header
from rec_to_nwb.processing.nwb.components.electrodes.extension.fl_electrode_extension import FlElectrodeExtension
from rec_to_nwb.processing.nwb.components.electrodes.extension.fl_electrode_extension_builder import \
FlElectrodeExtensionBuilder
from rec_to_nwb.processing.nwb.components.electrodes.extension.fl_electrode_extension_factory import \
FlElectrodeExtensionFactory
from rec_to_nwb.processing.tools.beartype.beartype import beartype
path = os.path.dirname(os.path.abspath(__file__))
logging.config.fileConfig(fname=str(path) + '/../../../../../logging.conf', disable_existing_loggers=False)
logger = logging.getLogger(__name__)
class FlElectrodeExtensionManager:
@beartype
def __init__(self, probes_metadata: list, metadata: dict, header: Header):
self.probes_metadata = probes_metadata
self.metadata = metadata
self.header = header
@beartype
def get_fl_electrodes_extension(self, electrodes_valid_map: list) -> FlElectrodeExtension:
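        """Assemble the per-electrode extension columns (relative position,
        hardware channel, ntrode/channel ids, bad channels, probe shank and
        electrode, reference electrode id) and filter each of them through
        electrodes_valid_map."""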
probes_metadata = self.probes_metadata
electrode_groups_metadata = self.metadata['electrode_groups']
ntrode_metadata = self.metadata['ntrode_electrode_group_channel_map']
spike_n_trodes = self.header.configuration.spike_configuration.spike_n_trodes
rel = FlElectrodeExtensionFactory.create_rel(
probes_metadata=probes_metadata,
electrode_groups_metadata=electrode_groups_metadata
)
hw_chan = FlElectrodeExtensionFactory.create_hw_chan(
spike_n_trodes=spike_n_trodes
)
ntrode_id = FlElectrodeExtensionFactory.create_ntrode_id(
ntrode_metadata=ntrode_metadata
)
channel_id = FlElectrodeExtensionFactory.create_channel_id(
ntrode_metadata=ntrode_metadata
)
bad_channels = FlElectrodeExtensionFactory.create_bad_channels(
ntrode_metadata=ntrode_metadata
)
probe_shank = FlElectrodeExtensionFactory.create_probe_shank(
probes_metadata=probes_metadata,
electrode_groups_metadata=electrode_groups_metadata
)
probe_electrode = FlElectrodeExtensionFactory.create_probe_electrode(
probes_metadata=probes_metadata,
electrode_groups_metadata=electrode_groups_metadata
)
ref_elect_id = FlElectrodeExtensionFactory.create_ref_elect_id(
spike_n_trodes=spike_n_trodes,
ntrode_metadata=ntrode_metadata
)
self.__validate_extension_length(
electrodes_valid_map,
rel['rel_x'],
rel['rel_y'],
rel['rel_z'],
hw_chan,
ntrode_id,
channel_id,
bad_channels,
probe_shank,
probe_electrode,
ref_elect_id
)
return FlElectrodeExtensionBuilder.build(
rel_x=self.__filter_extension_list_with_electrodes_valid_map(electrodes_valid_map, rel['rel_x']),
rel_y=self.__filter_extension_list_with_electrodes_valid_map(electrodes_valid_map, rel['rel_y']),
rel_z=self.__filter_extension_list_with_electrodes_valid_map(electrodes_valid_map, rel['rel_z']),
hw_chan=self.__filter_extension_list_with_electrodes_valid_map(electrodes_valid_map, hw_chan),
ntrode_id=self.__filter_extension_list_with_electrodes_valid_map(electrodes_valid_map, ntrode_id),
channel_id=self.__filter_extension_list_with_electrodes_valid_map(electrodes_valid_map, channel_id),
bad_channels=self.__filter_extension_list_with_electrodes_valid_map(electrodes_valid_map, bad_channels),
probe_shank=self.__filter_extension_list_with_electrodes_valid_map(electrodes_valid_map, probe_shank),
probe_electrode=self.__filter_extension_list_with_electrodes_valid_map(electrodes_valid_map, probe_electrode),
ref_elect_id=self.__filter_extension_list_with_electrodes_valid_map(electrodes_valid_map, ref_elect_id),
)
@staticmethod
def __validate_extension_length(*args):
if len(set(map(len, args))) != 1:
message = 'Electrodes metadata are not compatible!'
logger.error(message)
raise NotCompatibleMetadata(message)
@staticmethod
def __filter_extension_list_with_electrodes_valid_map(electrodes_valid_map, extension):
tmp_electrodes_valid_map = copy.deepcopy(electrodes_valid_map)
return [value for value in extension if tmp_electrodes_valid_map.pop(0)]
| 46.421569 | 122 | 0.741922 | 3,886 | 0.820697 | 0 | 0 | 3,828 | 0.808448 | 0 | 0 | 167 | 0.035269 |
0f03c437607dd785e33ff9e71aa3dbb48d46d5a4 | 1,314 | py | Python | utils/prediction.py | ZhengLiangliang1996/Speech-Recogniton-Tool-Box | 0a2353d990e3f0a3057a747ff52fd3a066d4289d | [
"MIT"
] | null | null | null | utils/prediction.py | ZhengLiangliang1996/Speech-Recogniton-Tool-Box | 0a2353d990e3f0a3057a747ff52fd3a066d4289d | [
"MIT"
] | null | null | null | utils/prediction.py | ZhengLiangliang1996/Speech-Recogniton-Tool-Box | 0a2353d990e3f0a3057a747ff52fd3a066d4289d | [
"MIT"
] | null | null | null | #! /usr/bin/env python
"""
Author: LiangLiang ZHENG
Date:
File Description
"""
from __future__ import print_function
import sys
import time
import os
sys.path.append('..')
import argparse
from keras import backend as K
from utils.cha_level_helper import output_sequence
import numpy as np
#TODO: still need to be tested
def get_predictions_then_print(data, label, mode, model, model_path):
""" Print a model's decoded predictions
Params:
index (int): dataset index
mode: which will get the dataset
model: model will be used
model_path (str): model checkpoint
"""
    # Load the trained weights once, outside the decoding loop
    model.load_weights(model_path)
    data_len = len(data)
    for i in range(data_len):
        # Obtain and decode the acoustic model's predictions
prediction = model.predict(data[i])
output_length = [model.output_length(data[i].shape[1])]
#why + 1?
pred_ints = (K.eval(K.ctc_decode(
prediction, output_length)[0][0])).flatten().tolist()
# Play the audio file, and display the true and predicted transcriptions
print('-'*80)
print('Ground Truth:\n' + '\n' + output_sequence(label[i]))
print('-'*80)
print('Predicted seq:\n' + '\n' + ''.join(output_sequence(pred_ints)))
print('-'*80)
| 28.565217 | 80 | 0.649924 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 510 | 0.388128 |
0f043fd8060ea318b76a0b1d439aef5060a3a833 | 14,408 | py | Python | smq/plot.py | x75/smq | 17fc1219b3f34f6e6035d261021b8e772b7a287d | [
"MIT"
] | null | null | null | smq/plot.py | x75/smq | 17fc1219b3f34f6e6035d261021b8e772b7a287d | [
"MIT"
] | null | null | null | smq/plot.py | x75/smq | 17fc1219b3f34f6e6035d261021b8e772b7a287d | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as pl
import pandas as pd
import seaborn as sns
import smq.logging as log
# check pandas, seaborn
# FIXME: fix hardcoded tablenames
from smq.utils import set_attr_from_dict
def get_data_from_item_log(items):
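    """Return (tbl_key, dataframe, transposed value array, columns) for the
    log table named after the first item."""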
tbl_key = items[0].name
# print "%s.run: tbl_key = %s" % (self.__class__.__name__, tbl_key)
print "plot.get_data_from_item_log: tbl_key = %s" % (tbl_key)
df = log.log_lognodes[tbl_key]
data = df.values.T
columns = df.columns
return tbl_key, df, data, columns
class Plot(object):
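    """Base plot class: copies conf entries onto attributes and dispatches
    run() to make_plot(), which subclasses override."""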
def __init__(self, conf):
self.conf = conf
set_attr_from_dict(self, conf)
def run(self, items):
self.make_plot(items)
def make_plot(self, items):
print "%s.make_plot: implement me" % (self.__class__.__name__)
class PlotTimeseries(Plot):
def __init__(self, conf):
Plot.__init__(self, conf)
def run(self, items):
# how many axes / plotitems
# configure subplotgrid
tbl_key = items[0].name
# tbl_key = items[0].conf["name"]
print "tbl_key", tbl_key
df = log.log_lognodes[tbl_key]
# data = log.h5file.root.item_pm_data.read()
# data = log.log_lognodes["pm"].values.T
# columns = log.log_lognodes["pm"].columns
data = df.values.T
columns = df.columns
# print "data.shape", data.shape
pl.ioff()
# create figure
fig = pl.figure()
fig.suptitle("Experiment %s" % (log.h5file.title))
# fig.suptitle("Experiment %s" % (self.title))
for i in range(data.shape[0]): # loop over data items
ax1 = pl.subplot2grid((data.shape[0], 2), (i, 0))
ax1 = self.make_plot_timeseries(ax1, data[i], columns[i])
ax2 = pl.subplot2grid((data.shape[0], 2), (i, 1)) # second plotgrid column
ax2 = self.make_plot_histogram(ax2, data[i], columns[i])
# global for plot, use last axis
ax1.set_xlabel("t [steps]")
ax2.set_xlabel("counts")
# fig.show() # this doesn't work
pl.show()
def make_plot_timeseries(self, ax, data, columns):
ax.plot(data, "k-", alpha=0.5)
# print "columns[i]", type(columns[i])
ax.legend(["%s" % (columns)])
return ax
def make_plot_histogram(self, ax, data, columns):
ax.hist(data, bins=20, orientation="horizontal")
ax.legend(["%s" % (columns)])
# pl.hist(data.T, bins=20, orientation="horizontal")
return ax
# def make_plot(self, items):
# # print "log.h5file", log.h5file
# # print "dir(log.h5file)", dir(log.h5file)
# # print "blub", type(log.h5file.root.item_pm_data)
# # for item in log.h5file.root.item_pm_data:
# # print type(item)
# # print "log.h5file.root.item_pm_data", log.h5file.root.item_pm_data.read()
# # df = log.log_lognodes["pm"]
# # g = sns.FacetGrid(df, col=list(df.columns))
# # g.map(pl.plot, )
# # print "data.shape", data.shape
# for i,datum in enumerate(data):
# pl.subplot(data.shape[0], 2, (i*2)+1)
# # pl.title(columns[i])
# # sns.timeseries.tsplot(datum)
# pl.plot(datum, "k-", alpha=0.5)
# # print "columns[i]", type(columns[i])
# pl.legend(["%s" % (columns[i])])
# pl.xlabel("t [steps]")
# # pl.legend(["acc_p", "vel_e", "vel_", "pos_", "vel_goal", "dist_goal", "acc_pred", "m"])
# # pl.subplot(122)
# for i,datum in enumerate(data):
# pl.subplot(data.shape[0], 2, (i*2)+2)
# # print "dataum", datum
# pl.hist(datum, bins=20, orientation="horizontal")
# pl.legend(["%s" % (columns[i])])
# # pl.hist(data.T, bins=20, orientation="horizontal")
# pl.xlabel("counts")
# pl.show()
class PlotTimeseries2D(Plot):
def __init__(self, conf):
Plot.__init__(self, conf)
def run(self, items):
# FIXME: assuming len(items) == 1, which might be appropriate depending on the experiment
if items[0].dim_s_motor > 2:
print "more than two dimensions in data, plot is going to be incomplete"
return
tbl_key = items[0].name
# tbl_key = items[0].conf["name"]
print "%s.run: tbl_key = %s" % (self.__class__.__name__, tbl_key)
df = log.log_lognodes[tbl_key]
data = df.values.T
columns = df.columns
# print "columns", columns
# transform df to new df
if hasattr(self, "cols"):
cols = self.cols
else:
cols = ["vel%d" % (i) for i in range(items[0].dim_s_motor)]
cols += ["acc_pred%d" % (i) for i in range(items[0].dim_s_motor)]
df2 = df[cols]
# print df
# goal columns
if not hasattr(self, "cols_goal_base"):
setattr(self, "cols_goal_base", "vel_goal")
print "PlotTimeseries2D", self.cols, self.cols_goal_base
pl.ioff() #
goal_col_1 = "%s%d" % (self.cols_goal_base, 0)
goal_col_2 = "%s%d" % (self.cols_goal_base, 1)
if self.type == "pyplot":
# pl.plot(df["vel0"], df["vel1"], "ko")
# print df["vel0"].values.dtype
pl.subplot(131)
pl.title("state distribution and goal")
# print df["vel_goal0"].values, df["vel_goal1"].values
# pl.hist2d(df["vel0"].values, df["vel1"].values, bins=20)
pl.plot(df["%s%d" % (self.cols_goal_base, 0)].values[0],
df["%s%d" % (self.cols_goal_base, 1)].values[0], "ro", markersize=16, alpha=0.5)
pl.hexbin(df[self.cols[0]].values, df[self.cols[1]].values, gridsize = 30, marginals=True)
pl.plot(df[self.cols[0]].values, df[self.cols[1]].values, "k-", alpha=0.25, linewidth=1)
# pl.xlim((-1.2, 1.2))
# pl.ylim((-1.2, 1.2))
pl.grid()
pl.colorbar()
pl.subplot(132)
pl.title("prediction distribution")
pl.hexbin(df["acc_pred0"].values, df["acc_pred1"].values, gridsize = 30, marginals=True)
pl.xlim((-1.2, 1.2))
pl.ylim((-1.2, 1.2))
pl.colorbar()
pl.subplot(133)
pl.title("goal distance distribution")
pl.hist(df["dist_goal0"].values)
pl.show()
elif self.type == "seaborn":
print "goal", df[goal_col_1][0], df[goal_col_2][0]
ax = sns.jointplot(x=self.cols[0], y=self.cols[1], data=df)
print "ax", dir(ax)
# plot goal
print "df[goal_col_1][0], df[goal_col_2][0]", self.cols_goal_base, goal_col_1, goal_col_2, df[goal_col_1][0], df[goal_col_2][0]
ax.ax_joint.plot(df[goal_col_1][0], df[goal_col_2][0], "ro", alpha=0.5)
# pl.plot(df["vel_goal0"], df["vel_goal1"], "ro")
pl.show()
class PlotTimeseriesND(Plot):
"""Plot a hexbin scattermatrix for N-dim data"""
def __init__(self, conf):
Plot.__init__(self, conf)
def run(self, items):
pl.ioff()
tbl_key, df, data, columns = get_data_from_item_log(items)
# transform df to new df
if hasattr(self, "cols"):
cols = self.cols
else:
cols = ["vel%d" % (i) for i in range(items[0].dim_s_motor)]
cols += ["acc_pred%d" % (i) for i in range(items[0].dim_s_motor)]
df2 = df[cols]
print df2
# goal columns
if not hasattr(self, "cols_goal_base"):
setattr(self, "cols_goal_base", "vel_goal")
# pp = sns.pairplot(df2)
# for i in range(3):
# for j in range(3): # 1, 2; 0, 2; 0, 1
# if i == j:
# continue
# pp.axes[i,j].plot(df["vel_goal%d" % i][0], df["vel_goal%d" % j][0], "ro", alpha=0.5)
# # print pp.axes
# # for axset in pp.axes:
# # print "a", axset
# # for
# # print "dir(pp)", dir(pp)
# pl.show()
g = sns.PairGrid(df2)
g.map_diag(pl.hist)
g.map_offdiag(pl.hexbin, cmap="gray", gridsize=30, bins="log");
# print "dir(g)", dir(g)
# print g.diag_axes
# print g.axes
for i in range(items[0].dim_s_motor):
for j in range(items[0].dim_s_motor): # 1, 2; 0, 2; 0, 1
if i == j:
continue
# column gives x axis, row gives y axis, thus need to reverse the selection for plotting goal
g.axes[i,j].plot(df["%s%d" % (self.cols_goal_base, j)], df["%s%d" % (self.cols_goal_base, i)], "ro", alpha=0.5)
pl.show()
pl.hist(df["dist_goal0"].values, bins=20)
pl.show()
class PlotExplautoSimplearm(Plot):
def __init__(self, conf):
Plot.__init__(self, conf)
def make_plot(self, items):
print "items", items
pl.ioff()
tbl_key, df, data, columns = get_data_from_item_log(items)
motors = df[["j_ang%d" % i for i in range(items[0].dim_s_motor)]]
goals = df[["j_ang_goal%d" % i for i in range(items[0].dim_s_motor)]]
# print "df", motors, columns #, df
fig = pl.figure()
for i,item in enumerate(items):
# fig.suptitle("Experiment %s" % (log.h5file.title))
ax = fig.add_subplot(len(items), 1, i+1)
for m in motors.values:
# print "m", m
item.env.env.plot_arm(ax = ax, m = m)
print "plot goal", goals.values[0]
item.env.env.plot_arm(ax = ax, m = goals.values[0], c="r")
pl.show()
################################################################################
class PlotTimeseries2(Plot):
def __init__(self, conf):
Plot.__init__(self, conf)
def run(self, items):
# how many axes / plotitems
# configure subplotgrid
tbl_key = items[0].name
# tbl_key = items[0].conf["name"]
print "tbl_key", tbl_key
df = log.log_lognodes[tbl_key]
# data = log.h5file.root.item_pm_data.read()
# data = log.log_lognodes["pm"].values.T
# columns = log.log_lognodes["pm"].columns
data = df.values.T
columns = df.columns
# print "data.shape", data.shape
pl.ioff()
# create figure
fig = pl.figure()
fig.suptitle("Experiment %s, module %s" % (self.title, tbl_key))
for i in range(data.shape[0]): # loop over data items
ax1 = pl.subplot2grid((data.shape[0], 2), (i, 0))
ax1 = self.make_plot_timeseries(ax1, data[i], columns[i])
ax2 = pl.subplot2grid((data.shape[0], 2), (i, 1)) # second plotgrid column
ax2 = self.make_plot_histogram(ax2, data[i], columns[i])
# global for plot, use last axis
ax1.set_xlabel("t [steps]")
ax2.set_xlabel("counts")
# fig.show() # this doesn't work
pl.show()
def make_plot_timeseries(self, ax, data, columns):
ax.plot(data, "k-", alpha=0.5)
# print "columns[i]", type(columns[i])
ax.legend(["%s" % (columns)])
return ax
def make_plot_histogram(self, ax, data, columns):
ax.hist(data, bins=20, orientation="horizontal")
ax.legend(["%s" % (columns)])
# pl.hist(data.T, bins=20, orientation="horizontal")
return ax
class PlotTimeseriesNDrealtimeseries(Plot):
"""Plot a hexbin scattermatrix for N-dim data"""
def __init__(self, conf):
Plot.__init__(self, conf)
def run(self, items):
pl.ioff()
tbl_key, df, data, columns = get_data_from_item_log(items)
# transform df to new df
if hasattr(self, "cols"):
cols = self.cols
else:
cols = ["vel%d" % (i) for i in range(items[0].dim_s_motor)]
cols += ["acc_pred%d" % (i) for i in range(items[0].dim_s_motor)]
# FIXME: make generic
numplots = 1
cols_ext = []
for i in range(items[0].dim_s_extero):
colname = "pos_goal%d" % i
if colname in columns:
cols_ext += [colname]
numplots = 2
colname = "ee_pos%d" % i
if colname in columns:
cols_ext += [colname]
cols_error_prop = []
colnames_error_prop = ["avgerror_prop", "davgerror_prop", "avgderror_prop"]
for ec in colnames_error_prop:
if ec in columns:
# print "lalala", err_colname
cols_error_prop.append(ec)
cols_error_ext = []
colnames_error_ext = ["avgerror_ext", "davgerror_ext", "avgderror_ext"]
for ec in colnames_error_ext:
if ec in columns:
# print "lalala", err_colname
cols_error_ext.append(ec)
df2 = df[cols]
print df2
# goal columns
if not hasattr(self, "cols_goal_base"):
setattr(self, "cols_goal_base", "vel_goal")
pl.ioff()
# create figure
fig = pl.figure()
fig.suptitle("Experiment %s, module %s" % (self.title, tbl_key))
if numplots == 1:
pl.subplot(211)
else:
pl.subplot(411)
pl.title("Proprioceptive space")
x1 = df[cols].values
x2 = df[self.cols_goals].values
# print "x1.shape", x1.shape
x1plot = x1 + np.arange(x1.shape[1])
x2plot = x2 + np.arange(x2.shape[1])
print "x1plot.shape", x1plot.shape
pl.plot(x1plot)
pl.plot(x2plot)
if numplots == 1:
pl.subplot(212)
else: # numplots == 2:
pl.subplot(412)
pl.plot(df[cols_error_prop])
if numplots == 2:
pl.subplot(413)
pl.title("Exteroceptive space")
pl.plot(df[cols_ext])
print "cols_error_ext", cols_error_ext
pl.subplot(414)
pl.plot(df[cols_error_ext])
pl.show()
| 33.981132 | 139 | 0.530261 | 13,750 | 0.954331 | 0 | 0 | 0 | 0 | 0 | 0 | 4,821 | 0.334606 |
0f045c6ae18a61a369eced501af84eaf8bea2c34 | 642 | py | Python | test/model/test_pddl_action_representation.py | DLR-RM/rafcon-task-planner-plugin | 9d004c76aa6f54c992a2f3f00b9dd98f9fb4e498 | [
"BSD-3-Clause"
] | 1 | 2020-05-21T17:08:02.000Z | 2020-05-21T17:08:02.000Z | test/model/test_pddl_action_representation.py | DLR-RM/rafcon-task-planner-plugin | 9d004c76aa6f54c992a2f3f00b9dd98f9fb4e498 | [
"BSD-3-Clause"
] | null | null | null | test/model/test_pddl_action_representation.py | DLR-RM/rafcon-task-planner-plugin | 9d004c76aa6f54c992a2f3f00b9dd98f9fb4e498 | [
"BSD-3-Clause"
] | null | null | null | from rafcontpp.model.pddl_action_representation import PddlActionRepresentation
from rafcontpp.model.pddl_action_representation import action_to_upper
def test_action_to_upper():
#arrange
action = PddlActionRepresentation('myAction','(action:)',['(at ?a - Object)'],['Object'],[':strips'],['param1','param2'])
#act
action = action_to_upper(action)
#assert
assert 'MYACTION' == action.name
assert '(ACTION:)' == action.action
assert ['(AT ?A - OBJECT)'] == action.predicates
assert ['OBJECT'] == action.types
assert [':STRIPS'] == action.requirements
assert ['param1','param2'] == action.parameters | 42.8 | 125 | 0.697819 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.253894 |
0f049fdb1bc9e04e29bedc84047ec6d4aaba04ae | 13,380 | py | Python | awsume/awsumepy/app.py | icyfork/awsume | 524c667599b8bfba521f0397214bb363f1b706fa | [
"MIT"
] | null | null | null | awsume/awsumepy/app.py | icyfork/awsume | 524c667599b8bfba521f0397214bb363f1b706fa | [
"MIT"
] | null | null | null | awsume/awsumepy/app.py | icyfork/awsume | 524c667599b8bfba521f0397214bb363f1b706fa | [
"MIT"
] | null | null | null | import os
import sys
import argparse
import difflib
import json
import logging
import pluggy
import colorama
import boto3
from pathlib import Path
from . lib.autoawsume import create_autoawsume_profile
from ..autoawsume.process import kill, kill_autoawsume
from . lib.profile import aggregate_profiles, get_role_chain, get_profile_name
from . lib.config_management import load_config
from . lib.aws_files import get_aws_files, add_section, get_section
from . lib.profile import credentials_to_profile, is_mutable_profile
from . lib import exceptions
from . lib.logger import logger
from . lib.safe_print import safe_print
from . lib import constants
from . lib import saml as saml
from . lib import aws as aws_lib
from . import hookspec
from . import default_plugins
class Awsume(object):
def __init__(self, is_interactive: bool = True):
logger.debug('Initalizing app')
self.plugin_manager = self.get_plugin_manager()
self.config = load_config()
self.config['is_interactive'] = is_interactive
self.is_interactive = is_interactive
colorama.init(autoreset=True)
def get_plugin_manager(self) -> pluggy.PluginManager:
logger.debug('Creating plugin manager')
pm = pluggy.PluginManager('awsume')
pm.add_hookspecs(hookspec)
logger.debug('Loading plugins')
pm.register(default_plugins)
pm.load_setuptools_entrypoints('awsume')
return pm
def parse_args(self, system_arguments: list) -> argparse.Namespace:
logger.debug('Gathering arguments')
epilog = """Thank you for using AWSume! Check us out at https://trek10.com"""
description="""Awsume - A cli that makes using AWS IAM credentials easy"""
argument_parser = argparse.ArgumentParser(
prog='awsume',
description=description,
epilog=epilog,
formatter_class=lambda prog: (argparse.RawDescriptionHelpFormatter(prog, max_help_position=80, width=80)), # pragma: no cover
)
self.plugin_manager.hook.pre_add_arguments(
config=self.config,
)
self.plugin_manager.hook.add_arguments(
config=self.config,
parser=argument_parser,
)
logger.debug('Parsing arguments')
args = argument_parser.parse_args(system_arguments)
logger.debug('Handling arguments')
if args.refresh_autocomplete:
autocomplete_file = Path('~/.awsume/autocomplete.json').expanduser()
result = self.plugin_manager.hook.get_profile_names(
config=self.config,
arguments=args,
)
profile_names = [y for x in result for y in x]
json.dump({'profile-names': profile_names}, open(autocomplete_file, 'w'))
raise exceptions.EarlyExit()
if args.list_plugins:
for plugin_name, _ in self.plugin_manager.list_name_plugin():
if 'default_plugins' not in plugin_name:
safe_print(plugin_name, color=colorama.Fore.LIGHTCYAN_EX)
raise exceptions.EarlyExit()
self.plugin_manager.hook.post_add_arguments(
config=self.config,
arguments=args,
parser=argument_parser,
)
args.system_arguments = system_arguments
return args
def get_profiles(self, args: argparse.Namespace) -> dict:
logger.debug('Gathering profiles')
config_file, credentials_file = get_aws_files(args, self.config)
self.plugin_manager.hook.pre_collect_aws_profiles(
config=self.config,
arguments=args,
credentials_file=credentials_file,
config_file=config_file,
)
aws_profiles_result = self.plugin_manager.hook.collect_aws_profiles(
config=self.config,
arguments=args,
credentials_file=credentials_file,
config_file=config_file,
)
profiles = aggregate_profiles(aws_profiles_result)
self.plugin_manager.hook.post_collect_aws_profiles(
config=self.config,
arguments=args,
profiles=profiles,
)
return profiles
def get_saml_credentials(self, args: argparse.Namespace, profiles: dict) -> dict:
assertion = self.plugin_manager.hook.get_credentials_with_saml(
config=self.config,
arguments=args,
)
assertion = next((_ for _ in assertion if _), None) # pragma: no cover
if not assertion:
raise exceptions.SAMLAssertionNotFoundError('No assertion to use!')
roles = saml.parse_assertion(assertion)
if not roles:
raise exceptions.SAMLAssertionMissingRoleError('No roles found in the saml assertion')
role_arn = None
principal_arn = None
role_duration = args.role_duration or int(self.config.get('role-duration', '0'))
if len(roles) > 1:
if args.role_arn and args.principal_arn:
principal_plus_role_arn = ','.join(args.role_arn, args.principal_arn)
if self.config.get('fuzzy-match'):
choice = difflib.get_close_matches(principal_plus_role_arn, roles, cutoff=0)[0]
safe_print('Closest match: {}'.format(choice))
else:
if principal_plus_role_arn not in roles:
raise exceptions.SAMLRoleNotFoundError(args.principal_arn, args.role_arn)
else:
choice = principal_plus_role_arn
elif args.profile_name:
profile_role_arn = profiles.get(args.profile_name, {}).get('role_arn')
principal_arn = profiles.get(args.profile_name, {}).get('principal_arn')
if profile_role_arn is None or principal_arn is None:
raise exceptions.InvalidProfileError(args.profile_name, 'both role_arn and principal_arn are necessary for saml profiles')
principal_plus_profile_role_arn = ','.join([principal_arn, profile_role_arn])
if principal_plus_profile_role_arn in roles:
choice = principal_plus_profile_role_arn
else:
raise exceptions.SAMLRoleNotFoundError(principal_arn, profile_role_arn)
safe_print('Match: {}'.format(choice))
else:
for index, choice in enumerate(roles):
safe_print('{}) {}'.format(index, choice), color=colorama.Fore.LIGHTYELLOW_EX)
safe_print('Which role do you want to assume? > ', end='', color=colorama.Fore.LIGHTCYAN_EX)
response = input()
if response.isnumeric():
choice = roles[int(response)]
else:
choice = difflib.get_close_matches(response, roles, cutoff=0)[0]
role_arn = choice.split(',')[1]
principal_arn = choice.split(',')[0]
else:
role_arn = roles[0].split(',')[1]
principal_arn = roles[0].split(',')[0]
safe_print('Assuming role: {},{}'.format(principal_arn, role_arn), color=colorama.Fore.GREEN)
credentials = aws_lib.assume_role_with_saml(
role_arn,
principal_arn,
assertion,
region=None,
role_duration=role_duration,
)
return credentials
def get_credentials(self, args: argparse.Namespace, profiles: dict) -> dict:
logger.debug('Getting credentials')
self.plugin_manager.hook.pre_get_credentials(
config=self.config,
arguments=args,
profiles=profiles,
)
try:
if not args.auto_refresh and args.json: # sending credentials to awsume directly
logger.debug('Pulling credentials from json parameter')
args.target_profile_name = 'json'
credentials = json.loads(args.json)
if 'Credentials' in credentials:
credentials = credentials['Credentials']
elif args.with_saml:
logger.debug('Pulling credentials from saml')
credentials = self.get_saml_credentials(args, profiles)
elif args.with_web_identity:
logger.debug('Pulling credentials from web identity')
credentials = self.plugin_manager.hook.get_credentials_with_web_identity(
config=self.config,
arguments=args,
)
else:
logger.debug('Pulling credentials from default awsume flow')
credentials = self.plugin_manager.hook.get_credentials(config=self.config, arguments=args, profiles=profiles)
credentials = next((_ for _ in credentials if _), {})
if args.auto_refresh:
create_autoawsume_profile(self.config, args, profiles, credentials)
if self.config.get('is_interactive'):
logger.debug('Interactive execution, killing existing autoawsume processes')
kill_autoawsume()
except exceptions.ProfileNotFoundError as e:
self.plugin_manager.hook.catch_profile_not_found_exception(config=self.config, arguments=args, profiles=profiles, error=e)
raise
except exceptions.InvalidProfileError as e:
self.plugin_manager.hook.catch_invalid_profile_exception(config=self.config, arguments=args, profiles=profiles, error=e)
raise
except exceptions.UserAuthenticationError as e:
self.plugin_manager.hook.catch_user_authentication_error(config=self.config, arguments=args, profiles=profiles, error=e)
raise
except exceptions.RoleAuthenticationError as e:
self.plugin_manager.hook.catch_role_authentication_error(config=self.config, arguments=args, profiles=profiles, error=e)
raise
if type(credentials) == list: # pragma: no cover
credentials = next((_ for _ in credentials if _), {}) # pragma: no cover
self.plugin_manager.hook.post_get_credentials(
config=self.config,
arguments=args,
profiles=profiles,
credentials=credentials,
)
if not credentials:
safe_print('No credentials to awsume', colorama.Fore.RED)
raise exceptions.NoCredentialsError()
return credentials
def export_data(self, arguments: argparse.Namespace, profiles: dict, credentials: dict, awsume_flag: str, awsume_list: list):
logger.debug('Exporting data')
if self.is_interactive:
print(awsume_flag, end=' ')
print(' '.join(awsume_list))
session = boto3.Session(
aws_access_key_id=credentials.get('AccessKeyId'),
aws_secret_access_key=credentials.get('SecretAccessKey'),
aws_session_token=credentials.get('SessionToken'),
profile_name=credentials.get('AwsProfile'),
region_name=credentials.get('Region'),
)
if arguments.output_profile and not arguments.auto_refresh:
if not is_mutable_profile(profiles, arguments.output_profile):
raise exceptions.ImmutableProfileError(arguments.output_profile, 'not awsume-managed')
_, credentials_file = get_aws_files(arguments, self.config)
awsumed_profile = credentials_to_profile(credentials)
if 'Expiration' in credentials:
awsumed_profile['expiration'] = credentials['Expiration'].strftime('%Y-%m-%d %H:%M:%S')
add_section(arguments.output_profile, awsumed_profile, credentials_file, True)
session.awsume_credentials = credentials
return session
def run(self, system_arguments: list):
try:
args = self.parse_args(system_arguments)
profiles = self.get_profiles(args)
credentials = self.get_credentials(args, profiles)
if args.auto_refresh:
return self.export_data(args, profiles, credentials, 'Auto', [
args.output_profile or 'autoawsume-{}'.format(args.target_profile_name),
credentials.get('Region'),
args.target_profile_name,
])
else:
return self.export_data(args, profiles, credentials, 'Awsume', [
str(credentials.get('AccessKeyId')),
str(credentials.get('SecretAccessKey')),
str(credentials.get('SessionToken')),
str(credentials.get('Region')),
str(args.target_profile_name),
str(credentials.get('AwsProfile')),
str(credentials['Expiration'].strftime('%Y-%m-%dT%H:%M:%S') if 'Expiration' in credentials else None),
])
except exceptions.EarlyExit:
logger.debug('', exc_info=True)
logger.debug('EarlyExit exception raised, no more work to do')
except exceptions.AwsumeException as e:
logger.debug('', exc_info=True)
if self.is_interactive:
safe_print('Awsume error: {}'.format(e), color=colorama.Fore.RED)
else:
raise
| 46.137931 | 142 | 0.626831 | 12,609 | 0.942377 | 0 | 0 | 0 | 0 | 0 | 0 | 1,473 | 0.11009 |
0f067b4300c65db4f51f2b088598a9976507db44 | 4,254 | py | Python | mtkclient/gui/toolsMenu.py | P-Salik/mtkclient | ca702a4ec84da4ec607f1e6484ff605e79a69f46 | [
"MIT"
] | null | null | null | mtkclient/gui/toolsMenu.py | P-Salik/mtkclient | ca702a4ec84da4ec607f1e6484ff605e79a69f46 | [
"MIT"
] | null | null | null | mtkclient/gui/toolsMenu.py | P-Salik/mtkclient | ca702a4ec84da4ec607f1e6484ff605e79a69f46 | [
"MIT"
] | null | null | null | from PySide6.QtCore import Slot, QObject, Signal
from PySide6.QtWidgets import QTableWidget, QTableWidgetItem
from mtkclient.gui.toolkit import trap_exc_during_debug, asyncThread, FDialog
from mtkclient.Library.mtk_da_cmd import DA_handler
import os
import sys
import json
sys.excepthook = trap_exc_during_debug
class UnlockMenu(QObject):
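    """Qt menu handler that toggles the seccfg bootloader lock in a worker
    thread and reports the result back to the UI."""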
enableButtonsSignal = Signal()
disableButtonsSignal = Signal()
def __init__(self, ui, parent, da_handler: DA_handler, sendToLog): # def __init__(self, *args, **kwargs):
super(UnlockMenu, self).__init__(parent)
self.parent = parent
self.ui = ui
self.fdialog = FDialog(parent)
self.mtkClass = da_handler.mtk
self.sendToLog = sendToLog
self.da_handler = da_handler
@Slot()
def updateLock(self):
self.enableButtonsSignal.emit()
result = self.parent.Status['result'][1]
self.ui.partProgressText.setText(result)
self.sendToLogSignal.emit(self.tr(result))
def unlock(self, unlockflag):
self.disableButtonsSignal.emit()
self.ui.partProgressText.setText(self.tr("Generating..."))
thread = asyncThread(self.parent, 0, self.UnlockAsync, [unlockflag])
thread.sendToLogSignal.connect(self.sendToLog)
thread.sendUpdateSignal.connect(self.updateLock)
thread.start()
thread.wait()
self.enableButtonsSignal.emit()
def UnlockAsync(self, toolkit, parameters):
self.sendToLogSignal = toolkit.sendToLogSignal
self.sendUpdateSignal = toolkit.sendUpdateSignal
toolkit.sendToLogSignal.emit(self.tr("Bootloader: ")+parameters[0])
self.parent.Status["result"] = self.mtkClass.daloader.seccfg(parameters[0])
self.parent.Status["done"] = True
self.sendUpdateSignal.emit()
class generateKeysMenu(QObject):
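    """Qt menu handler that derives device keys in a worker thread, saves
    them to hwparam.json, and fills the key table in the UI."""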
enableButtonsSignal = Signal()
disableButtonsSignal = Signal()
def __init__(self, ui, parent, da_handler: DA_handler, sendToLog): # def __init__(self, *args, **kwargs):
super(generateKeysMenu, self).__init__(parent)
self.parent = parent
self.ui = ui
self.fdialog = FDialog(parent)
self.mtkClass = da_handler.mtk
self.sendToLog = sendToLog
self.da_handler = da_handler
@Slot()
def updateKeys(self):
path = os.path.join(self.hwparamFolder, "hwparam.json")
self.ui.keystatuslabel.setText(self.tr(f"Keys saved to {path}."))
keycount = len(self.parent.Status['result'])
self.ui.keytable.setRowCount(keycount)
self.ui.keytable.setColumnCount(2)
        row = 0
        for key in self.parent.Status['result']:
            skey = self.parent.Status['result'][key]
            if skey is not None:
                self.ui.keytable.setItem(row, 0, QTableWidgetItem(key))
                self.ui.keytable.setItem(row, 1, QTableWidgetItem(skey))
                row += 1
self.sendToLogSignal.emit(self.tr("Keys generated!"))
self.enableButtonsSignal.emit()
def generateKeys(self):
self.ui.keystatuslabel.setText(self.tr("Generating..."))
hwparamFolder = self.fdialog.opendir(self.tr("Select output directory"))
if hwparamFolder == "" or hwparamFolder is None:
self.parent.enablebuttons()
return
else:
self.mtkClass.config.set_hwparam_path(hwparamFolder)
self.hwparamFolder = hwparamFolder
thread = asyncThread(self.parent, 0, self.generateKeysAsync, [hwparamFolder])
thread.sendToLogSignal.connect(self.sendToLog)
thread.sendUpdateSignal.connect(self.updateKeys)
thread.start()
self.disableButtonsSignal.emit()
def generateKeysAsync(self, toolkit, parameters):
self.sendToLogSignal = toolkit.sendToLogSignal
self.sendUpdateSignal = toolkit.sendUpdateSignal
toolkit.sendToLogSignal.emit(self.tr("Generating keys"))
res = self.mtkClass.daloader.keys()
if res:
with open(os.path.join(parameters[0],"hwparam.json"),"w") as wf:
wf.write(json.dumps(res))
self.parent.Status["result"] = res
self.parent.Status["done"] = True
self.sendUpdateSignal.emit()
| 39.388889 | 110 | 0.668077 | 3,935 | 0.925012 | 0 | 0 | 970 | 0.228021 | 0 | 0 | 296 | 0.069582 |
0f07cbe922efe088c09747c107ff6e124768d889 | 412 | py | Python | Semenenya_Vladislav_dz_2/task_2_3.py | neesaj/1824_GB_Python_1 | bcafcef4819fcaaddc7a9f7a93ab256b6637c516 | [
"MIT"
] | null | null | null | Semenenya_Vladislav_dz_2/task_2_3.py | neesaj/1824_GB_Python_1 | bcafcef4819fcaaddc7a9f7a93ab256b6637c516 | [
"MIT"
] | null | null | null | Semenenya_Vladislav_dz_2/task_2_3.py | neesaj/1824_GB_Python_1 | bcafcef4819fcaaddc7a9f7a93ab256b6637c516 | [
"MIT"
] | null | null | null | lst = ['инженер-конструктор Игорь', 'главный бухгалтер МАРИНА', 'токарь высшего разряда нИКОЛАй', 'директор аэлита']
# Split each job-title string into words; the last word is the person's name.
for i, elem in enumerate(lst):
    lst[i] = elem.split()
# Greet each person with the name's capitalization normalized
# ('Привет' means 'Hello').
for words in lst:
    name = words[-1].capitalize()
    print(f'Привет, {name}!')
| 21.684211 | 116 | 0.558252 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 216 | 0.428571 |
0f095bc4079d063bb1c75421ffb1d0b5da98ff4e | 834 | py | Python | mooc.py | fichas/Down_Mooc | 9777755cdf44aadb10100ddcd6437f2f16afe98c | [
"MIT"
] | 18 | 2020-02-28T08:42:19.000Z | 2021-08-24T15:53:35.000Z | mooc.py | fichas/Down_Mooc | 9777755cdf44aadb10100ddcd6437f2f16afe98c | [
"MIT"
] | 1 | 2020-08-07T06:59:19.000Z | 2020-08-07T07:25:53.000Z | mooc.py | fichas/Down_Mooc | 9777755cdf44aadb10100ddcd6437f2f16afe98c | [
"MIT"
] | 2 | 2020-08-11T13:25:19.000Z | 2021-08-31T03:23:40.000Z | # -*- coding: utf-8 -*-
import re
import sys
import os
import requests
headers = {'Referer':'http://d0.ananas.chaoxing.com/','User-Agent': 'User-Agent:Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}
url=sys.argv[1]
#print(url)
req = requests.get(url, headers=headers)
strr=req.text
patt=re.compile(r'[a-zA-Z]+://cs.[^\s][^\_\$]*')  # match video URLs on the cs.* CDN
res=patt.findall(strr)
f=open('output.txt','w')
for i in res :
f.write(i)
f.write('\n')
f.close()
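# Second pass: scrape lesson numbers and titles, then emit a Windows batch
# file that renames the downloaded .mp4 files accordingly.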
patt=re.compile(r'<i[^>]*>(.*?)</i>+<a[^>]*>(.*?)</a>')
res=patt.findall(strr)
f=open('name.bat','w')
for i in res :
s1=str(i[0])
s2=str(i[1])
s1=s1.strip()
s2=s2.strip()
stri='ren '+s1+'.mp4 '+s1+'_'+s2+'.mp4'
f.write(stri)
f.write('\n')
f.close()
| 22.540541 | 205 | 0.568345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 349 | 0.418465 |
0f0a513efe32c280bcacaf1a2ab45f2537b65890 | 1,220 | py | Python | src/api/test/test_datahub_serializer.py | RogerTangos/datahub-stub | 8c3e89c792e45ccc9ad067fcf085ddd52f7ecd89 | [
"MIT"
] | 192 | 2015-07-29T15:20:35.000Z | 2021-09-06T21:42:01.000Z | src/api/test/test_datahub_serializer.py | RogerTangos/datahub-stub | 8c3e89c792e45ccc9ad067fcf085ddd52f7ecd89 | [
"MIT"
] | 120 | 2015-10-27T21:43:11.000Z | 2021-08-12T15:15:43.000Z | src/api/test/test_datahub_serializer.py | RogerTangos/datahub-stub | 8c3e89c792e45ccc9ad067fcf085ddd52f7ecd89 | [
"MIT"
] | 56 | 2015-09-19T05:58:41.000Z | 2021-09-14T09:46:11.000Z | from mock import patch
from django.test import TestCase
from ..serializer import DataHubSerializer
class DataHubSerializerTests(TestCase):
"""Test DataHubSerializer methods"""
def setUp(self):
self.username = "delete_me_username"
self.repo_base = "delete_me_repo_base"
self.password = "delete_me_password"
self.mock_manager = self.create_patch(
'api.serializer.DataHubManager')
self.serializer = DataHubSerializer(
username=self.username, repo_base=self.repo_base)
def create_patch(self, name):
# helper method for creating patches
patcher = patch(name)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
def test_initialization(self):
dataHubSerializer = DataHubSerializer(
username=self.username, repo_base=self.repo_base)
self.assertEqual(dataHubSerializer.username, self.username)
self.assertEqual(dataHubSerializer.repo_base, self.repo_base)
self.assertEqual(
self.mock_manager.call_args[1]['repo_base'], self.repo_base)
self.assertEqual(
self.mock_manager.call_args[1]['user'], self.username)
| 32.105263 | 72 | 0.682787 | 1,116 | 0.914754 | 0 | 0 | 0 | 0 | 0 | 0 | 181 | 0.148361 |
0f0a874b832d5307c77060e388ac90f502854fe7 | 852 | py | Python | notes/algo-ds-practice/problems/number_theory/multiplicative_mod_inverse/multiplicative_mod_inverse.py | Anmol-Singh-Jaggi/interview-notes | 65af75e2b5725894fa5e13bb5cd9ecf152a0d652 | [
"MIT"
] | 6 | 2020-07-05T05:15:19.000Z | 2021-01-24T20:17:14.000Z | notes/algo-ds-practice/problems/number_theory/multiplicative_mod_inverse/multiplicative_mod_inverse.py | Anmol-Singh-Jaggi/interview-notes | 65af75e2b5725894fa5e13bb5cd9ecf152a0d652 | [
"MIT"
] | null | null | null | notes/algo-ds-practice/problems/number_theory/multiplicative_mod_inverse/multiplicative_mod_inverse.py | Anmol-Singh-Jaggi/interview-notes | 65af75e2b5725894fa5e13bb5cd9ecf152a0d652 | [
"MIT"
] | 2 | 2020-09-14T06:46:37.000Z | 2021-06-15T09:17:21.000Z | from algo.number_theory.extended_gcd.extended_gcd import extended_gcd
from algo.number_theory.eulers_totient_function.eulers_totient import etf
def mod_inverse_gcd(a, m):
'''
a and m should be coprime!
Complexity -> O(log(m)).
'''
return extended_gcd(a, m)[0]
def mod_inverse_eulers(a, m):
'''
a and m should be coprime.
Complexity -> O(sqrt(m) + log(m)).
'''
etf_m = etf(m)
return pow(a, etf_m - 1, m)
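# By Euler's theorem, a^phi(m) = 1 (mod m), so a^(phi(m) - 1) is a's inverse.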
def mod_inverse_fermat(a, p):
'''
p must be prime and a should not be a multiple of p.
    A special case of Euler's theorem, since phi(p) = p - 1 for prime p.
Complexity -> O(log(p)).
'''
return pow(a, p - 2, p)
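# By Fermat's little theorem, a^(p-1) = 1 (mod p), so a^(p-2) is a's inverse.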
def main():
a = 7
m = 5
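    # 7 * 3 = 21 = 1 (mod 5), so the inverse is 3 (mod 5); the gcd-based
    # variant may print a negative representative depending on extended_gcd.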
    print(mod_inverse_gcd(a, m))
    print(mod_inverse_fermat(a, m))
    print(mod_inverse_eulers(a, m))
if __name__ == "__main__":
main()
| 20.780488 | 73 | 0.627934 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 319 | 0.374413 |
0f0b7b2c5564b7c02453602dda17e447559b3a6d | 56 | py | Python | tests/unchained/conftest.py | uolot/py-yaml-fixtures | 0b165d91578420cd4cb0b2fc245ae0e39578ede5 | [
"MIT"
] | 13 | 2018-08-14T12:28:54.000Z | 2022-02-08T04:25:47.000Z | tests/unchained/conftest.py | uolot/py-yaml-fixtures | 0b165d91578420cd4cb0b2fc245ae0e39578ede5 | [
"MIT"
] | 5 | 2019-02-23T04:01:48.000Z | 2021-04-08T17:37:40.000Z | tests/unchained/conftest.py | uolot/py-yaml-fixtures | 0b165d91578420cd4cb0b2fc245ae0e39578ede5 | [
"MIT"
] | 5 | 2018-09-04T03:28:46.000Z | 2021-04-09T11:46:03.000Z | from flask_unchained.bundles.sqlalchemy.pytest import *
| 28 | 55 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0f0bb21d732e1e1a3fe041762c735e3ea255fe56 | 345 | py | Python | meadow/meadow/migrations/0007_book_is_approved.py | digital-gachilib/meadow | 7a4510bc6290a74305536c35b24867d79107bd30 | [
"MIT"
] | null | null | null | meadow/meadow/migrations/0007_book_is_approved.py | digital-gachilib/meadow | 7a4510bc6290a74305536c35b24867d79107bd30 | [
"MIT"
] | 26 | 2020-04-05T08:37:16.000Z | 2021-09-22T18:47:20.000Z | meadow/meadow/migrations/0007_book_is_approved.py | digital-gachilib/meadow | 7a4510bc6290a74305536c35b24867d79107bd30 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-04-28 15:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("meadow", "0006_mmake_isbn_charfield"),
]
operations = [
migrations.AddField(model_name="book", name="is_approved", field=models.BooleanField(default=False),),
]
| 23 | 110 | 0.684058 | 252 | 0.730435 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.292754 |
0f0bb8584063b7dea01c7c517b101023b6b85c01 | 2,831 | py | Python | fire/cli/__init__.py | xidus/FIRE | 9508b26faff4830b70981c21d3fdc33c20c85dde | [
"MIT"
] | 1 | 2018-12-15T16:13:09.000Z | 2018-12-15T16:13:09.000Z | fire/cli/__init__.py | xidus/FIRE | 9508b26faff4830b70981c21d3fdc33c20c85dde | [
"MIT"
] | 49 | 2018-11-13T23:04:09.000Z | 2019-10-31T11:15:13.000Z | fire/cli/__init__.py | xidus/FIRE | 9508b26faff4830b70981c21d3fdc33c20c85dde | [
"MIT"
] | 3 | 2018-11-15T14:06:36.000Z | 2019-01-07T13:30:29.000Z | """
The command-line interface (CLI) for FIRE's API.
"""
import sys
import click
from fire.api import FireDb
firedb = FireDb()
_show_colors = True
def _set_monochrome(ctx, param, value):
"""
    Apply the value of --monokrom and set the global value of _show_colors.
"""
global _show_colors
_show_colors = not value
def _set_debug(ctx, param, value):
"""
    Toggle debug mode on the firedb object via --debug.
"""
global firedb
firedb.engine.echo = value
def _set_database(ctx, param, value):
"""
    Select a specific database connection.
"""
if value is not None:
new_firedb = FireDb(db=str(value).lower())
override_firedb(new_firedb)
_default_options = [
click.option(
"--db",
type=click.Choice(["prod", "test"]),
default=None,
callback=_set_database,
help="Vælg en specifik databaseforbindelse - default_connection i fire.ini bruges hvis intet vælges.",
),
click.option(
"-m",
"--monokrom",
is_flag=True,
callback=_set_monochrome,
help="Vis ikke farver i terminalen",
),
click.option(
"--debug",
is_flag=True,
callback=_set_debug,
help="Vis debug output fra FIRE-databasen.",
),
click.help_option(help="Vis denne hjælp tekst"),
]
def default_options(**kwargs):
"""Create decorator that handles all default options"""
def _add_options(func):
# Click-produced help text shows arguments and options
# in the order they were added.
# Reversing the order to have it shown in same order in
# the help text as items were defined in the list.
for option in reversed(_default_options):
func = option(func)
return func
return _add_options
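# Usage sketch (illustrative; the command below is hypothetical and not part
# of FIRE): default_options() attaches --db, --monokrom, --debug and --help.
@click.command()
@default_options()
def _example_command():
    """Hypothetical command demonstrating default_options."""
    print(grøn("FIRE OK"))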
def farvelæg(tekst: str, farve: str):
"""
    Colorize a text that is printed via Click.
"""
    # Avoid ANSI color codes in the Sphinx HTML docs
if "sphinx" in sys.modules:
return tekst
if not _show_colors:
return tekst
return click.style(tekst, fg=farve)
def grøn(tekst: str):
"""
    Color a text printed via Click green.
"""
return farvelæg(tekst, "green")
def rød(tekst: str):
"""
    Color a text printed via Click red.
"""
return farvelæg(tekst, "red")
def print(*args, **kwargs):
"""
    FIRE-specific print function based on click.secho.
    Overrides the color when the --monokrom parameter is used in
    command-line invocations.
"""
kwargs["color"] = _show_colors
click.secho(*args, **kwargs)
def override_firedb(new_firedb: FireDb):
"""
    Allow using a firedb other than the one that fire.cli creates
    automatically.
"""
global firedb
firedb = new_firedb
| 22.291339 | 110 | 0.635818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,281 | 0.449001 |
0f0d09870e10aa47900875345d9579dc0d49b729 | 817 | py | Python | background_modelling.py | blurry-mood/computer-vision-opencv | 327fe65b1c731e19c4d83c468f93cc7edc818918 | [
"MIT"
] | 1 | 2021-12-22T09:47:15.000Z | 2021-12-22T09:47:15.000Z | background_modelling.py | blurry-mood/computer-vision-opencv | 327fe65b1c731e19c4d83c468f93cc7edc818918 | [
"MIT"
] | null | null | null | background_modelling.py | blurry-mood/computer-vision-opencv | 327fe65b1c731e19c4d83c468f93cc7edc818918 | [
"MIT"
] | null | null | null | import cv2 as cv
"""
Choose background substractor
"""
algo = 'MOG2'
input_path = 'videos/shine.mp4'  # renamed to avoid shadowing the built-in input()
if algo == 'MOG2':
backSub = cv.createBackgroundSubtractorMOG2()
else:
backSub = cv.createBackgroundSubtractorKNN()
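# Informational note: MOG2 models each pixel with a mixture of Gaussians,
# while KNN uses a K-nearest-neighbours estimate that the OpenCV docs
# recommend when the foreground region is expected to be small.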
capture = cv.VideoCapture(input_path)
if not capture.isOpened():
    print('Unable to open: ' + input_path)
exit(0)
while True:
ret, frame = capture.read()
if frame is None:
break
fgMask = backSub.apply(frame)
cv.rectangle(frame, (10, 2), (100,20), (255,255,255), -1)
cv.putText(frame, str(capture.get(cv.CAP_PROP_POS_FRAMES)), (15, 15),
cv.FONT_HERSHEY_SIMPLEX, 0.5 , (0,0,0))
cv.imshow('Frame', frame)
cv.imshow('FG Mask', fgMask)
keyboard = cv.waitKey(30)
    if keyboard == ord('q') or keyboard == 27:
        break
| 22.081081 | 73 | 0.609547 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.127295 |
0f10e889a3b3c3b84aaddb6c89475481e138ea2f | 3,271 | py | Python | visualization.py | Agnar22/MachineLearning | bee0fcf0712de6384eb0d2d95be8574fea31fdf2 | [
"MIT"
] | 1 | 2021-01-11T18:00:06.000Z | 2021-01-11T18:00:06.000Z | visualization.py | Agnar22/MachineLearning | bee0fcf0712de6384eb0d2d95be8574fea31fdf2 | [
"MIT"
] | null | null | null | visualization.py | Agnar22/MachineLearning | bee0fcf0712de6384eb0d2d95be8574fea31fdf2 | [
"MIT"
] | null | null | null | import config
import pandas as pd
import matplotlib.pyplot as plt
#import lstm
from keras.models import Sequential
import matplotlib.dates as mdates
def visualize_spread_for_countries(data: pd.DataFrame):
"""
:param data: a pandas dataframe of the data to visualize.
:return:
"""
countries_to_visualize = []
for country in config.COUNTRIES:
countries_to_visualize.append(
{
'x': data[data['CountryName'] == country]['date'],
'y': data[data['CountryName'] == country]['total_cases_per_million'],
'name': country
}
)
draw_graph(*countries_to_visualize, x='date', y='total cases per million')
def draw_graph(*args, x: str = 'x', y: str = 'y'):
"""
    :param args: dict('x': list, 'y': list, 'name': str, optional 'line-style': str)
:param y: label for y axis.
:param x: label for x axis.
:return:
"""
plt.close('all')
for func in args:
        plt.plot(func['x'], func['y'], func.get('line-style', '-'), label=func['name'])  # solid line when no style given
X = plt.gca().xaxis
# Set the locator
locator = mdates.MonthLocator() # every month
# Specify the format - %b gives us Jan, Feb...
fmt = mdates.DateFormatter('%b')
X.set_major_locator(locator)
# Specify formatter
X.set_major_formatter(fmt)
plt.xlabel(x)
plt.ylabel(y)
plt.xticks(fontsize=8)
plt.legend()
plt.grid(False)
plt.show(block=True)
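def _draw_graph_demo():
    """Usage sketch for draw_graph with made-up data (illustrative only)."""
    import datetime
    days = [datetime.date(2020, 3, d) for d in (1, 2, 3)]
    draw_graph(
        {'x': days, 'y': [1, 4, 9], 'line-style': 'o-', 'name': 'cases'},
        {'x': days, 'y': [1, 2, 3], 'name': 'baseline'},
        x='date', y='count')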
def visualize_predictions(cases: pd.DataFrame, model: Sequential, cases_norway: pd.DataFrame):
loop = True
while loop:
try:
start_day = int(input("Start day:"))
prediction_length = int(input("prediction length:"))
output_start = start_day + config.INPUTDAYS
output_end = output_start + prediction_length
features = ['C1_School closing', 'C2_Workplace closing', 'C3_Cancel public events',
'C4_Restrictions on gatherings',
'C5_Close public transport', 'C6_Stay at home requirements', 'C7_Restrictions on internal movement',
'C8_International travel controls', 'E1_Income support', 'E2_Debt/contract relief',
'E3_Fiscal measures',
'E4_International support', 'H1_Public information campaigns', 'H2_Testing policy',
'H3_Contact tracing',
'H4_Emergency investment in healthcare', 'H6_Facial Coverings', 'ConfirmedCases']
predictions = lstm.predict(model, cases.iloc[start_day:output_start][features].to_numpy(),
prediction_length)
for day in range(predictions.shape[0]):
print(cases['date'].iloc[output_start + day], predictions[day], cases['ConfirmedCases'].iloc[output_start + day])
draw_graph(
{'x': cases['date'].iloc[output_start:output_end], 'y': predictions.tolist(), 'name': 'prediction'},
{'x': cases['date'].iloc[:start_day], 'y': cases_norway['ConfirmedCases'].iloc[:start_day], 'name': 'start'},
{'x': cases['date'].iloc[start_day:output_start], 'y': cases['ConfirmedCases'].iloc[start_day:output_start],
'name': 'input'},
{'x': cases['date'].iloc[output_start:output_end], 'y': cases['ConfirmedCases'].iloc[output_start:output_end],
'name': 'target'},
)
        except Exception:  # keep the retry loop alive on bad input or prediction errors
ans = input("quit?")
if ans == 'y':
loop = False
| 38.034884 | 121 | 0.641394 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,163 | 0.355549 |
0f110666530b2d0778bb5bff1c809c8ebc73b13f | 1,146 | py | Python | src/livedumper/common.py | m45t3r/livedumper | f6441283269b4a602cafea3be5cda9446fc64005 | [
"BSD-2-Clause"
] | 17 | 2015-02-10T12:18:22.000Z | 2018-03-23T05:28:51.000Z | src/livedumper/common.py | m45t3r/livedumper | f6441283269b4a602cafea3be5cda9446fc64005 | [
"BSD-2-Clause"
] | 3 | 2015-01-12T17:32:20.000Z | 2016-12-13T23:55:38.000Z | src/livedumper/common.py | m45t3r/livedumper | f6441283269b4a602cafea3be5cda9446fc64005 | [
"BSD-2-Clause"
] | 3 | 2015-02-06T09:58:09.000Z | 2016-01-04T23:46:28.000Z | "Common functions that may be used everywhere"
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os
import sys
from distutils.util import strtobool
try:
input = raw_input
except NameError:
pass
def yes_no_query(question):
"""Ask the user *question* for 'yes' or 'no'; ask again until user
inputs a valid option.
Returns:
'True' if user answered 'y', 'yes', 't', 'true', 'on' or '1'.
'False' if user answered 'n', 'no', 'f', 'false', 'off' or '0'.
"""
print("{} (y/n)".format(question), end=" "),
while True:
try:
return strtobool(input().lower())
except ValueError:
print("Please respond with 'y' or 'n'.")
def ask_overwrite(dest):
"""Check if file *dest* exists. If 'True', asks if the user wants
to overwrite it (just remove the file for later overwrite).
"""
msg = "File '{}' already exists. Overwrite file?".format(dest)
if os.path.exists(dest):
if yes_no_query(msg):
os.remove(dest)
else:
sys.exit("Cancelling operation...")
| 26.045455 | 70 | 0.602094 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 546 | 0.47644 |
0f116262d51df870092baaa77da7c1a3942b13fa | 121 | py | Python | BOJ/week02/recursion/ex10872.py | FridayAlgorithm/taesong_study | 50c07ee6ead0fb5bb80e0decb03b801cbbbabf9c | [
"MIT"
] | null | null | null | BOJ/week02/recursion/ex10872.py | FridayAlgorithm/taesong_study | 50c07ee6ead0fb5bb80e0decb03b801cbbbabf9c | [
"MIT"
] | null | null | null | BOJ/week02/recursion/ex10872.py | FridayAlgorithm/taesong_study | 50c07ee6ead0fb5bb80e0decb03b801cbbbabf9c | [
"MIT"
] | 2 | 2020-12-27T15:03:46.000Z | 2021-03-06T14:13:34.000Z | N = int(input())
def factorial(N):
if N == 0:
return 1
return N * factorial(N-1)
print(factorial(N))
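# Worked example: factorial(4) -> 4 * factorial(3) -> ... -> 4 * 3 * 2 * 1 = 24,
# with factorial(0) == 1 as the base case.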
| 11 | 29 | 0.545455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0f11a8afccc861d59d504d51479b9cc0588a8670 | 4,498 | py | Python | pytest_lambda/fixtures.py | mikelane/pytest-lambda | e83b47e3b4fdb088f18fc7ee1f52c3ae933c5663 | [
"MIT"
] | 1 | 2021-04-21T03:07:15.000Z | 2021-04-21T03:07:15.000Z | pytest_lambda/fixtures.py | mikelane/pytest-lambda | e83b47e3b4fdb088f18fc7ee1f52c3ae933c5663 | [
"MIT"
] | null | null | null | pytest_lambda/fixtures.py | mikelane/pytest-lambda | e83b47e3b4fdb088f18fc7ee1f52c3ae933c5663 | [
"MIT"
] | null | null | null | import inspect
from typing import Union, Callable, Any, Iterable
from pytest_lambda.exceptions import DisabledFixtureError, NotImplementedFixtureError
from pytest_lambda.impl import LambdaFixture
__all__ = ['lambda_fixture', 'static_fixture', 'error_fixture',
'disabled_fixture', 'not_implemented_fixture']
def lambda_fixture(fixture_name_or_lambda: Union[str, Callable]=None,
*other_fixture_names: Iterable[str],
bind=False,
scope="function", params=None, autouse=False, ids=None, name=None):
"""Use a fixture name or lambda function to compactly declare a fixture
Usage:
class DescribeMyTests:
url = lambda_fixture('list_url')
updated_name = lambda_fixture(lambda vendor: vendor.name + ' updated')
:param fixture_name_or_lambda: Either the name of another fixture, or a
lambda function, which can request other fixtures with its params. If
None, this defaults to the name of the attribute containing the lambda_fixture.
:param bind: Set this to true to pass self to your fixture. It must be the
first parameter in your fixture. This cannot be true if using a fixture
name.
"""
if other_fixture_names:
fixture_names_or_lambda = (fixture_name_or_lambda,) + other_fixture_names
else:
fixture_names_or_lambda = fixture_name_or_lambda
return LambdaFixture(fixture_names_or_lambda, bind=bind, scope=scope,
params=params, autouse=autouse, ids=ids, name=name)
def static_fixture(value: Any, **fixture_kwargs):
"""Compact method for defining a fixture that returns a static value
"""
return lambda_fixture(lambda: value, **fixture_kwargs)
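# Equivalence sketch (illustrative names, not part of the public API):
# static_fixture is shorthand for a zero-argument lambda_fixture.
_page_size_static = static_fixture(25)
_page_size_lambda = lambda_fixture(lambda: 25)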
RAISE_EXCEPTION_FIXTURE_FUNCTION_FORMAT = '''
def raise_exception({args}):
exc = error_fn({kwargs})
if exc is not None:
raise exc
'''
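# Worked expansion (illustrative): for error_fn = lambda request: ..., the
# template above renders to
#
#     def raise_exception(request):
#         exc = error_fn(request=request)
#         if exc is not None:
#             raise exc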
def error_fixture(error_fn: Callable, **fixture_kwargs):
"""Fixture whose usage results in the raising of an exception
Usage:
class DescribeMyTests:
url = error_fixture(lambda request: Exception(
f'Please override the {request.fixturename} fixture!'))
:param error_fn: fixture method which returns an exception to raise. It may
request pytest fixtures in its arguments
"""
proto = tuple(inspect.signature(error_fn).parameters)
args = ', '.join(proto)
kwargs = ', '.join(f'{arg}={arg}' for arg in proto)
source = RAISE_EXCEPTION_FIXTURE_FUNCTION_FORMAT.format(
args=args,
kwargs=kwargs,
)
ctx = {'error_fn': error_fn}
exec(source, ctx)
raise_exception = ctx['raise_exception']
return lambda_fixture(raise_exception, **fixture_kwargs)
def disabled_fixture(**fixture_kwargs):
"""Mark a fixture as disabled – using the fixture will raise an error
This is useful when you know any usage of a fixture would be in error. When
using disabled_fixture, pytest will raise an error if the fixture is
requested, so errors can be detected early, and faulty assumptions may be
avoided.
Usage:
class DescribeMyListOnlyViewSet(ViewSetTest):
list_route = lambda_fixture(lambda: reverse('...'))
detail_route = disabled_fixture()
class DescribeRetrieve(UsesDetailRoute):
def test_that_should_throw_error():
print('I should never be executed!')
"""
def build_disabled_fixture_error(request):
msg = (f'Usage of the {request.fixturename} fixture has been disabled '
f'in the current context.')
return DisabledFixtureError(msg)
return error_fixture(build_disabled_fixture_error, **fixture_kwargs)
def not_implemented_fixture(**fixture_kwargs):
"""Mark a fixture as abstract – requiring definition/override by the user
This is useful when defining abstract base classes requiring implementation
to be used correctly.
Usage:
class MyBaseTest:
list_route = not_implemented_fixture()
class TestThings(MyBaseTest):
list_route = lambda_fixture(lambda: reverse(...))
"""
def build_not_implemented_fixture_error(request):
msg = (f'Please define/override the {request.fixturename} fixture in '
f'the current context.')
return NotImplementedFixtureError(msg)
return error_fixture(build_not_implemented_fixture_error, **fixture_kwargs)
| 33.819549 | 87 | 0.689862 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,600 | 0.577521 |
0f11c7fc72c1e19f0e03c6226a3a153483d3fe2a | 16,983 | py | Python | NaiveBayes/NaiveBayes/arffreader/ArffProcessor.py | NickChapman/Naive-Bayes | 7620bca26d63bc0adebda974870ecdd0def8fac2 | [
"MIT"
] | null | null | null | NaiveBayes/NaiveBayes/arffreader/ArffProcessor.py | NickChapman/Naive-Bayes | 7620bca26d63bc0adebda974870ecdd0def8fac2 | [
"MIT"
] | 4 | 2016-02-15T21:32:39.000Z | 2016-02-18T08:49:27.000Z | Nicholas_Chapman_NBC_Submission/arffreader/ArffProcessor.py | NickChapman/Naive-Bayes | 7620bca26d63bc0adebda974870ecdd0def8fac2 | [
"MIT"
] | null | null | null | import random, math
import utils
class ArffProcessor(object):
"""Loads and manages an ARFF file"""
def __init__(self, file_path):
"""Loads an ARFF file, fills in missing data points
@param file_path: Path to the ARFF file
"""
# Load the file into memory and do initial processing
self.load_file(file_path)
# Map the attributes to their positions in the data line
self.map_attributes_to_num()
def load_file(self, file_path):
"""Loads an ARFF file into memory and extracts all information
@param file_path: Path to the ARFF file
"""
self.file_path = file_path
# Open the file
self.file = open(file_path, 'r')
# Process the headers
self.relation = ""
self.attributes = [] # Contains tuple pairs of (attr_name, attr_values)
self.data = []
lines = self.file.readlines()
headers_done = False
for line in lines:
# Remove leading and trailing whitespace
line = line.strip()
# Disregard commented out and blank lines
if line.startswith("%") or line == "":
continue
if not headers_done:
# Process the headers
if line.lower().startswith("@"):
# @relation
if line.lower().startswith("@relation"):
# Make sure we are not already processing a relation
if self.relation != "":
raise IOError("The ARFF file contains more than one relation definition")
else:
self.relation = line.split()[1]
# @attribute
if line.lower().startswith("@attribute"):
attr_name = line.split()[1]
# Check to see if it is a nominal attribute
if "{" in line:
# Get rid of the { and }
clean_line = line.replace("{", "")
clean_line = clean_line.replace("}", "")
line_parts = clean_line.split(",")
# Remove pieces from the first one which has too much
values = []
values.append(line_parts[0].split()[-1])
for i in range(1, len(line_parts)):
values.append(line_parts[i].strip())
self.attributes.append((attr_name, values))
else:
# Numeric or string attribute
# NO SUPPORT FOR DATES AT PRESENT
values = line.lower().split()[-1]
self.attributes.append((attr_name, values))
# @data
if line.lower().startswith("@data"):
# Nothing to do, just means reading is about to commence
headers_done = True
# Begin reading in data
else:
# Convert each data line into a list with the index corresponding to the attribute
data_line = [x.strip() for x in line.split(",")]
self.data.append(data_line)
# Convert numeric data into actual numbers instead of strings
self.map_attributes_to_num()
for attr in self.attributes:
attr_name = attr[0]
            attr_type = attr[1]  # renamed to avoid shadowing the built-in type
            # The next check must be in this order to short circuit
            if (not isinstance(attr_type, list)) and (attr_type.lower() == "numeric"):
# Convert that column into actual numbers
for entry in self.data:
# We will try to convert it to an int first
try:
entry[self.attr_position[attr_name]] = int(entry[self.attr_position[attr_name]])
except ValueError:
# int conversion failed so make it a float
entry[self.attr_position[attr_name]] = float(entry[self.attr_position[attr_name]])
self.file.close()
def fill_holes(self, core_attribute):
""" Finds holes in the data and fills them in
Numeric values are filled in with the attribute mean
Categorical values are filled in with the attribute mode
"""
# This first call to map the attributes is potentially redundant
# However, it's easier to just repeat this minimal step rather than catch errors
# TODO: Optimize this call in some way
self.map_attributes_to_num()
for attribute in self.attributes:
attr_name = attribute[0]
attr_values = attribute[1]
# Determine attribute type
if isinstance(attr_values, list):
# It's nominal
# Create a counter for each nominal bin
count = {}
for label in attr_values:
count[label] = 0
# Find out how many times each
for entry in self.data:
entry_label_value = entry[self.attr_position[attr_name]]
if entry_label_value == "?":
# Skip this one
continue
count[entry_label_value] += 1
fill_choices = utils.get_dict_modes(count)
# Now that we have our choices we will back fill missing values
# We will choose from fill_choices at random
for entry in self.data:
entry_label_value = entry[self.attr_position[attr_name]]
if entry_label_value == "?":
# Choose at random
entry[self.attr_position[attr_name]] = random.choice(fill_choices)
elif attr_values.lower() == "numeric":
totals_count = {}
class_count = {}
for core_value in self.attributes[self.attr_position[core_attribute]][1]:
totals_count[core_value] = 0
class_count[core_value] = .01 #Prevents divide by zero
for entry in self.data:
entry_label_value = entry[self.attr_position[attr_name]]
if entry_label_value == "?":
# Skip this row
continue
entry_core_value = entry[self.attr_position[core_attribute]]
totals_count[entry_core_value] += entry_label_value
class_count[entry_core_value] += 1
averages = {}
for core_value in totals_count:
averages[core_value] = totals_count[core_value]/class_count[core_value]
# Now fill in this average where necessary
for entry in self.data:
entry_label_value = entry[self.attr_position[attr_name]]
if entry_label_value == "?":
entry_core_value = entry[self.attr_position[core_attribute]]
entry[self.attr_position[attr_name]] = averages[entry_core_value]
else:
# TODO: Implement additional data type handlers
# For now we will raise an exception if we make it to here because
# something has definitely gone wrong in that case
raise NotImplementedError("Need to implement handling for types beyond categorical and numeric")
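    # Worked example for the mode fill above (illustrative values): a nominal
    # column holding ['a', 'a', 'b', '?'] counts to {a: 2, b: 1}, so the '?'
    # becomes 'a'; a numeric gap instead receives the mean of that column for
    # rows sharing the entry's core_attribute value.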
def entropy_discretize_single_numeric(self, numeric_attribute, core_attribute, gain_threshold=.05):
self.map_attributes_to_num()
# TODO
def entropy_discretize_numerics(self, core_attribute, gain_threshold=.05):
""" Converts all numerical attributes to categorical
Uses entropy based discretization.
"""
self.map_attributes_to_num()
for attribute in self.attributes:
# We are only concerned with numeric attributes
if attribute[1] == "numeric":
attr_name = attribute[0]
# Sort the data into ascending order based on that attribute
self.data.sort(key=lambda x : x[self.attr_position[attr_name]])
attr_ranges = []
self.get_splits(0, len(self.data) - 1, attr_name, core_attribute, gain_threshold, attr_ranges)
# We now have the bin ranges for this attribute
# We need to create the bin objects for this attribute
attr_bins = []
for pair in attr_ranges:
lower_bound = self.data[pair[0]][self.attr_position[attr_name]]
upper_bound = self.data[pair[1]][self.attr_position[attr_name]]
attr_bins.append(utils.NumericalDataBin(gte=lower_bound, lt=upper_bound))
# Sort the attribute bins on one of their bounds
attr_bins.sort(key=lambda x : x.min)
# Set the lowest bins minimum to -Infinity and the highest bins max to Infinity
attr_bins[0].min = -float("inf")
attr_bins[-1].max = float("inf")
#Stitch the bins together to complete the continuous range
for i in range(1, len(attr_bins)):
attr_bins[i].min = attr_bins[i - 1].max
# Apply these bins to the data values
for entry in self.data:
attr_value = entry[self.attr_position[attr_name]]
entry[self.attr_position[attr_name]] = self.get_bin(attr_bins, attr_value)
# The attributes are tuples so we can't modify them
# The following is a work around
temp = list(attribute)
temp[1] = attr_bins
temp = tuple(temp)
for i in range(len(self.attributes)):
if self.attributes[i][0] == temp[0]:
self.attributes[i] = temp
break
@staticmethod
def get_bin(bin_list, value):
"""Takes a list of NumericalDataBins and returns the bin for the value"""
for bin in bin_list:
if bin.belongs_to_bin(value):
return bin
# If we get here something has gone wrong
raise AssertionError("A bin was not found for a data point. This should never happen")
def get_splits(self, lower_index, upper_index, binning_attribute, core_attribute, gain_threshold, ranges):
""" RANGES WILL CONTAIN ALL OF THE FINAL RANGE VALUES"""
results = self.find_best_split(lower_index, upper_index, binning_attribute, core_attribute, gain_threshold)
should_split = results[0]
lower_range = results[1]
upper_range = results[2]
if not should_split:
ranges.append((lower_index, upper_index))
else:
# Find the index where the split occurs
split_value = lower_range[1]
split_index = lower_index
for i in range(lower_index + 1, upper_index + 1):
if self.data[i][self.attr_position[binning_attribute]] >= split_value:
break
else:
split_index += 1
self.get_splits(lower_index, split_index, binning_attribute, core_attribute, gain_threshold, ranges)
self.get_splits(split_index + 1, upper_index, binning_attribute, core_attribute, gain_threshold, ranges)
def find_best_split(self, lower_index, upper_index, binning_attribute, core_attribute, gain_threshold):
""" ASSUMES DATA IS SORTED ON BINNING_ATTRIBUTE
@returns a tuple of one bool and 2 range tuples: (should_split, (min_value, ideal_split), (ideal_split, max_value))
"""
# If lower_index == upper_index then obviously there is nothing to split so should_split = false and we move on
if lower_index == upper_index:
return (False, (lower_index, lower_index), (lower_index, lower_index))
# Get the bins starting entropy
# TODO: Correct the following assumption
# Assume that core_attributes are always categorical
overall_entropy = self.entropy(lower_index, upper_index, float("inf"), binning_attribute, core_attribute)
# best split is initially the first split
ideal_split = (self.data[lower_index][self.attr_position[binning_attribute]]
+ self.data[lower_index + 1][self.attr_position[binning_attribute]]) / 2
ideal_entropy = self.entropy(lower_index, upper_index, ideal_split, binning_attribute, core_attribute)
# Calculate the entropy for a number of possible splits
# We set a limit because otherwise this takes wayyyyy too long
step = max(1, (upper_index - (lower_index + 1)) // int(10*math.log10(upper_index - (lower_index)) + 1))
for i in range(lower_index + 1, upper_index, step):
split = (self.data[i][self.attr_position[binning_attribute]]
+ self.data[i + 1][self.attr_position[binning_attribute]]) / 2
split_entropy = self.entropy(lower_index, upper_index, split, binning_attribute, core_attribute)
if split_entropy < ideal_entropy:
ideal_split = split
ideal_entropy = split_entropy
# Determine whether it is worth it to split up this range
should_split = False
if (overall_entropy - ideal_entropy) >= gain_threshold:
should_split = True
range_min = self.data[lower_index][self.attr_position[binning_attribute]]
range_max = self.data[upper_index][self.attr_position[binning_attribute]]
return (should_split, (range_min, ideal_split), (ideal_split, range_max))
def entropy(self, lower_index, upper_index, split_point, binning_attribute, core_attribute):
""" Determines entropy of a given split
ASSUMES DATA IS SORTED ON BINNING_ATTRIBUTE """
sample_size = upper_index - lower_index + 1;
probabilities = {}
net_entropy = 0;
lower_entropy = 0
upper_entropy = 0
lower_bin_size = 0
upper_bin_size = 0
# Get entropy for the bin less than split_point
for attr_value in self.attributes[self.attr_position[core_attribute]][1]:
# Ensuring that none of the probabilities come out to 0 ensures the entropy calculation works
probabilities[attr_value] = .5
        # Count the occurrences of each core attribute value in the lower bin
for i in range(lower_index, upper_index + 1):
if self.data[i][self.attr_position[binning_attribute]] < split_point:
probabilities[self.data[i][self.attr_position[core_attribute]]] += 1
lower_bin_size += 1
# Perform the actual entropy calculation
if lower_bin_size == 0:
lower_entropy = 0
else:
for attr_value in probabilities:
p = probabilities[attr_value] / lower_bin_size
lower_entropy += p * math.log2(p)
# Multiply the result by negative 1 to factor in the fact that it is -Sum...
lower_entropy *= -1
# Repeat for the upper bin
# Get entropy for the bin greater than or equal to split_point
for attr_value in self.attributes[self.attr_position[core_attribute]][1]:
# Ensuring that none of the probabilities come out to 0 ensures the entropy calculation works
probabilities[attr_value] = .5
        # Count the occurrences of each core attribute value in the upper bin
for i in range(lower_index, upper_index + 1):
if self.data[i][self.attr_position[binning_attribute]] >= split_point:
probabilities[self.data[i][self.attr_position[core_attribute]]] += 1
upper_bin_size += 1
# Perform the actual entropy calculation
if upper_bin_size == 0:
upper_entropy = 0
else:
for attr_value in probabilities:
p = probabilities[attr_value] / upper_bin_size
upper_entropy += p * math.log2(p)
# Multiply the result by negative 1 to factor in the fact that it is -Sum...
upper_entropy *= -1.0
# Calculate the net entropy
net_entropy = lower_bin_size / sample_size * lower_entropy + upper_bin_size / sample_size * upper_entropy
return net_entropy
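    # Worked example for the bin entropy above (illustrative numbers): a bin
    # holding 8 samples of one class and 2 of another gives roughly
    # H = -(0.8*log2(0.8) + 0.2*log2(0.2)) ~= 0.722 bits (the code also adds
    # 0.5 pseudo-counts to avoid log(0)), and net_entropy is the size-weighted
    # average of the two candidate bins' H values.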
def map_attributes_to_num(self):
"""Maps the attribute to its position in a data line"""
self.attr_position = {}
for i, attribute in enumerate(self.attributes):
attr_name = attribute[0]
            self.attr_position[attr_name] = i
| 52.906542 | 123 | 0.579933 | 16,949 | 0.997998 | 0 | 0 | 384 | 0.022611 | 0 | 0 | 4,736 | 0.278867 |
0f12055f98804756bad971fcd0011760c5d5e75a | 15,935 | py | Python | indico/web/util.py | javfg/indico | 2634756ba1e9caf6dd8fc9afc3f47291fda5816d | [
"MIT"
] | null | null | null | indico/web/util.py | javfg/indico | 2634756ba1e9caf6dd8fc9afc3f47291fda5816d | [
"MIT"
] | null | null | null | indico/web/util.py | javfg/indico | 2634756ba1e9caf6dd8fc9afc3f47291fda5816d | [
"MIT"
] | null | null | null | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import hashlib
import sys
from datetime import datetime
import sentry_sdk
from authlib.oauth2 import OAuth2Error
from flask import flash, g, has_request_context, jsonify, render_template, request, session
from itsdangerous import Signer
from markupsafe import Markup
from werkzeug.exceptions import BadRequest, Forbidden, ImATeapot
from werkzeug.urls import url_decode, url_encode, url_parse, url_unparse
from indico.util.caching import memoize_request
from indico.util.i18n import _
from indico.web.flask.templating import get_template_module
def inject_js(js):
"""Inject JavaScript into the current page.
:param js: Code wrapped in a ``<script>`` tag.
"""
if 'injected_js' not in g:
g.injected_js = []
g.injected_js.append(Markup(js))
def _pop_injected_js():
js = None
if 'injected_js' in g:
js = g.injected_js
del g.injected_js
return js
def jsonify_form(form, fields=None, submit=None, back=None, back_url=None, back_button=True, disabled_until_change=True,
disabled_fields=(), form_header_kwargs=None, skip_labels=False, save_reminder=False,
footer_align_right=False, disable_if_locked=True, message=None):
"""Return a json response containing a rendered WTForm.
This is shortcut to the ``simple_form`` jinja macro to avoid
adding new templates that do nothing besides importing and
calling this macro.
:param form: A WTForms `Form` instance
:param fields: A list of fields to be displayed on the form
:param submit: The title of the submit button
:param back: The title of the back button
:param back_url: The URL the back button redirects to
:param back_button: Whether to show a back button
:param disabled_until_change: Whether to disable form submission
until a field is changed
:param disabled_fields: List of field names to disable
:param form_header_kwargs: Keyword arguments passed to the
``form_header`` macro
:param skip_labels: Whether to show labels on the fields
:param save_reminder: Whether to show a message when the form has
been modified and the save button is not
visible
:param footer_align_right: Whether the buttons in the event footer
should be aligned to the right.
:param disable_if_locked: Whether the form should be disabled when
the associated event is locked (based on
a CSS class in the DOM structure)
"""
if submit is None:
submit = _('Save')
if back is None:
back = _('Cancel')
if form_header_kwargs is None:
form_header_kwargs = {}
tpl = get_template_module('forms/_form.html')
html = tpl.simple_form(form, fields=fields, submit=submit, back=back, back_url=back_url, back_button=back_button,
disabled_until_change=disabled_until_change, disabled_fields=disabled_fields,
form_header_kwargs=form_header_kwargs, skip_labels=skip_labels, save_reminder=save_reminder,
footer_align_right=footer_align_right, disable_if_locked=disable_if_locked, message=message)
return jsonify(html=html, js=_pop_injected_js())
def jsonify_template(template, _render_func=render_template, _success=None, **context):
"""Return a json response containing a rendered template."""
html = _render_func(template, **context)
jsonify_kw = {}
if _success is not None:
jsonify_kw['success'] = _success
return jsonify(html=html, js=_pop_injected_js(), **jsonify_kw)
def jsonify_data(flash=True, **json_data):
"""Return a json response with some default fields.
This behaves similar to :func:`~flask.jsonify`, but includes
``success=True`` and flashed messages by default.
:param flash: if the json data should contain flashed messages
:param json_data: the data to include in the json response
"""
json_data.setdefault('success', True)
if flash:
json_data['flashed_messages'] = render_template('flashed_messages.html')
return jsonify(**json_data)
class ExpectedError(ImATeapot):
"""
An error that is expected to happen and is guaranteed to be handled
by client-side code.
Use this class in new react-based code together with the AJAX
actions when you expect things to go wrong and want to handle
them in a nicer way than the usual error dialog.
:param message: A short message describing the error
:param data: Any additional data to return
"""
def __init__(self, message, **data):
super().__init__(message or 'Something went wrong')
self.data = dict(data, message=message)
def _format_request_data(data, hide_passwords=False):
if not hasattr(data, 'lists'):
data = ((k, [v]) for k, v in data.items())
else:
data = data.lists()
rv = {}
for key, values in data:
if hide_passwords and 'password' in key:
values = [v if not v else f'<{len(v)} chars hidden>' for v in values]
rv[key] = values if len(values) != 1 else values[0]
return rv
def get_request_info(hide_passwords=True):
"""Get various information about the current HTTP request.
This is especially useful for logging purposes where you want
as many information as possible.
:param hide_passwords: Hides the actual value of POST fields
if their name contains ``password``.
:return: a dictionary containing request information, or ``None``
when called outside a request context
"""
if not has_request_context():
return None
try:
user_info = {
'id': session.user.id,
'name': session.user.full_name,
'email': session.user.email
} if session.user else None
except Exception as exc:
user_info = f'ERROR: {exc}'
return {
'id': request.id,
'time': datetime.now().isoformat(),
'url': request.url,
'endpoint': request.url_rule.endpoint if request.url_rule else None,
'method': request.method,
'rh': g.rh.__class__.__name__ if 'rh' in g else None,
'user': user_info,
'ip': request.remote_addr,
'user_agent': str(request.user_agent),
'referrer': request.referrer,
'data': {
'url': _format_request_data(request.view_args) if request.view_args is not None else None,
'get': _format_request_data(request.args),
'post': _format_request_data(request.form, hide_passwords=hide_passwords),
'json': request.get_json(silent=True),
'headers': _format_request_data(request.headers, False),
}
}
def url_for_index(_external=False, _anchor=None):
from indico.web.flask.util import url_for
return url_for('categories.display', _external=_external, _anchor=_anchor)
def is_legacy_signed_url_valid(user, url):
"""Check whether a legacy signed URL is valid for a user.
This util is deprecated and only exists because people may be actively
using URLs using the old style token. Any new code should use the new
:func:`signed_url_for_user` and :func:`verify_signed_user_url` utils
which encode the user id within the signature.
"""
parsed = url_parse(url)
params = url_decode(parsed.query)
try:
signature = params.pop('token')
except KeyError:
return False
url = url_unparse((
'',
'',
parsed.path,
url_encode(params, sort=False),
parsed.fragment
))
signer = Signer(user.signing_secret, salt='url-signing')
return signer.verify_signature(url.encode(), signature)
def _get_user_url_signer(user):
return Signer(user.signing_secret, salt='user-url-signing', digest_method=hashlib.sha256)
def signed_url_for_user(user, endpoint, /, *args, **kwargs):
"""Get a URL for an endpoint, which is signed using a user's signing secret.
The user id, path and query string are encoded within the signature.
"""
from indico.web.flask.util import url_for
_external = kwargs.pop('_external', False)
url = url_for(endpoint, *args, **kwargs)
# we include the plain userid in the token so we know which signing secret to load.
# the signature itself is over the method, user id and URL, so tampering with that ID
# would not help.
# using signed urls for anything that's not GET is also very unlikely, but we include
# the method as well just to make sure we don't accidentally sign some URL where POST
# is more powerful and has a body that's not covered by the signature. if we ever want
# to allow such a thing we could of course make the method configurable instead of
# hardcoding GET.
signer = _get_user_url_signer(user)
signature_data = f'GET:{user.id}:{url}'
signature = signer.get_signature(signature_data).decode()
user_token = f'{user.id}_{signature}'
# this is the final URL including the signature ('user_token' parameter); it also
# takes the `_external` flag into account (which is omitted for the signature in
# order to never include the host in the signed part)
return url_for(endpoint, *args, **kwargs, _external=_external, user_token=user_token)
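def _signed_url_signature_example():
    """Standalone sketch (illustrative, not Indico API): the Signer round-trip
    behind signed_url_for_user/verify_signed_user_url."""
    signer = Signer('per-user-secret', salt='user-url-signing', digest_method=hashlib.sha256)
    payload = 'GET:42:/event/1/manage?x=1'
    signature = signer.get_signature(payload).decode()
    assert signer.verify_signature(payload, signature)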
def verify_signed_user_url(url, method):
"""Verify a signed URL and extract the associated user.
:param url: the full relative URL of the request, including the query string
:param method: the HTTP method of the request
:return: the user associated with the signed link or `None` if no token was provided
:raise Forbidden: if a token is present but invalid
"""
from indico.modules.users import User
parsed = url_parse(url)
params = url_decode(parsed.query)
try:
user_id, signature = params.pop('user_token').split('_', 1)
user_id = int(user_id)
except KeyError:
return None
except ValueError:
raise BadRequest(_('The persistent link you used is invalid.'))
url = url_unparse((
'',
'',
parsed.path,
url_encode(params, sort=False),
parsed.fragment
))
user = User.get(user_id)
if not user:
raise BadRequest(_('The persistent link you used is invalid.'))
signer = _get_user_url_signer(user)
signature_data = f'{method}:{user.id}:{url}'
if not signer.verify_signature(signature_data.encode(), signature):
raise BadRequest(_('The persistent link you used is invalid.'))
return user
def get_oauth_user(scopes):
from indico.core.oauth import require_oauth
from indico.core.oauth.util import TOKEN_PREFIX_SERVICE
token = request.headers.get('Authorization', '')
if not token.lower().startswith('bearer ') or token.lower().startswith(f'bearer {TOKEN_PREFIX_SERVICE}'):
return None
try:
oauth_token = require_oauth.acquire_token(scopes)
except OAuth2Error as exc:
require_oauth.raise_error_response(exc)
return oauth_token.user
def _lookup_request_user(allow_signed_url=False, oauth_scope_hint=None):
oauth_scopes = [oauth_scope_hint] if oauth_scope_hint else []
if request.method == 'GET':
oauth_scopes += ['read:everything', 'full:everything']
else:
oauth_scopes += ['full:everything']
signed_url_user = verify_signed_user_url(request.full_path, request.method)
oauth_user = get_oauth_user(oauth_scopes)
session_user = session.get_session_user()
if oauth_user:
if signed_url_user:
raise BadRequest('OAuth tokens and signed URLs cannot be mixed')
if session_user:
raise BadRequest('OAuth tokens and session cookies cannot be mixed')
if signed_url_user and not allow_signed_url:
raise BadRequest('Signature auth is not allowed for this URL')
if signed_url_user:
return signed_url_user, 'signed_url'
elif oauth_user:
return oauth_user, 'oauth'
elif session_user:
return session_user, 'session'
return None, None
def _request_likely_seen_by_user():
return not request.is_xhr and not request.is_json and request.blueprint != 'assets'
def _check_request_user(user, source):
if not user:
return None, None
elif user.is_deleted:
merged_into_user = user.merged_into_user
if source != 'session':
if merged_into_user:
raise Forbidden('User has been merged into another user')
else:
raise Forbidden('User has been deleted')
user = source = None
# If the user is deleted and the request is likely to be seen by
# the user, we forcefully log him out and inform him about it.
if _request_likely_seen_by_user():
session.clear()
if merged_into_user:
msg = _('Your profile has been merged into <strong>{}</strong>. Please log in using that profile.')
flash(Markup(msg).format(merged_into_user.full_name), 'warning')
else:
flash(_('Your profile has been deleted.'), 'error')
elif user.is_blocked:
if source != 'session':
raise Forbidden('User has been blocked')
user = source = None
if _request_likely_seen_by_user():
session.clear()
flash(_('Your profile has been blocked.'), 'error')
return user, source
@memoize_request
def get_request_user():
"""Get the user associated with the current request.
This looks up the user using all ways of authentication that are
supported on the current endpoint. In most cases that's the user
from the active session (via a session cookie), but it may also be
set (or even overridden if there is a session as well) through other
means, such as:
- an OAuth token
- a signature for a persistent url
"""
if g.get('get_request_user_failed'):
# If getting the current user failed, we abort early in case something
# tries again since that code may be in logging or error handling, and
# we don't want that code to fail because of an invalid token in the URL
return None, None
current_exc = sys.exc_info()[1]
rh = type(g.rh) if 'rh' in g else None
oauth_scope_hint = getattr(rh, '_OAUTH_SCOPE', None)
allow_signed_url = getattr(rh, '_ALLOW_SIGNED_URL', False)
try:
user, source = _lookup_request_user(allow_signed_url, oauth_scope_hint)
user, source = _check_request_user(user, source)
except Exception as exc:
g.get_request_user_failed = True
if current_exc:
# If we got here while handling another exception, we silently ignore
# any failure related to authenticating the current user and pretend
# there is no user so we can continue handling the original exception.
# one case when this happens is passing a `user_token` arg to a page
# that 404s. of course the token is not valid there, but the 404 error
# is the more interesting one.
from indico.core.logger import Logger
Logger.get('auth').info('Discarding exception "%s" while authenticating request user during handling of '
'exception "%s"', exc, current_exc)
return None, None
raise
if user:
sentry_sdk.set_user({
'id': user.id,
'email': user.email,
'name': user.full_name,
'source': source
})
return user, source
| 38.12201 | 120 | 0.669658 | 584 | 0.036649 | 0 | 0 | 2,159 | 0.135488 | 0 | 0 | 7,013 | 0.4401 |
0f1273a2a398899dbb4a15cc92f3fa611276f39e | 7,071 | py | Python | eval_DCBC.py | dzhi1993/DCBC_evaluation | 9751278987ae356a6a7b55afe60d43fed8df933b | [
"MIT"
] | null | null | null | eval_DCBC.py | dzhi1993/DCBC_evaluation | 9751278987ae356a6a7b55afe60d43fed8df933b | [
"MIT"
] | null | null | null | eval_DCBC.py | dzhi1993/DCBC_evaluation | 9751278987ae356a6a7b55afe60d43fed8df933b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Created on Mon Aug 17 11:31:32 2020
Distance-Controlled Boundaries Coefficient (DCBC) evaluation
for a functional parcellation of brain cortex
INPUTS:
sn: The return subject number
hems: Hemisphere to test. 'L' - left hemisphere; 'R' - right hemisphere; 'all' - both hemispheres
binWidth: The spatial binning width in mm, default 1 mm
maxDist: The maximum distance for vertices pairs
parcels: The cortical parcellation labels (integer value) to be evaluated, shape is (N,)
N is the number of vertices, 0 - medial wall
condType: The condition type for evaluating
'unique' - evaluation will be done by using unique task conditions of the task set
'all' - evaluation will be done by all task conditions of the task set
taskSet: The task set of MDTB to use for evaluating. 1 - taskset A; 2 - taskset B; [1,2] - both
resolution: The resolution of surface space, either 32k or 164k, 32k as default
distType: The distance metric of vertices pairs, for example Dijkstra's distance, GOD distance
Euclidean distance. Dijkstra's distance as default
icoRes: Icosahedron resolution, 42, 162, 362, 642, 1002, ... default to use 2562
mwallFile: The medial wall to be excluded from the evaluation
OUTPUT:
    struct:     A dict of the DCBC evaluation results per spatial bin (bin counts, distance ranges, mean and weighted correlations)
Author: Da Zhi
'''
import os
import numpy as np
import pandas as pd
import scipy.io as spio
from scipy.sparse import find
import nibabel as nb
def eval_DCBC(sn=[2],subj_name=['s02'], hems='L', maxDist=35, binWidth=1, parcels='',
condType='unique', taskSet=[1],resolution='32k', distType='Dijkstra',
icoRes=162, mWallFile='icos_162'):
taskConds = pd.read_table('DCBC/sc1_sc2_taskConds.txt', delim_whitespace=True)
numBins = int(np.floor(maxDist / binWidth))
    if distType == 'Dijkstra':
dist = spio.loadmat("DCBC/distAvrg_sp.mat")['avrgDs']
    elif distType == 'Sphere':
dist = spio.loadmat("DCBC/distSphere_sp.mat")['avrgDs']
else:
raise TypeError("Distance type cannot be recognized!")
# Determine which hemisphere shall be evaluated
    if hems == 'all':
        hems = ['L', 'R']
    elif hems in ('L', 'R'):  # the original `hems is 'L' or 'R'` was always truthy
hems = [hems]
else:
raise TypeError("Hemisphere type cannot be recognized!")
# Initialization of the result buffers
studyNum, SN, hem = [], [], []
N, bwParcel, distmin, distmax, meanCorr, weightedCorr = [], [], [], [], [], []
for h in hems:
mWall = np.where(parcels == 0)[0]
parcels = np.delete(parcels, mWall) # remove medial wall
parcels = np.abs(parcels - parcels[:, np.newaxis])
dist=dist.todense()
dist = np.delete(dist, mWall, 0)
dist = np.delete(dist, mWall, 1)
row, col, dist = find(dist)
sameRegion = np.zeros((dist.shape[0],), dtype=int)
for i in range(len(row)):
if parcels[row[i]][col[i]] == 0:
sameRegion[i] = 1 # within-parcel
else:
sameRegion[i] = 2 # between-parcel
del parcels
for ts in taskSet:
taskConds = taskConds[taskConds['StudyNum'] == ts]
            if condType == 'unique':  # unique conditions in taskset ts
condIdx = taskConds['condNum'][taskConds['overlap']==0]
            elif condType == 'all':  # all conditions in taskset ts
condIdx = taskConds['condNum']
else:
raise TypeError("Invalid condition type input!")
for s in sn:
this_wcon = nb.load("DCBC/%s/%s.%s.sc%s.con.%s.func.gii" %
(subj_name[s-1],subj_name[s-1], h, ts, resolution))
this_wcon = [x.data for x in this_wcon.darrays]
this_wcon = np.reshape(this_wcon, (len(this_wcon), len(this_wcon[0]))).transpose()
res = np.sqrt(this_wcon[:,-1])
this_wcon = np.delete(this_wcon, [0, this_wcon.shape[1] - 1], axis=1) # remove instruction
this_wcon = np.concatenate((this_wcon, np.zeros((this_wcon.shape[0], 1))), axis=1) # add rest
for i in range(this_wcon.shape[0]): # noise normalize
this_wcon[i, :] = this_wcon[i, :] / res[i]
this_wcon = np.delete(this_wcon, mWall, axis=0)
this_wcon = this_wcon[:,condIdx-1] # take the right subset
mean_wcon = this_wcon.mean(1)
for i in range(this_wcon.shape[0]):
this_wcon[i, :] = this_wcon[i, :] - mean_wcon[i]
this_wcon = this_wcon.astype('float32').transpose()
K=this_wcon.shape[0]
del res, mean_wcon
SD = np.sqrt(np.sum(np.square(this_wcon), axis=0)/K) # standard deviation
SD = np.reshape(SD, (SD.shape[0], 1))
VAR = np.matmul(SD, SD.transpose())
COV = np.matmul(this_wcon.transpose(), this_wcon) / K
VAR = VAR[row,col]
COV = COV[row,col]
del SD, this_wcon
print("\n")
for bw in range(1,3):
for i in range(numBins):
print(".")
inBin = np.zeros((dist.shape[0],), dtype=int)
for j in range(len(inBin)):
if (dist[j] > i*binWidth) & (dist[j] <= (i+1)*binWidth) & (sameRegion[j] == bw):
inBin[j] = 1
# inBin = np.where(dist>i*binWidth) & (dist<=(i+1)*binWidth) & (sameRegion==bw)
# inBin = np.reshape(inBin, (inBin.shape[1],))
N = np.append(N, np.count_nonzero(inBin == 1))
studyNum = np.append(studyNum, ts)
SN = np.append(SN, s)
hem = np.append(hem, h)
bwParcel = np.append(bwParcel, bw - 1)
distmin = np.append(distmin, i * binWidth)
distmax = np.append(distmax, (i + 1) * binWidth)
meanCorr = np.append(meanCorr, np.nanmean(COV[inBin == 1]) / np.nanmean(VAR[inBin == 1]))
del inBin
del VAR, COV
num_w = N[bwParcel == 0]
num_b = N[bwParcel == 1]
weight = 1/(1/num_w + 1/num_b)
weight = weight / np.sum(weight)
                # np.append() needs the destination array as its first argument;
                # assuming the intent is to accumulate the weighted correlations.
                weightedCorr = np.append(weightedCorr, meanCorr * weight)
print("\n")
struct = {
"SN": SN,
"hem": hem,
"studyNum": studyNum,
"N": N,
"bwParcel": bwParcel,
"distmin": distmin,
"distmax":distmax,
"meanCorr": meanCorr,
"weightedCorr": weightedCorr
}
return struct
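# Usage sketch (illustrative; assumes the DCBC data files referenced above and
# a 32k parcellation label vector are available locally):
# labels = nb.load('myParcellation.32k.L.label.gii').darrays[0].data
# result = eval_DCBC(sn=[2], subj_name=['s02'], hems='L', parcels=labels)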
| 42.341317 | 113 | 0.537548 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,447 | 0.346061 |
0f128327ac454f125d92bc8c80bfc98fabb7bc4f | 6,331 | py | Python | pythonclient/swagger_client/models/repository.py | kongyew/qualys_cli | 720a22088994d3ff2b635ba87209c971da24c56c | [
"MIT"
] | null | null | null | pythonclient/swagger_client/models/repository.py | kongyew/qualys_cli | 720a22088994d3ff2b635ba87209c971da24c56c | [
"MIT"
] | null | null | null | pythonclient/swagger_client/models/repository.py | kongyew/qualys_cli | 720a22088994d3ff2b635ba87209c971da24c56c | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Container Security API
# Authentication You must authenticate to the Qualys Cloud Platform using Qualys account credentials (user name and password) and get the JSON Web Token (JWT) before you can start using the Container Security APIs. Use the Qualys Authentication API to get the JWT. **Example Authentication Curl Request**: curl -X POST https://gateway/auth -H 'Content-Type: application/x-www-form-urlencoded' -d 'username=value1&password=passwordValue&token=true' where - gateway is the base URL to the Qualys API server where your account is located. - **username** and **password** are the credentials of the user account for which you want to fetch Container Security data. - **token** should be **true** - **Content-Type** should be **application/x-www-form-urlencoded** # noqa: E501
OpenAPI spec version: v1.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Repository(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'repo_name': 'str',
'total_images': 'int',
'total_scanned_images': 'int',
'total_vulnerable_images': 'int'
}
attribute_map = {
'repo_name': 'repoName',
'total_images': 'totalImages',
'total_scanned_images': 'totalScannedImages',
'total_vulnerable_images': 'totalVulnerableImages'
}
def __init__(self, repo_name=None, total_images=None, total_scanned_images=None, total_vulnerable_images=None): # noqa: E501
"""Repository - a model defined in Swagger""" # noqa: E501
self._repo_name = None
self._total_images = None
self._total_scanned_images = None
self._total_vulnerable_images = None
self.discriminator = None
if repo_name is not None:
self.repo_name = repo_name
if total_images is not None:
self.total_images = total_images
if total_scanned_images is not None:
self.total_scanned_images = total_scanned_images
if total_vulnerable_images is not None:
self.total_vulnerable_images = total_vulnerable_images
@property
def repo_name(self):
"""Gets the repo_name of this Repository. # noqa: E501
:return: The repo_name of this Repository. # noqa: E501
:rtype: str
"""
return self._repo_name
@repo_name.setter
def repo_name(self, repo_name):
"""Sets the repo_name of this Repository.
:param repo_name: The repo_name of this Repository. # noqa: E501
:type: str
"""
self._repo_name = repo_name
@property
def total_images(self):
"""Gets the total_images of this Repository. # noqa: E501
:return: The total_images of this Repository. # noqa: E501
:rtype: int
"""
return self._total_images
@total_images.setter
def total_images(self, total_images):
"""Sets the total_images of this Repository.
:param total_images: The total_images of this Repository. # noqa: E501
:type: int
"""
self._total_images = total_images
@property
def total_scanned_images(self):
"""Gets the total_scanned_images of this Repository. # noqa: E501
:return: The total_scanned_images of this Repository. # noqa: E501
:rtype: int
"""
return self._total_scanned_images
@total_scanned_images.setter
def total_scanned_images(self, total_scanned_images):
"""Sets the total_scanned_images of this Repository.
:param total_scanned_images: The total_scanned_images of this Repository. # noqa: E501
:type: int
"""
self._total_scanned_images = total_scanned_images
@property
def total_vulnerable_images(self):
"""Gets the total_vulnerable_images of this Repository. # noqa: E501
:return: The total_vulnerable_images of this Repository. # noqa: E501
:rtype: int
"""
return self._total_vulnerable_images
@total_vulnerable_images.setter
def total_vulnerable_images(self, total_vulnerable_images):
"""Sets the total_vulnerable_images of this Repository.
:param total_vulnerable_images: The total_vulnerable_images of this Repository. # noqa: E501
:type: int
"""
self._total_vulnerable_images = total_vulnerable_images
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Repository, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Repository):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
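if __name__ == '__main__':
    # Usage sketch with illustrative values (not real scan data).
    repo = Repository(repo_name='library/nginx', total_images=12,
                      total_scanned_images=10, total_vulnerable_images=3)
    print(repo.to_str())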
| 33.497354 | 779 | 0.626125 | 5,339 | 0.843311 | 0 | 0 | 2,236 | 0.353183 | 0 | 0 | 3,226 | 0.509556 |
0f1592f3be048e8437004cc1fbfde1d17593625b | 689 | py | Python | noise_layers/rotate.py | pierrefdz/HiDDeN | c1ca842389f86239c4e3ac9911f784cd3965f260 | [
"MIT"
] | null | null | null | noise_layers/rotate.py | pierrefdz/HiDDeN | c1ca842389f86239c4e3ac9911f784cd3965f260 | [
"MIT"
] | null | null | null | noise_layers/rotate.py | pierrefdz/HiDDeN | c1ca842389f86239c4e3ac9911f784cd3965f260 | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch.nn.functional as F
from torchvision.transforms import functional
import numpy as np
class Rotate(nn.Module):
"""
Rotate the image by random angle between -degrees and degrees.
"""
def __init__(self, degrees, interpolation_method='nearest'):
super(Rotate, self).__init__()
self.degrees = degrees
self.interpolation_method = interpolation_method
def forward(self, noised_and_cover):
rotation_angle = np.random.uniform(-self.degrees, self.degrees)
noised_image = noised_and_cover[0]
noised_and_cover[0] = functional.rotate(noised_image, rotation_angle)
return noised_and_cover
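if __name__ == '__main__':
    # Usage sketch (illustrative shapes): the noised image is rotated while
    # the cover image passes through untouched.
    import torch
    layer = Rotate(degrees=15)
    rotated, cover = layer([torch.rand(1, 3, 64, 64), torch.rand(1, 3, 64, 64)])
    print(rotated.shape, cover.shape)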
| 31.318182 | 77 | 0.716981 | 567 | 0.822932 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.12627 |
0f162b0c31fb0578dd692face5da5fae3fc2df41 | 278 | py | Python | app/utils/weak_random.py | michel-rodrigues/viggio_backend | f419f0b939209722e1eb1e272f33de172cd5c1f1 | [
"MIT"
] | null | null | null | app/utils/weak_random.py | michel-rodrigues/viggio_backend | f419f0b939209722e1eb1e272f33de172cd5c1f1 | [
"MIT"
] | null | null | null | app/utils/weak_random.py | michel-rodrigues/viggio_backend | f419f0b939209722e1eb1e272f33de172cd5c1f1 | [
"MIT"
] | null | null | null | import random
import string
def random_string_digits(string_length=10):
"""Generate a random string of letters and digits."""
letters_and_digits = string.ascii_letters + string.digits
return ''.join(random.choice(letters_and_digits) for _ in range(string_length))
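# Example (non-deterministic output): random_string_digits(8) might return
# something like 'aZ3kQ9xP'.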
| 30.888889 | 83 | 0.769784 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.197842 |
0f1665f0055f31ec323063994f87f3173d7444f6 | 28,521 | py | Python | bibleutils/test/test_versification.py | 47rooks/bible-utilities | b744828214dbadd6f0c1b6d514a796761159b779 | [
"MIT"
] | null | null | null | bibleutils/test/test_versification.py | 47rooks/bible-utilities | b744828214dbadd6f0c1b6d514a796761159b779 | [
"MIT"
] | null | null | null | bibleutils/test/test_versification.py | 47rooks/bible-utilities | b744828214dbadd6f0c1b6d514a796761159b779 | [
"MIT"
] | null | null | null | '''
Created on Jan 22, 2017
@author: Daniel
'''
import unittest
from bibleutils.versification import VersificationID, BookID, Identifier, \
ReferenceFormID, parse_refs, ETCBCHVersification, Ref, convert_refs, \
expand_refs, VersificationException
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testVersificationIDs(self):
'''Verify that ids can be referred to by the property methods
'''
assert VersificationID.ETCBCH == 1
assert VersificationID.ETCBCG == 2
assert VersificationID.IGNTPSinaiticus == 3
assert VersificationID.Accordance == 4
def testVersificationIDsImmutable(self):
with self.assertRaises(AttributeError):
VersificationID.ETCBCH = 12
def testVersificationIDsCannotBeAdded(self):
# FIXME I cannot prevent an attribute being added.
with self.assertRaises(AttributeError):
VersificationID.FOO = 15
def testVersificationIter(self):
for k in VersificationID:
print('key={:s}'.format(k))
    def testBookNameFromBookId(self):
        self.assertEqual(ETCBCHVersification.book_name(BookID._NUMBERS), 'Numeri',
            f'Incorrect name from book_id {ETCBCHVersification.book_name(BookID._NUMBERS)}')
    def testBookIdFromBookName(self):
        self.assertEqual(ETCBCHVersification.book_id('Numeri'),
                         BookID._NUMBERS,
                         f"Incorrect ID from book_name {ETCBCHVersification.book_id('Numeri')}")
def testIDValuesUnique(self):
'''Verify that duplicates cannot be created in the Identifier class
'''
chk = {'_GENESIS':1, '_EXODUS':2, '_LEVITICUS':3,
'_NUMBERS':4, '_DEUTERONOMY':5, '_DEUTERONOMYA':5}
with self.assertRaises(VersificationException) as expected_ex:
Identifier(chk)
ex = expected_ex.exception
self.assertEqual(ex.message[:51],
'duplicate value in supplied map at key _DEUTERONOMY',
'Unexpected mesg in exception : {:s}'.format(str(ex)))
def testBookIDSmoker(self):
'''Just a quick smoker
'''
        self.assertEqual(BookID._1CHRONICLES, 38,
                         'Unexpected value {:d}'.format(BookID._1CHRONICLES))
def testParseBookOnly(self):
r = parse_refs("Exodus", ReferenceFormID.BIBLEUTILS)
        self.assertEqual(len(r), 1)
self.assertEqual(r[0].versification, ReferenceFormID.BIBLEUTILS,
'wrong versification system {}'.format(r[0].versification))
self.assertEqual(r[0].st_book, BookID._EXODUS,
'wrong book id {}'.format(r[0].st_book))
self.assertIsNone(r[0].end_book,
'ending book is wrong {}'.format(r[0].end_book))
self.assertIsNone(r[0].st_ch, 'st_ch not None {}'.format(r[0].st_ch))
self.assertIsNone(r[0].end_ch, 'end_ch not None {}'.format(r[0].end_ch))
self.assertIsNone(r[0].st_vs, 'st_vs not None {}'.format(r[0].st_vs))
self.assertIsNone(r[0].end_vs, 'end_vs not None {}'.format(r[0].end_vs))
self.assertIsNone(r[0].st_sub_vs, 'st_sub_vs not None {}'.format(r[0].st_sub_vs))
self.assertIsNone(r[0].end_sub_vs, 'end_sub_vs not None {}'.format(r[0].end_sub_vs))
def testParseNumBookOnly(self):
r = parse_refs("1Kings", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._1KINGS,
'wrong book id {}'.format(r[0].st_book))
def testParseBookRangeOnly(self):
r = parse_refs("Exodus-Numbers", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._EXODUS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].end_book, BookID._NUMBERS,
'wrong book id {}'.format(r[0].end_book))
def testParseBookRangeTwoDelims(self):
with self.assertRaises(VersificationException) as expected_ex:
parse_refs("Exodus--Numbers", ReferenceFormID.BIBLEUTILS)
ex = expected_ex.exception
self.assertEqual(ex.message,
'invalid book name at pos 7 in Exodus--Numbers',
'Unexpected mesg in exception : {:s}'.format(str(ex)))
def testParseChVsRangeTwoDelims(self):
with self.assertRaises(VersificationException) as expected_ex:
parse_refs("Exodus 12::13", ReferenceFormID.BIBLEUTILS)
ex = expected_ex.exception
self.assertEqual(ex.message,
'invalid verse reference at pos 10 in Exodus 12::13',
'Unexpected mesg in exception : {:s}'.format(str(ex)))
def testParseTwoCommas(self):
with self.assertRaises(VersificationException) as expected_ex:
parse_refs("Exodus 12-13,,15", ReferenceFormID.BIBLEUTILS)
ex = expected_ex.exception
self.assertEqual(ex.message,
'invalid chapter at pos 13 in Exodus 12-13,,15',
'Unexpected mesg in exception : {:s}'.format(str(ex)))
def testParseMixedDelims(self):
with self.assertRaises(VersificationException) as expected_ex:
parse_refs("Exodus 12-13,:-15", ReferenceFormID.BIBLEUTILS)
ex = expected_ex.exception
self.assertEqual(ex.message,
'invalid chapter at pos 13 in Exodus 12-13,:-15',
'Unexpected mesg in exception : {:s}'.format(str(ex)))
def testParseBookRangeTooManyBooks(self):
with self.assertRaises(VersificationException) as expected_ex:
parse_refs("Exodus-Numbers-Deuteronomy", ReferenceFormID.BIBLEUTILS)
ex = expected_ex.exception
self.assertEqual(ex.message,
'invalid "-" delimiter at 15 in Exodus-Numbers-Deuteronomy')
def testParseMultiBookRangeOnly(self):
r = parse_refs("Exodus-Numbers,Matt-Mark", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._EXODUS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].end_book, BookID._NUMBERS,
'wrong book id {}'.format(r[0].end_book))
self.assertEqual(r[1].st_book, BookID._MATTHEW,
'wrong book id {}'.format(r[1].st_book))
self.assertEqual(r[1].end_book, BookID._MARK,
'wrong book id {}'.format(r[1].end_book))
def testParseNumBookRangeOnly(self):
r = parse_refs("1Kings-2Kings", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._1KINGS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].end_book, BookID._2KINGS,
'wrong book id {}'.format(r[0].end_book))
def testParseBookChapter(self):
r = parse_refs("Exodus 12", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._EXODUS,
'wrong book id {}'.format(r[0].st_book))
self.assertIsNone(r[0].end_book,
'book id is not None {}'.format(r[0].end_book))
self.assertEqual(r[0].st_ch, 12,
'incorrect chapter {}'.format(r[0].st_ch))
self.assertIsNone(r[0].end_ch, 'chapter is not None')
def testParseBookChapterRange(self):
r = parse_refs("Exodus 12-15", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._EXODUS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 12,
'incorrect starting chapter {}'.format(r[0].st_ch))
self.assertEqual(r[0].end_ch, 15,
'incorrect ending chapter {}'.format(r[0].end_ch))
def testParseBookMultiChapterRange(self):
r = parse_refs("Exodus 12-15, 17-25", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._EXODUS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 12,
'incorrect starting chapter {}'.format(r[0].st_ch))
self.assertEqual(r[0].end_ch, 15,
'incorrect ending chapter {}'.format(r[0].end_ch))
self.assertEqual(r[1].st_book, BookID._EXODUS,
'wrong book id {}'.format(r[1].st_book))
self.assertEqual(r[1].st_ch, 17,
'incorrect starting chapter {}'.format(r[1].st_ch))
self.assertEqual(r[1].end_ch, 25,
'incorrect ending chapter {}'.format(r[1].end_ch))
def testParseBookAbbrevCh(self):
r = parse_refs("Ex 12", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._EXODUS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 12,
'incorrect starting chapter {}'.format(r[0].st_ch))
def testParseBookAbbrevWithDot(self):
r = parse_refs("Ex. 12", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._EXODUS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 12,
'incorrect starting chapter {}'.format(r[0].st_ch))
def testParseBookChVs(self):
r = parse_refs("Gen 12:1", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._GENESIS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 12,
'incorrect starting chapter {}'.format(r[0].st_ch))
        self.assertEqual(r[0].st_vs, 1,
                         'incorrect starting verse {}'.format(r[0].st_vs))
def testParseBookChVsRange(self):
r = parse_refs("Gen 12:1-12", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._GENESIS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 12,
'incorrect starting chapter {}'.format(r[0].st_ch))
        self.assertEqual(r[0].st_vs, 1,
                         'incorrect starting verse {}'.format(r[0].st_vs))
        self.assertEqual(r[0].end_vs, 12,
                         'incorrect ending verse {}'.format(r[0].end_vs))
def testParseBookChVsRangeSeq(self):
r = parse_refs("Gen 12:1-12,13", ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._GENESIS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 12,
'incorrect starting chapter {}'.format(r[0].st_ch))
        self.assertEqual(r[0].st_vs, 1,
                         'incorrect starting verse {}'.format(r[0].st_vs))
        self.assertEqual(r[0].end_vs, 12,
                         'incorrect ending verse {}'.format(r[0].end_vs))
        self.assertEqual(r[1].st_book, BookID._GENESIS,
                         'wrong book id {}'.format(r[1].st_book))
        self.assertEqual(r[1].st_ch, 12,
                         'incorrect starting chapter {}'.format(r[1].st_ch))
        self.assertEqual(r[1].st_vs, 13,
                         'incorrect starting verse {}'.format(r[1].st_vs))
def testParseGen1_3(self):
r = parse_refs('Gen 1:1-2,6-23', ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._GENESIS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 1,
'incorrect starting chapter {}'.format(r[0].st_ch))
        self.assertEqual(r[0].st_vs, 1,
                         'incorrect starting verse {}'.format(r[0].st_vs))
        self.assertEqual(r[0].end_vs, 2,
                         'incorrect ending verse {}'.format(r[0].end_vs))
        self.assertEqual(r[1].st_book, BookID._GENESIS,
                         'wrong book id {}'.format(r[1].st_book))
        self.assertEqual(r[1].st_ch, 1,
                         'incorrect starting chapter {}'.format(r[1].st_ch))
        self.assertEqual(r[1].st_vs, 6,
                         'incorrect starting verse {}'.format(r[1].st_vs))
        self.assertEqual(r[1].end_vs, 23,
                         'incorrect ending verse {}'.format(r[1].end_vs))
def testParseBookChVsChVs(self):
r = parse_refs('Gen 1:1-2,6-23,2:23', ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._GENESIS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 1,
'incorrect starting chapter {}'.format(r[0].st_ch))
        self.assertEqual(r[0].st_vs, 1,
                         'incorrect starting verse {}'.format(r[0].st_vs))
        self.assertEqual(r[0].end_vs, 2,
                         'incorrect ending verse {}'.format(r[0].end_vs))
        self.assertEqual(r[1].st_book, BookID._GENESIS,
                         'wrong book id {}'.format(r[1].st_book))
        self.assertEqual(r[1].st_ch, 1,
                         'incorrect starting chapter {}'.format(r[1].st_ch))
        self.assertEqual(r[1].st_vs, 6,
                         'incorrect starting verse {}'.format(r[1].st_vs))
        self.assertEqual(r[1].end_vs, 23,
                         'incorrect ending verse {}'.format(r[1].end_vs))
        self.assertEqual(r[2].st_book, BookID._GENESIS,
                         'wrong book id {}'.format(r[2].st_book))
        self.assertEqual(r[2].st_ch, 2,
                         'incorrect starting chapter {}'.format(r[2].st_ch))
        self.assertEqual(r[2].st_vs, 23,
                         'incorrect starting verse {}'.format(r[2].st_vs))
def testParseComplexRefString(self):
r = parse_refs('Gen 1:1-2,6, Ex 17:3, Deut 12,13', ReferenceFormID.BIBLEUTILS)
self.assertEqual(r[0].st_book, BookID._GENESIS,
'wrong book id {}'.format(r[0].st_book))
self.assertEqual(r[0].st_ch, 1,
'incorrect starting chapter {}'.format(r[0].st_ch))
        self.assertEqual(r[0].st_vs, 1,
                         'incorrect starting verse {}'.format(r[0].st_vs))
        self.assertEqual(r[0].end_vs, 2,
                         'incorrect ending verse {}'.format(r[0].end_vs))
        self.assertEqual(r[1].st_book, BookID._GENESIS,
                         'wrong book id {}'.format(r[1].st_book))
        self.assertEqual(r[1].st_ch, 1,
                         'incorrect starting chapter {}'.format(r[1].st_ch))
        self.assertEqual(r[1].st_vs, 6,
                         'incorrect starting verse {}'.format(r[1].st_vs))
        self.assertEqual(r[2].st_book, BookID._EXODUS,
                         'wrong book id {}'.format(r[2].st_book))
        self.assertEqual(r[2].st_ch, 17,
                         'incorrect starting chapter {}'.format(r[2].st_ch))
        self.assertEqual(r[2].st_vs, 3,
                         'incorrect starting verse {}'.format(r[2].st_vs))
        self.assertEqual(r[3].st_book, BookID._DEUTERONOMY,
                         'wrong book id {}'.format(r[3].st_book))
        self.assertEqual(r[3].st_ch, 12,
                         'incorrect starting chapter {}'.format(r[3].st_ch))
        self.assertEqual(r[4].st_book, BookID._DEUTERONOMY,
                         'wrong book id {}'.format(r[4].st_book))
        self.assertEqual(r[4].st_ch, 13,
                         'incorrect starting chapter {}'.format(r[4].st_ch))
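    # Taken together, the parse tests above exercise the reference grammar:
    # a book name or abbreviation (optionally dotted, e.g. "Ex."), "-" for
    # book/chapter/verse ranges, ":" between chapter and verse, and "," to
    # sequence references, with the current book and chapter carried
    # forward across comma-separated items.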
def testConvertInternalToETCBCH(self):
refs = [Ref(ReferenceFormID.BIBLEUTILS,
BookID._DEUTERONOMY, sc=3, sv=4),
Ref(ReferenceFormID.BIBLEUTILS,
BookID._EXODUS, BookID._EXODUS, 1, sv=12, ev=15)]
c_refs = convert_refs(refs, ReferenceFormID.ETCBCH)
self.assertEqual(c_refs[0].versification, ReferenceFormID.ETCBCH,
f'Incorrect reference form {c_refs[0].versification}')
self.assertEqual(c_refs[0].st_book, 'Deuteronomium',
f'Conversion returned wrong name {c_refs[0].st_book}')
self.assertEqual(c_refs[0].st_ch, 3,
f'Conversion returned wrong ch {c_refs[0].st_ch}')
self.assertEqual(c_refs[0].st_vs, 4,
f'Conversion returned wrong vs {c_refs[0].st_vs}')
        self.assertEqual(c_refs[1].versification, ReferenceFormID.ETCBCH,
                         f'Incorrect reference form {c_refs[1].versification}')
self.assertEqual(c_refs[1].st_book, 'Exodus',
f'Conversion returned wrong name {c_refs[1].st_book}')
self.assertEqual(c_refs[1].st_ch, 1,
f'Conversion returned wrong ch {c_refs[1].st_ch}')
self.assertEqual(c_refs[1].st_vs, 12,
f'Conversion returned wrong vs {c_refs[1].st_vs}')
self.assertEqual(c_refs[1].end_vs, 15,
f'Conversion returned wrong vs {c_refs[1].end_vs}')
def testConvertETCBCHToInternal(self):
refs = [Ref(ReferenceFormID.ETCBCH,
'Deuteronomium', sc=3, sv=4),
Ref(ReferenceFormID.ETCBCH,
'Exodus', 'Exodus', 1, sv=12, ev=15)]
c_refs = convert_refs(refs, ReferenceFormID.BIBLEUTILS)
self.assertEqual(c_refs[0].versification, ReferenceFormID.BIBLEUTILS,
f'Incorrect reference form {c_refs[0].versification}')
self.assertEqual(c_refs[0].st_book, BookID._DEUTERONOMY,
f'Conversion returned wrong name {c_refs[0].st_book}')
self.assertEqual(c_refs[0].st_ch, 3,
f'Conversion returned wrong ch {c_refs[0].st_ch}')
self.assertEqual(c_refs[0].st_vs, 4,
f'Conversion returned wrong vs {c_refs[0].st_vs}')
self.assertEqual(c_refs[1].versification, ReferenceFormID.BIBLEUTILS,
f'Incorrect reference form {c_refs[1].versification}')
self.assertEqual(c_refs[1].st_book, BookID._EXODUS,
f'Conversion returned wrong name {c_refs[1].st_book}')
self.assertEqual(c_refs[1].st_ch, 1,
f'Conversion returned wrong ch {c_refs[1].st_ch}')
self.assertEqual(c_refs[1].st_vs, 12,
f'Conversion returned wrong vs {c_refs[1].st_vs}')
self.assertEqual(c_refs[1].end_vs, 15,
f'Conversion returned wrong vs {c_refs[1].end_vs}')
def testConvertInternalToETCBCG(self):
refs = [Ref(ReferenceFormID.BIBLEUTILS,
BookID._LUKE, sc=3, sv=4),
Ref(ReferenceFormID.BIBLEUTILS,
BookID._MARK, BookID._MARK, 1, sv=12, ev=15)]
c_refs = convert_refs(refs, ReferenceFormID.ETCBCG)
self.assertEqual(c_refs[0].versification, ReferenceFormID.ETCBCG,
f'Incorrect reference form {c_refs[0].versification}')
self.assertEqual(c_refs[0].st_book, 'Luke',
f'Conversion returned wrong name {c_refs[0].st_book}')
self.assertEqual(c_refs[0].st_ch, 3,
f'Conversion returned wrong ch {c_refs[0].st_ch}')
self.assertEqual(c_refs[0].st_vs, 4,
f'Conversion returned wrong vs {c_refs[0].st_vs}')
        self.assertEqual(c_refs[1].versification, ReferenceFormID.ETCBCG,
                         f'Incorrect reference form {c_refs[1].versification}')
self.assertEqual(c_refs[1].st_book, 'Mark',
f'Conversion returned wrong name {c_refs[1].st_book}')
self.assertEqual(c_refs[1].st_ch, 1,
f'Conversion returned wrong ch {c_refs[1].st_ch}')
self.assertEqual(c_refs[1].st_vs, 12,
f'Conversion returned wrong vs {c_refs[1].st_vs}')
self.assertEqual(c_refs[1].end_vs, 15,
f'Conversion returned wrong vs {c_refs[1].end_vs}')
def testConvertETCBCGToInternal(self):
refs = [Ref(ReferenceFormID.ETCBCG,
'Luke', sc=3, sv=4),
Ref(ReferenceFormID.ETCBCG,
'Mark', 'Mark', 1, sv=12, ev=15)]
c_refs = convert_refs(refs, ReferenceFormID.BIBLEUTILS)
self.assertEqual(c_refs[0].versification, ReferenceFormID.BIBLEUTILS,
f'Incorrect reference form {c_refs[0].versification}')
self.assertEqual(c_refs[0].st_book, BookID._LUKE,
f'Conversion returned wrong name {c_refs[0].st_book}')
self.assertEqual(c_refs[0].st_ch, 3,
f'Conversion returned wrong ch {c_refs[0].st_ch}')
self.assertEqual(c_refs[0].st_vs, 4,
f'Conversion returned wrong vs {c_refs[0].st_vs}')
self.assertEqual(c_refs[1].versification, ReferenceFormID.BIBLEUTILS,
f'Incorrect reference form {c_refs[1].versification}')
self.assertEqual(c_refs[1].st_book, BookID._MARK,
f'Conversion returned wrong name {c_refs[1].st_book}')
self.assertEqual(c_refs[1].st_ch, 1,
f'Conversion returned wrong ch {c_refs[1].st_ch}')
self.assertEqual(c_refs[1].st_vs, 12,
f'Conversion returned wrong vs {c_refs[1].st_vs}')
self.assertEqual(c_refs[1].end_vs, 15,
f'Conversion returned wrong vs {c_refs[1].end_vs}')
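    # The four conversion tests above cover both directions of each mapping,
    # so a BIBLEUTILS -> ETCBCH/ETCBCG -> BIBLEUTILS conversion is expected
    # to round-trip back to the original Ref values.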
def testExpandVerse(self):
refs = [Ref(ReferenceFormID.ETCBCH,
'Deuteronomium', sc=3, sv=4, ev=6)]
e_refs = expand_refs(refs)
self.assertEqual(len(e_refs), 3, 'incorrect number of expanded refs')
self.assertEqual(e_refs[0].st_book, 'Deuteronomium', 'st_book is not Deuteronomium')
self.assertIsNone(e_refs[0].end_book, 'end_book is not None')
self.assertEqual(e_refs[0].st_ch, 3, 'wrong chapter')
self.assertIsNone(e_refs[0].end_ch, 'end_ch is not None')
self.assertEqual(e_refs[0].st_vs, 4, 'wrong verse')
self.assertIsNone(e_refs[0].end_vs, 'end_vs is not None')
self.assertEqual(e_refs[1].st_book, 'Deuteronomium', 'st_book is not Deuteronomium')
self.assertIsNone(e_refs[1].end_book, 'end_book is not None')
self.assertEqual(e_refs[1].st_ch, 3, 'wrong chapter')
self.assertIsNone(e_refs[1].end_ch, 'end_ch is not None')
self.assertEqual(e_refs[1].st_vs, 5, 'wrong verse')
self.assertIsNone(e_refs[1].end_vs, 'end_vs is not None')
self.assertEqual(e_refs[2].st_book, 'Deuteronomium', 'st_book is not Deuteronomium')
self.assertIsNone(e_refs[2].end_book, 'end_book is not None')
self.assertEqual(e_refs[2].st_ch, 3, 'wrong chapter')
self.assertIsNone(e_refs[2].end_ch, 'end_ch is not None')
self.assertEqual(e_refs[2].st_vs, 6, 'wrong verse')
self.assertIsNone(e_refs[2].end_vs, 'end_vs is not None')
def testExpandList(self):
refs = [Ref(ReferenceFormID.ETCBCH,
'Deuteronomium', sc=3, sv=4, ev=6),
Ref(ReferenceFormID.ETCBCH,
'Exodus', sc=6, sv=1, ev=7)]
e_refs = expand_refs(refs)
self.assertEqual(len(e_refs), 10, 'incorrect number of expanded refs')
self.assertEqual(e_refs[0].st_book, 'Deuteronomium', 'st_book is not Deuteronomium')
self.assertIsNone(e_refs[0].end_book, 'end_book is not None')
self.assertEqual(e_refs[0].st_ch, 3, 'wrong chapter')
self.assertIsNone(e_refs[0].end_ch, 'end_ch is not None')
self.assertEqual(e_refs[0].st_vs, 4, 'wrong verse')
self.assertIsNone(e_refs[0].end_vs, 'end_vs is not None')
self.assertEqual(e_refs[1].st_book, 'Deuteronomium', 'st_book is not Deuteronomium')
self.assertIsNone(e_refs[1].end_book, 'end_book is not None')
self.assertEqual(e_refs[1].st_ch, 3, 'wrong chapter')
self.assertIsNone(e_refs[1].end_ch, 'end_ch is not None')
self.assertEqual(e_refs[1].st_vs, 5, 'wrong verse')
self.assertIsNone(e_refs[1].end_vs, 'end_vs is not None')
self.assertEqual(e_refs[2].st_book, 'Deuteronomium', 'st_book is not Deuteronomium')
self.assertIsNone(e_refs[2].end_book, 'end_book is not None')
self.assertEqual(e_refs[2].st_ch, 3, 'wrong chapter')
self.assertIsNone(e_refs[2].end_ch, 'end_ch is not None')
self.assertEqual(e_refs[2].st_vs, 6, 'wrong verse')
self.assertIsNone(e_refs[2].end_vs, 'end_vs is not None')
self.assertEqual(e_refs[3].st_book, 'Exodus', 'st_book is not Exodus')
self.assertIsNone(e_refs[3].end_book, 'end_book is not None')
self.assertEqual(e_refs[3].st_ch, 6, 'wrong chapter')
self.assertIsNone(e_refs[3].end_ch, 'end_ch is not None')
self.assertEqual(e_refs[3].st_vs, 1, 'wrong verse')
self.assertIsNone(e_refs[3].end_vs, 'end_vs is not None')
self.assertEqual(e_refs[4].st_book, 'Exodus', 'st_book is not Exodus')
self.assertIsNone(e_refs[4].end_book, 'end_book is not None')
self.assertEqual(e_refs[4].st_ch, 6, 'wrong chapter')
self.assertIsNone(e_refs[4].end_ch, 'end_ch is not None')
self.assertEqual(e_refs[4].st_vs, 2, 'wrong verse')
self.assertIsNone(e_refs[4].end_vs, 'end_vs is not None')
self.assertEqual(e_refs[9].st_book, 'Exodus', 'st_book is not Exodus')
self.assertIsNone(e_refs[9].end_book, 'end_book is not None')
self.assertEqual(e_refs[9].st_ch, 6, 'wrong chapter')
self.assertIsNone(e_refs[9].end_ch, 'end_ch is not None')
self.assertEqual(e_refs[9].st_vs, 7, 'wrong verse')
self.assertIsNone(e_refs[9].end_vs, 'end_vs is not None')
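    # expand_refs flattens each verse range into one single-verse Ref, so
    # Deuteronomium 3:4-6 contributes 3 refs and Exodus 6:1-7 contributes 7,
    # giving the 10 entries asserted above.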
def testExpandChapter(self):
with self.assertRaises(VersificationException) as expected_ex:
refs = [Ref(ReferenceFormID.ETCBCH,
'Deuteronomium', sc=3, ec=4, sv=4, ev=6)]
expand_refs(refs)
ex = expected_ex.exception
print(f'ex is {ex}')
self.assertEqual(ex.message,
'reference extends over more than one chapter')
def testExpandEndBook(self):
with self.assertRaises(VersificationException) as expected_ex:
refs = [Ref(ReferenceFormID.ETCBCH,
'Deuteronomium', 'Exodus', sc=3, sv=4)]
expand_refs(refs)
ex = expected_ex.exception
self.assertEqual(ex.message,
'reference extends over more than one book')
def testRefBadCh(self):
with self.assertRaises(VersificationException) as expected_ex:
Ref(ReferenceFormID.ETCBCH,
'Deuteronomium', 'Exodus', sc=3, ec=2)
ex = expected_ex.exception
self.assertEqual(ex.message,
'ending chapter 2 is before the starting chapter 3')
def testRefBadVs(self):
with self.assertRaises(VersificationException) as expected_ex:
Ref(ReferenceFormID.ETCBCH,
'Deuteronomium', 'Exodus', sv=3, ev=2)
ex = expected_ex.exception
self.assertEqual(ex.message,
'ending verse 2 is before the starting verse 3')
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
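    # The suite can also be run via test discovery (assuming the package is
    # importable):  python -m unittest bibleutils.test.test_versification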
    unittest.main()
| 53.510319 | 104 | 0.570948 | 28,135 | 0.986466 | 0 | 0 | 0 | 0 | 0 | 0 | 6,981 | 0.244767 |