from django.db import models
from ckeditor_uploader.fields import RichTextUploadingField
from django.utils.translation import gettext_lazy as _
CATEGORY = (
('Women', 'Women'),
('Men', 'Men'),
('Raw material', 'Raw material'),
('Life style', 'Life style'),
)
class Messages(models.Model):
"""
Model for newsletter messages
"""
# Fields
subject = models.CharField(
max_length=20,
help_text=_("Max length 20 character"),
)
message = RichTextUploadingField()
category = models.CharField(
max_length=15,
choices=CATEGORY,
)
# Metadata
class Meta:
verbose_name_plural = "1. Messages"
verbose_name = "message"
# Methods
def __str__(self):
return self.subject
class Subscribers(models.Model):
"""
Model for newsletter subscribers
"""
# Fields
email = models.EmailField(
unique=True,
)
full_name = models.CharField(
max_length=30,
help_text=_("Max length 30 character"),
)
category = models.CharField(
max_length=15,
choices=CATEGORY,
)
# Metadata
class Meta:
verbose_name_plural = "2. Subscribers"
verbose_name = "subscriber"
# Methods
def __str__(self):
return self.email
|
import logging
from oidcmsg import oidc
from oidcmsg.message import Message
from oidcmsg.oauth2 import ResponseMessage
from oidcservice.service import Service
LOGGER = logging.getLogger(__name__)
class RegistrationRead(Service):
msg_type = Message
response_cls = oidc.RegistrationResponse
error_msg = ResponseMessage
synchronous = True
service_name = 'registration_read'
http_method = 'GET'
default_authn_method = 'client_secret_basic'
def get_endpoint(self):
try:
return self.service_context.registration_response["registration_client_uri"]
except KeyError:
return ''
def get_authn_header(self, request, authn_method, **kwargs):
"""
Construct an authorization specification to be sent in the
HTTP header.
:param request: The service request
:param authn_method: Which authentication/authorization method to use
:param kwargs: Extra keyword arguments
:return: A set of keyword arguments to be sent in the HTTP header.
"""
headers = {}
if authn_method == "client_secret_basic":
LOGGER.debug("Client authn method: %s", authn_method)
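            # Reading the client registration uses the registration access token
            # as an OAuth bearer token (per RFC 7592), even though the configured
            # client authentication method is client_secret_basic.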
headers["Authorization"] = "Bearer {}".format(
self.service_context.registration_response["registration_access_token"]
)
return headers
|
import argparse
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
LOG_FILE_NAME = "log.csv"
def moving_avg(x, y, window_size=1):
if window_size == 1:
return x, y
moving_avg_y = np.convolve(y, np.ones(window_size) / window_size, 'valid')
return x[-len(moving_avg_y):], moving_avg_y
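# Example (illustrative, with array inputs): with window_size=3,
#   moving_avg(np.arange(5), np.array([1., 2., 3., 4., 5.]), window_size=3)
# returns (array([2, 3, 4]), array([2., 3., 4.])) -- 'valid' convolution drops
# the first window_size - 1 points, so x is trimmed from the front to stay aligned.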
def plot_run(paths, name, ax=None, x_key="steps", y_keys=["eval/loss"], window_size=1, max_x_value=None):
for path in paths:
assert LOG_FILE_NAME in os.listdir(path), "Did not find log file, found " + " ".join(os.listdir(path))
for y_key in y_keys:
xs, ys = [], []
for path in paths:
df = pd.read_csv(os.path.join(path, LOG_FILE_NAME))
            if y_key not in df:
                print("[research] WARNING: y_key was not in run, skipping plot", path)
                continue
x, y = moving_avg(df[x_key], df[y_key], window_size=window_size)
assert len(x) == len(y)
if max_x_value is not None:
y = y[x <= max_x_value] # need to set y value first
x = x[x <= max_x_value]
xs.append(x)
ys.append(y)
xs = np.concatenate(xs, axis=0)
ys = np.concatenate(ys, axis=0)
plot_df = pd.DataFrame({x_key: xs, y_key: ys})
label = name + " " + y_key if len(y_keys) > 1 else name
ci = "sd" if len(paths) > 0 else None
sns.lineplot(ax=ax, x=x_key, y=y_key, data=plot_df, sort=True, ci=ci, label=label)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--output", "-o", type=str, default="plot.png", help="Path of output plot")
parser.add_argument("--path", "-p", nargs='+', type=str, required=True, help="Paths of runs to plot")
parser.add_argument("--legend", "-l", nargs='+', type=str, required=False, help="Names of each run to display in the legend")
parser.add_argument("--title", "-t", type=str, required=False, help="Plot title")
parser.add_argument("--window", "-w", type=int, default=1, help="Moving window averaging parameter.")
parser.add_argument("--x", "-x", type=str, default="step", help="X value to plot")
parser.add_argument("--max-x", "-m", type=int, default=None, help="Max x value to plot")
parser.add_argument("--x-label", "-xl", type=str, default=None, help="X label to display on the plot")
parser.add_argument("--y", "-y", type=str, nargs='+', default=["eval/loss"], help="Y value(s) to plot")
parser.add_argument("--y-label", "-yl", type=str, default=None, help="Y label to display on the plot")
parser.add_argument("--fig-size", "-f", nargs=2, type=int, default=(6, 4))
args = parser.parse_args()
paths = args.path
# Check to see if we should auto-expand the path.
# Do this only if the number of paths specified is one and each sub-path is a directory
if len(paths) == 1 and all([os.path.isdir(os.path.join(paths[0], d)) for d in os.listdir(paths[0])]):
paths = sorted([os.path.join(paths[0], d) for d in os.listdir(paths[0])])
# Now create the labels
labels = args.legend
if labels is None:
labels = [os.path.basename(path[:-1] if path.endswith('/') else path) for path in paths]
# Sort the paths alphabetically by the labels
paths, labels = zip(*sorted(zip(paths, labels), key=lambda x: x[0])) # Alphabetically sort by filename
for path, label in zip(paths, labels):
if LOG_FILE_NAME not in os.listdir(path):
path = [os.path.join(path, run) for run in os.listdir(path)]
else:
path = [path]
sns.set_context(context="paper", font_scale=1.2)
sns.set_style("darkgrid", {'font.family': 'serif'})
plot_run(path, label, x_key=args.x, y_keys=args.y, window_size=args.window, max_x_value=args.max_x)
# Set relevant labels
if args.title:
plt.title(args.title)
# Label X
if args.x_label is not None:
        plt.xlabel(args.x_label)
elif args.x is not None:
plt.xlabel(args.x)
# Label Y
if args.y_label is not None:
plt.ylabel(args.y_label)
elif args.y is not None and len(args.y) == 1:
plt.ylabel(args.y[0])
# Save the plot
print("[research] Saving plot to", args.output)
plt.gcf().set_size_inches(*args.fig_size)
plt.tight_layout(pad=0)
plt.savefig(args.output, dpi=200) # Increase DPI for higher res.
|
import time
import math
import odrive
import contextlib
import odrive.enums
from configs.config import global_config as c
ANGLE_NORMALIZATION = 1 / math.pi
class KineticMazeMotor:
def __init__(self):
self.od = None
self.approx_cycles_per_revolution = None
self.init_odrive()
def axis(self):
return getattr(self.od, "axis%d" % (c.physics.MOTOR_AXIS,))
@contextlib.contextmanager
def axis_context(self, control_mode=None, pos_gain=None, vel_gain=None,
trajectory_mv=None, trajectory_ma=None, trajectory_md=None):
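        # Temporarily override controller gains and trajectory limits for the
        # duration of the with-block, restoring the previous values on exit
        # (even if an exception is raised).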
old_control_mode = self.axis().controller.config.control_mode
old_pos_gain = self.axis().controller.config.pos_gain
old_vel_gain = self.axis().controller.config.vel_gain
old_traj_mv = self.axis().trap_traj.config.vel_limit
old_traj_ma = self.axis().trap_traj.config.accel_limit
old_traj_md = self.axis().trap_traj.config.decel_limit
try:
if control_mode is not None:
self.axis().controller.config.control_mode = control_mode
if pos_gain is not None:
self.axis().controller.config.pos_gain = pos_gain
if vel_gain is not None:
self.axis().controller.config.vel_gain = vel_gain
if trajectory_mv is not None:
self.axis().trap_traj.config.vel_limit = trajectory_mv
if trajectory_ma is not None:
self.axis().trap_traj.config.accel_limit = trajectory_ma
if trajectory_md is not None:
self.axis().trap_traj.config.decel_limit = trajectory_md
yield self.axis()
finally:
self.axis().controller.config.control_mode = old_control_mode
self.axis().controller.config.pos_gain = old_pos_gain
self.axis().controller.config.vel_gain = old_vel_gain
self.axis().trap_traj.config.vel_limit = old_traj_mv
self.axis().trap_traj.config.accel_limit = old_traj_ma
self.axis().trap_traj.config.decel_limit = old_traj_md
def report_odrive_error(self):
print("ODrive Error!")
def set_velocity(self, vel, wait=False):
if abs(vel) < c.physics.VELOCITY_MIN_CUTOFF:
corrected = 0
elif abs(vel) > c.physics.VELOCITY_MAX_CUTOFF:
raise ValueError("Velocity too large!")
else:
corrected = vel
try:
self.axis().controller.vel_setpoint = corrected
except:
self.report_odrive_error()
raise
if wait:
while abs(self.axis().encoder.vel_estimate - corrected) > \
c.physics.VEL_CONTROL_VELOCITY_TOLERANCE:
pass
def adjust_angle(self, angle):
norm = max(min(angle * ANGLE_NORMALIZATION, 1.0), -1.0)
adjusted = abs((abs(norm) ** c.physics.ANGLE_EXPONENT) * \
c.physics.ADJUSTED_ANGLE_MULTIPLIER)
clamped = min(adjusted, c.physics.VELOCITY_MAX_CUTOFF)
return math.copysign(clamped, norm) * (-1 if c.physics.FLIP_VELOCITY else 1)
def ramp_down(self):
try:
return self.axis().controller.vel_setpoint * c.physics.RAMP_DOWN_FACTOR
except:
self.report_odrive_error()
raise
def init_odrive(self):
print("Finding ODrive")
self.od = odrive.find_any()
print("Calibrating ODrive")
self.axis().motor.config.current_lim = c.physics.MOTOR_CURRENT_LIMIT
self.axis().motor.config.calibration_current = c.physics.MOTOR_CALIBRATION_CURRENT
self.axis().requested_state = odrive.enums.AXIS_STATE_MOTOR_CALIBRATION
while self.axis().current_state != odrive.enums.AXIS_STATE_IDLE:
pass
# Wait for any oscillation to dissipate
print("Waiting for oscillation to dissipate")
time.sleep(c.physics.CALIBRATION_DELAY_TIME)
self.axis().requested_state = odrive.enums.AXIS_STATE_ENCODER_OFFSET_CALIBRATION
while self.axis().current_state != odrive.enums.AXIS_STATE_IDLE:
pass
self.axis().cycle_trigger.config.gpio_pin_num = c.physics.CYCLE_TRIGGER_GPIO_PIN
self.axis().cycle_trigger.config.enabled = True
self.axis().encoder.config.bandwidth = c.physics.ENCODER_BANDWIDTH
self.axis().controller.config.vel_gain = c.physics.CONTROLLER_VEL_GAIN
self.axis().trap_traj.config.vel_limit = c.physics.TRAJECTORY_VEL_LIMIT
self.axis().trap_traj.config.accel_limit = c.physics.TRAJECTORY_ACCEL_LIMIT
self.axis().trap_traj.config.decel_limit = c.physics.TRAJECTORY_DECEL_LIMIT
self.axis().trap_traj.config.A_per_css = c.physics.TRAJECTORY_AMPS_PER_ACCELERATION
self.axis().requested_state = odrive.enums.AXIS_STATE_CLOSED_LOOP_CONTROL
self.axis().controller.config.control_mode = odrive.enums.CTRL_MODE_VELOCITY_CONTROL
self.home()
print("ODrive initialization complete")
def home(self):
print("Homing . . .")
with self.axis_context(control_mode=odrive.enums.CTRL_MODE_VELOCITY_CONTROL):
print("Finding first edge")
self.axis().cycle_trigger.last_edge_hit.has_hit = False
self.set_velocity(c.physics.HOMING_VELOCITY)
while not self.axis().cycle_trigger.last_edge_hit.has_hit:
pass
first_edge = self.axis().cycle_trigger.last_edge_hit.hit_location
print("Found first edge at %d", first_edge)
print("Finding second edge")
self.axis().cycle_trigger.last_edge_hit.has_hit = False
while not self.axis().cycle_trigger.last_edge_hit.has_hit:
pass
second_edge = self.axis().cycle_trigger.last_edge_hit.hit_location
delta = second_edge - first_edge
print("Found second edge at %d (offset %d)", second_edge, delta)
self.approx_cycles_per_revolution = abs(delta)
self.set_velocity(0, wait=True)
self.go_to_angle(0)
self.set_velocity(0, wait=True)
print("Homing complete")
def get_home(self):
if not self.axis().cycle_trigger.last_edge_hit.has_hit:
raise ValueError("has_hit was False; ensure home() has been called at least once")
return self.axis().cycle_trigger.last_edge_hit.hit_location + \
((c.physics.HOME_OFFSET_ANGLE / (2 * math.pi)) * self.approx_cycles_per_revolution)
def get_adjusted_home(self):
# Ensure that home is always below us
raw_home = self.get_home()
if self.axis().encoder.pos_estimate < raw_home:
return raw_home - self.approx_cycles_per_revolution
else:
return raw_home
def get_counts_per_radian(self):
return self.approx_cycles_per_revolution / (2 * math.pi)
def calculate_relative_position(self, pos):
return (pos - self.get_home()) % self.approx_cycles_per_revolution
def go_to_angle(self, angle, direction=None,
max_velocity=None, max_accel=None, max_decel=None):
print("Going to angle %f", angle)
if direction is not None and direction != 1 and direction != -1:
raise ValueError("Invalid direction")
home = self.get_adjusted_home()
current = self.calculate_relative_position(self.axis().encoder.pos_estimate)
base_offset = (angle * self.get_counts_per_radian())
if current < base_offset:
pos_offset = base_offset
neg_offset = base_offset - self.approx_cycles_per_revolution
else:
pos_offset = base_offset + self.approx_cycles_per_revolution
neg_offset = base_offset
if direction == 1:
offset = pos_offset
elif direction == -1:
offset = neg_offset
else:
# Find the closest
if abs(pos_offset - current) < abs(neg_offset - current):
offset = pos_offset
else:
offset = neg_offset
target = home + offset
mv = max_velocity if max_velocity is not None else c.physics.TRAJECTORY_VEL_LIMIT
ma = max_accel if max_accel is not None else c.physics.TRAJECTORY_ACCEL_LIMIT
md = max_decel if max_decel is not None else c.physics.TRAJECTORY_DECEL_LIMIT
with self.axis_context(control_mode=odrive.enums.CTRL_MODE_POSITION_CONTROL,
trajectory_mv=mv, trajectory_ma=ma, trajectory_md=md):
print("Seeking to %d (currently at %f)", target, self.axis().encoder.pos_estimate)
# ODrive begins calculating based on the current pos_setpoint and vel_setpoint
# We want it to be relative to the current pos/vel, so use this workaround
self.axis().controller.pos_setpoint = self.axis().encoder.pos_estimate
self.axis().controller.vel_setpoint = self.axis().encoder.vel_estimate
self.axis().controller.move_to_pos(target)
while self.axis().controller.config.control_mode != \
odrive.enums.CTRL_MODE_POSITION_CONTROL:
pass
print("Trapezoidal planning finished")
pos_tolerance = c.physics.POS_CONTROL_OFFSET_TOLERANCE
velocity_tolerance = c.physics.POS_CONTROL_VELOCITY_TOLERANCE
tick_count = 0
while tick_count < c.physics.POS_CONTROL_TICK_COUNT:
while True:
if abs(self.axis().encoder.pos_estimate - target) < pos_tolerance and \
abs(self.axis().encoder.vel_estimate) < velocity_tolerance:
break
tick_count = 0
tick_count += 1
print("Go-to-angle complete")
|
# Program that reads a person's full name and says whether it contains "Silva"
nome = input('Enter your full name: ').title()
splt = nome.split()
if 'Silva' in splt:
    print('Your name contains Silva.')
else:
    print('Your name does not contain Silva.')
|
import collections
from typing import List
class Solution:
def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
m = collections.Counter(nums1)
result = []
for num in nums2:
if num in m:
result.append(num)
if m[num] == 1:
del m[num]
else:
m[num] -= 1
return result
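# Minimal usage sketch (not part of the original solution): the Counter-based
# intersection preserves multiplicity, e.g. [1, 2, 2, 1] and [2, 2] -> [2, 2].
if __name__ == "__main__":
    print(Solution().intersect([1, 2, 2, 1], [2, 2]))  # expected: [2, 2]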
|
# 140000000
LILIN = 1201000
sm.setSpeakerID(LILIN)
sm.sendNext("Alright, I've done enough explaining for now. Let's move on to the next stage. What's the next stage, you ask? I just told you. Train as hard as you can until you become strong enough to defeat the Black Mage with a single blow.")
sm.sendSay("You may have been a hero in the past, but that was hundreds of years ago. Even if it weren't for the curse of the Black Mage, all those years you spent frozen in time have stiffened your body. You must loosen up a bit and slowly regain your agility. How do you do that, you ask?")
if sm.sendAskAccept("Don't you know that you must first master the fundamentals? So the wise thing to do is to begin with #bBasic Training#k. Oh, of course, I forgot that you lost your memory. Well, that's why I'm here. You'll just have to experience it yourself. Shall we begin?"):
sm.startQuest(parentID)
sm.removeEscapeButton()
sm.sendNext("The population of Rien may be mostly Penguins, but even this island has monsters. You'll find #o0100131#s if you go to #b#m140020000##k, located on the right side of the town. Please defeat #r10 of those #o0100131#s#k. I'm sure you'll have no trouble defeating the #o0100131#s that even the slowest penguins here can defeat.")
else:
sm.sendNext("What are you so hesitant about? You're a hero! You gotta strike while the iron is hot! Come on, let's do this!")
|
class Range:
def __init__(self,start,stop=None,step=1):
if step==0:
raise ValueError("step cant be zero")
        elif stop is None:
start,stop=0,start
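        # For a positive step this is the ceiling of (stop - start) / step.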
self._length=max(0,(stop-start+step-1)//step)
self._start=start
self._step=step
def __len__(self):
return self._length
def __getitem__(self,k):
if k<0:
k+=len(self)
if not 0<=k< self._length:
raise IndexError('Index Out Of range')
return self._start+k*self._step
if __name__=="__main__":
for i in Range(0,23,3):
print(i)
for j in Range(90):
print("the red hood ",j)
|
#!/usr/bin/env python3
total = 0
with open("input") as infile:
for line in infile:
output = line.split(" | ")[1].strip()
output = output.split(" ")
for value in output:
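            # digits 1, 7, 4 and 8 use 2, 3, 4 and 7 segments respectively,
            # so the segment count alone identifies them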
if len(value) in [2, 3, 4, 7]:
total += 1
print(total)
|
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
import numpy as np
from solutions import get_vals
cmap = plt.cm.coolwarm
cmap_r = plt.cm.coolwarm_r
def sqrt_riemann_surface_1():
"""riemann surface for real part of sqrt(z)"""
fig = plt.figure()
ax = Axes3D(fig)
X = np.arange(-5, 5, 0.25)
Y = np.arange(-5, 5, 0.25)
X, Y = np.meshgrid(X, Y)
Z = np.real(np.sqrt(X+1j*Y))
ax.plot_surface(X, Y, Z, cstride=1, rstride=1, linewidth=0, cmap=cmap)
ax.plot_surface(X, Y, -Z, cstride=1, rstride=1, linewidth=0, cmap=cmap)
plt.savefig('sqrt_riemann_surface_1.pdf', bbox_inches='tight', pad_inches=0)
def sqrt_riemann_surface_2():
"""riemann surface for imaginary part of sqrt(z)"""
fig = plt.figure()
ax = Axes3D(fig)
X = np.arange(-5, 5, 0.25)
Y = np.arange(-5, 0, 0.25)
X, Y = np.meshgrid(X, Y)
Z = np.imag(np.sqrt(X+1j*Y))
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, linewidth=0, cmap=cmap_r)
ax.plot_surface(X, Y, -Z, rstride=1, cstride=1, linewidth=0, cmap=cmap)
X = np.arange(-5, 5, 0.25)
Y = np.arange(0, 5, .25)
X, Y = np.meshgrid(X, Y)
Z = np.imag(np.sqrt(X+1j*Y))
ax.plot_surface(X, Y, -Z, rstride=1, cstride=1, linewidth=0, cmap=cmap_r)
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, linewidth=0, cmap=cmap)
plt.savefig('sqrt_riemann_surface_2.pdf', bbox_inches='tight', pad_inches=0)
def log_riemann_surface():
"""riemann surface for imaginary part of ln(z)"""
fig = plt.figure()
ax = Axes3D(fig)
X = np.arange(-5, 5, 0.25)
Y = np.arange(-5, 0, 0.25)
X, Y = np.meshgrid(X, Y)
    Z = np.imag(np.log(X+1j*Y))
    ax.plot_surface(X, Y, Z, rstride=1, cstride=1, linewidth=0, cmap=cmap_r)
    ax.plot_surface(X, Y, Z+2*np.pi, rstride=1, cstride=1, linewidth=0, cmap=cmap_r)
    ax.plot_surface(X, Y, Z-2*np.pi, rstride=1, cstride=1, linewidth=0, cmap=cmap_r)
X = np.arange(-5, 5, .25)
Y = np.arange(0, 5, .25)
X, Y = np.meshgrid(X, Y)
Z = np.imag(np.log(X+1j*Y))
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, linewidth=0, cmap=cmap)
    ax.plot_surface(X, Y, Z+2*np.pi, rstride=1, cstride=1, linewidth=0, cmap=cmap)
    ax.plot_surface(X, Y, Z-2*np.pi, rstride=1, cstride=1, linewidth=0, cmap=cmap)
plt.savefig('log_riemann_surface.pdf', bbox_inches='tight', pad_inches=0)
def arctan_riemann_surface():
"""Riemann surface for real part of arctan(z)"""
fig = plt.figure()
ax = Axes3D(fig)
Xres, Yres = .01, .2
ax.view_init(elev=11., azim=-56)
X = np.arange(-4, -.0001, Xres)
Y = np.arange(-4, 4, Yres)
X, Y = np.meshgrid(X, Y)
    Z = np.real(np.arctan(X+1j*Y))
    ax.plot_surface(X, Y, Z, rstride=1, cstride=1, linewidth=0, cmap=cmap)
    ax.plot_surface(X, Y, Z+np.pi, rstride=1, cstride=1, linewidth=0, cmap=cmap)
    ax.plot_surface(X, Y, Z-np.pi, rstride=1, cstride=1, linewidth=0, cmap=cmap)
X = np.arange(.0001, 4, Xres)
Y = np.arange(-4, 4, Yres)
X, Y = np.meshgrid(X, Y)
    Z = np.real(np.arctan(X+1j*Y))
    ax.plot_surface(X, Y, Z, rstride=1, cstride=1, linewidth=0, cmap=cmap)
    ax.plot_surface(X, Y, Z + np.pi, rstride=1, cstride=1, linewidth=0, cmap=cmap)
    ax.plot_surface(X, Y, Z - np.pi, rstride=1, cstride=1, linewidth=0, cmap=cmap)
plt.savefig('arctan_riemann_surface.pdf', bbox_inches='tight', pad_inches=0)
def poly_color_plot_real(p, res=101):
X, Y, vals = get_vals(p, (-1, 1), (-1, 1), res=res)
plt.pcolormesh(X, Y, vals.real)
plt.savefig("poly_color_plot_real.pdf")
def poly_color_plot_imag(p, res=101):
X, Y, vals = get_vals(p, (-1, 1), (-1, 1), res=res)
plt.pcolormesh(X, Y, vals.imag)
plt.savefig("poly_color_plot_imag.pdf")
def poly_surface_plot_real(p, res=101):
X, Y, vals = get_vals(p, (-1, 1), (-1, 1), res)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(X, Y, vals.real)
plt.savefig("poly_surface_plot_real.pdf")
def poly_surface_plot_imag(p, res=101):
X, Y, vals = get_vals(p, (-1, 1), (-1, 1), res)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(X, Y, vals.imag)
plt.savefig("poly_surface_plot_imag.pdf")
if __name__=='__main__':
sqrt_riemann_surface_1()
sqrt_riemann_surface_2()
log_riemann_surface()
arctan_riemann_surface()
p = np.poly1d([1, 0, -1])
poly_color_plot_real(p)
poly_color_plot_imag(p)
poly_surface_plot_real(p)
poly_surface_plot_imag(p)
|
"""This module is used as a base for other integration tests"""
# To extract the logs from the Docker instance, override /tmp/logs
import inspect
import os
import shutil
import signal
import subprocess
import unittest
import tempfile
from collections import namedtuple
import logging
import sys
from chewie.chewie import Chewie
def get_logger(name, file=sys.stdout, log_level=logging.DEBUG):
logger = logging.getLogger(name)
if not logger.handlers:
logger.setLevel(log_level)
handler = logging.StreamHandler(file)
handler.setLevel(log_level)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
VethLink = namedtuple('VethLink', 'name ip mac')
CHEWIE_SUPPLICANT = VethLink('chewie_supp', '192.168.20.1', '8e:00:00:00:01:01')
# TODO - Add namespaces
CHEWIE_RADIUS = VethLink('chewie_radius', '192.168.21.1', '8e:00:00:00:02:01')
SUPPLICANT = VethLink('supplicant', '192.168.20.2', '8e:00:00:00:01:02')
RADIUS = VethLink('radius', '127.0.0.1', '8e:00:00:00:02:02')
# NOTE: DO NOT CHANGE THESE VALUES UNLESS CHANGES ARE MADE TO THE FREERADIUS CONFIGURATION FILES
RADIUS_IP = RADIUS.ip
RADIUS_PORT = "1812"
RADIUS_SECRET = "SECRET"
IP_LINK_PAIRS = {
CHEWIE_SUPPLICANT: SUPPLICANT,
CHEWIE_RADIUS: RADIUS,
}
LOG_DIR = '/tmp/logs/'
os.makedirs(LOG_DIR, exist_ok=True)
HANDLER_COUNTS = {}
CHEWIE_ROOT = os.environ.get('CHEWIE_ROOT', None)
if not CHEWIE_ROOT:
CHEWIE_ROOT = '/chewie-src/'
CHEWIE_CONF_DIR = CHEWIE_ROOT + '/etc/'
def auth_handler(address, group_address, *args, **kwargs): # pylint: disable=missing-docstring
logger = logging.getLogger('CHEWIE')
logger.info("Authentication successful for address %s on port %s",
str(address), str(group_address))
logger.info("Arguments passed from Chewie to Faucet: \n*args: %s", str(args))
if kwargs:
for key, value in kwargs.items():
logger.info("kwargs : %s : %s", str(key), str(value))
HANDLER_COUNTS['auth_handler'] += 1 # pytype: disable=key-error
def failure_handler(address, group_address): # pylint: disable=missing-docstring
logger = logging.getLogger('CHEWIE')
logger.info("Authentication failed for address %s on port %s",
str(address), str(group_address))
HANDLER_COUNTS['failure_handler'] += 1 # pytype: disable=key-error
def logoff_handler(address, group_address): # pylint: disable=missing-docstring
logger = logging.getLogger('CHEWIE')
logger.info("Logoff successful for address %s on port %s",
str(address), str(group_address))
HANDLER_COUNTS['logoff_handler'] += 1 # pytype: disable=key-error
class BaseTest(unittest.TestCase):
"""
This class can be used to hold common functionality of integration tests for Chewie
Inherit from this class to have an environment set up for the tests to run in.
"""
active_processes = []
chewie_pid = None
freeradius_log = None
wpa_supplicant_log = None
current_log_dir = None
test_name = "BaseTest"
@classmethod
def setUpClass(cls):
cls.prepare_freeradius()
cls.prepare_wpa_supplicant()
def setUp(self):
"""Setup environment for tests to start processes"""
self.active_processes = []
self.freeradius_log = None
self.wpa_supplicant_log = None
        HANDLER_COUNTS.clear()
        HANDLER_COUNTS.update({
            'auth_handler': 0,
            'logoff_handler': 0,
            'failure_handler': 0
        })
for link1, link2 in IP_LINK_PAIRS.items():
self.run_command_and_wait(
"ip link add {} type veth peer name {}".format(link1.name, link2.name))
for link in [link1, link2]:
self.run_command_and_wait(
"ip link set dev {} address {}".format(link.name, link.mac))
self.run_command_and_wait("ip link set {} up".format(link.name))
self.open_logs()
def tearDown(self):
"""Close Logs and Kill Opened Processes"""
self.close_logs()
        if self.chewie_pid:
os.kill(self.chewie_pid, signal.SIGKILL)
for proc in self.active_processes:
os.kill(proc.pid, signal.SIGKILL)
proc.wait()
for link1, _ in IP_LINK_PAIRS.items():
self.run_command_and_wait("ip link del {}".format(link1.name))
def open_logs(self):
"""Open Logs for Processes"""
self.current_log_dir = tempfile.mkdtemp(prefix='chewie-' + self.test_name + '-',
dir='/tmp/logs') + "/"
print('Logging test results in {}'.format(self.current_log_dir))
print(os.path.join(self.current_log_dir + "wpa_supplicant.log"))
self.freeradius_log = open(os.path.join(self.current_log_dir, "freeradius.log"), "w+")
self.wpa_supplicant_log = open(os.path.join(self.current_log_dir + "wpa_supplicant.log"),
"w+")
def close_logs(self):
"""Close Process Logs"""
self.freeradius_log.close()
self.wpa_supplicant_log.close()
def run_command_and_wait(self, command, output_file=None):
"""Run a command and wait for the process to complete"""
if output_file:
child = subprocess.Popen(command.split(), stdout=output_file)
else:
child = subprocess.Popen(command.split())
result = child.wait()
if result != 0:
raise Exception(
"Command returned with a non-zero exit code. Code: {}, Command: {}".format(
str(result), command))
def run_command_and_detach(self, command, output_file=None):
"""Run a command and return the process"""
if output_file:
child = subprocess.Popen(command.split(), stdout=output_file)
else:
child = subprocess.Popen(command.split())
self.active_processes.append(child)
return child
def start_radius(self):
"""Start Radius Server"""
# NOTE: RADIUS PORT and IP have not been set due to it
# skipping sections of the radiusd.conf file when given.
return self.run_command_and_detach("freeradius -X -l stdout", self.freeradius_log)
def start_chewie(self):
"""Start Chewie Server"""
self.chewie_pid = os.fork()
if self.chewie_pid == 0:
file = open(os.path.join(self.current_log_dir + "chewie.log"), 'w+')
logger = get_logger('CHEWIE', file)
logger.info('Starting chewie.')
chewie = Chewie(CHEWIE_SUPPLICANT.name, logger, auth_handler,
failure_handler, logoff_handler,
radius_server_ip=RADIUS_IP, radius_server_secret=RADIUS_SECRET)
chewie.run()
def start_wpa_supplicant(self, eap_method):
"""Start WPA_Supplicant / EAP Client"""
return self.run_command_and_detach(
"wpa_supplicant -dd -c/tmp/wpasupplicant/wired-{}.conf -i{} -Dwired".format(
eap_method, SUPPLICANT.name), self.wpa_supplicant_log)
def start_dhclient(self):
"""Start dhclient on the MAB port"""
return self.run_command_and_detach("dhclient -i {}".format(SUPPLICANT.name))
def check_output(self, **kwargs):
"""Check the output of the Log Files to verify state of system"""
with open(os.path.join(self.current_log_dir + "chewie.log"), "r") as file:
chewie_log = file.read()
chewie_requirements = kwargs.get("chewie_requirements", None)
if chewie_requirements:
for requirement in chewie_requirements:
assert requirement in chewie_log, "Unable to find {} in chewie logs".format(
requirement, )
currentframe = inspect.currentframe()
assert currentframe
if currentframe:
assert "Authentication successful" in chewie_log, "Authentication failed for {}".format(
currentframe.f_back.f_code.co_name)
@staticmethod
def prepare_freeradius():
chewie_rad_dir = CHEWIE_CONF_DIR + "freeradius/"
if os.path.isfile('/etc/freeradius/users'):
# Assume we are dealing with freeradius < 3
radius_config_base = '/etc/freeradius/'
else:
# Assume we are dealing with freeradius >=3 configuration
freerad_version = os.popen(
                r'freeradius -v | egrep -o -m 1 "Version ([0-9]\.[0-9])"').read().rstrip()
freerad_major_version = freerad_version.split(' ')[1]
radius_config_base = '/etc/freeradius/%s/' % freerad_major_version
try:
# Copy files
file_map = {
chewie_rad_dir + 'clients.conf': radius_config_base,
chewie_rad_dir + 'users': radius_config_base,
chewie_rad_dir + 'default/eap': radius_config_base + 'mods-available/',
chewie_rad_dir + 'default/inner-eap': radius_config_base + 'mods-available/',
chewie_rad_dir + 'default/tls': radius_config_base + 'sites-available/',
}
for src, dst in file_map.items():
shutil.copy(src, dst)
# Copy Folder
folder_map = {
chewie_rad_dir + 'certs': radius_config_base + 'certs',
}
for src, dst in folder_map.items():
if os.path.exists(dst) and os.path.isdir(dst):
shutil.rmtree(dst)
shutil.copytree(src, dst)
except OSError as err:
print("Unable to copy FreeRadius files into place.", file=sys.stderr)
raise err
@staticmethod
def prepare_wpa_supplicant():
folder_map = {
CHEWIE_CONF_DIR + 'wpasupplicant/': '/tmp/wpasupplicant',
CHEWIE_CONF_DIR + 'wpasupplicant/cert': '/tmp/cert'
}
for src, dst in folder_map.items():
if os.path.exists(dst) and os.path.isdir(dst):
shutil.rmtree(dst)
shutil.copytree(src, dst)
|
#!/usr/bin/env python3
from ideone import ideone_automation
from db_model import manage_db
import os
import time
import webbrowser
from datetime import datetime
usr = ideone_automation()
db = manage_db()
home_page = "https://ideone.com/"
def header():
print("----------------------------------------------------\n")
print("----------------- Automated Ideone -----------------\n")
print("----------------------------------------------------\n")
def clear():
os.system("clear")
def check_login():
cu = db.get_current_user()
if cu is not None:
        return cu
else:
while True:
clear()
header()
print("1. Login\n2. Back")
print("----------------------------------------------------\n")
choose = input("__ ")
if choose == "" or int(choose) == 2:
return None
else:
clear()
header()
print("Please provide correct Username and Password")
print("----------------------------------------------------\n")
usr = input("Username : ")
pas = input("Password : ")
id = db.get_user_id([usr, pas])
db.set_current_user(id)
return id
def handle_code(code):
clear()
header()
print("Name: ", code[1])
print("Tag: ", code[3])
print("Date: ", code[2].split()[0])
print("----------------------------------------------------\n")
print("1. Download the Code")
print("2. Open in Browser")
print("3. Submit in vjudge")
print("4. Back")
print("----------------------------------------------------\n")
choose = input("__ ")
try:
if choose == "" or int(choose) == 4:
return
choose = int(choose)
    except ValueError:
return
if choose == 2:
webbrowser.open(home_page+code[0])
elif choose == 1:
try:
usr.download(code[0], code[1], code[4])
clear()
header()
print("----------------------------------------------------\n")
print("Code saved in:", usr.current_dir()+"/downloads/\n")
print("----------------------------------------------------\n")
input(".\n.\npress enter to continue _ ")
        except Exception:
clear()
header()
print("----------------------------------------------------\n")
print("Error occured!! please try again.")
print("----------------------------------------------------\n")
input(".\n.\npress enter to continue _ ")
elif choose == 3:
webbrowser.open(home_page+code[0])
webbrowser.open("https://www.google.com/search?q=vjudge+"+code[1].replace(' ','+'))
handle_code(code)
def search_code(user_id):
clear()
header()
search_text = input("Search : ")
search_result = db.get_codes(search_text, user_id)
search_result.sort(key = lambda code_data : code_data[2])
search_result.reverse()
while True:
clear()
header()
        a = len(str(len(search_result) + 1)) + 2
b = 0
c = 0
for i in range(len(search_result)):
b = max(b, len(search_result[i][3]) + 1)
c = max(c, len(search_result[i][1]) + 1)
for i in range(len(search_result)):
            p1 = str(i+1) + '.'
p2 = search_result[i][3]
p3 = search_result[i][1]
p4 = search_result[i][2]
print(p1, end = "")
for j in range(a-len(p1)):
print(" ", end="")
print(p2, end = "")
for j in range(b-len(p2)):
print(" ", end="")
print(p3, end = "")
for j in range(c-len(p3)):
print(" ", end="")
print(p4.split()[0])
print("----------------------------------------------------")
print(str(len(search_result)+1)+'. Back')
print("----------------------------------------------------\n")
choose = input("__ ")
if choose == "" or int(choose) == len(search_result)+1:
return
choose = int(choose)
handle_code(search_result[choose-1])
def add_new_codes(log):
try:
usr.add_new_codes(log, db)
    except Exception:
clear()
header()
print("----------------------------------------------------\n")
print("Error occured!! please try again.")
print("----------------------------------------------------\n")
input(".\n.\npress enter to continue _ ")
def main():
log = check_login()
if log is None:
clear()
return
while True:
clear()
header()
print("1. Search Code\n2. Add New Codes\n3. Logout\n4. Back")
print("----------------------------------------------------\n")
choose = input("__ ")
if choose == "" or int(choose) == 4:
clear()
return
choose = int(choose)
if choose == 1:
search_code(log)
elif choose == 2:
add_new_codes(log)
else:
db.erase_current_user()
clear()
return
if __name__ == "__main__":
main()
|
"""empty message
Revision ID: 12ca023b94f8
Revises: e759ca20884f
Create Date: 2021-09-09 13:13:24.802259
"""
import sqlalchemy as sa
import sqlalchemy_utils
from alembic import op
from project import dbtypes
# revision identifiers, used by Alembic.
revision = "12ca023b94f8"
down_revision = "e759ca20884f"
branch_labels = None
depends_on = None
def upgrade():
op.add_column(
"event", sa.Column("allday", sa.Boolean(), server_default="0", nullable=False)
)
op.add_column(
"eventdate",
sa.Column("allday", sa.Boolean(), server_default="0", nullable=False),
)
op.add_column(
"eventsuggestion",
sa.Column("allday", sa.Boolean(), server_default="0", nullable=False),
)
def downgrade():
op.drop_column("eventsuggestion", "allday")
op.drop_column("eventdate", "allday")
op.drop_column("event", "allday")
|
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
from . RunnerBase import RunnerBaseClass
from .. Analysers.GPUVerify import GPUVerifyAnalyser
import logging
import os
import psutil
import re
import sys
import yaml
_logger = logging.getLogger(__name__)
class GPUVerifyRunnerException(Exception):
def __init__(self, msg):
self.msg = msg
class GPUVerifyRunner(RunnerBaseClass):
softTimeoutDiff = 5
def __init__(self, boogieProgram, workingDirectory, rc):
_logger.debug('Initialising {}'.format(boogieProgram))
super(GPUVerifyRunner, self).__init__(boogieProgram, workingDirectory, rc)
# Sanity checks
# TODO
self.softTimeout = self.maxTimeInSeconds
if self.maxTimeInSeconds > 0:
# We use GPUVerify's timeout function and enforce the
# requested timeout and enforce a hard timeout slightly later
self.maxTimeInSeconds = self.maxTimeInSeconds + self.softTimeoutDiff
if not self.toolPath.endswith('.py'):
raise GPUVerifyRunnerException(
'toolPath needs to be the GPUVerify python script')
@property
def name(self):
return "gpuverify"
def _buildResultDict(self):
results = super(GPUVerifyRunner, self)._buildResultDict()
# TODO: Remove this. It's now redundant
results['hit_hard_timeout'] = results['backend_timeout']
return results
def GetNewAnalyser(self, resultDict):
return GPUVerifyAnalyser(resultDict)
def run(self):
# Run using python interpreter
cmdLine = [ sys.executable, self.toolPath ]
cmdLine.append('--timeout={}'.format(self.softTimeout))
# Note we ignore self.entryPoint
_logger.info('Ignoring entry point {}'.format(self.entryPoint))
# GPUVerify needs PATH environment variable set
env = {}
path = os.getenv('PATH')
        if path is None:
path = ""
env['PATH'] = path
cmdLine.extend(self.additionalArgs)
# Add the boogie source file as last arg
cmdLine.append(self.programPathArgument)
backendResult = self.runTool(cmdLine,
isDotNet=False,
envExtra=env)
if backendResult.outOfTime:
_logger.warning('GPUVerify hit hard timeout')
def get():
return GPUVerifyRunner
|
from edmonds_karp import FlowNetwork, defaultdict
class CapacityScaler(FlowNetwork):
__slots__ = "U"
def __init__(self):
super().__init__()
self.U = -self.INF
self.discovered = defaultdict(lambda: False)
self.pred = defaultdict(lambda: None)
def insert_edges_from_iterable(self, edges):
for edge in edges:
self.insert_edge(edge)
self.U = max(self.U, self[edge[0]][edge[1]].cap)
def augment_paths(self, source, sink, delta):
"""Find all augmenting paths with a bottleneck capacity >= delta."""
# mark all nodes as unvisited
while True:
self.mark_as_unvisited()
S = [source]
gf = 0
while S:
u = S.pop()
if u == sink:
cf, _ = self.update_network(self.pred, source, sink)
gf = max(gf, cf)
continue
if self.discovered[u]:
continue
self.discovered[u] = True
for v in self[u]:
if (self[u][v].cap - self[u][v].flow) >= delta and not self.discovered[v]:
self.pred[v] = u
S.append(v)
if not gf:
break
def find_max_flow(self, source, sink):
self.set_flows(0)
self.build_residual_graph()
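        # Capacity scaling: start from the largest power of two <= U (the
        # maximum edge capacity) and only augment along residual edges with
        # capacity >= delta, halving delta each phase until it reaches zero.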
delta = 1 << (self.U.bit_length() - 1)
while delta > 0:
self.augment_paths(source, sink, delta)
delta >>= 1
return self.maxflow(source)
|
# -*- coding=utf-8 -*-
r"""
key(str)-value(str) database-files
https://docs.python.org/3/library/dbm.html
"""
from ._filebase import FileBase
import dbm
from typing import Union
_T = Union[str, bytes]
class DBFile(FileBase):
FILE_EXTENSION = '.dbm'
    READ_ONLY = 'r'  # only read from file
    WRITE_AND_READ = 'w'  # read and write
    CREATE_IF_NOT_EXISTS = 'c'  # read and write (and create if it does not exist)
    ALWAYS_NEW = 'n'  # read and write, clearing any existing database file
def __init__(self, fp: str, mode: str = WRITE_AND_READ):
self._filepath = fp
self._file = dbm.open(fp, mode) # noqa
def __del__(self):
self._file.close()
def __enter__(self):
self._file.__enter__()
return self # don't know if this is more useful than useless
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.__exit__(exc_type, exc_val, exc_tb)
####################################################################################################################
def __getitem__(self, key: _T) -> bytes:
return self._file[key]
def get(self, key: _T, default=None) -> bytes:
return self._file.get(key, default)
def __setitem__(self, key: _T, value: _T):
self._file[key] = value
def set(self, key: _T, value: _T):
self.__setitem__(key, value)
def __delitem__(self, key: _T):
del self._file[key]
def delete(self, key: _T):
del self._file[key]
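# Hedged usage sketch (assumes FileBase adds no extra constructor requirements);
# values round-trip as bytes, exactly as with the underlying dbm module:
#   with DBFile('/tmp/example_store', DBFile.CREATE_IF_NOT_EXISTS) as store:
#       store['greeting'] = 'hello'
#       assert store['greeting'] == b'hello'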
|
from setuptools import setup
setup(name="hydrand", packages=["hydrand"])
|
from aria import workflow
from aria.orchestrator.workflows.api import task
from aria.orchestrator.workflows.exceptions import TaskException
INTERFACE_NAME = 'Custom'
DEPLOY_OPERATION_NAME = 'deploy'
@workflow
def deploy(ctx, graph):
"""
Custom workflow to call the operations on the Deploy interface.
"""
print "Inside the deploy workflow"
for node in ctx.model.node.iter():
try:
graph.add_tasks(task.OperationTask(node,
interface_name=INTERFACE_NAME,
operation_name=DEPLOY_OPERATION_NAME))
except TaskException:
pass
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stand-alone script to evalute the word error rate (WER) for ASR tasks.
Tensorflow and Lingvo are not required to run this script.
Example of Usage::
`python simple_wer.py file_hypothesis file_reference`
where `file_hypothesis` is the file name for hypothesis text and
`file_reference` is the file name for reference text.
Or you can use this file as a library, and call either of the following:
- `ComputeWER(hyp, ref)` compute WER for one pair of hypothesis/reference
- `AverageWERs(hyps, refs)` average WER for a list of hypotheses/references
Note to evaluate the ASR, we consider the following pre-processing:
- change transcripts to lower-case
- remove punctuation: `" , . ! ? ( ) [ ]`
- remove extra empty spaces
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
def _ComputeEditDistance(hs, rs):
"""Compute edit distance between two list of strings.
Args:
hs: the list of words in the hypothesis sentence
rs: the list of words in the reference sentence
Returns:
edit distance as an integer
"""
dr, dh = len(rs) + 1, len(hs) + 1
dists = [[]] * dr
# initialization for dynamic programming
for i in range(dr):
dists[i] = [0] * dh
for j in range(dh):
if i == 0:
dists[0][j] = j
elif j == 0:
dists[i][0] = i
# do dynamic programming
for i in range(1, dr):
for j in range(1, dh):
if rs[i - 1] == hs[j - 1]:
dists[i][j] = dists[i - 1][j - 1]
else:
tmp0 = dists[i - 1][j - 1] + 1
tmp1 = dists[i][j - 1] + 1
tmp2 = dists[i - 1][j] + 1
dists[i][j] = min(tmp0, tmp1, tmp2)
return dists[-1][-1]
def _PreprocessTxtBeforeWER(txt):
"""Preprocess text before WER caculation."""
# lower case, and remove \t and new line
txt = re.sub(r'[\t\n]', ' ', txt.lower())
# remove punctuation before space
txt = re.sub(r'[,.\?!]+ ', ' ', txt)
# remove punctuation before end
txt = re.sub(r'[,.\?!]+$', ' ', txt)
# remove punctuation after space
txt = re.sub(r' [,.\?!]+', ' ', txt)
# remove quotes, [, ], ( and )
txt = re.sub(r'["\(\)\[\]]', '', txt)
# remove extra space
txt = re.sub(' +', ' ', txt.strip())
return txt
def ComputeWER(hyp, ref):
"""Computes WER for ASR by ignoring diff of punctuation, space, captions.
Args:
hyp: Hypothesis string.
ref: Reference string.
Returns:
num of errors, num of reference words
"""
hyp = _PreprocessTxtBeforeWER(hyp)
ref = _PreprocessTxtBeforeWER(ref)
# compute num of word errors
hs = hyp.split()
rs = ref.split()
d = _ComputeEditDistance(hs, rs)
# num of words. For empty ref we set num = 1
nr = max(len(rs), 1)
return d, nr
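# Illustrative example: ComputeWER('the cat sat', 'The cat sat on the mat.')
# returns (3, 6) -- three word errors against six reference words, i.e. 50% WER.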
def AverageWERs(hyps, refs):
"""Computes average WER from a list of references/hypotheses.
Args:
hyps: list of hypothesis strings.
refs: list of reference strings.
Returns:
total num of errors, total num of words in refs
"""
totale, totalw = 0, 0
for hyp, ref in zip(hyps, refs):
ei, ni = ComputeWER(hyp, ref)
totale += ei
totalw += ni
print('total error = %d, total word = %d, wer = %.2f' %
(totale, totalw, totale * 100.0 / totalw))
return totale, totalw
def main(argv):
hyp = open(argv[1], 'r').read()
ref = open(argv[2], 'r').read()
ne, nw = ComputeWER(hyp, ref)
print('num of error = %d, num of word = %d, wer = %.2f' %
(ne, nw, ne * 100.0 / nw))
if __name__ == '__main__':
if len(sys.argv) != 3:
print("""
Example of Usage:
python simple_wer.py file_hypothesis file_reference
where file_hypothesis is the file name for hypothesis text
and file_reference is the file name for reference text.
Or you can use this file as a library, and call either of the following
- ComputeWER(hyp, ref) to compute WER for one pair of hypothesis/reference
- AverageWERs(hyps, refs) to average WER for a list of hypotheses/references
""")
sys.exit(1)
main(sys.argv)
|
import pytest
import feeds.notification_level as level
from feeds.exceptions import (
MissingLevelError
)
def test_register_level_ok():
class TestLevel(level.Level):
id=666
name="test"
level.register(TestLevel)
assert '666' in level._level_register
assert level._level_register['666'] == TestLevel
assert 'test' in level._level_register
assert level._level_register['test'] == TestLevel
def test_register_level_bad():
class NoId(level.Level):
id=None
name="noid"
with pytest.raises(ValueError) as e:
level.register(NoId)
assert "A level must have an id" in str(e.value)
class NoName(level.Level):
id=667
with pytest.raises(ValueError) as e:
level.register(NoName)
assert "A level must have a name" in str(e.value)
class DuplicateId(level.Level):
id='1'
name='duplicate'
with pytest.raises(ValueError) as e:
level.register(DuplicateId)
assert "The level id '1' is already taken by alert" in str(e.value)
class DuplicateName(level.Level):
id=668
name="warning"
with pytest.raises(ValueError) as e:
level.register(DuplicateName)
assert "The level 'warning' is already registered" in str(e.value)
with pytest.raises(TypeError) as e:
level.register(str)
assert "Can only register Level subclasses" in str(e.value)
with pytest.raises(ValueError) as e:
level.register(level.Alert)
assert "The level id '1' is already taken by alert" in str(e.value)
def test_get_level():
l = level.get_level('warning')
assert isinstance(l, level.Warning)
assert l.id == level.Warning.id
assert l.name == level.Warning.name
missing = "not_a_real_level"
with pytest.raises(MissingLevelError) as e:
level.get_level(missing)
assert 'Level "{}" not found'.format(missing) in str(e.value)
def test_translate_level():
l = level.Alert()
l_trans = level.translate_level(l)
assert isinstance(l_trans, level.Alert)
l = level.translate_level(1)
assert isinstance(l, level.Alert)
assert l.name == 'alert'
l = level.translate_level('1')
assert isinstance(l, level.Alert)
assert l.name == 'alert'
l = level.translate_level('alert')
assert isinstance(l, level.Alert)
with pytest.raises(MissingLevelError) as e:
level.translate_level('foo')
assert 'Level "foo" not found' in str(e.value)
with pytest.raises(TypeError) as e:
level.translate_level([])
assert 'Must be either a subclass of Level or a string' in str(e.value)
|
import json
import os
from abc import ABC, abstractmethod
from src import DATA_FOLDER, UNZIPED_FOLDER_NAME
from src import settings
from src.db_models.models import dict_db_models
from src.io import CNAE_JSON_NAME, NATJU_JSON_NAME, QUAL_SOCIO_JSON_NAME, MOTIVOS_JSON_NAME, PAIS_JSON_NAME, \
MUNIC_JSON_NAME
from src.io.get_last_ref_date import main as get_last_ref_date
class EngineCore(ABC):
def __init__(self, type_file, table_model, n_rows_chunk=settings.N_ROWS_CHUNKSIZE, ref_date=None):
self._type_file = type_file
self._n_rows_chunk = n_rows_chunk
self._ref_date = ref_date
self.get_ref_date()
self.get_all_jsons()
self.get_list_files(type_file=type_file)
self._tbl = table_model()
self._table_name = self._tbl.__tablename__
assert self._table_name in dict_db_models.keys()
self._cols = self._tbl.list_cols()
self._n_raw_columns = self._tbl.N_RAW_COLUMNS
self._dict_args_read_csv = {
'sep': ';',
'encoding': 'latin1',
'header': None,
'dtype': str,
'engine': 'c',
'memory_map': True
}
def get_ref_date(self):
""" Get ref date to get data from """
self._ref_date = self._ref_date or get_last_ref_date()
def __repr__(self):
return f"{self._type_file} with ref_date: '{self._ref_date}'"
def get_list_files(self, type_file):
list_files_full_path = []
if isinstance(type_file, str):
folder_unziped = os.path.join(DATA_FOLDER, self._ref_date, UNZIPED_FOLDER_NAME)
list_files = os.listdir(folder_unziped)
list_files_full_path = [os.path.join(folder_unziped, file) for file in list_files if
type_file in file]
elif isinstance(type_file, list):
folder_unziped = os.path.join(DATA_FOLDER, self._ref_date, UNZIPED_FOLDER_NAME)
list_files = os.listdir(folder_unziped)
for file in type_file:
for list_file in list_files:
if file in list_file:
list_files_full_path.append(os.path.join(folder_unziped, list_file))
self.list_files_full_path = list_files_full_path
def load_dicts_code_to_name(self, file_name):
full_path_file_name = os.path.join(DATA_FOLDER, self._ref_date, UNZIPED_FOLDER_NAME, file_name)
with open(full_path_file_name, encoding='utf-8') as json_file:
return json.load(json_file)
def get_all_jsons(self):
self._dict_cnae = self.load_dicts_code_to_name(file_name=CNAE_JSON_NAME)
self._dict_natju = self.load_dicts_code_to_name(file_name=NATJU_JSON_NAME)
self._dict_qual_socio = self.load_dicts_code_to_name(file_name=QUAL_SOCIO_JSON_NAME)
self._dict_motivos = self.load_dicts_code_to_name(file_name=MOTIVOS_JSON_NAME)
self._dict_pais = self.load_dicts_code_to_name(file_name=PAIS_JSON_NAME)
self._dict_munic = self.load_dicts_code_to_name(file_name=MUNIC_JSON_NAME)
@abstractmethod
def delete_pk_and_indexes(self):
pass
@abstractmethod
def create_pk_and_indexes(self):
pass
@abstractmethod
def parse_file(self, file):
pass
def parse_all_files(self):
for file in sorted(self.list_files_full_path):
self.parse_file(file=file)
@abstractmethod
def execute(self):
pass
def _display_status(self, dict_status):
filename = dict_status['filename']
total_rows_file = dict_status['total_rows_file']
lasts_this_round = dict_status['lasts_this_round']
lasts_since_begin_file = dict_status['lasts_since_begin_file']
lasts_since_begin_global = dict_status['lasts_since_begin_global']
print(
f"{filename} | rows this file {total_rows_file:<10_} rows global {self._total_rows_global:<10_} | this round {lasts_this_round:<3}, since begin file {lasts_since_begin_file}, since begin global {lasts_since_begin_global} [seconds]")
|
# Generated by Django 3.1.6 on 2021-02-04 20:20
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('headline', models.CharField(max_length=200)),
('source', models.CharField(max_length=200)),
],
),
]
|
from torch.autograd import Variable
class Learner(object):
def __init__(self, DNN_AE, input, target, batch_size=1000, epochs=1000):
self.DNN_AE = DNN_AE
self.input = Variable(input)
self.input_dim = input.shape
self.target = Variable(target)
self.target_dim = target.shape
self.epochs = epochs
self.batch_size = batch_size
self.current_batch = self.input[:self.batch_size,:]
self.current_target = self.target[:self.batch_size,:]
self.batch_num = 0
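        # ceil(N / batch_size): one extra batch when the last slice is partial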
self.batch_num_total = self.input_dim[0] // self.batch_size + (self.input_dim[0] % self.batch_size != 0)
def reset(self):
self.batch_num = 0
self.current_batch = self.input[:self.batch_size,:]
self.current_target = self.target[:self.batch_size,:]
def next_batch(self):
self.batch_num += 1
self.current_batch = self.input[self.batch_size * self.batch_num : self.batch_size * (self.batch_num+1),:]
self.current_target = self.target[self.batch_size * self.batch_num : self.batch_size * (self.batch_num+1),:]
if self.batch_num == self.batch_num_total :
self.reset()
def learn(self):
for e in range(self.epochs) :
for b in range(self.batch_num_total):
self.DNN_AE.optimizer.zero_grad()
predictions = self.DNN_AE.forward(self.current_batch)
loss = self.DNN_AE.loss(predictions, self.current_target)
print('epoch', str(e+1), '[ batch', str(b+1),'] - loss : ', str(loss.item()))
loss.backward()
self.next_batch()
self.DNN_AE.optimizer.step()
return self.DNN_AE
|
from __future__ import absolute_import
import http.client
# 2To3-division: this file is skipped as it is a version-specific implementation.
def div(a, b):
try:
return a / b
except ZeroDivisionError as exc:
return None
class MyClass (object, metaclass=type):
pass
|
from .SpinConfiguration import *
from .Hamiltonian import *
def montecarlo_metropolis(N, ham, temp, montecarlo_steps, burn_steps=0):
"""
Performs metropolis sampling to determine thermal quantities at the specified temperature
for an N-spin system described by a particular Hamiltonian.
Parameters
----------
N : int
The number of sites in the spin system.
ham : Hamiltonian
The Hamiltonian used to describe the spin system.
temp : float
The temperature of the spin system.
montecarlo_steps : int
The number of times the metropolis sweep is performed and the resulting values are kept.
burn_steps : int, default: 0
The number of times the metropolis sweep is performed before values are kept.
Returns
-------
avg_energy : float
The average of the energy values produced by the kept metropolis sweeps.
avg_mag : float
The average of the magnetization values produced by the kept metropolis sweeps.
heat_cap : float
The heat capacity derived from the average values produced by the kept metropolis sweeps.
mag_susceptibility : float
The magnetic susceptibility derived from the average values produced by the kept metropolis sweeps.
"""
# Initialize spin configuration with N sites
spins = SpinConfiguration()
spins.randomize(N)
energies = []
magnetizations = []
energies_squared = []
magnetizations_squared = []
# runs sweep without producing values
for i in range(burn_steps):
spins = ham.metropolis_sweep(spins, temp)
# runs sweep and populates lists
for i in range(montecarlo_steps):
spins = ham.metropolis_sweep(spins, temp)
E_step = ham.compute_energy(spins)
M_step = spins.compute_magnetization()
energies.append(E_step)
magnetizations.append(M_step)
energies_squared.append(E_step**2)
magnetizations_squared.append(M_step**2)
# calculates quantities of interest
avg_energy = sum(energies) / (montecarlo_steps)
avg_mag = sum(magnetizations) / (montecarlo_steps)
avg_energies_squared = sum(energies_squared) / (montecarlo_steps)
avg_magnetizations_squared = sum(magnetizations_squared) / (montecarlo_steps)
heat_cap = (avg_energies_squared - avg_energy**2) / (temp**2)
mag_susceptibility = (avg_magnetizations_squared - avg_mag**2) / temp
return avg_energy, avg_mag, heat_cap, mag_susceptibility
def generate_montecarlo_thermal_quantities(
N, ham, start=1, end=10, step=0.1, m_steps=1000, burn_steps=100
):
"""
Uses metropolis sampling to generate lists of the average energy, average magnetization, heat
capacity, and magnetic susceptibility values for an N-spin system over a specified range of temperatures.
Parameters
----------
N : int
The number of spins in the system.
ham : Hamiltonian
The Hamiltonian used to characterize the system.
start : float, default: 1
        The start of the temperature range.
end : float, default: 10
The end of the temperature range.
step : float, default: 0.1
The size of the gap between successive temperature values.
m_steps : int, default: 1000
The number of times the metropolis sweep is run and the results are kept.
burn_steps : int, default: 100
The number of times the metropolis sweep is run before the results are kept.
Returns
-------
temps_list : list
The list generated from the start, step, and end temperature values.
energies_list : list
The generated list of average energies.
magnetization_list : list
The generated list of average magnetization values.
heat_capacity_list : list
The generated list of heat capacity values.
mag_susceptibility_list : list
The generated list of magnetic susceptibility values.
"""
temps_list = []
energies_list = []
magnetization_list = []
heat_capacity_list = []
mag_susceptibility_list = []
temp = start
while temp < end:
temps_list.append(temp)
temp += step
for temp in temps_list:
a, b, c, d = montecarlo_metropolis(N, ham, temp, m_steps, burn_steps)
energies_list.append(a)
magnetization_list.append(b)
heat_capacity_list.append(c)
mag_susceptibility_list.append(d)
return (
temps_list,
energies_list,
magnetization_list,
heat_capacity_list,
mag_susceptibility_list,
)
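# Hedged usage sketch (SpinConfiguration and Hamiltonian come from this package;
# the Hamiltonian constructor arguments below are illustrative assumptions only):
#   ham = Hamiltonian(J=-1.0, mu=0.5)
#   E, M, C, X = montecarlo_metropolis(N=8, ham=ham, temp=2.0,
#                                      montecarlo_steps=1000, burn_steps=100)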
|
"""
[M] Given an array, find the sum of all numbers between the K1’th and K2’th
smallest elements of that array.
Example 1:
Input: [1, 3, 12, 5, 15, 11], and K1=3, K2=6
Output: 23
Explanation: The 3rd smallest number is 5 and 6th smallest number 15.
The sum of numbers coming
between 5 and 15 is 23 (11+12).
"""
from heapq import *
# Time: O(N * logN) Space: O(N)
def find_sum_of_elements(nums, k1, k2):
minHeap = []
# insert all numbers to the min heap
for num in nums:
heappush(minHeap, num)
# remove k1 small numbers from the min heap
for _ in range(k1):
heappop(minHeap)
elementSum = 0
# sum next k2-k1-1 numbers
for _ in range(k2 - k1 - 1):
elementSum += heappop(minHeap)
return elementSum
def main():
print("Sum of all numbers between k1 and k2 smallest numbers: " +
str(find_sum_of_elements([1, 3, 12, 5, 15, 11], 3, 6)))
print("Sum of all numbers between k1 and k2 smallest numbers: " +
str(find_sum_of_elements([3, 5, 8, 7], 1, 4)))
main()
|
'''the main mod of ika'''
import random
from string import ascii_letters as al
import flask
from flask_bootstrap import Bootstrap
from frontend import frontend
from endpoint import app
from endpoint import get_topics
Bootstrap(app)
app.register_blueprint(frontend)
# app config for image upload
app.config['ALLOWED_EXTENSIONS'] = set(['png', 'jpg', 'jpeg', 'gif'])
app.config['MAX_CONTENT_LENGTH'] = 4 * 1024 * 1024
app.config['SECRET_KEY'] = ''.join(random.choices(al, k=15))
@app.errorhandler(404)
def page_not_found(err=None):
'''page not found'''
print(err)
return flask.render_template('404.html', topics=get_topics()), 404
@app.route('/service-worker.js')
def service_worker():
'''service worker'''
return app.send_static_file('service-worker.js')
if __name__ == '__main__':
app.run(debug=True)
|
import os
import sys
import functools
import platform
import textwrap
import pytest
IS_PYPY = '__pypy__' in sys.builtin_module_names
def popen_text(call):
"""
Augment the Popen call with the parameters to ensure unicode text.
"""
return functools.partial(call, universal_newlines=True) \
if sys.version_info < (3, 7) else functools.partial(call, text=True)
def win_sr(env):
"""
On Windows, SYSTEMROOT must be present to avoid
> Fatal Python error: _Py_HashRandomization_Init: failed to
> get random numbers to initialize Python
"""
if env is None:
return
if platform.system() == 'Windows':
env['SYSTEMROOT'] = os.environ['SYSTEMROOT']
return env
def find_distutils(venv, imports='distutils', env=None, **kwargs):
py_cmd = 'import {imports}; print(distutils.__file__)'.format(**locals())
cmd = ['python', '-c', py_cmd]
return popen_text(venv.run)(cmd, env=win_sr(env), **kwargs)
def count_meta_path(venv, env=None):
py_cmd = textwrap.dedent(
"""
import sys
is_distutils = lambda finder: finder.__class__.__name__ == "DistutilsMetaFinder"
print(len(list(filter(is_distutils, sys.meta_path))))
""")
cmd = ['python', '-c', py_cmd]
return int(popen_text(venv.run)(cmd, env=win_sr(env)))
def test_distutils_stdlib(venv):
"""
Ensure stdlib distutils is used when appropriate.
"""
env = dict(SETUPTOOLS_USE_DISTUTILS='stdlib')
assert venv.name not in find_distutils(venv, env=env).split(os.sep)
assert count_meta_path(venv, env=env) == 0
def test_distutils_local_with_setuptools(venv):
"""
Ensure local distutils is used when appropriate.
"""
env = dict(SETUPTOOLS_USE_DISTUTILS='local')
loc = find_distutils(venv, imports='setuptools, distutils', env=env)
assert venv.name in loc.split(os.sep)
assert count_meta_path(venv, env=env) <= 1
@pytest.mark.xfail('IS_PYPY', reason='pypy imports distutils on startup')
def test_distutils_local(venv):
"""
Even without importing, the setuptools-local copy of distutils is
preferred.
"""
env = dict(SETUPTOOLS_USE_DISTUTILS='local')
assert venv.name in find_distutils(venv, env=env).split(os.sep)
assert count_meta_path(venv, env=env) <= 1
def test_pip_import(venv):
"""
Ensure pip can be imported.
Regression test for #3002.
"""
cmd = ['python', '-c', 'import pip']
popen_text(venv.run)(cmd)
def test_distutils_has_origin():
"""
Distutils module spec should have an origin. #2990.
"""
assert __import__('distutils').__spec__.origin
ENSURE_IMPORTS_ARE_NOT_DUPLICATED = r"""
# Depending on the importlib machinery and _distutils_hack, some imports are
# duplicated resulting in different module objects being loaded, which prevents
# patches as shown in #3042.
# This script provides a way of verifying if this duplication is happening.
from distutils import cmd
import distutils.command.sdist as sdist
# import last to prevent caching
from distutils import {imported_module}
for mod in (cmd, sdist):
assert mod.{imported_module} == {imported_module}, (
f"\n{{mod.dir_util}}\n!=\n{{{imported_module}}}"
)
print("success")
"""
@pytest.mark.parametrize(
"distutils_version, imported_module",
[
("stdlib", "dir_util"),
("stdlib", "file_util"),
("stdlib", "archive_util"),
("local", "dir_util"),
("local", "file_util"),
("local", "archive_util"),
]
)
def test_modules_are_not_duplicated_on_import(
distutils_version, imported_module, tmpdir_cwd, venv
):
env = dict(SETUPTOOLS_USE_DISTUTILS=distutils_version)
script = ENSURE_IMPORTS_ARE_NOT_DUPLICATED.format(imported_module=imported_module)
cmd = ['python', '-c', script]
output = popen_text(venv.run)(cmd, env=win_sr(env)).strip()
assert output == "success"
ENSURE_LOG_IMPORT_IS_NOT_DUPLICATED = r"""
# Similar to ENSURE_IMPORTS_ARE_NOT_DUPLICATED
import distutils.dist as dist
from distutils import log
assert dist.log == log, (
f"\n{dist.log}\n!=\n{log}"
)
print("success")
"""
@pytest.mark.parametrize("distutils_version", "local stdlib".split())
def test_log_module_is_not_duplicated_on_import(distutils_version, tmpdir_cwd, venv):
env = dict(SETUPTOOLS_USE_DISTUTILS=distutils_version)
cmd = ['python', '-c', ENSURE_LOG_IMPORT_IS_NOT_DUPLICATED]
output = popen_text(venv.run)(cmd, env=win_sr(env)).strip()
assert output == "success"
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'CustomImageOsType',
'EnableStatus',
'EnvironmentPermission',
'HostCachingOptions',
'LinuxOsState',
'NotificationChannelEventType',
'PolicyEvaluatorType',
'PolicyFactName',
'PolicyStatus',
'PremiumDataDisk',
'SourceControlType',
'StorageType',
'TransportProtocol',
'UsagePermissionType',
'VirtualMachineCreationSource',
'WindowsOsState',
]
class CustomImageOsType(str, Enum):
"""
The OS type of the custom image (i.e. Windows, Linux)
"""
WINDOWS = "Windows"
LINUX = "Linux"
NONE = "None"
class EnableStatus(str, Enum):
"""
The status of the schedule (i.e. Enabled, Disabled)
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class EnvironmentPermission(str, Enum):
"""
The access rights to be granted to the user when provisioning an environment
"""
READER = "Reader"
CONTRIBUTOR = "Contributor"
class HostCachingOptions(str, Enum):
"""
Caching option for a data disk (i.e. None, ReadOnly, ReadWrite).
"""
NONE = "None"
READ_ONLY = "ReadOnly"
READ_WRITE = "ReadWrite"
class LinuxOsState(str, Enum):
"""
The state of the Linux OS (i.e. NonDeprovisioned, DeprovisionRequested, DeprovisionApplied).
"""
NON_DEPROVISIONED = "NonDeprovisioned"
DEPROVISION_REQUESTED = "DeprovisionRequested"
DEPROVISION_APPLIED = "DeprovisionApplied"
class NotificationChannelEventType(str, Enum):
"""
The event type for which this notification is enabled (i.e. AutoShutdown, Cost)
"""
AUTO_SHUTDOWN = "AutoShutdown"
COST = "Cost"
class PolicyEvaluatorType(str, Enum):
"""
The evaluator type of the policy (i.e. AllowedValuesPolicy, MaxValuePolicy).
"""
ALLOWED_VALUES_POLICY = "AllowedValuesPolicy"
MAX_VALUE_POLICY = "MaxValuePolicy"
class PolicyFactName(str, Enum):
"""
    The fact name of the policy (e.g. LabVmCount, LabVmSize, MaxVmsAllowedPerLab, etc.).
"""
USER_OWNED_LAB_VM_COUNT = "UserOwnedLabVmCount"
USER_OWNED_LAB_PREMIUM_VM_COUNT = "UserOwnedLabPremiumVmCount"
LAB_VM_COUNT = "LabVmCount"
LAB_PREMIUM_VM_COUNT = "LabPremiumVmCount"
LAB_VM_SIZE = "LabVmSize"
GALLERY_IMAGE = "GalleryImage"
USER_OWNED_LAB_VM_COUNT_IN_SUBNET = "UserOwnedLabVmCountInSubnet"
LAB_TARGET_COST = "LabTargetCost"
ENVIRONMENT_TEMPLATE = "EnvironmentTemplate"
SCHEDULE_EDIT_PERMISSION = "ScheduleEditPermission"
class PolicyStatus(str, Enum):
"""
The status of the policy.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class PremiumDataDisk(str, Enum):
"""
The setting to enable usage of premium data disks.
When its value is 'Enabled', creation of standard or premium data disks is allowed.
When its value is 'Disabled', only creation of standard data disks is allowed.
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class SourceControlType(str, Enum):
"""
The artifact source's type.
"""
VSO_GIT = "VsoGit"
GIT_HUB = "GitHub"
class StorageType(str, Enum):
"""
The storage type for the disk (i.e. Standard, Premium).
"""
STANDARD = "Standard"
PREMIUM = "Premium"
STANDARD_SSD = "StandardSSD"
class TransportProtocol(str, Enum):
"""
Protocol type of the port.
"""
TCP = "Tcp"
UDP = "Udp"
class UsagePermissionType(str, Enum):
"""
Indicates whether public IP addresses can be assigned to virtual machines on this subnet (i.e. Allow, Deny).
"""
DEFAULT = "Default"
DENY = "Deny"
ALLOW = "Allow"
class VirtualMachineCreationSource(str, Enum):
"""
Tells source of creation of lab virtual machine. Output property only.
"""
FROM_CUSTOM_IMAGE = "FromCustomImage"
FROM_GALLERY_IMAGE = "FromGalleryImage"
FROM_SHARED_GALLERY_IMAGE = "FromSharedGalleryImage"
class WindowsOsState(str, Enum):
"""
The state of the Windows OS (i.e. NonSysprepped, SysprepRequested, SysprepApplied).
"""
NON_SYSPREPPED = "NonSysprepped"
SYSPREP_REQUESTED = "SysprepRequested"
SYSPREP_APPLIED = "SysprepApplied"
|
# Programmer: Konstantin Davydov
# Date of creation: 2021.05.03
import sys
import time
"""
Unbeatable TIC-TAC-TOE game vs Computer
"""
# global variables
def global_vars():
global empty_cell, hum_token, comp_token, turn, total_cells, legal_moves, turn_value
empty_cell = ' '
hum_token = 'X'
comp_token = 'O'
total_cells = 9
legal_moves = [1, 2, 3, 4, 5, 6, 7, 8, 9]
turn_value = (5, 1, 3, 7, 9, 2, 4, 6, 8)
def rules():
print('''
Welcome to the game TIC-TAC-TOE where you play against the computer.
***WARNING! THE COMPUTER IS IMPOSSIBLE TO BEAT!***
This is the game board with numerated cells:
1 | 2 | 3
-----------------
4 | 5 | 6
-----------------
7 | 8 | 9
    You play by placing your token ('X' or 'O') in the board's cells.
    The first player picks 'X' as their token.
The game continues until one of the players fills the whole row
(horizontal, vertical or diagonal) with tokens of chosen type.
''')
def ask_yes_no():
"""
gives the user a simple question with a yes/no answer, returns user's input
"""
answer = input().lower()
return answer
def board_empty():
"""
creates a clear board with 9 empty cells
"""
board = [empty_cell for i in range(total_cells)]
return board
def game_board(board):
"""
shows a game board with all turns made so far
"""
print(' ' + board[0] + ' | ' + board[1] + ' | ' + board[2] + ' ')
print('-----------------')
print(' ' + board[3] + ' | ' + board[4] + ' | ' + board[5] + ' ')
print('-----------------')
print(' ' + board[6] + ' | ' + board[7] + ' | ' + board[8] + ' ' + '\n')
def turn_order():
    # keep the module-level tokens in sync with the chosen turn order so congrat() compares correctly
    global hum_token, comp_token
    print('\nDo you want to take the first turn?')
print('Enter \'yes\', \'no\' or enter \'quit\' to exit game.')
answer = ask_yes_no()
while answer not in ['yes', 'no', 'quit']:
print('\nPlease make your choice: ', end='')
answer = ask_yes_no()
if answer == 'yes':
hum_token = 'X'
comp_token = 'O'
elif answer == 'no':
comp_token = 'X'
hum_token = 'O'
elif answer == 'quit':
print('Goodbye!')
input('Press Enter to exit.')
sys.exit()
turn = 'X'
return turn, hum_token, comp_token
def human_turn(board, hum_token):
"""
makes a human player's turn, changes the board accordingly
"""
while True:
try:
human_turn = input('\nEnter the number of the gameboard cell you want to \n'
'place your token in (or enter "quit" to exit):\n')
if human_turn == 'quit':
print('\nGoodbye!')
sys.exit()
human_turn = int(human_turn)
if human_turn in legal_moves:
board[human_turn - 1] = hum_token
legal_moves.remove(human_turn)
break
            elif human_turn not in range(1, total_cells + 1):
                print('\nThere is no such cell.', end='')
                continue
            elif board[human_turn - 1] in ('X', 'O'):
                print('\nCell already occupied.', end='')
                continue
except ValueError:
print('\nImpossible choice value!', end='')
continue
def comp_turn(board, turn, hum_token, comp_token, turn_value, legal_moves):
# computer's turn with simulating thinking process (time.sleep)
print('My turn now! Hmm, let me think', end='', flush=True)
for i in range(3):
time.sleep(0.7)
print('.', end='', flush=True)
time.sleep(0.5)
print(' Here is my turn:', flush=True); time.sleep(0.7)
for token in (comp_token, hum_token):
for value in turn_value:
if value in legal_moves:
board_check = board[:]
board_check[value - 1] = token
if iswinner(board_check, token):
board[value - 1] = comp_token
legal_moves.remove(value)
return
if not iswinner(board, turn):
for value in turn_value:
if value in legal_moves:
board[value - 1] = comp_token
legal_moves.remove(value)
return
def pass_turn(turn):
if turn == 'X':
turn = 'O'
elif turn == 'O':
turn = 'X'
return turn
def iswinner(board, turn):
# check if a winner is defined
wins = ((1, 2, 3), # all possible win combinations
(4, 5, 6),
(7, 8, 9),
(1, 4, 7),
(2, 5, 8),
(3, 6, 9),
(1, 5, 9),
(3, 5, 7))
winner = ''
for row in wins:
if board[row[0] - 1] == board[row[1] - 1] == board[row[2] - 1] == turn:
winner = turn
return winner or False
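# Worked example (illustrative): with board = ['X', 'X', 'X', ' ', 'O', ' ', 'O', ' ', ' ']
# iswinner(board, 'X') matches the (1, 2, 3) row and returns 'X', while
# iswinner(board, 'O') finds no completed row and returns False.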
def congrat(winner):
# congratulate the winner
if winner == comp_token:
print('\n' + '-' * 60)
        print('I won, human! As expected.')
print('-' * 60)
if winner == hum_token:
print('\n' + '-' * 60)
print('You won the game against the computer! Congratulations!')
print('-' * 60)
def start_again():
# start the game again
print('Do you want to start a new game? Enter \'yes\' or \'no\': ', end='')
return ask_yes_no()
def main():
global_vars()
rules()
board = board_empty()
turn, hum_token, comp_token = turn_order()
while not iswinner(board, turn) and empty_cell in board:
if turn == hum_token:
human_turn(board, hum_token)
game_board(board)
if iswinner(board, turn) == hum_token:
congrat(hum_token)
break
turn = pass_turn(turn)
continue
if turn == comp_token:
comp_turn(board, turn, hum_token, comp_token, turn_value, legal_moves)
game_board(board)
if iswinner(board, turn):
congrat(comp_token)
break
turn = pass_turn(turn)
continue
if empty_cell not in board:
print('-' * 60)
print('The game is a DRAW!')
print('-' * 60)
again = start_again()
while again.lower() not in ('yes', 'no'):
again = start_again()
if again.lower() == 'yes':
main()
else:
print('\nGoodbye!')
input('Press Enter to exit.')
sys.exit()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import sys
import inspect
import cProfile
from pstats import Stats
from pathlib import Path
from argparse import ArgumentParser
from typing import List
from multiprocessing import cpu_count
from lib.io import export_csv
from lib.pipeline import PipelineChain
from lib.utils import ROOT
# Step 1: Add your pipeline chain to this import block
from pipelines.demographics.demographics_pipeline import DemographicsPipelineChain
from pipelines.economy.economy_pipeline import EconomyPipelineChain
from pipelines.epidemiology.pipeline_chain import EpidemiologyPipelineChain
from pipelines.geography.geography_pipeline import GeographyPipelineChain
from pipelines.index.index_pipeline import IndexPipelineChain
from pipelines.mobility.mobility_pipeline import MobilityPipelineChain
from pipelines.oxford_government_response.oxford_government_response_pipeline import (
OxfordGovernmentResponsePipelineChain,
)
from pipelines.weather.weather_pipeline import WeatherPipelineChain
# Step 2: After adding the import statement above, add your pipeline chain to this list
all_pipeline_chains: List[PipelineChain] = [
MobilityPipelineChain,
DemographicsPipelineChain,
EconomyPipelineChain,
EpidemiologyPipelineChain,
GeographyPipelineChain,
IndexPipelineChain,
OxfordGovernmentResponsePipelineChain,
WeatherPipelineChain,
]
# Process command-line arguments
arg_parser = ArgumentParser()
arg_parser.add_argument("--only", type=str, default=None)
arg_parser.add_argument("--exclude", type=str, default=None)
arg_parser.add_argument("--verify", type=str, default=None)
arg_parser.add_argument("--profile", action="store_true")
arg_parser.add_argument("--no-progress", action="store_true")
arg_parser.add_argument("--process-count", type=int, default=cpu_count())
args = arg_parser.parse_args()
assert not (
args.only is not None and args.exclude is not None
), "--only and --exclude options cannot be used simultaneously"
# Ensure that there is an output folder to put the data in
(ROOT / "output" / "tables").mkdir(parents=True, exist_ok=True)
(ROOT / "output" / "snapshot").mkdir(parents=True, exist_ok=True)
if args.profile:
profiler = cProfile.Profile()
profiler.enable()
# Run all the pipelines and place their outputs into the output folder
# All the pipelines imported in this file which subclass PipelineChain are run
# The output name for each pipeline chain will be the name of the directory that the chain is in
for pipeline_chain_class in all_pipeline_chains:
pipeline_chain = pipeline_chain_class()
pipeline_path = Path(str(inspect.getsourcefile(type(pipeline_chain))))
pipeline_name = pipeline_path.parent.name.replace("_", "-")
    if args.only and pipeline_name not in args.only.split(","):
continue
if args.exclude and pipeline_name in args.exclude.split(","):
continue
show_progress = not args.no_progress
pipeline_output = pipeline_chain.run(
pipeline_name, verify=args.verify, process_count=args.process_count, progress=show_progress
)
export_csv(pipeline_output, ROOT / "output" / "tables" / "{}.csv".format(pipeline_name))
if args.profile:
stats = Stats(profiler)
stats.strip_dirs()
stats.sort_stats("cumtime")
stats.print_stats(20)
|
from discord.ext import commands
from discord_slash import SlashCommand, SlashContext
from discord_slash.utils.manage_commands import create_choice, create_option
from discord import Client, Intents, Embed
import requests
from dotenv import load_dotenv
import os
load_dotenv(".env")
client = commands.Bot(command_prefix="!")
slash = SlashCommand(client, sync_commands=True)
token = os.getenv("FLOOR_BOT_TOKEN")
def get_activity_section(stats):
output = "```Range Volume Change Average \n"
output = output + "1D".ljust(5, " ") + format_activity_value(stats.get("one_day_volume")) + format_activity_value(stats.get("one_day_change")) + format_activity_value(stats.get("one_day_average_price")) +"\n"
output = output + "7D".ljust(5, " ") + format_activity_value(stats.get("seven_day_volume")) + format_activity_value(stats.get("seven_day_change")) + format_activity_value(stats.get("seven_day_average_price")) +"\n"
output = output + "30D".ljust(5, " ") + format_activity_value(stats.get("thirty_day_volume")) + format_activity_value(stats.get("thirty_day_change")) + format_activity_value(stats.get("thirty_day_average_price")) +"\n"
output = output + "Total" + format_activity_value(stats.get("total_volume")) + format_activity_value("") + format_activity_value(stats.get("average_price")) + "```"
return output
def format_activity_value(value, currency="", padding=17):
formatted_value="0.0"
if isinstance(value, float):
formatted_value = str(round(value, 2))
else:
formatted_value = str(value)
if currency == "DAI":
formatted_value = formatted_value + " "
elif currency == "USDC":
formatted_value = formatted_value + " "
elif formatted_value != "":
formatted_value = formatted_value + " ⧫"
formatted_value = formatted_value.rjust(padding, " ")
return formatted_value
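# Example (illustrative): format_activity_value(1234.5678) rounds the number to "1234.57",
# appends the " ⧫" (ETH) marker because no DAI/USDC currency was passed, and right-justifies
# the result to the default 17-character column used by the activity table above.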
def format_int_value(value, padding=17):
formatted_value="0.0"
formatted_value = str(int(value)).rjust(padding, " ")
return formatted_value
@slash.slash(
name="floorbot",
description="Finds OpenSea floor",
guild_ids=[
# # Dangywing Test Server
849034764190875669,
# ,
# # club-nfts
# 812365773372129311,
# # manzcoin-nftz
# 826820629260533790,
# club-ngmi
762763149728153601
],
options=[
create_option(
name = "customcontract",
description="Paste a contract address here",
required=False,
option_type=3
),
create_option(
name="projects-a-l",
description="Projects starting with A-L",
required=False,
option_type=3,
choices=[
create_choice(name="Alphabettys", value = '0x6d05064fe99e40F1C3464E7310A23FFADed56E20'),
create_choice(name="Animetas", value = '0x18Df6C571F6fE9283B87f910E41dc5c8b77b7da5'),
create_choice(name="Animonkeys", value = '0xA32422dfb5bF85B2084EF299992903eb93FF52B0'),
create_choice(name="Avastar", value = '0xF3E778F839934fC819cFA1040AabaCeCBA01e049'),
create_choice(name="BAYC", value = '0xBC4CA0EdA7647A8aB7C2061c2E118A18a936f13D'),
create_choice(name="Bears On The Block", value = '0x02AA731631c6D7F8241d74F906f5b51724Ab98F8'),
create_choice(name="Bulls", value = '0x3a8778A58993bA4B941f85684D74750043A4bB5f'),
create_choice(name="Chickens", value = '0x8634666bA15AdA4bbC83B9DbF285F73D9e46e4C2'),
create_choice(name="Cool Cats", value = '0x1A92f7381B9F03921564a437210bB9396471050C'),
# create_choice(name="Craniums", value = '0x85f740958906b317de6ed79663012859067E745B'),
create_choice(name="Deadheads", value = '0x6fC355D4e0EE44b292E50878F49798ff755A5bbC'),
# create_choice(name="Elephants", value = '0x613E5136a22206837D12eF7A85f7de2825De1334'),
# create_choice(name="Flufs", value = '0xCcc441ac31f02cD96C153DB6fd5Fe0a2F4e6A68d'),
create_choice(name="FVCK_CRYSTALS", value = '0x7AfEdA4c714e1C0A2a1248332c100924506aC8e6'),
#create_choice(name="Ethlings", value = '0x8A1AbD2E227Db543F4228045dd0aCF658601fedE'),
create_choice(name="Fame Lady Squad", value = '0xf3E6DbBE461C6fa492CeA7Cb1f5C5eA660EB1B47'),
#create_choice(name="Goblin Goons", value = '0x6322834FE489003512A61662044BcFb5Eeb2A035'),
#create_choice(name="Goatz", value = '0x3EAcf2D8ce91b35c048C6Ac6Ec36341aaE002FB9'),
create_choice(name = "Gen.Art", value='0x1ca39c7f0f65b4da24b094a9afac7acf626b7f38'),
create_choice(name="Gutter Cats", value = '0xEdB61f74B0d09B2558F1eeb79B247c1F363Ae452'),
# create_choice(name="Huas", value = '0x495f947276749Ce646f68AC8c248420045cb7b5e'),
create_choice(name="Lazy Lions", value = '0x8943C7bAC1914C9A7ABa750Bf2B6B09Fd21037E0'),
create_choice(name="Lost Pages", value = '0xa7206d878c5c3871826dfdb42191c49b1d11f466'),
create_choice(name="Lucky Maneki", value = '0x14f03368B43E3a3D27d45F84FabD61Cc07EA5da3')
],
),
create_option(
name="projects-m-z",
description="Projects starting with M-Z",
required=False,
option_type=3,
choices=[
create_choice(name="Meebits", value = '0x7Bd29408f11D2bFC23c34f18275bBf23bB716Bc7'),
create_choice(name="Moon Dogs", value = '0xfc007068C862E69213Dc7AA817063B1803D4E941'),
create_choice(name="Pudgy Penguins", value = '0xBd3531dA5CF5857e7CfAA92426877b022e612cf8'),
create_choice(name="Punks Comic", value = '0x5ab21Ec0bfa0B29545230395e3Adaca7d552C948'),
create_choice(name="Purrlinis", value = '0x9759226B2F8ddEFF81583e244Ef3bd13AAA7e4A1'),
create_choice(name="Rabbits", value = '0x91F3114F8818ADe506d0901a44982Dc5c020C99B'),
create_choice(name="Robotos", value = '0x099689220846644F87D1137665CDED7BF3422747'),
create_choice(name="Royal Society of Players (RSOP)", value = '0xB159F1a0920A7f1D336397A52D92da94b1279838'),
create_choice(name="Shibas", value = '0x763864F1A74D748015f45F7c1181B60E62E40804'),
create_choice(name="Slacker Ducks", value = '0xeC516eFECd8276Efc608EcD958a4eAB8618c61e8'),
create_choice(name="Space Poggers", value = '0x4a8B01E437C65FA8612e8b699266c0e0a98FF65c'),
create_choice(name="SSS - Star Sailor Siblings", value = '0x49aC61f2202f6A2f108D59E77535337Ea41F6540'),
create_choice(name="Stoner Cats", value = '0xD4d871419714B778eBec2E22C7c53572b573706e'),
create_choice(name="Strippers", value = '0x9808226ED04e92F9380DA67C5606354FAe5891b0'),
create_choice(name="Theos", value = '0x9E02FFd6643f51aaAFa0f0E2a911Bf25EF2684Cb'),
create_choice(name="Top Dog Beach Club", value = '0x6F0365ca2c1Dd63473F898A60f878A07e0f68A26'),
create_choice(name="Vogu", value = '0x18c7766A10df15Df8c971f6e8c1D2bbA7c7A410b'),
create_choice(name="Vox", value = '0xad9Fd7cB4fC7A0fBCE08d64068f60CbDE22Ed34C'),
create_choice(name="Wizards", value = '0x521f9C7505005CFA19A8E5786a9c3c9c9F5e6f42'),
create_choice(name="World of Women", value = '0xe785E82358879F061BC3dcAC6f0444462D4b5330'),
create_choice(name="Zunks", value = '0x031920cc2D9F5c10B444FD44009cd64F829E7be2'),
],
),
],
)
async def floor_finder(ctx: SlashContext, **kwargs):
for CONTRACT_ADDRESS in kwargs.values():
await ctx.defer(hidden=False)
data_url = "https://api.opensea.io/api/v1/asset/" + str(CONTRACT_ADDRESS) + "/1"
response = requests.get(data_url)
json_data = response.json()
collection_slug = json_data["collection"].get("slug")
floor_price = json_data["collection"]["stats"].get("floor_price")
embed = Embed(
title="View on Opensea ",
type="rich",
url="https://opensea.io/assets/" + str(collection_slug),
)
embed.set_author(
name= str(collection_slug) + " Floor Price: " + str(floor_price) + " ETH", url="", icon_url=""
)
embed.set_footer(
text="Data provided by OpenSea",
icon_url="https://storage.googleapis.com/opensea-static/Logomark/Logomark-Blue.png",
)
await ctx.send(embed=embed)
@slash.slash(
name="projectstats",
description="Finds an OpenSea floor per project",
guild_ids=[
# # Dangywing Test Server
849034764190875669,
# ,
# # club-nfts
# 812365773372129311,
# # manzcoin-nftz
# 826820629260533790,
# club-ngmi
762763149728153601
],
options=[
create_option(
name="projects-a-h",
description="Projects starting with A-H",
required=False,
option_type=3,
choices=[
create_choice(
name="Digital Represenation of a physical cryptocurrency aka the Manzcoin NFTz",
value="manzcoin-nftz",
),
create_choice(name="Digital Represenation of a physical cryptocurrency aka the Manzcoin NFTz", value = '0x495f947276749Ce646f68AC8c248420045cb7b5e'),
create_choice(name="Aliens", value = '0x4581649aF66BCCAeE81eebaE3DDc0511FE4C5312'),
create_choice(name="Alphabettys", value = '0x6d05064fe99e40F1C3464E7310A23FFADed56E20'),
create_choice(name="Animetas", value = '0x18Df6C571F6fE9283B87f910E41dc5c8b77b7da5'),
create_choice(name="Animonkeys", value = '0xA32422dfb5bF85B2084EF299992903eb93FF52B0'),
create_choice(name="Avastar", value = '0xF3E778F839934fC819cFA1040AabaCeCBA01e049'),
create_choice(name="BAYC", value = '0xBC4CA0EdA7647A8aB7C2061c2E118A18a936f13D'),
create_choice(name="Bears On The Block", value = '0x02AA731631c6D7F8241d74F906f5b51724Ab98F8'),
create_choice(name="Bulls", value = '0x3a8778A58993bA4B941f85684D74750043A4bB5f'),
create_choice(name="Chickens", value = '0x8634666bA15AdA4bbC83B9DbF285F73D9e46e4C2'),
create_choice(name="Cool Cats", value = '0x1A92f7381B9F03921564a437210bB9396471050C'),
create_choice(name="Craniums", value = '0x85f740958906b317de6ed79663012859067E745B'),
create_choice(name="Deadheads", value = '0x6fC355D4e0EE44b292E50878F49798ff755A5bbC'),
create_choice(name="Elephants", value = '0x613E5136a22206837D12eF7A85f7de2825De1334'),
create_choice(name="Flufs", value = '0xCcc441ac31f02cD96C153DB6fd5Fe0a2F4e6A68d'),
create_choice(name="FVCK_CRYSTALS", value = '0x7AfEdA4c714e1C0A2a1248332c100924506aC8e6'),
create_choice(name="Ethlings", value = '0x8A1AbD2E227Db543F4228045dd0aCF658601fedE'),
create_choice(name="Fame Lady Squad", value = '0xf3E6DbBE461C6fa492CeA7Cb1f5C5eA660EB1B47'),
create_choice(name="Goblin Goons", value = '0x6322834FE489003512A61662044BcFb5Eeb2A035'),
create_choice(name="Goatz", value = '0x3EAcf2D8ce91b35c048C6Ac6Ec36341aaE002FB9'),
create_choice(name="Gutter Cats", value = '0xEdB61f74B0d09B2558F1eeb79B247c1F363Ae452'),
create_choice(name="Huas", value = '0x495f947276749Ce646f68AC8c248420045cb7b5e'),
create_choice(name="Lazy Lions", value = '0x8943C7bAC1914C9A7ABa750Bf2B6B09Fd21037E0'),
create_choice(name="Lucky Maneki", value = '0x14f03368B43E3a3D27d45F84FabD61Cc07EA5da3'),
],
),
create_option(
name="projects-l-z",
description="Projects starting with L-Z",
required=False,
option_type=3,
choices=[
create_choice(name="Meebits", value = '0x7Bd29408f11D2bFC23c34f18275bBf23bB716Bc7'),
create_choice(name="Moon Dogs", value = '0xfc007068C862E69213Dc7AA817063B1803D4E941'),
create_choice(name="Pudgy Penguins", value = '0xBd3531dA5CF5857e7CfAA92426877b022e612cf8'),
create_choice(name="Punks Comic", value = '0x5ab21Ec0bfa0B29545230395e3Adaca7d552C948'),
create_choice(name="Purrlinis", value = '0x9759226B2F8ddEFF81583e244Ef3bd13AAA7e4A1'),
create_choice(name="Rabbits", value = '0x91F3114F8818ADe506d0901a44982Dc5c020C99B'),
create_choice(name="Robotos", value = '0x099689220846644F87D1137665CDED7BF3422747'),
create_choice(name="Royal Society of Players (RSOP)", value = '0xB159F1a0920A7f1D336397A52D92da94b1279838'),
create_choice(name="Shibas", value = '0x763864F1A74D748015f45F7c1181B60E62E40804'),
create_choice(name="Slacker Ducks", value = '0xeC516eFECd8276Efc608EcD958a4eAB8618c61e8'),
create_choice(name="Space Poggers", value = '0x4a8B01E437C65FA8612e8b699266c0e0a98FF65c'),
create_choice(name="SSS - Star Sailor Siblings", value = '0x49aC61f2202f6A2f108D59E77535337Ea41F6540'),
create_choice(name="Stoner Cats", value = '0xD4d871419714B778eBec2E22C7c53572b573706e'),
create_choice(name="Strippers", value = '0x9808226ED04e92F9380DA67C5606354FAe5891b0'),
create_choice(name="Theos", value = '0x9E02FFd6643f51aaAFa0f0E2a911Bf25EF2684Cb'),
create_choice(name="Time", value = '0xdd69da9a83cedc730bc4d3c56e96d29acc05ecde'),
create_choice(name="Top Dog Beach Club", value = '0x6F0365ca2c1Dd63473F898A60f878A07e0f68A26'),
create_choice(name="Vogu", value = '0x18c7766A10df15Df8c971f6e8c1D2bbA7c7A410b'),
create_choice(name="Vox", value = '0xad9Fd7cB4fC7A0fBCE08d64068f60CbDE22Ed34C'),
create_choice(name="Wizards", value = '0x521f9C7505005CFA19A8E5786a9c3c9c9F5e6f42'),
create_choice(name="World of Women", value = '0xe785E82358879F061BC3dcAC6f0444462D4b5330'),
create_choice(name="Zunks", value = '0x031920cc2D9F5c10B444FD44009cd64F829E7be2'),
],
),
],
)
async def floor(ctx: SlashContext, **kwargs):
for CONTRACT_ADDRESS in kwargs.values():
await ctx.defer(hidden=False)
data_url = "https://api.opensea.io/api/v1/asset/" + str(CONTRACT_ADDRESS) + "/1"
response = requests.get(data_url)
json_data = response.json()
# print(json_data)
collection = json_data["collection"]
collection_slug = collection.get("slug")
collection_name = collection.get("name")
stats = collection.get("stats")
embed = Embed(
title=str(collection_name) + " Collection (__View__)",
type="rich",
url="https://opensea.io/assets/" + str(collection_slug),
)
embed.add_field(
name="__# of Owners__",
value=format_int_value(stats.get("num_owners")),
inline="true",
)
embed.add_field(
name="__Total Supply__",
value=format_int_value(stats.get("total_supply")),
inline="true",
)
embed.add_field(
name="__Total Sales__",
value=format_int_value(stats.get("total_sales")),
inline="true",
)
embed.add_field(
name="__Floor Price__ ",
value=format_activity_value(stats.get("floor_price")),
inline="true",
)
embed.add_field(
name="__Average Price__",
value=format_activity_value(stats.get("average_price")),
inline="true",
)
embed.add_field(
name="__Total Volumne__",
value=format_activity_value(stats.get("total_volume")),
inline="true",
)
activity_section = get_activity_section(stats)
embed.add_field(name="Sales Activity", value=activity_section, inline="false")
embed.set_footer(
text="Data provided by OpenSea",
icon_url="https://storage.googleapis.com/opensea-static/Logomark/Logomark-Blue.png",
)
await ctx.send(embed=embed)
client.run(token)
|
from unittest import TestCase
from src.models.db import *
from src.models.research_group import *
from src.config import *
class TestResearchGroupDataAccess(TestCase):
def _connect(self):
connection = DBConnection(dbname='test_db', dbuser=config_data['dbuser'],
dbpass=config_data['dbpass'], dbhost=config_data['dbhost'])
return connection
def test_connection(self):
connection = self._connect()
connection.close()
print("Research Group Connection: OK")
def test_get_group_names(self):
connection = self._connect()
connection.get_connection().commit()
dao = ResearchGroupDataAccess(dbconnect=connection)
objects = dao.get_group_names(True)
self.assertEqual('Boos'.upper(),
objects[-1].upper())
objects = dao.get_group_names(False)
self.assertEqual('Konijn'.upper(),
objects[-1].upper())
connection.close()
def test_get_research_groups(self):
connection = self._connect()
connection.get_connection().commit()
dao = ResearchGroupDataAccess(dbconnect=connection)
objects = dao.get_research_groups(True)
self.assertEqual('Boos'.upper(),
objects[-1].name.upper())
objects = dao.get_research_groups(False)
self.assertEqual('Konijn'.upper(),
objects[-1].name.upper())
connection.close()
def test_get_research_group(self):
connection = self._connect()
connection.get_connection().commit()
dao = ResearchGroupDataAccess(dbconnect=connection)
        group = dao.get_research_group('Boos')
        self.assertEqual('Boos'.upper(),
                         group.name.upper())
connection.close()
def test_add_research_group(self):
connection = self._connect()
connection.get_cursor().execute('DELETE from research_group where name=\'test_ins\'')
connection.get_connection().commit()
dao = ResearchGroupDataAccess(dbconnect=connection)
obj = ResearchGroup(name='test_ins', abbreviation= "test", logo_location=None, description_id=1, address=None, telephone_number=None, study_field='chocolate eating', is_active=True)
dao.add_research_group(obj)
objects = dao.get_research_groups(True)
self.assertEqual('test_ins'.upper(),
objects[-1].name.upper())
connection.get_cursor().execute('DELETE from research_group where name=\'test_ins\'')
connection.get_connection().commit()
connection.close()
def test_update_research_group(self):
connection = self._connect()
connection.get_cursor().execute('DELETE from research_group where name=\'test_ins\'')
connection.get_connection().commit()
dao = ResearchGroupDataAccess(dbconnect=connection)
obj = ResearchGroup(name='test_ins', abbreviation="test", logo_location=None, description_id=1, address=None,
telephone_number=None, study_field='chocolate eating', is_active=True)
dao.add_research_group(obj)
objects = dao.get_research_groups(True)
self.assertEqual('test'.upper(),
objects[-1].abbreviation.upper())
obj2 = ResearchGroup(name='test_ins', abbreviation="test2", logo_location=None, description_id=1, address=None,
telephone_number=None, study_field='chocolate eating', is_active=True)
dao.update_research_group(obj.name, obj2)
objects = dao.get_research_groups(True)
self.assertEqual('test2'.upper(),
objects[-1].abbreviation.upper())
connection.get_cursor().execute('DELETE from research_group where name=\'test_ins\'')
connection.get_connection().commit()
connection.close()
def test_set_active(self):
connection = self._connect()
connection.get_cursor().execute('DELETE from research_group where name=\'test_ins\'')
connection.get_connection().commit()
dao = ResearchGroupDataAccess(dbconnect=connection)
obj = ResearchGroup(name='test_ins', abbreviation="test", logo_location=None, description_id=1, address=None,
telephone_number=None, study_field='chocolate eating', is_active=True)
dao.add_research_group(obj)
objects = dao.get_research_groups(True)
self.assertEqual('test_ins'.upper(),
objects[-1].name.upper())
dao.set_active(obj.name, False)
objects = dao.get_research_groups(False)
self.assertEqual('test_ins'.upper(),
objects[-1].name.upper())
connection.get_cursor().execute('DELETE from research_group where name=\'test_ins\'')
connection.get_connection().commit()
connection.close()
|
"""Lock to prevent sending multiple command at once to Epson projector."""
import time
from .const import (TURN_ON, TURN_OFF, INV_SOURCES, SOURCE, ALL, TIMEOUT_TIMES)
class Lock:
def __init__(self):
"""Init lock for sending request to projector when it is busy."""
self._isLocked = False
self._timer = 0
self._operation = False
def setLock(self, command):
"""Set lock on requests."""
if command in (TURN_ON, TURN_OFF):
self._operation = command
elif command in INV_SOURCES:
self._operation = SOURCE
else:
self._operation = ALL
self._isLocked = True
self._timer = time.time()
def __unlock(self):
"""Unlock sending requests to projector."""
self._operation = False
self._timer = 0
self._isLocked = False
def checkLock(self):
"""
Lock checking.
Check if there is lock pending and check if enough time
passed so requests can be unlocked.
"""
if self._isLocked:
if (time.time() - self._timer) > TIMEOUT_TIMES[self._operation]:
self.__unlock()
return False
return True
return False
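# A minimal usage sketch (assumptions: `send_command` stands in for the projector call
# that normally lives in the main epson module; TURN_ON and TIMEOUT_TIMES come from const):
#
#     lock = Lock()
#     if not lock.checkLock():       # projector is free, or the previous lock timed out
#         lock.setLock(TURN_ON)      # block further requests for TIMEOUT_TIMES[TURN_ON]
#         send_command(TURN_ON)      # hypothetical request to the projector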
|
import pygame, sys
from dashboard.view import DashboardView
from dashboard.arduino import ArduinoConnection
dashboard = DashboardView(size=(600,400))
def update_data(data):
print(data)
dashboard.components["temp"].set_value(data["temp"])
dashboard.components["volt"].set_value(data["voltage"])
dashboard.render()
connection = ArduinoConnection(update_data)
connection.start()
def exit():
connection.close()
dashboard.close()
sys.exit()
dashboard.bind_event(pygame.QUIT, exit)
dashboard.render()
#Run the game loop
while True:
dashboard.handle_events()
|
"""
RamondettiDavide Spa
====================
DuoConsole Python 20
DuoConsole is a free open source console in python and it execute some commands
"""
import sys
import webbrowser
print("************************");
print("* RamondettiDavide Spa *");
print("* DuoConsole Python 20 *");
print("************************");
def CommandLine() :
    command = input("DuoConsole:~ root$ ");
    if (command == "") :
        print("DuoConsole Cannot Execute This Code");
    elif (command == "print") :
        wr = input("DuoConsole:~ root$ print ");
        print(wr);
    elif (command == "echo") :
        print("To execute the command ECHO you must buy DuoConsole Platinum!");
    elif (command == "whois") :
        whois = input("DuoConsole:~ root$ whois ");
        # open the who.is lookup page in the default web browser
        webbrowser.open("http://who.is/whois/" + whois);
    else :
        print("Buy the PRO to Use Some Codes.");
CommandLine();
|
# -*- coding: utf-8 -*-
"""Top-level package for systeminfo."""
__author__ = """Darragh Crotty"""
__email__ = 'darragh@darraghcrotty.com'
__version__ = '0.1.0'
|
import json
from tqdm import tqdm
'''
The first round of ANEMONE predictions contains many over-optimistic, completely wrong cases.
Until the new results are ready, filter this first-round output with a heuristic so the results are not too poor.
'''
whole_simple_prediction_store_dir = 'C:/workspace/服务器备份/ANEMONE/whole_simple_prediction/'
whole_simple_prediction_file_path = f'{whole_simple_prediction_store_dir}all_predictions.json'
def ANEMONE_predictions_match(mention: str, api: str):
'''
    Judge whether a predicted (mention, api) pair is most likely correct.
'''
mention_tokens = [token.lower() for token in mention.split(
' ') if token.isalpha() and len(token) > 2]
api = api.lower()
    min_match_tokens = len(mention_tokens) // 2 + 1 if len(
        mention_tokens) <= 3 else len(mention_tokens) // 2 + 2  # heuristic rule
if min_match_tokens == 0:
min_match_tokens = 1
match = 0
for token in mention_tokens:
if token in api:
match += 1
elif token.endswith('s') and token[:len(token) - 1] in api:
match += 1
return match >= min_match_tokens
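# Worked example (illustrative): for mention "create file object" and api
# "java.io.File.createNewFile", the kept tokens are ['create', 'file', 'object']
# (3 tokens, so min_match_tokens = 3 // 2 + 1 = 2); 'create' and 'file' both occur in
# the lower-cased api string, so match = 2 >= 2 and the pair passes the filter.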
def filter_whole_predictions(whole_simple_prediction_file_path: str, filterd_path: str):
with open(whole_simple_prediction_file_path, 'r', encoding='utf-8') as rf, open(filterd_path, 'w', encoding='utf-8') as wf:
predictions = json.load(rf)
counter = 0
filterd_predictions = {}
for thread_id, preds in tqdm(predictions.items()):
filterd = {}
for mention, api in preds.items():
if ANEMONE_predictions_match(mention, api):
filterd[mention] = api
counter += 1
if len(filterd.keys()) <= 0:
continue
filterd_predictions[thread_id] = filterd
print("data amount: ", counter)
print('thread count: ', len(filterd_predictions))
json.dump(filterd_predictions, wf, indent=2, ensure_ascii=False)
if __name__ == "__main__":
filtered_path = f'{whole_simple_prediction_store_dir}filterd_predictions.json'
filter_whole_predictions(whole_simple_prediction_file_path, filtered_path)
|
#-*- coding:utf-8 -*-
import time, threading
from SocketServer import TCPServer, BaseRequestHandler
import traceback
from PythonSocketBBS import SocketServerBBS
def loop(a):
print 'thread is running...'
#
time.sleep(1)
a.serve_forever()
print 'thread ended.'
def socketMethod():
print 'Method is running...'
hostname=""
port=9996
print port
a=SocketServerBBS.PythonChatServer((hostname,port),SocketServerBBS.RequestHandler)
t = threading.Thread(target=loop,args=(a,))
t.start()
while 1:
time.sleep(1)
b=SocketServerBBS.Sclose()
print b
print (1==b)
if (1==b):
print 'close'
a.shutdown()
a.server_close()
break
# t.join()
print 'Method ended.'
if __name__ == "__main__":
socketMethod()
|
import requests
import json
import os
def get_wicked_data_before_src():
with open('final_output/remaining_list.csv') as file:
packages = [package.strip() for package in file.readlines()]
# write all the packages in a csv file for wicked input
with open('wicked_input.csv','w') as file:
for package in packages:
file.write(package + "\n")
url = 'https://wicked-pedigree-service.w3ibm.mybluemix.net/api/pedigree/check/file'
files = {'file': open('wicked_input.csv', 'rb')}
try:
r = requests.post(url, files=files)
except Exception as e:
print ("Connection to Wicked failed! Check system proxy settings.")
#print e
exit()
try:
file = open("file.json", "w")
file.write(r.text)
file.close()
print ("wicked response received!")
except:
print ("Error while writing wicked response to file!")
exit()
try:
r.connection.close()
except Exception as e:
#print e
exit()
# This file contains wicked response
jsonFile = open('file.json', 'r')
jsonData = json.load(jsonFile)
jsonFile.close()
# print(type(jsonData))
# return dictionary with key as package_name and value as the entire line to be written in Final_license_list.csv file
package_names = _extract_licenses(jsonData)
with open('final_output/Final_license_list.csv','a') as file:
for key, value in package_names.items():
file.write( key + " | ")
file.write( value + '\n' )
packages.remove(key)
_update_remaining_list( {}, packages)
os.remove('file.json')
def get_wicked_data_after_src(dict_packages, src_packages, nosrc_packages):
src_packages = set(src_packages)
with open('wicked_input.csv','w') as file:
for package in src_packages:
file.write(package + '\n')
# write all the src packages in a csv file for wicked input
with open('wicked_input.csv','a') as file:
for package in nosrc_packages:
file.write(package + '\n')
url = 'https://wicked-pedigree-service.w3ibm.mybluemix.net/api/pedigree/check/file'
files = {'file': open('wicked_input.csv', 'rb')}
try:
r = requests.post(url, files=files)
except Exception as e:
print ("Connection to Wicked failed! Check system proxy settings.")
#print e
exit()
try:
file = open("file.json", "w")
file.write(r.text)
file.close()
print ("wicked response received!")
except:
print ("Error while writing wicked response to file!")
exit()
try:
r.connection.close()
except Exception as e:
#print e
exit()
# This file contains wicked response
jsonFile = open('file.json', 'r')
jsonData = json.load(jsonFile)
jsonFile.close()
# return dictionary with key as package_name and value as the entire line to be written in Final_license_list.csv file
package_names = _extract_licenses(jsonData)
    # update the package names to their original names
    dict_packages = _update_original_names(package_names, dict_packages)
_update_remaining_list(dict_packages, nosrc_packages)
os.remove('file.json')
def _extract_licenses(values):
package_names = {}
for i, item in enumerate(values):
#print(i, item, type(item))
for key, value in item.items():
if(key=='possiblyRelatedPackages') and value!=[]:
for p in item['possiblyRelatedPackages']:
line = ""
#package_names.append(p['awsomName'].lower())
#line = line + p['awsomName']
line = line + ' | ' + p['license']
line = line + ' | ' + "WICC4D"
if p['pedigreeReviewed']:
line = line + ' | ' + 'NA'
else:
line = line + ' | ' + 'NA'
line = line + ' | ' + 'NA'
package_names[p['awsomName'].lower()] = line
break
return package_names
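# Illustrative sketch of the response shape this parser expects, reconstructed only from
# the keys accessed above (the real Wicked payload may carry additional fields):
#
#     [
#       {"possiblyRelatedPackages": [
#           {"awsomName": "SomePackage", "license": "MIT", "pedigreeReviewed": true}
#       ]},
#       ...
#     ]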
def _update_original_names(package_names, dict_packages):
remove_keys = []
with open('final_output/Final_license_list.csv','a') as file:
for key, value in dict_packages.items():
if value in package_names.keys():
file.write( key + " | " + value )
file.write( package_names[value] + '\n' )
remove_keys.append(key)
# remove the packages for which licenses have been found after source name change
for key in remove_keys:
del dict_packages[key]
return dict_packages
def _update_remaining_list(dict_packages, nosrc_packages):
with open('final_output/remaining_list.csv', 'w') as file:
for value in dict_packages.keys():
file.write(value.lower()+'\n')
for value in nosrc_packages:
file.write(value.lower()+'\n')
|
# Base class for "other" kinetic (radius + momentum + time) quantities
#
import matplotlib.pyplot as plt
import numpy as np
from . OutputException import OutputException
from . KineticQuantity import KineticQuantity
class OtherKineticQuantity(KineticQuantity):
def __init__(self, name, data, description, grid, output, momentumgrid=None):
"""
Constructor.
"""
attr = {'description': description}
super(OtherKineticQuantity, self).__init__(name=name, data=data, grid=grid, attr=attr, output=output, momentumgrid=momentumgrid)
self.time = grid.t[1:]
def __repr__(self):
"""
Convert this object to an "official" string.
"""
#s = self.__str__()
return self.__str__()
def __str__(self):
"""
Convert this object to a string.
"""
return '({}) Other kinetic quantity of size NT x NR x NP2 x NP1 = {} x {} x {} x {}'.format(self.name, self.data.shape[0], self.data.shape[1], self.data.shape[2], self.data.shape[3])
def __getitem__(self, index):
"""
Direct access to data.
"""
return self.data[index]
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Description:TikTokMulti.py
@Date :2022/01/29 20:23:37
@Author :JohnserfSeed
@version :1.2.5
@License :(C)Copyright 2019-2022, Liugroup-NLPR-CASIA
@Github :https://github.com/johnserf-seed
@Mail :johnserfseed@gmail.com
'''
import requests,json,os,time,configparser,re,sys,argparse
class TikTok():
    # initialization
def __init__(self):
self.headers = {
'user-agent': 'Mozilla/5.0 (Linux; Android 8.0; Pixel 2 Build/OPD3.170816.012) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Mobile Safari/537.36 Edg/87.0.664.66'
}
        # fetch all videos
self.Isend = False
self.out_Print()
        # draw the layout
print("#" * 120)
print(
"""
TikTokDownload V1.2.5
        Instructions:
                1. The program currently supports command-line usage and config-file operation; a GUI preview build has been released
                2. Command line: 1) add the program path to your environment variables
                                2) run: TikTokMulti -u https://v.douyin.com/JtcjTwo/
                3. Config file: 1) before running, edit the conf.ini file in this directory as required
                                2) follow the console output
                4. For any bugs or feedback, please open an issue at https://github.com/Johnserf-Seed/TikTokDownload/issues
                5. The GUI preview build is now available and easier to use: https://github.com/Johnserf-Seed/TikTokDownload/tags
        Note:   in-app short share links and web long links are both supported.
"""
)
print("#" * 120)
print('\r')
        # user homepage  # save path  # downloads per page  # download audio  # download mode  # saved nickname  # like count
self.uid = '';self.save = '';self.count = '';self.musicarg = '';self.mode = '';self.nickname = '';self.like_counts = 0
        # check for the config file
if os.path.isfile("conf.ini") == True:
pass
else:
print('[ 提示 ]:没有检测到配置文件,生成中!\r')
try:
self.cf = configparser.ConfigParser()
# 往配置文件写入内容
self.cf.add_section("url")
self.cf.set("url", "uid", "https://v.douyin.com/JcjJ5Tq/")
self.cf.add_section("music")
self.cf.set("music", "musicarg", "yes")
self.cf.add_section("count")
self.cf.set("count", "count", "35")
self.cf.add_section("save")
self.cf.set("save", "url", ".\\Download\\")
self.cf.add_section("mode")
self.cf.set("mode", "mode", "post")
with open("conf.ini", "a+") as f:
self.cf.write(f)
print('[ 提示 ]:生成成功!\r')
except:
input('[ 提示 ]:生成失败,正在为您下载配置文件!\r')
r =requests.get('https://gitee.com/johnserfseed/TikTokDownload/raw/main/conf.ini')
with open("conf.ini", "a+") as conf:
conf.write(r.content)
sys.exit()
        # instantiate a parser to read the config file
self.cf = configparser.ConfigParser()
        # read with utf-8 to avoid encoding errors
self.cf.read("conf.ini", encoding="utf-8")
def setting(self,uid,music,count,dir,mode):
"""
        @description  :  set the command-line parameters
---------
        @param  :  uid user homepage, music download audio, count downloads per page, dir directory, mode mode
-------
@Returns : None
-------
"""
if uid != None:
if uid == None:
print('[ 警告 ]:--user不能为空')
pass
else:
self.uid = uid;self.save = dir;self.count=count;self.musicarg=music;self.mode=mode
print('[ 提示 ]:读取命令完成!\r')
self.judge_link()
        # no command received
else:
print('[ 警告 ]:未检测到命令,将使用配置文件进行批量下载!')
            # read the save path
self.save = self.cf.get("save", "url")
            # read how many videos to download per page
self.count = int(self.cf.get("count", "count"))
            # read whether to also download the audio
self.musicarg = self.cf.get("music", "musicarg")
            # read the user homepage url
self.uid = self.cf.get("url", "uid")
            # read the download mode
self.mode = self.cf.get("mode", "mode")
print('[ 提示 ]:读取本地配置完成!\r')
input('[ 提示 ]:批量下载直接回车:')
self.judge_link()
def out_Print(self):
print(r'''
████████╗██╗██╗ ██╗████████╗ ██████╗ ██╗ ██╗██████╗ ██████╗ ██╗ ██╗███╗ ██╗██╗ ██████╗ █████╗ ██████╗
╚══██╔══╝██║██║ ██╔╝╚══██╔══╝██╔═══██╗██║ ██╔╝██╔══██╗██╔═══██╗██║ ██║████╗ ██║██║ ██╔═══██╗██╔══██╗██╔══██╗
██║ ██║█████╔╝ ██║ ██║ ██║█████╔╝ ██║ ██║██║ ██║██║ █╗ ██║██╔██╗ ██║██║ ██║ ██║███████║██║ ██║
██║ ██║██╔═██╗ ██║ ██║ ██║██╔═██╗ ██║ ██║██║ ██║██║███╗██║██║╚██╗██║██║ ██║ ██║██╔══██║██║ ██║
██║ ██║██║ ██╗ ██║ ╚██████╔╝██║ ██╗██████╔╝╚██████╔╝╚███╔███╔╝██║ ╚████║███████╗╚██████╔╝██║ ██║██████╔╝
╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝╚═════╝ ╚═════╝ ╚══╝╚══╝ ╚═╝ ╚═══╝╚══════╝ ╚═════╝ ╚═╝ ╚═╝╚═════╝''')
    # extract the url from pasted text
def Find(self, string):
        # findall() returns the substrings matching the regular expression
url = re.findall(
'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', string)
return url
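    # Example (illustrative): Find('check this video https://v.douyin.com/JcjJ5Tq/ copy this link')
    # returns ['https://v.douyin.com/JcjJ5Tq/'], i.e. only the url part of a pasted share text.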
    # resolve the user homepage api link
def judge_link(self):
        # distinguish short links from long links
if self.uid[0:20] == 'https://v.douyin.com':
r = requests.get(url = self.Find(self.uid)[0])
print('[ 提示 ]:为您下载多个视频!\r')
            # get the user's sec_uid
for one in re.finditer(r'user/([\d\D]*?)\?',str(r.url)):
key = one.group(1)
# key = re.findall('/user/(.*?)\?', str(r.url))[0]
print('[ 提示 ]:用户的sec_id=%s\r' % key)
else:
r = requests.get(url = self.Find(self.uid)[0])
print('[ 提示 ]:为您下载多个视频!\r')
            # get the user's sec_uid
            # some links carry a ?previous_page=app_code_link parameter, so filter twice to keep the key clean
            # original pattern: user/([\d\D]*?)([?])
try:
for one in re.finditer(r'user\/([\d\D]*)([?])',str(r.url)):
key = one.group(1)
except:
for one in re.finditer(r'user\/([\d\D]*)',str(r.url)):
key = one.group(1)
print('[ 提示 ]:用户的sec_id=%s\r' % key)
        # cursor for the first request
max_cursor = 0
        # build the url for the first request
api_post_url = 'https://www.iesdouyin.com/web/api/v2/aweme/%s/?sec_uid=%s&count=%s&max_cursor=%s&aid=1128&_signature=PDHVOQAAXMfFyj02QEpGaDwx1S&dytk=' % (
self.mode, key, str(self.count), max_cursor)
response = requests.get(url = api_post_url, headers = self.headers)
html = json.loads(response.content.decode())
self.nickname = html['aweme_list'][0]['author']['nickname']
if not os.path.exists(self.save + self.mode + "\\" + self.nickname):
os.makedirs(self.save + self.mode + "\\" + self.nickname)
self.get_data(api_post_url, max_cursor)
return api_post_url,max_cursor,key
    # fetch the first batch of api data
def get_data(self, api_post_url, max_cursor):
        # number of attempts
index = 0
        # holds the api data
result = []
while result == []:
index += 1
print('[ 提示 ]:正在进行第 %d 次尝试\r' % index)
time.sleep(0.3)
response = requests.get(
url = api_post_url, headers = self.headers)
html = json.loads(response.content.decode())
# with open('r.json', 'wb')as f:
# f.write(response.content)
if self.Isend == False:
                # cursor for the next page
print('[ 用户 ]:',str(self.nickname),'\r')
max_cursor = html['max_cursor']
result = html['aweme_list']
print('[ 提示 ]:抓获数据成功!\r')
                # process the video info from the first page
self.video_info(result, max_cursor)
else:
max_cursor = html['max_cursor']
self.next_data(max_cursor)
# self.Isend = True
print('[ 提示 ]:此页无数据,为您跳过......\r')
return result,max_cursor
    # next page
def next_data(self,max_cursor):
        # get the decoded original url
r = requests.get(url = self.Find(self.uid)[0])
        # get the user's sec_uid
key = re.findall('/user/(.*?)\?', str(r.url))[0]
if not key:
key = r.url[28:83]
        # build the url for the next request
api_naxt_post_url = 'https://www.iesdouyin.com/web/api/v2/aweme/%s/?sec_uid=%s&count=%s&max_cursor=%s&aid=1128&_signature=RuMN1wAAJu7w0.6HdIeO2EbjDc&dytk=' % (
self.mode, key, str(self.count), max_cursor)
index = 0
result = []
while self.Isend == False:
            # back at the first page, so stop
if max_cursor == 0:
self.Isend = True
return
index += 1
print('[ 提示 ]:正在对', max_cursor, '页进行第 %d 次尝试!\r' % index)
time.sleep(0.3)
response = requests.get(url = api_naxt_post_url, headers = self.headers)
html = json.loads(response.content.decode())
if self.Isend == False:
                # cursor for the next page
max_cursor = html['max_cursor']
result = html['aweme_list']
print('[ 提示 ]:%d页抓获数据成功!\r' % max_cursor)
                # process the video info from the next page
self.video_info(result, max_cursor)
else:
                self.Isend = True
print('[ 提示 ]:%d页抓获数据失败!\r' % max_cursor)
# sys.exit()
    # process video info
def video_info(self, result, max_cursor):
        # video descriptions  # watermark-free video links  # video ids  # author nicknames  # large covers
author_list = [];video_list = [];aweme_id = [];nickname = [];# dynamic_cover = []
for v in range(self.count):
try:
author_list.append(str(result[v]['desc']))
video_list.append(str(result[v]['video']['play_addr']['url_list'][0]))
aweme_id.append(str(result[v]['aweme_id']))
nickname.append(str(result[v]['author']['nickname']))
# dynamic_cover.append(str(result[v]['video']['dynamic_cover']['url_list'][0]))
except Exception as error:
# print(error)
pass
self.videos_download(author_list, video_list, aweme_id, nickname, max_cursor)
return self,author_list,video_list,aweme_id,nickname,max_cursor
    # check whether a video has already been downloaded
def check_info(self, nickname):
if nickname == []:
return
else:
v_info = os.listdir((self.save + self.mode + "\\" + nickname))
return v_info
    # download audio and video
def videos_download(self, author_list, video_list, aweme_id, nickname, max_cursor):
        # create the download directory if it does not exist
try:
os.makedirs(self.save + self.mode + "\\" + nickname[0])
except:
pass
v_info = self.check_info(self.nickname)
for i in range(self.count):
            # ordering index for liked videos
self.like_counts += 1
            # fetch the api info for a single video
try:
jx_url = f'https://www.iesdouyin.com/web/api/v2/aweme/iteminfo/?item_ids={aweme_id[i]}' # 官方接口
js = json.loads(requests.get(
url = jx_url,headers=self.headers).text)
creat_time = time.strftime("%Y-%m-%d %H.%M.%S", time.localtime(js['item_list'][0]['create_time']))
except Exception as error:
# print(error)
pass
            # check each time whether the video was already downloaded
try:
if creat_time + author_list[i] + '.mp4' in v_info:
print('[ 提示 ]:', author_list[i], '[文件已存在,为您跳过]', end = "") # 开始下载,显示下载文件大小
for i in range(20):
print(">",end = '', flush = True)
time.sleep(0.01)
print('\r')
continue
except:
                # guard against index out of range
pass
            # try to download the audio
try:
if self.musicarg == "yes": # 保留音频
music_url = str(js['item_list'][0]['music']['play_url']['url_list'][0])
music_title = str(js['item_list'][0]['music']['author'])
music=requests.get(music_url) # 保存音频
start = time.time() # 下载开始时间
size = 0 # 初始化已下载大小
chunk_size = 1024 # 每次下载的数据大小
content_size = int(music.headers['content-length']) # 下载文件总大小
if music.status_code == 200: # 判断是否响应成功
print('[ 音频 ]:'+ creat_time + author_list[i]+'[文件 大小]:{size:.2f} MB'.format(
size = content_size / chunk_size /1024)) # 开始下载,显示下载文件大小
if self.mode == 'post':
m_url = self.save + self.mode + "\\" + nickname[i] + '\\' + creat_time + re.sub(
r'[\\/:*?"<>|\r\n]+', "_", music_title) + '_' + author_list[i] + '.mp3'
else:
m_url = self.save + self.mode + "\\" + self.nickname + '\\' + str(self.like_counts)+ '、' + re.sub(
r'[\\/:*?"<>|\r\n]+', "_", music_title) + '_' + author_list[i] + '.mp3'
with open(m_url,'wb') as file: # 显示进度条
for data in music.iter_content(chunk_size = chunk_size):
file.write(data)
size += len(data)
print('\r' + '[下载进度]:%s%.2f%%' % (
'>' * int(size * 50 / content_size), float(size / content_size * 100)), end=' ')
end = time.time() # 下载结束时间
print('\n' + '[下载完成]:耗时: %.2f秒\n' % (
end - start)) # 输出下载用时时间
except:
print('\r[ 警告 ]:下载音频出错!\r')
            # try to download the video
try:
video = requests.get(video_list[i]) # 保存视频
start = time.time() # 下载开始时间
size = 0 # 初始化已下载大小
chunk_size = 1024 # 每次下载的数据大小
content_size = int(video.headers['content-length']) # 下载文件总大小
try:
if video.status_code == 200: # 判断是否响应成功
print('[ 视频 ]:' + creat_time + author_list[i] + '[文件 大小]:{size:.2f} MB'.format(
size = content_size / chunk_size /1024)) # 开始下载,显示下载文件大小
if self.mode == 'post':
v_url = self.save + self.mode + "\\" + nickname[i] + '\\' + creat_time + re.sub(
r'[\\/:*?"<>|\r\n] + ', "_", author_list[i]) + '.mp4'
else:
v_url = self.save + self.mode + "\\" + self.nickname + '\\' + str(self.like_counts)+ '、' + re.sub(
r'[\\/:*?"<>|\r\n] + ', "_", author_list[i]) + '.mp4'
with open(v_url,'wb') as file: # 显示进度条
for data in video.iter_content(chunk_size = chunk_size):
file.write(data)
size += len(data)
print('\r' + '[下载进度]:%s%.2f%%' % (
'>' * int(size * 50 / content_size), float(size / content_size * 100)), end=' ')
end = time.time() # 下载结束时间
print('\n' + '[下载完成]:耗时: %.2f秒\n' % (
end - start)) # 输出下载用时时间
except Exception as error:
# print(error)
print('[ 警告 ]:下载视频出错!')
print('[ 警告 ]:', error, '\r')
except Exception as error:
# print(error)
print('[ 提示 ]:该页视频资源没有', self.count, '个,已为您跳过!\r')
break
        # fetch the next page
self.next_data(max_cursor)
# main entry point
if __name__ == "__main__":
    # command-line entry function
def get_args(user,dir,music,count,mode):
        # create a TikTok instance
TK = TikTok()
        # pass the command-line arguments
TK.setting(user,music,count,dir,mode)
input('[ 完成 ]:已完成批量下载,输入任意键后退出:')
sys.exit(0)
try:
parser = argparse.ArgumentParser(description='TikTokMulti V1.2.5 使用帮助')
parser.add_argument('--user', '-u', type=str, help='为用户主页链接,非必要参数', required=False)
parser.add_argument('--dir','-d', type=str,help='视频保存目录,非必要参数, 默认./Download', default='./Download/')
#parser.add_argument('--single', '-s', type=str, help='单条视频链接,非必要参数,与--user参数冲突')
parser.add_argument('--music', '-m', type=str, help='视频音乐下载,非必要参数, 默认no可选yes', default='no')
parser.add_argument('--count', '-c', type=int, help='单页下载的数量,默认参数 35 无须修改', default=35)
parser.add_argument('--mode', '-M', type=str, help='下载模式选择,默认post:发布的视频 可选like:点赞视频(需要开放权限)', default='post')
args = parser.parse_args()
        # parse the command line
get_args(args.user, args.dir, args.music, args.count, args.mode)
except Exception as e:
# print(e)
print('[ 提示 ]:未输入命令,自动退出!')
sys.exit(0)
|
# This is a sample Python script.
# Press ⌃R to execute it or replace it with your code.
# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.
from util.eplus_model_interface import EplusModelIndexedList
import os
import multiprocessing as mp
FILE_DIR = os.path.dirname(__file__)
from pathlib import Path
def run(eplus_wea: str, eplus_file: str):
"""run energyplus"""
path = Path(eplus_file)
parent_dir = path.parent.absolute()
os.chdir(parent_dir)
os.system("energyplus -w %s -r %s" % (eplus_wea, eplus_file))
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
wea_dir = os.path.join(FILE_DIR, "eplus_files/weather/chicago_tmy3.epw")
models = [os.path.join(FILE_DIR, "eplus_files/bldg1/PythonPluginCustomSchedule1.idf"),
os.path.join(FILE_DIR, "eplus_files/bldg2/PythonPluginCustomSchedule2.idf"),
os.path.join(FILE_DIR, "eplus_files/bldg3/PythonPluginCustomSchedule3.idf")]
# Setup a list of processes that we want to run
processes = [mp.Process(target=run, args=(wea_dir, x)) for x in models]
# Run processes
for p in processes:
p.start()
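    # A common follow-up (left commented out to keep the original behaviour of returning
    # as soon as the runs are launched): wait for every simulation to finish.
    # for p in processes:
    #     p.join()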
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
"""The CSDMS Standard Names"""
from ._version import get_versions
from .error import BadNameError, BadRegistryError
from .registry import NamesRegistry
from .standardname import StandardName, is_valid_name
__all__ = [
"StandardName",
"is_valid_name",
"NamesRegistry",
"BadNameError",
"BadRegistryError",
]
__version__ = get_versions()["version"]
del get_versions
|
# -*- coding: utf-8 -*-
import argparse
import os
from ctypes import cdll
from fuse import FUSE
from girder_client import GirderClient
from girderfs.core import \
RESTGirderFS, LocalGirderFS
_libc = cdll.LoadLibrary('libc.so.6')
_setns = _libc.setns
CLONE_NEWNS = 0x00020000
def setns(fd, nstype):
if hasattr(fd, 'fileno'):
fd = fd.fileno()
_setns(fd, nstype)
def main(args=None):
parser = argparse.ArgumentParser(
description='Mount Girder filesystem assetstore.')
parser.add_argument('--api-url', required=True, default=None,
help='full URL to the RESTful API of Girder server')
parser.add_argument('--username', required=False, default=None)
parser.add_argument('--password', required=False, default=None)
parser.add_argument('--api-key', required=False, default=None)
parser.add_argument('--token', required=False, default=None)
parser.add_argument('-c', default='remote', choices=['remote', 'direct'],
help='command to run')
parser.add_argument('--foreground', dest='foreground',
action='store_true')
parser.add_argument('--hostns', dest='hostns', action='store_true')
parser.add_argument('local_folder', help='path to local target folder')
parser.add_argument('remote_folder', help='Girder\'s folder id')
    args = parser.parse_args(args)
gc = GirderClient(apiUrl=args.api_url)
if args.token:
gc.token = args.token
elif args.api_key:
gc.authenticate(apiKey=args.api_key)
elif args.username and args.password:
gc.authenticate(username=args.username, password=args.password)
else:
raise RuntimeError("You need to specify apiKey or user/pass")
if args.hostns:
targetns = os.path.join(os.environ.get('HOSTDIR', '/'),
'proc/1/ns/mnt')
with open(targetns) as fd:
setns(fd, CLONE_NEWNS)
if args.c == 'remote':
FUSE(RESTGirderFS(args.remote_folder, gc), args.local_folder,
foreground=args.foreground, ro=True, allow_other=True)
elif args.c == 'direct':
FUSE(LocalGirderFS(args.remote_folder, gc), args.local_folder,
foreground=args.foreground, ro=True, allow_other=True)
else:
print('No implementation for command %s' % args.c)
if __name__ == "__main__":
main()
|
"""
Django settings for webapp project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os, sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'cmvze15krnk8ntcmg(g*10)ikqqbnw6bv_kmca7yr)y+k^g$nc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'rest_framework',
'rest_framework_docs',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_filters',
'django_celery_results',
'stefani',
'videobank.apps.Config'
]
REST_FRAMEWORK = {
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_AUTHENTICATION_CLASSES': [],
    'DEFAULT_PERMISSION_CLASSES': [],
    'DEFAULT_FILTER_BACKENDS': (
        'django_filters.rest_framework.DjangoFilterBackend',
    ),
    #'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
    'PAGE_SIZE': 10
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'webapp.urls'
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'webapp/templates'),
os.path.join(BASE_DIR, 'videobank/templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'webapp.app_info.gui_config',
'videobank.app_info.gui_config'
],
},
},
]
WSGI_APPLICATION = 'webapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
"""
DB_NAME = os.environ.get('PG_DBNAME', 'stefani')
DB_HOST = os.environ.get('PG_HOST', 'postgres')
DB_USER = os.environ.get('PG_USER', 'webservice')
DB_PASSWORD = os.environ.get('PG_PASSWORD', 'something')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': DB_NAME,
'HOST': DB_HOST,
'USER': DB_USER,
'PASSWORD': DB_PASSWORD,
}
}
"""
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
CASSANDRA_FALLBACK_ORDER_BY_PYTHON = True
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATICFILES_DIRS = [
'/usr/src/website/src/'
]
STATIC_URL = '/static/'
__LOG_FORMAT = os.environ.get('EEN_LOG_FORMAT', '%(asctime)s.%(msecs).03d[%(levelname)0.3s] %(name)s:%(funcName)s.%(lineno)d %(message)s')
# __LOG_FORMAT = os.environ.get('EEN_LOG_FORMAT', '%(message)s')
__DATE_FORMAT = os.environ.get('EEN_LOG_DATE_FORMAT', '%m-%d %H:%M:%S')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': __LOG_FORMAT,
'datefmt': __DATE_FORMAT
},
'terse': {
'format': "%(message)s",
'datefmt': __DATE_FORMAT
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
}
},
'handlers': {
'stdout': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'stream': sys.stdout,
'formatter': 'verbose'
},
'stderr': {
'level': 'ERROR',
'class': 'logging.StreamHandler',
'stream': sys.stderr,
'formatter': 'verbose'
},
},
'loggers': {
# Any message not caught by a lower level logger OR
# Propagates the message to this level will be filtered at
# the root.
'': {
'handlers': ['stdout'],
'level': os.environ.get('LOGLEVEL_ROOT', 'DEBUG'),
'propagate': False,
},
'webapp': {
'handlers': ['stdout'],
'level': os.environ.get('LOGLEVEL_WEBAPP', 'DEBUG'),
'propagate': False,
},
'stefani': {
'handlers': ['stdout'],
'level': os.environ.get('LOGLEVEL_STEFANI', 'DEBUG'),
'propagate': False,
},
'stefani.web': {
'handlers': ['stdout'],
'level': os.environ.get('LOGLEVEL_WEBAPP', 'DEBUG'),
'propagate': False,
},
}
}
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class FruitSourceGenerator:
def __init__(self, use_fruit_2_x_syntax=False):
self.use_fruit_2_x_syntax = use_fruit_2_x_syntax
def _get_component_type(self, component_index):
if self.use_fruit_2_x_syntax:
return 'const fruit::Component<Interface{component_index}>&'.format(**locals())
else:
return 'fruit::Component<Interface{component_index}>'.format(**locals())
def generate_component_header(self, component_index):
component_type = self._get_component_type(component_index)
template = """
#ifndef COMPONENT{component_index}_H
#define COMPONENT{component_index}_H
#include <fruit/fruit.h>
struct Interface{component_index} {{
virtual ~Interface{component_index}() = default;
}};
{component_type} getComponent{component_index}();
#endif // COMPONENT{component_index}_H
"""
return template.format(**locals())
def generate_component_source(self, component_index, deps):
include_directives = ''.join(['#include "component%s.h"\n' % index for index in deps + [component_index]])
component_deps = ', '.join(['std::shared_ptr<Interface%s>' % dep for dep in deps])
if self.use_fruit_2_x_syntax:
install_expressions = ''.join([' .install(getComponent%s())\n' % dep for dep in deps])
else:
install_expressions = ''.join([' .install(getComponent%s)\n' % dep for dep in deps])
component_type = self._get_component_type(component_index)
template = """
{include_directives}
struct X{component_index} : public Interface{component_index} {{
INJECT(X{component_index}({component_deps})) {{}}
virtual ~X{component_index}() = default;
}};
"""
if self.use_fruit_2_x_syntax:
template += """
{component_type} getComponent{component_index}() {{
static {component_type} comp = fruit::createComponent(){install_expressions}
.bind<Interface{component_index}, X{component_index}>();
return comp;
}}
"""
else:
template += """
{component_type} getComponent{component_index}() {{
return fruit::createComponent(){install_expressions}
.bind<Interface{component_index}, X{component_index}>();
}}
"""
return template.format(**locals())
def generate_main(self, toplevel_component):
if self.use_fruit_2_x_syntax:
return self.generate_main_with_fruit_2_x_syntax(toplevel_component)
else:
return self.generate_main_with_fruit_3_x_syntax(toplevel_component)
def generate_main_with_fruit_2_x_syntax(self, toplevel_component):
template = """
#include "component{toplevel_component}.h"
#include <ctime>
#include <iostream>
#include <cstdlib>
#include <iomanip>
#include <chrono>
using namespace std;
int main(int argc, char* argv[]) {{
if (argc != 2) {{
std::cout << "Need to specify num_loops as argument." << std::endl;
exit(1);
}}
size_t num_loops = std::atoi(argv[1]);
double componentCreationTime = 0;
double componentNormalizationTime = 0;
std::chrono::high_resolution_clock::time_point start_time;
for (size_t i = 0; i < 1 + num_loops/100; i++) {{
start_time = std::chrono::high_resolution_clock::now();
fruit::Component<Interface{toplevel_component}> component(getComponent{toplevel_component}());
componentCreationTime += std::chrono::duration_cast<std::chrono::duration<double>>(std::chrono::high_resolution_clock::now() - start_time).count();
start_time = std::chrono::high_resolution_clock::now();
fruit::NormalizedComponent<Interface{toplevel_component}> normalizedComponent(std::move(component));
componentNormalizationTime += std::chrono::duration_cast<std::chrono::duration<double>>(std::chrono::high_resolution_clock::now() - start_time).count();
}}
start_time = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < 1 + num_loops/100; i++) {{
fruit::Injector<Interface{toplevel_component}> injector(getComponent{toplevel_component}());
injector.get<std::shared_ptr<Interface{toplevel_component}>>();
}}
double fullInjectionTime = std::chrono::duration_cast<std::chrono::duration<double>>(std::chrono::high_resolution_clock::now() - start_time).count();
// The cast to Component<Interface{toplevel_component}> is needed for Fruit<2.1.0, where the constructor of
// NormalizedComponent only accepted a Component&&.
fruit::NormalizedComponent<Interface{toplevel_component}> normalizedComponent{{fruit::Component<Interface{toplevel_component}>{{getComponent{toplevel_component}()}}}};
start_time = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < num_loops; i++) {{
fruit::Injector<Interface{toplevel_component}> injector(normalizedComponent, fruit::Component<>(fruit::createComponent()));
injector.get<std::shared_ptr<Interface{toplevel_component}>>();
}}
double perRequestTime = std::chrono::duration_cast<std::chrono::duration<double>>(std::chrono::high_resolution_clock::now() - start_time).count();
std::cout << std::fixed;
std::cout << std::setprecision(15);
std::cout << "componentNormalizationTime = " << componentNormalizationTime * 100 / num_loops << std::endl;
std::cout << "Total for setup = " << (componentCreationTime + componentNormalizationTime) * 100 / num_loops << std::endl;
std::cout << "Full injection time = " << fullInjectionTime * 100 / num_loops << std::endl;
std::cout << "Total per request = " << perRequestTime / num_loops << std::endl;
return 0;
}}
"""
return template.format(**locals())
def generate_main_with_fruit_3_x_syntax(self, toplevel_component):
template = """
#include "component{toplevel_component}.h"
#include <ctime>
#include <iostream>
#include <cstdlib>
#include <iomanip>
#include <chrono>
using namespace std;
fruit::Component<> getEmptyComponent() {{
return fruit::createComponent();
}}
int main(int argc, char* argv[]) {{
if (argc != 2) {{
std::cout << "Need to specify num_loops as argument." << std::endl;
exit(1);
}}
size_t num_loops = std::atoi(argv[1]);
std::chrono::high_resolution_clock::time_point start_time = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < 1 + num_loops/100; i++) {{
fruit::NormalizedComponent<Interface{toplevel_component}> normalizedComponent(getComponent{toplevel_component});
(void)normalizedComponent;
}}
double componentNormalizationTime = std::chrono::duration_cast<std::chrono::duration<double>>(std::chrono::high_resolution_clock::now() - start_time).count();
start_time = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < 1 + num_loops/100; i++) {{
fruit::Injector<Interface{toplevel_component}> injector(getComponent{toplevel_component});
injector.get<std::shared_ptr<Interface{toplevel_component}>>();
}}
double fullInjectionTime = std::chrono::duration_cast<std::chrono::duration<double>>(std::chrono::high_resolution_clock::now() - start_time).count();
fruit::NormalizedComponent<Interface{toplevel_component}> normalizedComponent(getComponent{toplevel_component});
start_time = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < num_loops; i++) {{
fruit::Injector<Interface{toplevel_component}> injector(normalizedComponent, getEmptyComponent);
injector.get<std::shared_ptr<Interface{toplevel_component}>>();
}}
double perRequestTime = std::chrono::duration_cast<std::chrono::duration<double>>(std::chrono::high_resolution_clock::now() - start_time).count();
std::cout << std::fixed;
std::cout << std::setprecision(15);
std::cout << "componentNormalizationTime = " << componentNormalizationTime * 100 / num_loops << std::endl;
std::cout << "Total for setup = " << componentNormalizationTime * 100 / num_loops << std::endl;
std::cout << "Full injection time = " << fullInjectionTime * 100 / num_loops << std::endl;
std::cout << "Total per request = " << perRequestTime / num_loops << std::endl;
return 0;
}}
"""
return template.format(**locals())
|
# variable length argument
def add(*args):
sum = 0
for i in args:
sum+=i
return sum
# Keyword length argument
def what_to_do(farg,**kwargs):
sum = 0
sub = 0
if farg=='sum':
for i,j in kwargs.items():
sum+=j
if farg=='sub':
for k,v in kwargs.items():
sub-=v
if sum!=0:
return sum
else: return sub
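# A minimal usage sketch (the call values below are purely illustrative):
if __name__ == '__main__':
    print(add(1, 2, 3))                       # -> 6
    print(what_to_do('sum', a=1, b=2, c=3))   # -> 6
    print(what_to_do('sub', a=1, b=2, c=3))   # -> -6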
|
# This example is provided for informational purposes only and has not been audited for security.
from pyteal import *
"""Split Payment"""
tmpl_fee = Int(1000)
tmpl_rcv1 = Addr("6ZHGHH5Z5CTPCF5WCESXMGRSVK7QJETR63M3NY5FJCUYDHO57VTCMJOBGY")
tmpl_rcv2 = Addr("7Z5PWO2C6LFNQFGHWKSK5H47IQP5OJW2M3HA2QPXTY3WTNP5NU2MHBW27M")
tmpl_own = Addr("5MK5NGBRT5RL6IGUSYDIX5P7TNNZKRVXKT6FGVI6UVK6IZAWTYQGE4RZIQ")
tmpl_ratn = Int(1)
tmpl_ratd = Int(3)
tmpl_min_pay = Int(1000)
tmpl_timeout = Int(3000)
def split(tmpl_fee=tmpl_fee,
tmpl_rcv1=tmpl_rcv1,
tmpl_rcv2=tmpl_rcv2,
tmpl_own=tmpl_own,
tmpl_ratn=tmpl_ratn,
tmpl_ratd=tmpl_ratd,
tmpl_min_pay=tmpl_min_pay,
tmpl_timeout=tmpl_timeout):
split_core = And(
Txn.type_enum() == TxnType.Payment,
Txn.fee() < tmpl_fee,
Txn.rekey_to() == Global.zero_address()
)
split_transfer = And(
Gtxn[0].sender() == Gtxn[1].sender(),
Txn.close_remainder_to() == Global.zero_address(),
Gtxn[0].receiver() == tmpl_rcv1,
Gtxn[1].receiver() == tmpl_rcv2,
Gtxn[0].amount() == ((Gtxn[0].amount() + Gtxn[1].amount()) * tmpl_ratn) / tmpl_ratd,
Gtxn[0].amount() == tmpl_min_pay
)
split_close = And(
Txn.close_remainder_to() == tmpl_own,
Txn.receiver() == Global.zero_address(),
Txn.amount() == Int(0),
Txn.first_valid() > tmpl_timeout
)
split_program = And(
split_core,
If(Global.group_size() == Int(2),
split_transfer,
split_close
)
)
return split_program
if __name__ == "__main__":
print(compileTeal(split(), Mode.Signature))
|
from pyspark import keyword_only
from pyspark.ml.param import TypeConverters
from pyspark.ml.param.shared import Param, Params
from pyspark.ml.util import DefaultParamsReadable, DefaultParamsWritable
from pyspark.ml.wrapper import JavaTransformer
class OneHotDecoder(JavaTransformer, DefaultParamsReadable, DefaultParamsWritable):
"""
OneHotDecoder Custom Scala / Python Wrapper
"""
_classpath = 'com.clarifyhealth.ohe.decoder.OneHotDecoder'
oheSuffix = Param(Params._dummy(), 'oheSuffix', 'oheSuffix', typeConverter=TypeConverters.toString)
idxSuffix = Param(Params._dummy(), 'idxSuffix', 'idxSuffix', typeConverter=TypeConverters.toString)
unknownSuffix = Param(Params._dummy(), 'unknownSuffix', 'unknownSuffix', typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, oheSuffix=None, idxSuffix=None, unknownSuffix=None):
super(OneHotDecoder, self).__init__()
self._java_obj = self._new_java_obj(OneHotDecoder._classpath, self.uid)
self._setDefault(oheSuffix="_OHE", idxSuffix="_IDX", unknownSuffix="Unknown")
kwargs = self._input_kwargs
self.setParams(**kwargs)
# noinspection PyPep8Naming
@keyword_only
def setParams(self, oheSuffix=None, idxSuffix=None, unknownSuffix=None):
"""
Set the params for the OneHotDecoder
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
# noinspection PyPep8Naming,PyMissingOrEmptyDocstring
def setOheSuffix(self, value):
self._set(oheSuffix=value)
return self
# noinspection PyPep8Naming,PyMissingOrEmptyDocstring
def getOheSuffix(self):
return self.getOrDefault(self.oheSuffix)
# noinspection PyPep8Naming,PyMissingOrEmptyDocstring
def setIdxSuffix(self, value):
self._set(idxSuffix=value)
return self
# noinspection PyPep8Naming,PyMissingOrEmptyDocstring
def getIdxSuffix(self):
return self.getOrDefault(self.idxSuffix)
# noinspection PyPep8Naming,PyMissingOrEmptyDocstring
def setUnknownSuffix(self, value):
self._set(unknownSuffix=value)
return self
# noinspection PyPep8Naming,PyMissingOrEmptyDocstring
def getUnknownSuffix(self):
return self.getOrDefault(self.unknownSuffix)
# noinspection PyPep8Naming,PyMissingOrEmptyDocstring
def getName(self):
return "one_hot_decoder"
|
# -*- coding: utf-8 -*-
import requests
import json
import random
from twitchio.ext import commands
import config
bot = commands.Bot(
irc_token=config.OAUTH,
client_id=config.CLIENT_ID,
nick=config.BOT_NAME,
prefix=config.BOT_PREFIX,
initial_channels=[config.CHANNEL]
)
bms_list = [{'url': 'http://mirai-yokohama.sakura.ne.jp/bms/data.json', 'prefix': '★'},
{'url': 'https://stellabms.xyz/sl/score.json', 'prefix': 'sl'},
{'url': 'https://stellabms.xyz/st/score.json', 'prefix': 'st'}
]
def random_select(bms, filter=None):
res = requests.get(bms['url'])
if not res.status_code == requests.codes.ok:
return f"Error. status = {res.status_code}"
jsonData = res.json()
if filter:
jsonData = [x for x in jsonData if x['level'] == filter]
if jsonData == []:
return f"Error. Maybe wrong difficulty?"
i = random.randrange(len(jsonData))
return f"{bms['prefix']}{jsonData[i]['level']} {jsonData[i]['title']}"
async def channel_send(ctx, bms):
if ' ' in ctx.content:
song = random_select(bms, ctx.content.split()[1])
else:
song = random_select(bms)
message = f"Random Select -> [{song}] from {ctx.author.name}"
await ctx.channel.send(message)
@bot.event
async def event_ready():
print(f"{config.BOT_NAME}(insane) is online.")
@bot.command(name='insane')
async def insane(ctx):
await channel_send(ctx, bms_list[0])
@bot.command(name='sl')
async def satellite(ctx):
await channel_send(ctx, bms_list[1])
@bot.command(name='st')
async def stella(ctx):
await channel_send(ctx, bms_list[2])
if __name__ == '__main__':
bot.run()
|
from uuid import UUID
import pytest
import requests
from exporter.applications.forms import goods
application_id = UUID("2a199722-75fc-4699-abfc-ddfb30381c0f")
sub_case_type_siel = {"key": "standard", "value": "Standard Licence"}
@pytest.fixture(scope="session")
def good_template():
return {
"component_details": None,
"control_list_entries": [],
"id": "7aa98481-c547-448c-9fd3-1d22d0eb7c33",
"information_security_details": None,
"is_component": None,
"is_good_controlled": {"key": "False", "value": "No"},
"is_military_use": None,
"is_pv_graded": {"key": "no", "value": "No"},
"modified_military_use_details": None,
"part_number": "",
"pv_grading_details": None,
"software_or_technology_details": None,
"uses_information_security": None,
# to be set by fixture
"item_category": {},
"name": "good",
"description": "",
"firearm_details": {},
}
@pytest.fixture(scope="session")
def good_ammo(good_template):
return {
**good_template,
"name": "Ammunition",
"description": "box of 25 game shot cartridges",
"item_category": {"key": "group2_firearms", "value": "Firearms"},
"firearm_details": {
"calibre": "16 gramme",
"has_identification_markings": False,
"has_proof_mark": None,
"identification_markings_details": None,
"is_covered_by_firearm_act_section_one_two_or_five": False,
"no_identification_markings_details": "it's ammo",
"no_proof_mark_details": "",
"section_certificate_date_of_expiry": None,
"section_certificate_number": None,
"type": {"key": "ammunition", "value": "Ammunition"},
"year_of_manufacture": 2019,
},
}
@pytest.fixture(scope="session")
def good_shotgun(good_template):
return {
**good_template,
"name": "Shotgun",
"description": "A shotgun",
"item_category": {"key": "group2_firearms", "value": "Firearms"},
"firearm_details": {
"calibre": "12 guage",
"has_identification_markings": False,
"has_proof_mark": None,
"identification_markings_details": None,
"is_covered_by_firearm_act_section_one_two_or_five": False,
"no_identification_markings_details": "dd",
"no_proof_mark_details": "",
"section_certificate_date_of_expiry": None,
"section_certificate_number": None,
"type": {"key": "firearms", "value": "Firearms"},
"year_of_manufacture": 2020,
},
}
@pytest.fixture(scope="session")
def good_gun_barrel(good_template):
return {
**good_template,
"name": "Gun barrel",
"description": "A barrel",
"item_category": {"key": "group2_firearms", "value": "Firearms"},
"firearm_details": {
"calibre": "12 guage",
"has_identification_markings": False,
"has_proof_mark": None,
"identification_markings_details": None,
"is_covered_by_firearm_act_section_one_two_or_five": False,
"no_identification_markings_details": "foo",
"no_proof_mark_details": "",
"section_certificate_date_of_expiry": None,
"section_certificate_number": None,
"type": {"key": "components_for_firearms", "value": "Components for firearms"},
"year_of_manufacture": 2020,
},
}
@pytest.fixture(scope="session")
def good_widget(good_template):
return {
**good_template,
"name": "Widget",
"description": "A widget",
"item_category": {"key": "group1_components", "value": "Components, modules or accessories of something"},
}
@pytest.fixture
def default_request(rf, client):
request = rf.get("/")
request.session = client.session
request.requests_session = requests.Session()
return request
def test_good_on_application_form_ammunition(default_request, good_ammo, mock_units):
form = goods.unit_quantity_value(
request=default_request, good=good_ammo, sub_case_type=sub_case_type_siel, application_id=application_id,
)
assert len(form.questions) == 8
assert form.questions[-2].title == goods.firearm_proof_mark_field().title
assert form.questions[-1].title == goods.firearm_is_deactivated_field().title
def test_good_on_application_form_firearm(default_request, good_shotgun, mock_units):
form = goods.unit_quantity_value(
request=default_request, good=good_shotgun, sub_case_type=sub_case_type_siel, application_id=application_id,
)
assert len(form.questions) == 8
assert form.questions[-2].title == goods.firearm_proof_mark_field().title
assert form.questions[-1].title == goods.firearm_is_deactivated_field().title
def test_good_on_application_form_firearm_component(default_request, good_gun_barrel, mock_units):
form = goods.unit_quantity_value(
request=default_request, good=good_gun_barrel, sub_case_type=sub_case_type_siel, application_id=application_id,
)
assert len(form.questions) == 8
assert form.questions[-2].options[0].components[0].title == goods.firearm_proof_mark_field().title
assert form.questions[-1].title == goods.firearm_is_deactivated_field().title
def test_good_on_application_form_not_firearm(default_request, good_widget, mock_units):
form = goods.unit_quantity_value(
request=default_request, good=good_widget, sub_case_type=sub_case_type_siel, application_id=application_id,
)
assert len(form.questions) == 6
assert form.questions[-1].title != goods.firearm_proof_mark_field().title
|
from __future__ import print_function, division
import time
import datetime
import argparse
from pathlib import Path
import numpy as np
from cornell_dataset import CornellDataset, ToTensor, Normalize, de_normalize
def parse_arguments():
parser = argparse.ArgumentParser(description='Grasping detection system')
parser.add_argument('--src_path', type=str,
help='Path to the src folder with all the orthographic images.')
parser.add_argument('--output_path', type=str,
help='Path to the folder where all the final images will be saved.')
args = parser.parse_args()
return args.src_path, args.output_path
if __name__ == '__main__':
SRC_PATH, OUTPUT_PATH = parse_arguments()
if SRC_PATH is None:
SRC_PATH = Path.cwd() / 'ortographic_modelnet10_dataset_gray_images'
print(f"No path was given. Using {SRC_PATH} as src path for the images.")
else:
SRC_PATH = Path(SRC_PATH)
if OUTPUT_PATH is None:
OUTPUT_PATH = Path.cwd() / "result_img"
print(f"No path was given. Using {OUTPUT_PATH} as src path where all the final images will be saved.")
else:
OUTPUT_PATH = Path(OUTPUT_PATH)
# Make sure output exists
if not OUTPUT_PATH.exists():
Path.mkdir(OUTPUT_PATH, parents=True)
# Orth images data loader
# TODO
# Create model
# TODO
# print(model.model)
# For each image, plot the predicted rectangle and save it to OUTPUT_PATH
# TODO
print("End of testing orthogonal projection images. Byeeee :D!")
|
import matplotlib.pyplot as plt
import datetime as dt
import time as ti
from astropy import time
from astropy import units as u
from poliastro.neos import neows
from poliastro.examples import molniya
from poliastro.plotting import plot, OrbitPlotter, BODY_COLORS
from poliastro.bodies import Sun, Earth, Mars
from poliastro.twobody import Orbit
date = time.Time("2018-02-07 12:00", scale='utc')
start=dt.datetime(2018, 2, 1, 12, 0)
length=1
days_dt=[dt.datetime(2018, 2, 1, 12, 0)+dt.timedelta(days=1*n) for n in range(length)]
days_as=[time.Time(day, scale='tdb') for day in days_dt]
op = OrbitPlotter(num_points=1000)
r_p = Sun.R + 165 * u.km
r_a = Sun.R + 215 * u.km
a = (r_p + r_a) / 2
roadster=Orbit.from_classical(attractor=Sun,
a=0.9860407221838553 * u.AU,
ecc=0.2799145376150214*u.one,
inc=1.194199764898942*u.deg,
raan=49*u.deg,
argp=286*u.deg,
nu=23*u.deg,
epoch=date)
for date in days_as:
apophis_orbit = neows.orbit_from_name('99942')
spacex = neows.orbit_from_name('-143205')
op.orbits.clear()
earth = Orbit.from_body_ephem(Earth, date)
mars = Orbit.from_body_ephem(Mars, date)
op.plot(earth, label=Earth)
op.plot(mars, label=Mars)
op.plot(roadster, label='Roadster')
op.plot(apophis_orbit, label='Apophis')
op._redraw()
plt.pause(0.01)
input('type to exit')
op.plot(Orbit.from_body_ephem(Mars, time.Time("2018-07-28 12:00", scale='utc')), label=Mars)
|
import datetime
import functools
import logging
import os
import sys
from collections import namedtuple
from datetime import date, timedelta
from logging import handlers
from time import sleep
def retry(exceptions, tries=2, wait=None):
""" Decorator factory creates retry-decorators which repeats the function
execution until it finally executes without throwing an exception
or until the max number of attempts <tries> is reached.
If <wait> is provided, the process waits that amount of seconds before
going for the next attempt.
"""
def decorator(f):
@functools.wraps(f)
def protegee(*args, **kwargs):
for attempt in range(tries):
try:
return f(*args, **kwargs)
except exceptions:
if attempt == tries - 1: # Exception in final attempt
raise
if wait is not None:
sleep(wait)
return protegee
return decorator
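# A minimal usage sketch (``flaky_call`` and the exception tuple are purely
# illustrative, not part of this module):
#
#   @retry((ConnectionError, TimeoutError), tries=3, wait=1)
#   def flaky_call():
#       ...  # retried up to 3 times, waiting 1 second between attempts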
def get_fallbacker(logger, default=None, exceptions=RuntimeError):
""" copied from the interface (doq) !!! """
def fallbacker_(f):
@functools.wraps(f)
def fallbacked(*args, **kwargs):
try:
return f(*args, **kwargs)
except exceptions:
logger.error(
'Failed executing {}.{}\n'
'Positional arguments: {}\nKeyword arguments: {}'.format(
f.__module__,
f.__name__,
', '.join(map(str, args)),
', '.join(['({}, {})'.format(str(k), str(v))
for k, v in kwargs.items()])
),
exc_info=True
)
if callable(default):
return default(*args, **kwargs)
return default
return fallbacked
return fallbacker_
def _seconds_until_tomorrow():
now = datetime.datetime.now()
time_of_awakening = datetime.datetime.combine(
(now + datetime.timedelta(1)).date(),
datetime.time(0, 0))
return (time_of_awakening - now).seconds
def wait_until_tomorrow(goodwill=0):
""" Does what it name says it does.
:param goodwill: Some extra minutes to wait, to make sure everybody agrees
that the next day has arrived.
"""
sleep(_seconds_until_tomorrow() + goodwill * 60)
def get_file_content(file_path, **kwargs):
with open(file_path, **kwargs) as f:
return f.read()
class TwoWay:
def __init__(self, names, pair_list=None):
"""
:param names: tuple of strings, meaning the names of each "column"
:param pair_list: List of tuples
"""
self.left_name, self.right_name = names
self.d = {
names[0]: dict(),
names[1]: dict()
}
for l, r in pair_list or tuple():
self.set(**{self.left_name: l, self.right_name: r})
def keys(self, column):
return self.d[column].keys()
def set(self, **kw):
self.d[self.left_name][kw[self.left_name]] = kw[self.right_name]
self.d[self.right_name][kw[self.right_name]] = kw[self.left_name]
def get(self, **kwargs):
key, value = list(kwargs.items())[0]
return self.d[key][value]
def __repr__(self):
return 'TwoWay(("{}", "{}"), {})'.format(
self.left_name,
self.right_name,
str(list((left, right)
for left, right in self.d[self.left_name].items()))
)
def items(self):
return self.d[self.left_name].items()
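# A minimal usage sketch for TwoWay (the column names and pairs are arbitrary):
#
#   tw = TwoWay(('iso', 'name'), [('DE', 'Germany'), ('FR', 'France')])
#   tw.get(iso='DE')       # -> 'Germany'
#   tw.get(name='France')  # -> 'FR'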
class FullMonth(namedtuple('FM', ['year', 'month'])):
@property
def ultimo(self) -> date:
return self.next().first - timedelta(1)
@property
def first(self) -> date:
return date(self.year, self.month, 1)
def next(self):
if self.month == 12:
return FullMonth(self.year + 1, 1)
return FullMonth(self.year, self.month + 1)
def previous(self):
if self.month == 1:
return FullMonth(self.year - 1, 12)
return FullMonth(self.year, self.month - 1)
@classmethod
def instantiate(cls, v):
if type(v) is str:
try:
return cls(int(v[0:4]), int(v[4:6]))
            except (ValueError, IndexError):
raise ValueError(f'Could not parse {v} as FullMonth')
if type(v) is date:
return cls(v.year, v.month)
class SwingingFileLogger(logging.Logger):
formatter = logging.Formatter(
'%(levelname)s %(asctime)s %(module)s.%(funcName)s: %(message)s')
_instance = None
@classmethod
def get(cls, name, file_path=None):
self = cls._instance or cls('sfl', logging.INFO)
if cls._instance is None:
cls._instance = self
for handler in self.handlers[:]:
self.removeHandler(handler)
self.addHandler(cls.get_handler(name, file_path))
return self
@classmethod
def get_handler(cls, name, file_path=None):
if file_path:
handler = handlers.TimedRotatingFileHandler(
os.path.join(file_path, '{}.log'.format(name)),
interval=4,
backupCount=5,
encoding='utf-8'
)
else:
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
handler.setFormatter(cls.formatter)
return handler
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 26 09:53:33 2021
@author: ljia
"""
if __name__ == '__main__':
tasks = [
{'path': '.',
'file': 'run_job_treelet.py'
},
]
import os
for t in tasks:
print(t['file'])
command = ''
command += 'cd ' + t['path'] + '\n'
command += 'python3 ' + t['file'] + '\n'
# command += 'cd ' + '/'.join(['..'] * len(t['path'].split('/'))) + '\n'
os.system(command)
|
def mdc(m,n):
if n==0: return m
return mdc(n,m%n)
for k in range(int(input())):
A = input().split()
op = A[3]
A[0] = int(A[0])
A[1] = int(A[2])
A[2] = int(A[4])
A[3] = int(A[6])
res = [[],[]]
if op == '+':
res[0] = A[0]*A[3]+A[2]*A[1]
res[1] = A[1]*A[3]
elif op == '-':
res[0] = A[0]*A[3]-A[2]*A[1]
res[1] = A[1]*A[3]
elif op == '*':
res[0] = A[0]*A[2]
res[1] = A[1]*A[3]
else:
res[0] = A[0]*A[3]
res[1] = A[2]*A[1]
m = mdc(res[0],res[1])
print("%d/%d = %d/%d" % (res[0],res[1],res[0]/m,res[1]/m))
|
"""
Tools for narrating the screenplay.
"""
from .narrator import Narrator
__all__ = [
"Narrator",
]
|
from bitmovin_api_sdk.encoding.encodings.muxings.ts.drm.aes.aes_api import AesApi
from bitmovin_api_sdk.encoding.encodings.muxings.ts.drm.aes.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.encodings.muxings.ts.drm.aes.aes_encryption_drm_list_query_params import AesEncryptionDrmListQueryParams
|
from .goal_emb import *
from .st_emb import *
from .st_enc import *
from .st_search import *
from .tac_sc import *
from .thm_emb import *
from .thm_sc import *
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-02-22 21:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('awards', '0068_merge_20170216_1631'),
]
operations = [
migrations.AddField(
model_name='financialaccountsbyawards',
name='transaction_obligated_amount',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=21, null=True),
)
]
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from toscaparser.elements.statefulentitytype import StatefulEntityType
class ArtifactTypeDef(StatefulEntityType):
'''TOSCA built-in artifacts type.'''
def __init__(self, atype, custom_def=None):
super(ArtifactTypeDef, self).__init__(atype, self.ARTIFACT_PREFIX,
custom_def)
self.type = atype
self.properties = None
if self.PROPERTIES in self.defs:
self.properties = self.defs[self.PROPERTIES]
self.parent_artifacts = self._get_parent_artifacts()
def _get_parent_artifacts(self):
artifacts = {}
parent_artif = self.parent_type
if parent_artif:
while parent_artif != 'tosca.artifacts.Root':
artifacts[parent_artif] = self.TOSCA_DEF[parent_artif]
parent_artif = artifacts[parent_artif]['derived_from']
return artifacts
@property
def parent_type(self):
'''Return an artifact this artifact is derived from.'''
return self.derived_from(self.defs)
def get_artifact(self, name):
'''Return the definition of an artifact field by name.'''
if name in self.defs:
return self.defs[name]
|
# Copyright 2013 by Eric Suh
# Copyright (c) 2013 Theo Crevon
# This code is freely licensed under the MIT license found at
# <http://opensource.org/licenses/MIT>
import sys
import os
import errno
import atexit
import signal
import time
import pidfile
class daemon(object):
'Context manager for POSIX daemon processes'
def __init__(self,
pidfile=None,
workingdir='/',
umask=0,
stdin=None,
stdout=None,
stderr=None,
):
self.pidfile = pidfile
self.workingdir = workingdir
self.umask = umask
devnull = os.open(os.devnull, os.O_RDWR)
self.stdin = stdin.fileno() if stdin is not None else devnull
self.stdout = stdout.fileno() if stdout is not None else devnull
self.stderr = stderr.fileno() if stderr is not None else self.stdout
def __enter__(self):
self.daemonize()
return
def __exit__(self, exc_type, exc_value, exc_traceback):
self.stop()
return
def daemonize(self):
'''Set up a daemon.
There are a few major steps:
1. Changing to a working directory that won't go away
2. Changing user permissions mask
3. Forking twice to detach from terminal and become new process leader
4. Redirecting standard input/output
5. Creating a PID file'''
# Set up process conditions
os.chdir(self.workingdir)
os.umask(self.umask)
# Double fork to daemonize
_getchildfork(1)
os.setsid()
_getchildfork(2)
# Redirect standard input/output files
sys.stdin.flush()
sys.stdout.flush()
sys.stderr.flush()
os.dup2(self.stdin, sys.stdin.fileno())
os.dup2(self.stdout, sys.stdout.fileno())
os.dup2(self.stderr, sys.stderr.fileno())
# Create PID file
if self.pidfile is not None:
pid = str(os.getpid())
try:
pidfile.make_pidfile(self.pidfile, pid)
except pidfile.PIDFileError as e:
                sys.stderr.write('Creating PID file failed. ({})'.format(e))
os._exit(os.EX_OSERR)
atexit.register(self.stop)
def stop(self):
if self.pidfile is not None:
pid = pidfile.readpid(self.pidfile)
try:
while True:
os.kill(pid, signal.SIGTERM)
time.sleep(0.1)
except OSError as e:
if e.errno == errno.ESRCH:
pidfile.remove_pidfile(self.pidfile)
else:
raise
def _getchildfork(n):
try:
pid = os.fork()
if pid > 0:
sys.exit(os.EX_OK) # Exit in parent
except OSError as e:
        sys.stderr.write('Fork #{} failed: {} ({})\n'.format(
n, e.errno, e.strerror))
os._exit(os.EX_OSERR)
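# A minimal usage sketch (the PID file path and ``do_work`` are hypothetical):
#
#   with daemon(pidfile='/var/run/mydaemon.pid', workingdir='/'):
#       while True:
#           do_work()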
|
# I like this version FizzBuzz better than the standard :P
# sum of first "n" multiples of "d" (limited case of arithmetic series sum)
def sum_of_n_muls(d, n):
# (2*a1+(n-1)*d)*n/2 = (2*d+(n-1)*d)*n/2 = ((n+1)*d)*n/2
    return (n+1)*d*n//2  # n*(n+1) is always even, so integer division is exact
# sum of all multiples of "d" below positive integer "number"
def sum_muls_below(d, number):
return sum_of_n_muls(d, (number-1)//d)
def solution(number):
return sum_muls_below(3, number) + sum_muls_below(5, number) - sum_muls_below(15, number)
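# Worked example: for number = 10 the multiples of 3 or 5 below 10 are
# 3, 5, 6 and 9. sum_muls_below(3, 10) gives 3+6+9 = 18 via the closed form,
# sum_muls_below(5, 10) gives 5, and no multiple of 15 lies below 10,
# so solution(10) == 18 + 5 - 0 == 23.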
|
# -*- coding:utf-8 -*-
#--
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
#--
from elixir import using_options
from elixir import ManyToMany, ManyToOne
from elixir import Field, Integer, DateTime, UnicodeText
from nagare.database import session
import datetime
from kansha.models import Entity
class DataCard(Entity):
'''Card mapper'''
using_options(tablename='card')
title = Field(UnicodeText)
index = Field(Integer)
creation_date = Field(DateTime, default=datetime.datetime.utcnow)
column = ManyToOne('DataColumn')
def update(self, other):
self.title = other.title
self.index = other.index
session.flush()
@property
def archived(self):
return self.column.archive
class DummyDataCard(object):
def __init__(self, title='dummy card', creation_date=datetime.datetime.utcnow()):
self.title = title
self.creation_date = creation_date
self.index = 0
def update(self, other):
        print('update!')
@property
def archived(self):
return False
|
import regex as re
def remove_article(text):
return re.sub('^l[ea] ', '', text)
def remove_feminine_infl(text):
text = re.sub('(?<=\w+)\(e\)(?!\w)', '', text)
text = re.sub('(?<=\w+)\(ne\)(?!\w)', '', text)
return text
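# Illustrative calls (the French inputs are arbitrary examples):
#
#   remove_article('la maison')          # -> 'maison'
#   remove_feminine_infl('employé(e)')   # -> 'employé'
#   remove_feminine_infl('citoyen(ne)')  # -> 'citoyen'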
|
from queue import Queue
import logging
log = logging.getLogger(__name__)
class DependencyMapper:
SUMMARY_LOG_MESSAGE = "Dependencies tree for package {name}-{version} retrieved.{count} dependencies were found."
def __init__(self, cache):
self.cache = cache
@staticmethod
def _create_package_dict(name, version):
return {
'name': name,
'version': version
}
@staticmethod
def _get_explicit_version(package_version):
# TODO: Better handle semantic versioning and conditions - ^, ~, ||.
# and things like:
# {
# "name": "safer-buffer",
# "version": ">= 2.1.2 < 3"
# }
# For now I'm only extracting the first version number I find
version = package_version.replace('~', '').replace('^', '')
return version.strip('><= .|&').split(' ')[0]
def get_dependencies_tree_for_package(self, name, version):
log.debug("Retrieving dependencies tree for package {}-{}...".format(name, version))
dependencies_queue = Queue()
total_num_of_dependencies = 0
root_package = self._create_package_dict(name, self._get_explicit_version(version))
dependencies_queue.put(root_package)
while not dependencies_queue.empty():
package = dependencies_queue.get()
package_dependencies = self.cache.get(package['name'], package['version'])
sub_dependencies = list()
for dep_name, dep_version in package_dependencies.items():
dep_version = self._get_explicit_version(dep_version)
child = self._create_package_dict(dep_name, dep_version)
dependencies_queue.put(child)
sub_dependencies.append(child)
# If the package has dependencies, add them to the package object
if sub_dependencies:
package['dependencies'] = sub_dependencies
total_num_of_dependencies += len(sub_dependencies)
log.debug(self.SUMMARY_LOG_MESSAGE.format(name=name, version=version, count=total_num_of_dependencies))
# Return just the dependencies, or an empty list
return root_package.get('dependencies', list())
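# Illustrative behaviour of the version normalisation above (the values are
# chosen arbitrarily): '^1.2.3' -> '1.2.3', '~0.4.0' -> '0.4.0',
# '>= 2.1.2 < 3' -> '2.1.2' (only the first version number is kept).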
|
# -*- coding: utf-8 -*-
"""Authentication methods."""
from typing import List
from ..exceptions import AlreadyLoggedIn
from ..http import Http
from .models import Mixins
class ApiKey(Mixins):
"""Authentication method using API key & API secret."""
def __init__(self, http: Http, key: str, secret: str, **kwargs):
"""Authenticate using API key & API secret.
Args:
http: HTTP client to use to send requests
key: API key to use in credentials
secret: API secret to use in credentials
"""
creds = {"key": key, "secret": secret}
super().__init__(http=http, creds=creds, **kwargs)
def login(self):
"""Login to API."""
if self.is_logged_in:
raise AlreadyLoggedIn(f"Already logged in on {self}")
self.http.session.headers["api-key"] = self._creds["key"]
self.http.session.headers["api-secret"] = self._creds["secret"]
self._validate()
self._logged_in = True
self.LOG.debug(f"Successfully logged in using {self._cred_fields}")
def logout(self):
"""Logout from API."""
super().logout()
@property
def _cred_fields(self) -> List[str]:
"""Credential fields used by this auth model."""
return ["key", "secret"]
def _logout(self):
"""Logout from API."""
self._logged_in = False
self.http.session.headers = {}
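# A minimal usage sketch (assumes an already-constructed ``Http`` client named
# ``http``; the key and secret values are placeholders):
#
#   auth = ApiKey(http=http, key="my-key", secret="my-secret")
#   auth.login()    # sets the api-key / api-secret headers and validates them
#   auth.logout()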
|
import simplejson as json
from bamboo.controllers.calculations import Calculations
from bamboo.controllers.datasets import Datasets
from bamboo.core.summary import SUMMARY
from bamboo.lib.jsontools import df_to_jsondict
from bamboo.lib.mongo import MONGO_ID
from bamboo.models.dataset import Dataset
from bamboo.tests.test_base import TestBase
def comparable(dframe):
return [reduce_precision(r) for r in df_to_jsondict(dframe)]
def reduce_precision(row):
return {k: round(v, 10) if isinstance(v, float) else v
for k, v in row.iteritems()}
class TestAbstractDatasets(TestBase):
NUM_COLS = 15
NUM_ROWS = 19
def setUp(self):
TestBase.setUp(self)
self.controller = Datasets()
self._file_name = 'good_eats.csv'
self._update_file_name = 'good_eats_update.json'
self._update_check_file_path = '%sgood_eats_update_values.json' % (
self.FIXTURE_PATH)
self.default_formulae = [
'amount',
'amount + 1',
'amount - 5',
]
def _put_row_updates(self, dataset_id=None, file_name=None, validate=True):
if not dataset_id:
dataset_id = self.dataset_id
if not file_name:
file_name = self._update_file_name
update = open('%s%s' % (self.FIXTURE_PATH, file_name), 'r').read()
result = json.loads(self.controller.update(dataset_id=dataset_id,
update=update))
if validate:
self.assertTrue(isinstance(result, dict))
self.assertTrue(Dataset.ID in result)
# set up the (default) values to test against
with open(self._update_check_file_path, 'r') as f:
self._update_values = json.loads(f.read())
def _load_schema(self):
return json.loads(
self.controller.info(self.dataset_id))[Dataset.SCHEMA]
def _check_dframes_are_equal(self, dframe1, dframe2):
rows1 = comparable(dframe1)
rows2 = comparable(dframe2)
self.__check_dframe_is_subset(rows1, rows2)
self.__check_dframe_is_subset(rows2, rows1)
def __check_dframe_is_subset(self, rows1, rows2):
for row in rows1:
self.assertTrue(row in rows2,
'\nrow:\n%s\n\nnot in rows2:\n%s' % (row, rows2))
def _post_calculations(self, formulae=[], group=None):
schema = self._load_schema()
controller = Calculations()
for idx, formula in enumerate(formulae):
name = 'calc_%d' % idx if not schema or\
formula in schema.keys() else formula
controller.create(self.dataset_id, formula=formula, name=name,
group=group)
def _test_summary_built(self, result):
# check that summary is created
self.assertTrue(isinstance(result, dict))
self.assertTrue(Dataset.ID in result)
self.dataset_id = result[Dataset.ID]
results = self.controller.summary(
self.dataset_id,
select=self.controller.SELECT_ALL_FOR_SUMMARY)
return self._test_summary_results(results)
def _test_summary_results(self, results):
results = json.loads(results)
self.assertTrue(isinstance(results, dict))
return results
def _test_aggregations(self, groups=['']):
results = json.loads(self.controller.aggregations(self.dataset_id))
self.assertTrue(isinstance(results, dict))
self.assertEqual(len(results.keys()), len(groups))
self.assertEqual(results.keys(), groups)
linked_dataset_id = results[groups[0]]
self.assertTrue(isinstance(linked_dataset_id, basestring))
# inspect linked dataset
return json.loads(self.controller.show(linked_dataset_id))
def _test_summary_no_group(self, results, dataset_id=None, group=None):
if not dataset_id:
dataset_id = self.dataset_id
group = [group] if group else []
result_keys = results.keys()
# minus the column that we are grouping on
self.assertEqual(len(result_keys), self.NUM_COLS - len(group))
columns = [col for col in
self.get_data(self._file_name).columns.tolist()
if not col in [MONGO_ID] + group]
dataset = Dataset.find_one(dataset_id)
labels_to_slugs = dataset.schema.labels_to_slugs
for col in columns:
slug = labels_to_slugs[col]
self.assertTrue(slug in result_keys,
'col (slug): %s in: %s' % (slug, result_keys))
self.assertTrue(SUMMARY in results[slug].keys())
|
import setuptools
setuptools.setup(use_scm_version={"write_to": "mbta_analysis/_version.py"})
|
# -*- coding: utf-8 -*-
# Copyright 2017-TODAY LasLabs Inc.
# License MIT (https://opensource.org/licenses/MIT).
import properties
from ..base_model import GeneralOverview, LinksModelWithImage
from .producer import Producer
from .strain import Strain
class AbstractItem(LinksModelWithImage):
"""Represents the base attributes for a saleable cannabis item."""
name = properties.String(
'Name of the item.',
)
barcode = properties.String(
'Link to the barcode for this item.',
)
producer = properties.Instance(
'Information about the producer that created the item.',
instance_class=Producer,
)
type = properties.String(
'Type of item.',
)
strain = properties.Instance(
'Strain that this item comes from.',
instance_class=Strain,
)
lab_test = properties.String(
'Link to the PDF containing lab test information for this item.',
)
thc = properties.String(
'Amount of `THC <https://www.cannabisreports.com/faq/'
'cannabis-community/what-is-thc-tetrahydrocannabinol>`_ in this '
'item.',
)
cbd = properties.String(
'Amount of `CBD <https://www.cannabisreports.com/faq/'
'cannabis-community/what-is-cbd-cannabidiol>`_ in this item.',
)
cannabis = properties.String(
'Milligrams of cannabis in this item.',
)
hash_oil = properties.String(
'Milligrams of hash oil in this item.',
)
reviews = properties.Instance(
'Object containing information on the reviews for the item.',
instance_class=GeneralOverview,
)
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Implementation of Bank interface using REST."""
from google.protobuf.json_format import Parse
from cosmpy.bank.interface import Bank
from cosmpy.common.rest_client import RestClient
from cosmpy.protos.cosmos.bank.v1beta1.query_pb2 import (
QueryAllBalancesRequest,
QueryAllBalancesResponse,
QueryBalanceRequest,
QueryBalanceResponse,
QueryDenomMetadataRequest,
QueryDenomMetadataResponse,
QueryDenomsMetadataRequest,
QueryDenomsMetadataResponse,
QueryParamsRequest,
QueryParamsResponse,
QuerySupplyOfRequest,
QuerySupplyOfResponse,
QueryTotalSupplyRequest,
QueryTotalSupplyResponse,
)
class BankRestClient(Bank):
"""Bank REST client."""
API_URL = "/cosmos/bank/v1beta1"
def __init__(self, rest_api: RestClient):
"""
Create bank rest client
:param rest_api: RestClient api
"""
self._rest_api = rest_api
def Balance(self, request: QueryBalanceRequest) -> QueryBalanceResponse:
"""
Queries balance of selected denomination from specific account
:param request: QueryBalanceRequest with address and denomination
:return: QueryBalanceResponse
"""
response = self._rest_api.get(
f"{self.API_URL}/balances/{request.address}/{request.denom}",
)
return Parse(response, QueryBalanceResponse())
def AllBalances(self, request: QueryAllBalancesRequest) -> QueryAllBalancesResponse:
"""
Queries balance of all denominations from specific account
:param request: QueryAllBalancesRequest with account address
:return: QueryAllBalancesResponse
"""
response = self._rest_api.get(
f"{self.API_URL}/balances/{request.address}", request, ["address"]
)
return Parse(response, QueryAllBalancesResponse())
def TotalSupply(self, request: QueryTotalSupplyRequest) -> QueryTotalSupplyResponse:
"""
Queries total supply of all denominations
:param request: QueryTotalSupplyRequest
:return: QueryTotalSupplyResponse
"""
response = self._rest_api.get(f"{self.API_URL}/supply")
return Parse(response, QueryTotalSupplyResponse())
def SupplyOf(self, request: QuerySupplyOfRequest) -> QuerySupplyOfResponse:
"""
Queries total supply of specific denomination
:param request: QuerySupplyOfRequest with denomination
:return: QuerySupplyOfResponse
"""
response = self._rest_api.get(f"{self.API_URL}/supply/{request.denom}")
return Parse(response, QuerySupplyOfResponse())
def Params(self, request: QueryParamsRequest) -> QueryParamsResponse:
"""
Queries the parameters of bank module
:param request: QueryParamsRequest
:return: QueryParamsResponse
"""
response = self._rest_api.get(f"{self.API_URL}/params")
return Parse(response, QueryParamsResponse())
def DenomMetadata(
self, request: QueryDenomMetadataRequest
) -> QueryDenomMetadataResponse:
"""
        Queries the client metadata of a given coin denomination
:param request: QueryDenomMetadataRequest with denomination
:return: QueryDenomMetadataResponse
"""
response = self._rest_api.get(f"{self.API_URL}/denoms_metadata/{request.denom}")
return Parse(response, QueryDenomMetadataResponse())
def DenomsMetadata(
self, request: QueryDenomsMetadataRequest
) -> QueryDenomsMetadataResponse:
"""
        Queries the client metadata for all registered coin denominations
:param request: QueryDenomsMetadataRequest
:return: QueryDenomsMetadataResponse
"""
response = self._rest_api.get(f"{self.API_URL}/denoms_metadata", request)
return Parse(response, QueryDenomsMetadataResponse())
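# A minimal usage sketch (the REST endpoint URL and account address are
# placeholders; ``RestClient`` is assumed to take the node's REST URL):
#
#   rest_client = RestClient("https://rest-fetchhub.fetch.ai:443")
#   bank = BankRestClient(rest_client)
#   balance = bank.Balance(QueryBalanceRequest(address="fetch1...", denom="afet"))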
|
# Declare and initialize variables
even_count=0
odd_count=0
even_sum=0
odd_sum=0
# Main loop
while True:
Int_input = input("Input an integer (Enter '0' to End Entry) --> ")
Int = int(Int_input)
# check for 0 and exit loop
if Int == 0:
print("Ending Input")
break
# Check for negative numbers and alert
elif Int < 0:
print("No negative integers")
# Find even numbers
elif Int % 2 == 0:
print("That's Even...")
even_sum = even_sum+Int
even_count = even_count+1
# Treat as odd by default
else:
print("That's Odd...")
odd_sum = odd_sum+Int
odd_count = odd_count+1
# Display Results
print("")
print("Results:")
print("Sum of even numbers entered :",even_sum,)
print("Sum of odd numbers entered :",odd_sum)
print("Number of even integers entered :",even_count)
print("Number of odd integers entered :",odd_count)
print("")
if even_sum > odd_sum:
print("Evens Win")
else:
print("Odds Win")
|
import sys
import shutil
import os
from os import path
from subprocess import call as execute
from setuptools.command.build_ext import build_ext
from setuptools import setup, find_packages, Extension
PACKAGE_NAME = "MulticoreTSNE"
VERSION = '0.1'
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
if 0 != os.system('cmake --version'):
sys.exit('\nError: Cannot find cmake. Install cmake, e.g. `pip install cmake`.')
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
SOURCE_DIR = ext.sourcedir
EXT_DIR = path.abspath(path.dirname(self.get_ext_fullpath(ext.name)))
BUILD_TEMP = self.build_temp
shutil.rmtree(BUILD_TEMP, ignore_errors=True)
os.makedirs(BUILD_TEMP)
# Run cmake
if 0 != execute(['cmake',
'-DCMAKE_BUILD_TYPE={}'.format('Debug' if self.debug else 'Release'),
'-DCMAKE_VERBOSE_MAKEFILE={}'.format(int(self.verbose)),
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY='{}'".format(EXT_DIR),
SOURCE_DIR], cwd=BUILD_TEMP):
sys.exit('\nERROR: Cannot generate Makefile. See above errors.')
# Run make
if 0 != execute('cmake --build . -- -j4', shell=True, cwd=BUILD_TEMP):
sys.exit('\nERROR: Cannot find make? See above errors.')
if __name__ == '__main__':
EXT_MODULES = []
if 'test' not in sys.argv:
EXT_MODULES = [CMakeExtension('MulticoreTSNE.MulticoreTSNE',
sourcedir='multicore_tsne')]
setup(
name=PACKAGE_NAME,
version=VERSION,
description='Multicore version of t-SNE algorithm.',
author="Dmitry Ulyanov (based on L. Van der Maaten's code)",
author_email='dmitry.ulyanov.msu@gmail.com',
url='https://github.com/DmitryUlyanov/Multicore-TSNE',
install_requires=[
'numpy',
'cffi'
],
packages=find_packages(),
include_package_data=True,
ext_modules=EXT_MODULES,
cmdclass={'build_ext': CMakeBuild},
extras_require={
'test': [
'scikit-learn',
'scipy',
],
},
test_suite='MulticoreTSNE.tests',
tests_require=['MulticoreTSNE[test]']
)
|
# *---------------------------------------------------------------
# * Copyright (c) 2018
# * Broadcom Corporation
# * All Rights Reserved.
# *---------------------------------------------------------------
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution. Neither the name of the Broadcom nor the names of
# contributors may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author Robert J. McMahon, Broadcom LTD
# Date April 2016
import re
import subprocess
import logging
import asyncio, sys
import time, datetime
import locale
import signal
import weakref
import os
import ssh_nodes
import collections
import math
import scipy
import scipy.spatial
import numpy as np
import tkinter
import matplotlib.pyplot as plt
import concurrent.futures
import functools
import csv
from datetime import datetime as datetime, timezone
from scipy import stats
from scipy.cluster import hierarchy
from scipy.cluster.hierarchy import linkage
from ssh_nodes import *
from math import floor
from collections import namedtuple, defaultdict, Counter
# See pages 10 and 13 of https://docs.google.com/document/d/1a2Vo0AUBMo1utUWYLkErSSqQM9sMf0D9FPfyni_e4Qk/edit#heading=h.9lme8ct208v3
#
# Probe points: Tx Path*
# T8Tx - Frame/Pkt generated timestamp at Application
# DhdT0 - DHD Driver xmits pkt to dongle
# DhdT5 - DHD receives Tx-Completion from FW
# FWT1 - Firmware sees the TxPost work item from host
# FWT2 - Firmware submits the TxPost work item to Mac tx DMA (after header conversion)
# FWT3 - Firmware processes TxStatus from uCode
# FWT4 - Firmware posts Tx-Completion message to host
# Rx Path*
# FWR1 - RxStatus TSF as reported by Ucode (time at which pkt was rxed by the MAC)
# FWR2 - Time at which Rx-Completion is posted to host.
# DRRx - DHD Driver process Rx-Completion and forwards Rxed pkt to Network Stack
# T8Rx - Frame/Packet Rxed by Application
class FlowPoint(object):
def __init__(self):
self.flowid = None
self.seqno = None
self._dhdt0gps = None
self._dhdt5gps = None
self._dhdr3gps = None
self._fwt1tsf = None
self._fwt2tsf = None
self._fwt3tsf = None
self._fwt4tsf = None
self._fwr1tsf = None
self._fwr2tsf = None
self.tsf_txdrift = 0
self.tsf_rxdrift = 0
self.tsfgps_txt0 = None
self.tsfgps_rxt0 = None
self.tsf_rxt0 = None
self.tsf_txt0 = None
# Type 3 below
self._pktfetch = None
self.media_ac_delay = None
self.rxdurtsf = None
self.mac_suspend = None
self._txstatus = None
self.txencnt = None
self.datatxscnt = None
self.oactxscnt = None
self.rtstxcnt = None
self.ctsrxcnt = None
@property
def dhdr3gps(self) :
if self._dhdr3gps :
return self._dhdr3gps
else :
return None
@dhdr3gps.setter
def dhdr3gps(self, value) :
self._dhdr3gps = value * 1000
@property
def dhdt0gps(self) :
if self._dhdt0gps :
return self._dhdt0gps
else :
return None
@dhdt0gps.setter
def dhdt0gps(self, value) :
self._dhdt0gps = value * 1000
@property
def dhdt5gps(self) :
if self._dhdt5gps :
return self._dhdt5gps
else :
return None
@dhdt5gps.setter
def dhdt5gps(self, value) :
self._dhdt5gps = value * 1000
@property
def fwt1tsf(self) :
return self._fwt1tsf
@fwt1tsf.setter
def fwt1tsf(self, value) :
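# Note: firmware TSF values arrive as 32-bit microsecond counters, so they wrap
# (roughly every 71.6 minutes). When a raw value is below the capture-start TSF
# (tsf_txt0), assume a single rollover occurred and unwrap it by adding 2**32 so
# deltas against the start of capture stay positive. The same unwrap is applied
# in the other TSF setters below.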
if value < self.tsf_txt0 :
value += (1<<32)
self._fwt1tsf = value
@property
def fwt2tsf(self) :
return self._fwt2tsf
@fwt2tsf.setter
def fwt2tsf(self, value) :
if value < self.tsf_txt0 :
value += (1<<32)
self._fwt2tsf = value
@property
def fwt3tsf(self) :
return self._fwt3tsf
@fwt3tsf.setter
def fwt3tsf(self, value) :
if value < self.tsf_txt0 :
value += (1<<32)
self._fwt3tsf = value
@property
def fwt4tsf(self) :
return self._fwt4tsf
@fwt4tsf.setter
def fwt4tsf(self, value) :
if value < self.tsf_txt0 :
value += (1<<32)
self._fwt4tsf = value
@property
def fwr1tsf(self) :
return self._fwr1tsf
@fwr1tsf.setter
def fwr1tsf(self, value) :
if value < self.tsf_rxt0 :
value += (1<<32)
self._fwr1tsf = value
@property
def fwr2tsf(self) :
return self._fwr2tsf
@fwr2tsf.setter
def fwr2tsf(self, value) :
if value < self.tsf_rxt0 :
value += (1<<32)
self._fwr2tsf = value
@property
def fwt1gps(self) :
if self.fwt1tsf and self.tsfgps_txt0:
return ((self.tsfgps_txt0 + (self.fwt1tsf / 1000000.0) + (self.tsf_txdrift / 1000000.0)) * 1000)
else :
return None
@property
def fwt2gps(self) :
if self.fwt2tsf and self.tsfgps_txt0:
return ((self.tsfgps_txt0 + (self.fwt2tsf / 1000000.0) + (self.tsf_txdrift / 1000000.0)) * 1000)
else :
return None
@property
def fwt3gps(self) :
if self.fwt3tsf and self.tsfgps_txt0:
return ((self.tsfgps_txt0 + (self.fwt3tsf / 1000000.0) + (self.tsf_txdrift / 1000000.0)) * 1000)
else :
return None
@property
def fwt4gps(self) :
if self.fwt4tsf and self.tsfgps_txt0:
return ((self.tsfgps_txt0 + (self.fwt4tsf / 1000000.0) + (self.tsf_txdrift / 1000000.0)) * 1000)
else :
return None
@property
def fwr1gps(self) :
if self.fwr1tsf and self.tsfgps_txt0:
return ((self.tsfgps_rxt0 + (self.fwr1tsf / 1000000.0) + (self.tsf_rxdrift / 1000000.0)) * 1000)
else :
return None
@property
def fwr2gps(self):
if self.fwr2tsf and self.tsfgps_txt0:
return ((self.tsfgps_rxt0 + (self.fwr2tsf / 1000000.0) + (self.tsf_rxdrift / 1000000.0)) * 1000)
else :
return None
# type 3 below
@property
def pktfetchtsf(self) :
return self._pktfetch
@pktfetchtsf.setter
def pktfetchtsf(self, value) :
if self.tsf_txt0 is not None and (value < self.tsf_txt0) :
value += (1<<32)
self._pktfetch = value
@property
def pktfetchgps(self) :
if self.pktfetchtsf and self.tsfgps_txt0:
return ((self.tsfgps_txt0 + (self.pktfetchtsf / 1000000.0) + (self.tsf_txdrift / 1000000.0)) * 1000)
else :
return None
@property
def txstatustsf(self) :
return self._txstatus
@txstatustsf.setter
def txstatustsf(self, value) :
if self.tsf_txt0 is not None and (value < self.tsf_txt0) :
value += (1<<32)
self._txstatus = value
@property
def txstatusgps(self) :
if self.txstatustsf and self.tsfgps_txt0:
return ((self.tsfgps_txt0 + (self.txstatustsf / 1000000.0) + (self.tsf_txdrift / 1000000.0)) * 1000)
else :
return None
# Delta timings below
@property
def txfw_total_tsf(self):
if self.fwt2tsf and self.fwt1tsf :
return (self.fwt2tsf - self.fwt1tsf)
else :
return None
@property
def host_total(self):
if self.dhdr3gps and self.dhdt0gps :
return (self.dhdr3gps - self.dhdt0gps)
else :
return None
@property
def tx_airwithtxstatus_tsf(self):
if self.fwt3tsf and self.fwt2tsf:
return (self.fwt3tsf - self.fwt2tsf)
else :
return None
@property
def tx_total(self):
if self.fwt2gps and self.dhdt0gps :
return (self.fwt2gps - self.dhdt0gps)
else :
return None
@property
def tx_airwithtxstatus(self):
if self.fwt3gps and self.fwt2gps:
return (self.fwt3gps - self.fwt2gps)
else :
return None
@property
def rxfw_total(self):
if self.fwr2gps and self.fwr1gps :
return (self.fwr2gps - self.fwr1gps)
else :
return None
@property
def rxfw_total_tsf(self):
if self.fwr2tsf and self.fwr1tsf :
return (self.fwr2tsf - self.fwr1tsf)
else :
return None
@property
def rxfwdhd(self):
if self.dhdr3gps and self.fwr2gps:
return(self.dhdr3gps - self.fwr2gps)
else :
return None
@property
def txdhdfw1(self):
if self.fwt1gps and self.dhdt0gps:
return(self.fwt1gps - self.dhdt0gps)
else :
return None
@property
def tx_air(self):
if self.fwr1gps and self.fwt2gps :
return (self.fwr1gps - self.fwt2gps)
else :
return None
@property
def txfw_total(self):
if self.fwt2gps and self.fwt1gps:
return (self.fwt2gps - self.fwt1gps)
else :
return None
# This is the order of tsf timestamps:
# fwt1 < fwt2 < uc_pktfetch < uc_txstatus < fwt3 < fwt4
#
# Type 3 deltas:
# BoffTime = uCMacAccDly - Rx_Duration
# ucdmadly = T2 - ucdma;
# Ucpacketdly = uctxstatus - ucdma;
# drvstsdly = T3 - uctxstatus;
# drvdma2txsdly = T3 - T2
@property
def BoffTime(self) :
if self.media_ac_delay is not None and self.rxdurtsf is not None:
return (self.media_ac_delay - self.rxdurtsf)
else :
return None
@property
def ucdmadly(self) :
if self.fwt2tsf and self.pktfetchtsf:
return (int(self.pktfetchtsf - self.fwt2tsf))
else :
return None
@property
def ucpacketdly(self) :
if self.txstatustsf and self.pktfetchtsf:
return (int(self.txstatustsf - self.pktfetchtsf))
else :
return None
@property
def drvstsdly(self) :
if self.txstatustsf and self.fwt3tsf:
return (int(self.fwt3tsf - self.txstatustsf))
else :
return None
@property
def drvdma2txsdly(self) :
if self.fwt2tsf and self.fwt3tsf:
return (int(self.fwt3tsf - self.fwt2tsf))
else :
return None
#pktlat tx only histograms
@property
def txdhdfw3(self):
if self.fwt3gps and self.dhdt0gps:
return(self.fwt3gps - self.dhdt0gps)
else :
return None
@property
def txdhdfw4(self):
if self.fwt4gps and self.dhdt0gps:
return(self.fwt4gps - self.dhdt0gps)
else :
return None
@property
def fw4fw3(self):
if self.fwt4tsf and self.fwt3tsf:
return((self.fwt4tsf - self.fwt3tsf)/1000.0)
else :
return None
@property
def txcomplete(self):
return (self.dhdt0gps \
and self.dhdt5gps \
and self.fwt1tsf \
and self.fwt2tsf \
and self.fwt3tsf \
and self.fwt4tsf \
and self.tsfgps_txt0)
@property
def complete(self):
return (self.dhdt0gps \
and self.dhdt5gps \
and self.fwt1tsf \
and self.fwt2tsf \
and self.fwt3tsf \
and self.fwt4tsf \
and self.dhdr3gps \
and self.fwr1tsf \
and self.fwr2tsf \
and self.tsfgps_rxt0 \
and self.tsfgps_txt0)
@property
def t3complete(self):
return (self.pktfetchtsf \
and self.txstatustsf \
and (self.media_ac_delay is not None) \
and (self.rxdurtsf is not None) \
and (self.mac_suspend is not None) \
and (self.txencnt is not None) \
and (self.datatxscnt is not None) \
and (self.oactxscnt is not None) \
and (self.rtstxcnt is not None) \
and (self.ctsrxcnt is not None))
@classmethod
def plot(cls, flowpoints=None, directory='.', type=None, title=None, filename=None, keyposition='left', average=None) :
if not flowpoints :
return
# write out the datafiles for the plotting tool, e.g. gnuplot
if filename is None:
filename = flowpoints[0].name + '_' + str(flowpoints[0].flowid)
if title is None:
title=" ".join(filename.split('_'))
if not os.path.exists(directory):
logging.debug('Making results directory {}'.format(directory))
os.makedirs(directory)
logging.info('Writing {} results to directory {}'.format(filename, directory))
basefilename = os.path.join(directory, filename)
datafilename = os.path.join(basefilename + '.data')
with open(datafilename, 'w') as fid :
if average :
fid.write('{} {} {} {} {} {} {} {}\n'.format\
('flowid', 'avg', average['host_total'], average['txdhdfw1'], average['txfw_total'],\
average['tx_air'], average['rxfw_total'], average['rxfwdhd']))
for flowpoint, _, bin in flowpoints :
fid.write('{} {} {} {} {} {} {} {}\n'.format\
(flowpoint.flowid, flowpoint.seqno, flowpoint.host_total, flowpoint.txdhdfw1, flowpoint.txfw_total,\
flowpoint.tx_air, flowpoint.rxfw_total, flowpoint.rxfwdhd))
# write gpc file
gpcfilename = basefilename + '.gpc'
#write out the gnuplot control file
with open(gpcfilename, 'w') as fid :
fid.write('set output \"{}.{}\"\n'.format(basefilename, 'png'))
fid.write('set terminal png size 1920,1080\n')
fid.write('set key {}\n'.format(keyposition))
fid.write('set title \"{}\" noenhanced\n'.format(title))
fid.write('set grid x\n')
fid.write('set style data histograms\n')
fid.write('set style histogram rowstacked\n')
fid.write('set boxwidth 0.4\n')
fid.write('set style fill solid\n')
fid.write('set xtics rotate\n')
fid.write('set yrange [0:]\n')
fid.write('plot \"{0}\" using 4:xtic(2) title \"DHDFW1\", "" using 5 title \"TXFW\", "" using 6 title \"Mac2Mac\", "" using 7 title \"RXFW\", "" using 8 title \"RXDHD"\n'.format(datafilename))
try:
gnuplotcmd = ['/usr/bin/gnuplot', gpcfilename]
logging.info('Gnuplot {}'.format(gnuplotcmd))
subprocess.run(gnuplotcmd)
except:
pass
def __str__(self) :
if self.complete and self.t3complete :
return('FLOWID={} SEQNO={} DHDT0={} DHDT5={} DHDR3={} FWT1={}/0x{:08x} FWT2={}/0x{:08x} FWT3={}/0x{:08x} FWT4={}/0x{:08x} FWR1={}/0x{:08x} FWR2={}/0x{:08x} TXT0={}/0x{:08x} RXT0={}/0x{:08x} TXdrift={} RXdrift={} pktfetch={}'\
.format(self.flowid, self.seqno, self.dhdt0gps, self.dhdt5gps, self.dhdr3gps, self.fwt1gps, self.fwt1tsf, \
self.fwt2gps, self.fwt2tsf, self.fwt3gps, self.fwt3tsf, self.fwt4gps, self.fwt4tsf, self.fwr1gps, self.fwr1tsf, \
self.fwr2gps, self.fwr2tsf, self.tsfgps_txt0, self.tsf_txt0, self.tsfgps_rxt0, self.tsf_rxt0, self.tsf_txdrift, \
self.tsf_rxdrift, self.pktfetchgps))
elif self.complete :
return('FLOWID={} SEQNO={} DHDT0={} DHDT5={} DHDR3={} FWT1={}/0x{:08x} FWT2={}/0x{:08x} FWT3={}/0x{:08x} FWT4={}/0x{:08x} FWR1={}/0x{:08x} FWR2={}/0x{:08x} TXT0={}/0x{:08x} RXT0={}/0x{:08x} TXdrift={} RXdrift={}'\
.format(self.flowid, self.seqno, self.dhdt0gps, self.dhdt5gps, self.dhdr3gps, self.fwt1gps, self.fwt1tsf, \
self.fwt2gps, self.fwt2tsf, self.fwt3gps, self.fwt3tsf, self.fwt4gps, self.fwt4tsf, self.fwr1gps, self.fwr1tsf, \
self.fwr2gps, self.fwr2tsf, self.tsfgps_txt0, self.tsf_txt0, self.tsfgps_rxt0, self.tsf_rxt0, self.tsf_txdrift, self.tsf_rxdrift))
elif self.txcomplete :
return('FLOWID={} SEQNO={} DHDT0={} DHDT5={} FWT1={}/0x{:08x} FWT2={}/0x{:08x} FWT3={}/0x{:08x} FWT4={}/0x{:08x} TXT0={}/0x{:08x} TXdrift={}'\
.format(self.flowid, self.seqno, self.dhdt0gps, self.dhdt5gps, self.fwt1gps, self.fwt1tsf, \
self.fwt2gps, self.fwt2tsf, self.fwt3gps, self.fwt3tsf, self.fwt4gps, self.fwt4tsf, \
self.tsfgps_txt0, self.tsf_txt0, self.tsf_txdrift))
else:
return ('Not complete')
def log_basics(self, bin=None) :
if self.complete :
logging.info('{}'.format(self))
logging.info('FLOWID={0} SEQNO={1} HostTot={2:.3f}: FWT2-DHDT0={3:.3f} FWT2-FWT1={4:.3f}/{5} FWT3-FWT2={6:.3f}/{7} FWR2-FWR1={8:.3f}/{9} bin={10}'.format\
(self.flowid, self.seqno, self.host_total, self.tx_total, self.txfw_total, self.txfw_total_tsf, \
self.tx_airwithtxstatus, self.tx_airwithtxstatus_tsf, self.rxfw_total, self.rxfw_total_tsf, bin))
else :
logging.info('FLOWID={} SEQNO={} Not complete'.format(self.flowid, self.seqno))
#
# For your stacked plot, the whole tx timeline could be represented by these timestamps in order:
#
# T8Tx
# DHDT0
# FWT1
# FWT2 (FWT3-FWT2 has interupt latency vs pure AIR)
# ------------- (Air)
# FWR1
# FWR2
# DHDR3
# T8Rx
#
# Note, FWT3 and FWT4 do not figure into the Tx timeline. They are on the status reporting feedback of the tx path, and could overlap with Rx.
#
def packet_timeline(self) :
if self.complete :
logging.info('FLOWID={0} SEQNO={1} HostTot={2:.3f}: DHD2FWT1={3:.3f} FWTX={4:.3f}/{5} AIR={6:.3f} FWRX={7:.3f}/{8} FWR2DHD={9:.3f}'.format\
(self.flowid, self.seqno, self.host_total, self.txdhdfw1, self.txfw_total, self.txfw_total_tsf,\
self.tx_air, self.rxfw_total, self.rxfw_total_tsf, self.rxfwdhd))
if self.host_total < 0 :
logging.error('Negative host_total: {}'.format(self))
else :
logging.info('FLOWID={} SEQNO={} Not complete'.format(self.flowid, self.seqno))
class FlowPointHistogram(object):
instances = weakref.WeakSet()
def __init__(self, name=None, flowid=None, flowname = None, binwidth=1e-5, title=None) :
FlowPointHistogram.instances.add(self)
self.name = name
self.flowname = flowname
self.flowid = flowid
self.binwidth = binwidth
self.bins = defaultdict(list)
self.population = 0
self.population_min = None
self.sum = 0.0
self.ks_index = None
self.createtime = datetime.now(timezone.utc).astimezone()
self.title=title
self.basefilename = None
self.starttime = None
self.endtime = None
self.ci3std_low = None
self.ci3std_high = None
self.ci_highest = None
self.offeredload = None
self.flowname = None
self.mytitle = None
self.testtitle = None
self.run_number = None
self.units = None
def __str__(self) :
return('{} {}'.format(self.population, self.sum))
@property
def average(self):
return (self.sum / self.population)
# units for insert are milliseconds, bins are 10 us wide, insert is the actual flowpoint object
def insert(self, flowpoint=None, value=None, name=None, flowname = None, statname = None, units = 'ms', population_min = 0):
assert flowpoint is not None, 'insert of None for flowpoint not allowed'
assert value is not None, 'insert of None for flowpoint value not allowed'
assert statname is not None, 'insert of None for flowpoint stat not allowed'
self.statname = statname
if self.flowid is None :
self.flowid = flowpoint.flowid
if self.name is None :
self.name = name
if self.population_min is None :
self.population_min = population_min
elif name and (self.name != name) :
logging.error('Invalid insert per name mismatch {} and {}'.format(self.name, name))
if self.flowname is None :
self.flowname = flowname
elif flowname and (self.flowname != flowname) :
logging.error('Invalid insert per flowname mismatch {} and {}'.format(self.flowname, flowname))
if self.units is None :
self.units = units
elif self.units != units :
logging.error('Invalid insert per unit mismatch {} and {}'.format(self.units, units))
if self.flowid != flowpoint.flowid :
logging.error('Invalid insert per flowid mismatch {} and {}'.format(self.flowid, flowpoint.flowid))
else :
self.sum += value
self.population += 1
if self.units == 'ms' :
bin_no = floor(value * 1e2)
else :
bin_no = floor(value / 10)
self.bins[bin_no].append((flowpoint,value, bin_no))
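# Illustrative mapping (example values, not from the original): with units='ms'
# a latency of 1.234 ms gives bin_no = floor(1.234 * 1e2) = 123, i.e. the
# 10 us wide [1.23, 1.24) ms bin; with units='us' a value of 57 gives
# bin_no = floor(57 / 10) = 5.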
def ci2bin(self, ci=0.997, log=False):
assert (self.population > 0), "ci2bin histogram {}({}) plot called but no samples}".format(self.name, self.flowid)
assert (self.population >= self.population_min), "ci2bkn histogram {}({}) plot called with too few samples{}/{}".format(self.name, self.flowid, self.population_min, self.population)
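# Walk the bins in ascending order and return the last bin index whose
# cumulative sample fraction is still below `ci` (an approximate ci-quantile
# expressed as a bin index); callers scale it by the 10 us bin width to get time.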
runningsum = 0;
result = 0
for binkey in sorted(self.bins.keys()) :
flowpoints=self.bins[binkey]
runningsum += len(flowpoints)
if (runningsum / float(self.population)) < ci :
result = binkey
pass
else :
break
if result and log :
logging.info('(***Packets below***) STAT={} CI={}'.format(self.name,ci))
for flowpoint, _, _ in self.bins[result] :
flowpoint.log_basics()
logging.info('(***Packets done***)')
return result
def topn(self, count=50, log=False, which='worst'):
assert (self.population > 0), "topn histogram {}({}) plot called but no samples}".format(self.name, self.flowid)
assert (self.population >= self.population_min), "topn histogram {}({}) plot called with too few samples{}/{}".format(self.name, self.flowid, self.population_min, self.population)
if which == 'worst' :
reverseflag = True
txttag = 'WORST'
else :
reverseflag = False
txttag = 'BEST'
binkeys = sorted(self.bins.keys(), reverse=reverseflag)
ix = 0
topnpackets = []
while (len(topnpackets) < count) :
try :
thisbin = sorted(self.bins[binkeys[ix]], key=lambda x : float(x[1]), reverse=reverseflag)
topnpackets.extend(thisbin)
ix += 1
except :
break
if len(topnpackets) > count :
topnpackets = topnpackets[:count]
if False :
logging.info('(*** STAT={} -- {}({}) -- Packets below ***)'.format(self.name, txttag, count))
for flowpoint, _, bin in topnpackets :
flowpoint.log_basics(bin=bin)
return topnpackets
def plot(self, directory='.', filename=None, title=None) :
assert (self.population > 0), "Histogram {}({}) plot called but no samples}".format(self.name, self.flowid)
assert (self.population >= self.population_min), "Histogram {}({}) plot called with too few samples{}/{}".format(self.name, self.flowid, self.population_min, self.population)
# write out the datafiles for the plotting tool, e.g. gnuplot
if filename is None:
filename = self.name + '_' + str(self.flowid)
if not os.path.exists(directory):
logging.debug('Making results directory {}'.format(directory))
os.makedirs(directory)
logging.info('Writing {} results to directory {}'.format(filename, directory))
basefilename = os.path.join(directory, filename)
datafilename = os.path.join(basefilename + '.data')
xmax = 0
with open(datafilename, 'w') as fid :
runningsum = 0;
result = 0
for bin in sorted(self.bins.keys()) :
flowpoints=self.bins[bin]
runningsum += len(flowpoints)
perc = (runningsum / float(self.population))
#logging.debug('bin={} x={} y={}'.format(bin, len(flowpoints), perc))
if self.units == 'ms' :
value = (bin*10)/1000.0
else :
value = (bin*10)
fid.write('{} {} {}\n'.format(value, len(flowpoints), perc))
if bin > xmax :
xmax = bin
runtime = round((self.endtime - self.starttime).total_seconds())
if title is None :
self.mytitle = '{} {}\\n({} samples)(3std, 99.99={}/{}, {} us) runtime={}s'.format(self.name, self.testtitle, self.population, self.ci3std_low, self.ci3std_high, self.ci_highest, runtime)
else :
self.mytitle = title
print('mytitle={} run={} dir={}'.format(self.mytitle, self.run_number, directory))
# write gpc file
gpcfilename = basefilename + '.gpc'
#write out the gnuplot control file
with open(gpcfilename, 'w') as fid :
fid.write('set output \"{}.{}\"\n'.format(basefilename, 'png'))
fid.write('set terminal png size 1024,768\n')
fid.write('set key bottom\n')
fid.write('set title \"{}\" noenhanced\n'.format(self.mytitle))
if self.units == 'ms' :
if xmax < 50 :
fid.write('set format x \"%.2f"\n')
else :
fid.write('set format x \"%.1f"\n')
else :
fid.write('set format x \"%.0f"\n')
fid.write('set format y \"%.1f"\n')
fid.write('set xrange [0:*]\n')
fid.write('set yrange [0:1.01]\n')
fid.write('set y2range [0:*]\n')
fid.write('set ytics add 0.1\n')
fid.write('set y2tics nomirror\n')
fid.write('set grid\n')
fid.write('set xlabel \"time ({})\\n{} - {}\"\n'.format(self.units, self.starttime, self.endtime))
fid.write('plot \"{0}\" using 1:2 index 0 axes x1y2 with impulses linetype 3 notitle, \"{0}\" using 1:3 index 0 axes x1y1 with lines linetype 1 linewidth 2 notitle\n'.format(datafilename))
try:
gnuplotcmd = ['/usr/bin/gnuplot', gpcfilename]
logging.info('Gnuplot {}'.format(gnuplotcmd))
subprocess.run(gnuplotcmd)
except:
pass
class netlink_pktts(object):
instances = weakref.WeakSet()
flowpoints = defaultdict(lambda: defaultdict(FlowPoint))
flowpoint_histograms = defaultdict(lambda: defaultdict(FlowPointHistogram))
flowpoint_filter = set()
@classmethod
def get_instances(cls):
return list(netlink_pktts.instances)
# dhd -i eth1 pktts_flow --help
# required args: <srcip> <destip> <sport> <dport> <proto> <ip_prec> <pkt_offset>
#
# pktts_flow
# set/get pktts flow configuration
#
# Examples:
#
# $ dhd -i eth1 pktts_flow 192.168.1.1 192.168.1.5 1 5 17 3 10
#
# $ dhd -i eth1 pktts_flow
# [0]. ip:101a8c0:501a8c0, port:01:05, proto:11, prec:3, offset:000a, chksum:5000513
@classmethod
def flowpoint_filter_add(cls, flowhash):
netlink_pktts.flowpoint_filter.add(flowhash)
print('Add flowhash {}'.format(flowhash))
# logging.info('Flowhash {} added to netlink filter as permit'.format(flowhash))
@classmethod
def flowpoint_filter_del(cls, flowhash):
netlink_pktts.flowpoint_filter.discard(flowhash)
@classmethod
def commence(cls, time=None, netlinks='all', start_dhd_pipe=True, dhd_pktts_enab=True, start_servo=True) :
loop = asyncio.get_running_loop()
if netlinks == 'all' :
mynetlinks = netlink_pktts.get_instances()
else :
mynetlinks = netlinks
if not mynetlinks :
logging.warning('netlink_pktts commence method called with none instantiated')
return
logging.info('netlink commence invoked on {} devices'.format(len(mynetlinks)))
for netlink in mynetlinks :
netlink.mynode.rexec(cmd='pkill dhd')
netlink.mynode.rexec(cmd='pkill tsfservo')
netlink.mynode.wl(cmd='mpc 0')
netlink.mynode.rexec(cmd='cat /proc/net/netlink')
ssh_node.run_all_commands()
if dhd_pktts_enab :
for netlink in mynetlinks :
netlink.mynode.dhd(cmd='pktts_enab 1')
ssh_node.run_all_commands()
if start_servo :
tasks = [asyncio.ensure_future(netlink.servo_start(), loop=loop) for netlink in mynetlinks]
if tasks :
try :
logging.info('netlink servo starting')
loop.run_until_complete(asyncio.wait(tasks, timeout=10))
except :
for task in tasks :
if task.exception() :
logging.error('netlink servo start exception')
raise
if start_dhd_pipe :
tasks = [asyncio.ensure_future(netlink.start(), loop=loop) for netlink in mynetlinks]
if tasks :
try :
logging.info('netlink dhd servo starting')
loop.run_until_complete(asyncio.wait(tasks, timeout=10))
except :
for task in tasks :
if task.exception() :
logging.error('netlink dhd servo start timeout')
task.cancel()
raise
for netlink in mynetlinks :
netlink.mynode.rexec(cmd='cat /proc/net/netlink')
ssh_node.run_all_commands()
@classmethod
def cease(cls, time=None, netlinks='all') :
loop = asyncio.get_running_loop()
if netlinks == 'all' :
mynetlinks = netlink_pktts.get_instances()
else :
mynetlinks = netlinks
if not mynetlinks :
logging.warning('netlink_pktts stop method called with none instantiated')
return
logging.info('netlink stop invoked')
for netlink in mynetlinks :
netlink.mynode.rexec(cmd='pkill tsfservo')
netlink.mynode.rexec(cmd='pkill dhd')
netlink.mynode.wl(cmd='mpc 0')
ssh_node.run_all_commands()
loop.run_until_complete(asyncio.sleep(1))
@classmethod
def disable(cls, netlinks='all') :
if netlinks == 'all' :
mynetlinks = netlink_pktts.get_instances()
else :
mynetlinks = netlinks
for netlink in mynetlinks :
netlink.sampling.clear()
@classmethod
def enable(cls, netlinks='all') :
if netlinks == 'all' :
mynetlinks = netlink_pktts.get_instances()
else :
mynetlinks = netlinks
for netlink in mynetlinks :
netlink.sampling.set()
@classmethod
async def zipfile(cls, filename=None, zipcmd='gzip', loop=None) :
if filename and loop:
logging.info('compress file {} using {}'.format(filename, zipcmd))
childprocess = await asyncio.create_subprocess_exec(zipcmd, filename, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, loop=loop)
stdout, stderr = await childprocess.communicate()
if stderr:
logging.error('zip failed {}'.format(stderr))
@classmethod
def DumpFlowpoints(cls, directory='./', flowtable=None):
logging.info('Netlink dump flowpoints: flow count={} with flow IDs={}'.format(len(netlink_pktts.flowpoints), netlink_pktts.flowpoints.keys()))
if not os.path.exists(directory):
logging.debug('Making results directory {}'.format(directory))
os.makedirs(directory)
print('Writing {} candidate flowpoints to {}'.format(len(netlink_pktts.flowpoints), directory))
ix = 0;
for flowid, seqnos in list(netlink_pktts.flowpoints.items()) :
if flowtable :
try :
flowname = flowtable[flowid]
except KeyError :
flowname = 'unk_{}'.format(flowid)
else :
flowname = 'flowid_{}'.format(flowid)
failed_writes = []
csvfilename = os.path.join(directory, '{}.csv'.format(flowname))
completecnt = 0;
t3completecnt = 0;
with open(csvfilename, 'w', newline='') as f:
logging.info('Dumping flowpoints to file {} using csv'.format(csvfilename))
writer = csv.writer(f)
writer.writerow(['seq_no', 'flowhash', 'host_total', 'tx_total', 'txfw_total', 'rxfw_total', 'tx_airwithtxstatus', 'tx_air', 'txdhdfw1', 'rxfwdhd', 'dhdt0gps', 'dhdt5gps', 'dhdr3gps', 'fwt1gps', 'fwt2gps', 'fwt3gps', 'fwt4gps', 'fwr1gps', 'fwr2gps', 'fwt1tsf', 'fwt2tsf', 'fwt3tsf', 'fwt4tsf', 'fwr1tsf', 'fwr2tsf', 'tsf_txdrift', 'tsf_rxdrift', 'tsfgps_txt0', 'tsf_txt0', 'tsfgps_rxt0', 'tsf_rxt0', 'pktfetchtsf', 'pktfetchgps', 'media_ac_delay', 'rxdurtsf', 'mac_suspend', 'txstatustsf', 'txstatusgps', 'txencnt', 'datatxscnt', 'oactxscnt', 'rtstxcnt', 'ctsrxcnt', 'BoffTime','ucdmadly','ucpacketdly', 'drvstsdly', 'drvdma2txsdly'])
pkts = seqnos.items()
for seqno, flowpoint in pkts :
if flowpoint.complete :
completecnt += 1
if flowpoint.t3complete :
t3completecnt += 1
try :
writer.writerow([flowpoint.seqno, flowid, flowpoint.host_total, flowpoint.tx_total, flowpoint.txfw_total, flowpoint.rxfw_total, flowpoint.tx_airwithtxstatus, flowpoint.tx_air, flowpoint.txdhdfw1, flowpoint.rxfwdhd, flowpoint.dhdt0gps, flowpoint.dhdt5gps, flowpoint.dhdr3gps, flowpoint.fwt1gps, flowpoint.fwt2gps, flowpoint.fwt3gps, flowpoint.fwt4gps, flowpoint.fwr1gps, flowpoint.fwr2gps, flowpoint.fwt1tsf, flowpoint.fwt2tsf, flowpoint.fwt3tsf, flowpoint.fwt4tsf, flowpoint.fwr1tsf, flowpoint.fwr2tsf, flowpoint.tsf_txdrift, flowpoint.tsf_rxdrift, flowpoint.tsfgps_txt0, flowpoint.tsf_txt0, flowpoint.tsfgps_rxt0, flowpoint.tsf_rxt0, flowpoint.pktfetchtsf, flowpoint.pktfetchgps, flowpoint.media_ac_delay, flowpoint.rxdurtsf, flowpoint.mac_suspend, flowpoint.txstatustsf, flowpoint.txstatusgps, flowpoint.txencnt, flowpoint.datatxscnt, flowpoint.oactxscnt, flowpoint.rtstxcnt, flowpoint.ctsrxcnt, flowpoint.BoffTime,flowpoint.ucdmadly,flowpoint.ucpacketdly, flowpoint.drvstsdly, flowpoint.drvdma2txsdly])
except :
failed_writes.append(flowpoint.seqno)
if failed_writes :
logging.warning('Write row failed for flow/flowpoints={}/{}'.format(flowid,failed_writes))
logging.info('Flowpoints: Total={} Completed={} T3completed={}'.format(len(pkts), completecnt, t3completecnt))
if os.path.isfile(csvfilename) :
loop = asyncio.get_running_loop()
tasks = [asyncio.ensure_future(netlink_pktts.zipfile(filename=csvfilename, loop=loop))]
try :
loop.run_until_complete(asyncio.wait(tasks, timeout=60))
except asyncio.TimeoutError:
logging.error('compress timeout')
raise
print('Wrote {}/{} completed flowpoints to {}'.format(completecnt, t3completecnt,csvfilename))
@classmethod
def CreateHistograms(cls, directory='./', run_number=None, starttime=None, endtime=None, testtitle=None, flowtable=None, population_min=0):
for flowid, seqnos in list(netlink_pktts.flowpoints.items()) :
if flowtable :
try :
flowname = flowtable[flowid]
except KeyError :
flowname = 'unk_{}'.format(flowid)
else :
flowname = 'flowid_{}'.format(flowid)
pkts = seqnos.items()
pktscnt = len(pkts)
logging.info('Netlink CreateHistogram for flowname/flowid={}/{} samples={} min={}'.format(flowname, flowid, len(pkts), population_min))
#Make sure there are enough overall samples
if pktscnt > population_min :
complete_count = 0
t3_complete_count = 0
txcomplete_count = 0
for seqno, flowpoint in pkts :
# logging.info('DHDT0={} DHDT5={} FWT1={} FWT2={} FWT3={} FWT4={} GPS={}'.flowpoint.dhdt0gps, flowpoint.dhdt5gps, flowpoint.fwt1tsf, flowpoint.fwt2tsf, flowpoint.fwt3tsf, flowpoint.fwt4tsf, flowpoint.tsfgps_txt0)
if flowpoint.txcomplete :
txcomplete_count += 1
fphisto=netlink_pktts.flowpoint_histograms[flowid]['tx_host_total_wair']
fphisto.insert(flowpoint=flowpoint, value=flowpoint.txdhdfw3, name='(DHDT0-FWT3)_' + flowname, statname='DHDT0-FWT3', population_min = population_min)
fphisto=netlink_pktts.flowpoint_histograms[flowid]['tx_host_total_wdoorbell']
fphisto.insert(flowpoint=flowpoint, value=flowpoint.txdhdfw4, name='(DHDT0-FWT4)_' + flowname, statname='DHDT0-FWT4', population_min = population_min)
fphisto=netlink_pktts.flowpoint_histograms[flowid]['tx_doorbell']
fphisto.insert(flowpoint=flowpoint, value=flowpoint.fw4fw3, name='(FWT4-FWT3)_' + flowname, statname='FWT4-FWT3', population_min = population_min)
if flowpoint.complete :
complete_count += 1
fphisto=netlink_pktts.flowpoint_histograms[flowid]['host_total']
fphisto.insert(flowpoint=flowpoint, value=flowpoint.host_total, name='host_total_(DHDR3-DHDT0)_' + flowname, statname='DHDR3-DHDT0', population_min = population_min)
fphisto=netlink_pktts.flowpoint_histograms[flowid]['tx_total']
fphisto.insert(flowpoint=flowpoint, value=flowpoint.tx_total, name='tx_total_(FWT2-DHDT0)_' + flowname, statname='FWT2-DHDT0', population_min = population_min)
fphisto=netlink_pktts.flowpoint_histograms[flowid]['txfw_total']
fphisto.insert(flowpoint=flowpoint, value=flowpoint.txfw_total, name='txfw_total_(FWT2-FWT1)_' + flowname, statname='FWT2-FWT1', population_min = population_min)
fphisto=netlink_pktts.flowpoint_histograms[flowid]['rxfw_total']
fphisto.insert(flowpoint=flowpoint, value=flowpoint.rxfw_total, name='rxfw_total_(FWR2-FWR1)_'+ flowname, statname='FWR2-FWR1', population_min = population_min)
fphisto=netlink_pktts.flowpoint_histograms[flowid]['tx_airwithtxstatus']
fphisto.insert(flowpoint=flowpoint, value=flowpoint.tx_airwithtxstatus, name='tx_airwithtxstatus_(FWT3-FWT2)_' + flowname, statname='FWT3-FWT2', population_min = population_min)
fphisto=netlink_pktts.flowpoint_histograms[flowid]['tx_air']
fphisto.insert(flowpoint=flowpoint, value=flowpoint.tx_air, name='tx_air_(FWT2-FWR1)_' + flowname, statname='FWT2-FWR1', population_min = population_min)
fphisto=netlink_pktts.flowpoint_histograms[flowid]['txdhdfw1']
fphisto.insert(flowpoint=flowpoint, value=flowpoint.txdhdfw1, name='txdhdfw1_(FWT1-DHDT0)_' + flowname, statname='FWT1-DHDT0', population_min = population_min)
fphisto=netlink_pktts.flowpoint_histograms[flowid]['rxfwdhd']
fphisto.insert(flowpoint=flowpoint, value=flowpoint.rxfwdhd, name='rxfwdhd_(DHDR3-FWR2)_' + flowname, statname='DHDR3-FWR2', population_min = population_min)
if flowpoint.t3complete :
t3_complete_count += 1
fphisto=netlink_pktts.flowpoint_histograms[flowid]['BoffTime']
fphisto.insert(flowpoint=flowpoint, value=flowpoint.BoffTime, name='BoffTime_' + flowname, statname='BoffTime', units='us', population_min = population_min)
fphisto=netlink_pktts.flowpoint_histograms[flowid]['ucdmadly']
fphisto.insert(flowpoint=flowpoint, value=flowpoint.ucdmadly, name='ucdmadly_' + flowname, statname='ucdmadly', units='us', population_min = population_min)
fphisto=netlink_pktts.flowpoint_histograms[flowid]['ucpacketdly']
fphisto.insert(flowpoint=flowpoint, value=flowpoint.ucpacketdly, name='ucpacketdly_' + flowname, statname='ucpacketdly', units='us', population_min = population_min)
fphisto=netlink_pktts.flowpoint_histograms[flowid]['drvstsdly']
fphisto.insert(flowpoint=flowpoint, value=flowpoint.drvstsdly, name='drvstsdly_' + flowname, statname='drvstsdly', units='us', population_min = population_min)
fphisto=netlink_pktts.flowpoint_histograms[flowid]['drvdma2txsdly']
fphisto.insert(flowpoint=flowpoint, value=flowpoint.drvdma2txsdly, name='drvdma2txsdly_' + flowname, statname='drvdma2txsdly', units='us', population_min = population_min)
logging.info('Netlink histograms done for flowname/flowid={}/{} complete/t3complete/txcomplete/samples={}/{}/{}/{}'.format(flowname, flowid, complete_count, t3_complete_count, txcomplete_count, pktscnt))
# filter out uninteresting stats, i.e. stats without sufficient samples
for flowid, stats in list(netlink_pktts.flowpoint_histograms.items()) :
for stat, fp_histo in list(stats.items()):
if netlink_pktts.flowpoint_histograms[flowid][stat].population < population_min :
print('Filtered flowid {} stat of {} population min/actual={}/{}'.format(flowid, stat, population_min, netlink_pktts.flowpoint_histograms[flowid][stat].population))
logging.info('Filtered flowid {} stat of {} population min/actual={}/{}'.format(flowid, stat, population_min, netlink_pktts.flowpoint_histograms[flowid][stat].population))
del netlink_pktts.flowpoint_histograms[flowid][stat]
logging.info("Plot of {} flows".format(len(netlink_pktts.flowpoint_histograms.items())))
for flowid, stats in list(netlink_pktts.flowpoint_histograms.items()) :
if flowtable :
try :
flowname = flowtable[flowid]
except KeyError :
flowname = 'unk_{}'.format(flowid)
else :
flowname = 'flowid_{}'.format(flowid)
# Produce the plots
avg = {}
for stat, fp_histo in list(stats.items()):
avg[stat] = netlink_pktts.flowpoint_histograms[flowid][stat].average
logging.info('Flowid {} histo avg for stat {} = {}'.format(flowid, stat, avg[stat]))
statdirectory = os.path.join(directory, stat)
histo = netlink_pktts.flowpoint_histograms[flowid][stat]
histo.starttime = starttime
histo.endtime = endtime
logging.info('Flowid {} plot stat {} population={} bins={}'.format(flowid, stat, histo.population, len(histo.bins.keys())))
# RJM, fix below - don't use constant of 10
histo.ci3std_low=histo.ci2bin(ci=0.003, log=False) * 10
histo.ci3std_high=histo.ci2bin(ci=0.997, log=False) * 10
histo.ci_highest=histo.ci2bin(ci=0.9999, log=False) * 10
histo.title = stat
histo.testtitle = testtitle
histo.run_number = run_number
worst=histo.topn(which='worst')
best=histo.topn(which='best')
# FIX ME: Use an object, out to json
print('Pyflows test results: flowid={} flowname={} statname={} value={} run={} files={}'.format(flowid, flowname, histo.statname, histo.ci3std_high, histo.run_number, directory))
# FIX ME: have an option not to plot
try :
histo.plot(directory=statdirectory)
except :
pass
try :
topdirectory = os.path.join(statdirectory, 'bottom50')
FlowPoint.plot(flowpoints=worst, type='timeline', title='{} Bottom50'.format(histo.mytitle), directory=topdirectory, filename='{}_{}_{}_bottom'.format(stat, flowname, flowid), keyposition='right', average=avg)
except :
pass
try :
topdirectory = os.path.join(statdirectory, 'top50')
FlowPoint.plot(flowpoints=best, type='timeline', title='{} Top50'.format(histo.mytitle), directory=topdirectory, filename='{}_{}_{}_top'.format(stat, flowname, flowid), keyposition='right', average=avg)
except :
pass
@classmethod
def CreateHistogramsThreaded(cls, directory='./', starttime=None, endtime=None, testtitle=None, flowtable=None) :
# don't allow inserts while the thread is running
netlink_pktts.disable()
try :
event_loop = asyncio.get_running_loop()
logging.debug('CreateHistogramsThreaded start')
keywordfunc = functools.partial(netlink_pktts.CreateHistograms, directory=directory, starttime=starttime, endtime=endtime, testtitle=testtitle, flowtable=flowtable)
coro = event_loop.run_in_executor(None, keywordfunc)
event_loop.run_until_complete(coro)
finally :
netlink_pktts.enable()
logging.debug('CreateHistogramsThreaded done')
@classmethod
def LogAllFlowpoints(cls):
for flowid, seqnos in list(netlink_pktts.flowpoints.items()) :
for seqno, flowpoint in list(seqnos.items()):
flowpoint.log_basics()
flowpoint.packet_timeline()
@classmethod
def ResetStats(cls):
logging.info('netlink_pktts reset stats')
netlink_pktts.flowpoints.clear()
netlink_pktts.flowpoint_histograms.clear()
# Asyncio protocol for subprocess transport to decode netlink pktts messages
#
# See pages 10 and 13 of https://docs.google.com/document/d/1a2Vo0AUBMo1utUWYLkErSSqQM9sMf0D9FPfyni_e4Qk/edit#heading=h.9lme8ct208v3
#
# Probe points: Tx Path*
# T8Tx - Frame/Pkt generated timestamp at Application
# DhdT0 - DHD Driver xmits pkt to dongle
# DhdT5 - DHD receives Tx-Completion from FW
# FWT1 - Firmware sees the TxPost work item from host
# FWT2 - Firmware submits the TxPost work item to Mac tx DMA (after header conversion)
# FWT3 - Firmware processes TxStatus from uCode
# FWT4 - Firmware posts Tx-Completion message to host
# Rx Path*
# FWR1 - RxStatus TSF as reported by Ucode (time at which pkt was rxed by the MAC)
# FWR2 - Time at which Rx-Completion is posted to host.
# DRRx - DHD Driver process Rx-Completion and forwards Rxed pkt to Network Stack
# T8Rx - Frame/Packet Rxed by Application
class NetlinkPkttsProtocol(asyncio.SubprocessProtocol):
def __init__(self, session):
self.loop = asyncio.get_running_loop()
self.exited = False
self.closed_stdout = False
self.closed_stderr = False
self.stdoutbuffer = ""
self.stderrbuffer = ""
self._session = session
self.debug = session.debug
self.silent = session.silent
self.txmatch_output = True
self.io_watch_timer = None
self.io_watch_default = 2 # units seconds
#type:1, flowid:0x4cee02be, prec:0, xbytes:0x000000000008d56b5b999b8000000000, :::1536793472578955:1536793472579721:::0x000ae906:0x000ae93b:0x000ae965:0x000ae98c
#type:2, flowid:0x4cee02be, prec:0, xbytes:0x000000000008d56b5b999b8000000000, :::1536793472579709:::0x00a0a8a4:0x00a0a8d5
self.netlink_type_flowid_parse = re.compile(r'type:(?P<type>[\d]),\s+flowid:(?P<flowid>0x[0-9a-f]{8}),\sprec');
self.netlink_type1_parse = re.compile(r'type:1,\s+flowid:(?P<flowid>0x[0-9a-f]{8}),\sprec:[0-7],\sxbytes:(?P<payload>0x[0-9a-f]{32}),\s:::(?P<dhdt0>[0-9]{16}):(?P<dhdt5>[0-9]{16}):::(?P<fwt1>0x[0-9a-f]{8}):(?P<fwt2>0x[0-9a-f]{8}):(?P<fwt3>0x[0-9a-f]{8}):(?P<fwt4>0x[0-9a-f]{8})');
self.netlink_type2_parse = re.compile(r'type:2,\s+flowid:(?P<flowid>0x[0-9a-f]{8}),\sprec:[0-7],\sxbytes:(?P<payload>0x[0-9a-f]{32}),\s:::(?P<dhdr3>[0-9]{16}):::(?P<fwr1>0x[0-9a-f]{8}):(?P<fwr2>0x[0-9a-f]{8})');
# type:3, flowid:0x73177206, prec:6, xbytes:0x0000000000022f795eb078120000000f, 0671ba16:00000000:00000000:00000000:0671bab6:::0001:0000:0001:0000:0000:beea:beeb:beec
self.netlink_type3_parse = re.compile(r'type:3,\s+flowid:(?P<flowid>0x[0-9a-f]{8}),\sprec:[0-7],\sxbytes:(?P<payload>0x[0-9a-f]{32}),\s(?P<pktfetch>[0-9a-f]{8}):(?P<medacdly>[0-9a-f]{8}):(?P<rxdur>[0-9a-f]{8}):(?P<macsusdur>[0-9a-f]{8}):(?P<txstatusts>[0-9a-f]{8}):::(?P<txencnt>[0-9]{4}):(?P<oactxscnt>[0-9]{4}):(?P<datatxscnt>[0-9]{4}):(?P<rtstxcnt>[0-9]{4}):(?P<ctsrxcnt>[0-9]{4})');
@property
def finished(self):
return self.exited and self.closed_stdout and self.closed_stderr
def signal_exit(self):
if not self.finished:
return
self._session.closed.set()
self._session.opened.clear()
logging.info('Netlink proto connection done (session={})'.format(self._session.name))
def connection_made(self, trans):
self._session.closed.clear()
self._session.opened.set()
self.mypid = trans.get_pid()
self.io_watch_timer = ssh_node.loop.call_later(30, self.io_watch_event)
self._session.io_inactive.clear()
logging.info('Netlink proto connection made (session={})'.format(self._session.name))
def io_watch_event(self, type=None):
self._session.io_inactive.set()
logging.debug('Netlink watch event io_inactive (session={})'.format(self._session.name))
def pipe_data_received(self, fd, data):
data = data.decode("utf-8")
self._session.io_inactive.clear()
if self.io_watch_timer :
self.io_watch_timer.cancel()
logging.debug('cancel io_watch_timer');
self.io_watch_timer = ssh_node.loop.call_later(self.io_watch_default, self.io_watch_event)
logging.debug('set io_watch_timer for {} seconds'.format(self.io_watch_default));
if fd == 1:
self.stdoutbuffer += data
if not self._session.sampling.is_set() :
return
while "\n" in self.stdoutbuffer:
line, self.stdoutbuffer = self.stdoutbuffer.split("\n", 1)
self._session.stdout_linecount += 1
if self.debug:
logging.debug('{} pktts PREPARSE: {} (stdout)'.format(self._session.name, line))
m = self.netlink_type_flowid_parse.match(line)
if m :
type = m.group('type')
flowid = m.group('flowid')
if netlink_pktts.flowpoint_filter and (flowid not in netlink_pktts.flowpoint_filter) :
if self.debug :
logging.debug('MISS pktts flowid={} filter={}'.format(flowid, netlink_pktts.flowpoint_filter))
return
if type == '1' :
m = self.netlink_type1_parse.match(line)
if m :
if self.debug:
logging.debug('MATCHTx: {} {} {} {} {} {} {} {}'.format(m.group('flowid'),m.group('payload'),m.group('dhdt0'),m.group('dhdt5'),m.group('fwt1'), m.group('fwt2'), m.group('fwt3'), m.group('fwt4')))
if self._session.servo_match and (m.group('flowid') != '0x00000000') :
# The Tx should have DHDT0, DHDT5, FWTx0, FWTx1, FWTx2, FWTx3
flowid = m.group('flowid')
if self.txmatch_output :
logging.info('ServoType1MATCH: {} {} {} {} {} {} {} {}'.format(m.group('flowid'),m.group('payload'),m.group('dhdt0'),m.group('dhdt5'),m.group('fwt1'), m.group('fwt2'), m.group('fwt3'), m.group('fwt4')))
self.txmatch_output = False
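# The last 8 hex digits of the xbytes payload carry the application-level
# sequence number; the DHD timestamps are 16-digit epoch values in
# microseconds, split into '<seconds>.<microseconds>' and parsed as float seconds.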
seqno = int(m.group('payload')[-8:],16)
dhdt0gps = float(m.group('dhdt0')[:-6] + '.' + m.group('dhdt0')[-6:])
dhdt5gps = float(m.group('dhdt5')[:-6] + '.' + m.group('dhdt5')[-6:])
#insert the values into the flowpoints table for after test processing
fp=netlink_pktts.flowpoints[flowid][seqno] # flowid, seqno
fp.seqno = seqno
fp.dhdt0gps = dhdt0gps
fp.dhdt5gps = dhdt5gps
fp.flowid = flowid
if fp.tsf_txt0 is None :
assert self._session.tsf_t0 is not None, "tsfservo on {} not initialized".format(self._session.name)
fp.tsf_txt0 = self._session.tsf_t0
fp.tsf_txdrift = self._session.tsf_drift
fp.tsfgps_txt0 = self._session.tsf_t0gpsadjust
fp.fwt1tsf = int(m.group('fwt1'),16)
fp.fwt2tsf = int(m.group('fwt2'),16)
fp.fwt3tsf = int(m.group('fwt3'),16)
fp.fwt4tsf = int(m.group('fwt4'),16)
elif type == '2' :
m = self.netlink_type2_parse.match(line)
if m :
if self.debug:
logging.debug('MATCHRx: {} {} {} {} {}'.format(m.group('flowid'), m.group('payload'), m.group('dhdr3'), m.group('fwr1'), m.group('fwr2')))
if self._session.servo_match and (m.group('flowid') != '0x00000000') :
# The Rx should have DHDR3, FWR1, FWR2
flowid = m.group('flowid')
seqno = int(m.group('payload')[-8:],16)
dhdr3gps = float(m.group('dhdr3')[:-6] + '.' + m.group('dhdr3')[-6:])
#insert the values into the flowpoints table for after test processing
fp=netlink_pktts.flowpoints[flowid][seqno] # flowid, seqno
fp.dhdr3gps = dhdr3gps
fp.seqno = seqno
fp.flowid = flowid
if fp.tsf_rxt0 is None :
assert self._session.tsf_t0gpsadjust is not None, "tsfservo on {} not initialized".format(self._session.name)
fp.tsf_rxdrift = self._session.tsf_drift
fp.tsfgps_rxt0 = self._session.tsf_t0gpsadjust
fp.tsf_rxt0 = self._session.tsf_t0
fp.fwr1tsf = int(m.group('fwr1'),16)
fp.fwr2tsf = int(m.group('fwr2'),16)
elif type == '3' :
m = self.netlink_type3_parse.match(line)
if m :
if self.debug :
logging.debug('MATCHT3: {} {} {} {} {} {} {} {} {} {} {} {}'.format(m.group('flowid'), m.group('payload'), m.group('pktfetch'), m.group('medacdly'), m.group('rxdur'), m.group('macsusdur'), m.group('txstatusts'), m.group('txencnt'), m.group('oactxscnt'), m.group('datatxscnt'), m.group('rtstxcnt'), m.group('ctsrxcnt')));
if self._session.servo_match and (m.group('flowid') != '0x00000000') :
flowid = m.group('flowid')
seqno = int(m.group('payload')[-8:],16)
fp=netlink_pktts.flowpoints[flowid][seqno] # flowid, seqno
fp.seqno = seqno
fp.flowid = flowid
fp.media_ac_delay = int(m.group('medacdly'),16)
fp.rxdurtsf = int(m.group('rxdur'),16)
fp.mac_suspend = int(m.group('macsusdur'),16)
fp.txencnt = int(m.group('txencnt'))
fp.oactxscnt = int(m.group('oactxscnt'))
fp.datatxscnt = int(m.group('datatxscnt'))
fp.rtstxcnt = int(m.group('rtstxcnt'))
fp.ctsrxcnt = int(m.group('ctsrxcnt'))
if fp.tsf_txt0 is not None:
assert self._session.tsf_t0 is not None, "tsfservo on {} not initialized".format(self._session.name)
fp.tsf_txt0 = self._session.tsf_t0
fp.tsf_txdrift = self._session.tsf_drift
fp.tsfgps_txt0 = self._session.tsf_t0gpsadjust
fp.pktfetchtsf = int(m.group('pktfetch'),16)
fp.txstatustsf = int(m.group('txstatusts'),16)
else :
logging.debug('Unknown type pktts {}: {} (stdout)'.format(type, line))
else :
if self.debug:
logging.debug('MISS: pktts {} (stdout)'.format(line))
elif fd == 2:
self.stderrbuffer += data
while "\n" in self.stderrbuffer:
line, self.stderrbuffer = self.stderrbuffer.split("\n", 1)
self._session.stderr_linecount += 1
logging.warning('{} {} (pktts - stderr)'.format(line, self._session.ipaddr))
def pipe_connection_lost(self, fd, exc):
if self.io_watch_timer :
self.io_watch_timer.cancel()
logging.debug('cancel io_watch_timer lost connection');
self.signal_exit()
def process_exited(self,):
self.signal_exit()
# Asyncio protocol for subprocess transport to decode netlink tsfservo messages
class NetlinkServoProtocol(asyncio.SubprocessProtocol):
def __init__(self, session):
self.loop = asyncio.get_running_loop()
self.exited = False
self.closed_stdout = False
self.closed_stderr = False
self.stdoutbuffer = ""
self.stderrbuffer = ""
self._session = session
self.debug = session.debug
self.silent = session.silent
# TSFGPS servo for dev ap0 GPS=1537306425.681630927 TSF=2833.449983 RAW=0xa8e303ff TSFT0=1537303592.231647927 (with delay 1.000000 second(s) pid=16591)
# TSFGPS servo for dev eth0 GPS=1537146706.237886334 TSF=25.444833 RAW=0x18441e1 drift=-25542 ns rate=-25.538413 usec/sec
self.tsfservo_init_parse = re.compile(r'TSFGPS servo for dev (?P<device>[a-z0-9\.]+)\sGPS=(?P<gpsts>[0-9\.]+)\s(?P<tsfts>TSF=[0-9\.]+)\sRAW=(?P<tsfraw>0x[0-9a-f]+)\sDHDIoctl=[0-9]+\sTSFT0=(?P<tsft0>[0-9\.]+)')
self.tsfservo_drift_parse = re.compile(r'TSFGPS servo for dev (?P<device>[a-z0-9\.]+)\sGPS=(?P<gpsts>[0-9\.]+)\s(?P<tsfts>TSF=[0-9\.]+)\sRAW=(?P<tsfraw>0x[0-9a-f]+)\sDHDIoctl=[0-9]+\sdrift=(?P<tsfdrift>[\-0-9]+)\sns')
@property
def finished(self):
return self.exited and self.closed_stdout and self.closed_stderr
def signal_exit(self):
if not self.finished:
return
self._session.servo_closed.set()
self._session.servo_opened.clear()
def connection_made(self, trans):
self._session.servo_closed.clear()
self._session.servo_opened.set()
self.mypid = trans.get_pid()
def pipe_data_received(self, fd, data):
data = data.decode("utf-8")
if fd == 1:
self.stdoutbuffer += data
while "\n" in self.stdoutbuffer:
line, self.stdoutbuffer = self.stdoutbuffer.split("\n", 1)
logging.debug('{} {} (stdout)'.format(self._session.name, line))
if not self._session.servo_match :
m = self.tsfservo_init_parse.match(line)
if m :
self._session.servo_match = True
self._session.tsf_t0gpsadjust = float(m.group('tsft0'))
self._session.tsf_t0 = int(m.group('tsfraw'),16)
self._session.tsf_drift = 0
logging.info('{} TSF init match {} (stdout)'.format(self._session.name, line))
else :
m = self.tsfservo_drift_parse.match(line)
if m:
# convert from ns to us
self._session.tsf_drift = round((int(m.group('tsfdrift')))/1000)
if not self._session.servo_ready.is_set() :
self._session.servo_ready.set()
elif fd == 2:
self.stderrbuffer += data
while "\n" in self.stderrbuffer:
line, self.stderrbuffer = self.stderrbuffer.split("\n", 1)
self._session.stderr_linecount += 1
logging.warning('{} {} (servo - stderr)'.format(line, self._session.ipaddr))
def pipe_connection_lost(self, fd, exc):
self.signal_exit()
def process_exited(self,):
self.signal_exit()
def __init__(self, user='root', name=None, loop=None, sshnode=None, debug=False, output='ssh', silent=True, chip='4387'):
netlink_pktts.instances.add(self)
self.loop = asyncio.get_running_loop()
self.ssh = '/usr/bin/ssh'
self.user = user
self.mynode = sshnode
self.chip = chip
try:
self.ipaddr = sshnode.ipaddr
except AttributeError:
self.ipaddr = sshnode
self.device=sshnode.device
print('Netlink name={} device={} ip={}'.format(sshnode.name, sshnode.device, sshnode.ipaddr))
self.pktts_cmd = '/usr/bin/dhd --pktts'
self.servo_cmd = '/usr/local/bin/tsfservo -i {} -f 2 -t 7200 -d {}'.format(self.device, self.chip)
if output != 'ssh' :
self.pktts_cmd = '{} > /dev/null 2>&1'.format(self.pktts_cmd)
self.stdout_linecount = 0
self.stderr_linecount = 0
self.opened = asyncio.Event()
self.closed = asyncio.Event()
self.sampling = asyncio.Event()
self.servo_opened = asyncio.Event()
self.servo_closed = asyncio.Event()
self.servo_ready = asyncio.Event()
self.name = sshnode.name
self.debug = debug
self.silent = silent
self.servo_match = False
self.io_inactive = asyncio.Event()
def reset(self) :
self.stdout_linecount = 0
self.stderr_linecount = 0
async def servo_start(self):
sshcmd=[self.ssh, self.user + '@' + self.ipaddr, self.servo_cmd]
logging.info('servo_start {}'.format(str(sshcmd)))
self.servo_closed.set()
self.servo_opened.clear()
self.servo_ready.clear()
try :
self._transport, self._protocol = await self.loop.subprocess_exec(lambda: self.NetlinkServoProtocol(self), *sshcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=None)
except:
print('netlink dhd pktts start error: {}'.format(str(sshcmd)))
logging.error('netlink dhd pktts start error: {}'.format(str(sshcmd)))
else :
await self.servo_ready.wait()
async def start(self):
sshcmd=[self.ssh, self.user + '@' + self.ipaddr, self.pktts_cmd]
logging.info('{}'.format(str(sshcmd)))
self.stdout_linecount = 0
self.stderr_linecount = 0
self.closed.set()
self.opened.clear()
self.sampling.set()
try :
self._transport, self._protocol = await self.loop.subprocess_exec(lambda: self.NetlinkPkttsProtocol(self), *sshcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=None)
await self.opened.wait()
except:
logging.error('netlink dhd pktts start error: {}'.format(str(sshcmd)))
pass
async def await_io_finish(self) :
if not self.io_inactive.is_set() :
await self.io_inactive.wait()
|
def process(pid):
x = 6
y = 10
return pid + x + y
def main():
res = process(8)
print('res: {}'.format(res))
main()
|
"""
Spectral transforms are used in order to estimate the frequency-domain
representation of time-series. Several methods can be used and this module
contains implementations of several algorithms for the calculation of spectral
transforms.
"""
import numpy as np
from nitime.lazy import matplotlib_mlab as mlab
from nitime.lazy import scipy_linalg as linalg
from nitime.lazy import scipy_signal as sig
from nitime.lazy import scipy_interpolate as interpolate
from nitime.lazy import scipy_fftpack as fftpack
import nitime.utils as utils
from nitime.utils import tapered_spectra, dpss_windows
# To support older versions of numpy that don't have tril_indices:
from nitime.index_utils import tril_indices, triu_indices
# Set global variables for the default NFFT to be used in spectral analysis and
# the overlap:
default_nfft = 64
default_n_overlap = int(np.ceil(default_nfft // 2))
def get_spectra(time_series, method=None):
r"""
Compute the spectra of an n-tuple of time series and all of
the pairwise cross-spectra.
Parameters
----------
time_series : float array
The time-series, where time is the last dimension
method : dict, optional
contains: this_method:'welch'
indicates that :func:`mlab.psd` will be used in
order to calculate the psd/csd, in which case, additional optional
inputs (and default values) are:
NFFT=64
Fs=2pi
detrend=mlab.detrend_none
window=mlab.window_hanning
n_overlap=0
this_method:'periodogram_csd'
indicates that :func:`periodogram` will
be used in order to calculate the psd/csd, in which case, additional
optional inputs (and default values) are:
Skx=None
Sky=None
N=None
sides='onesided'
normalize=True
Fs=2pi
this_method:'multi_taper_csd'
indicates that :func:`multi_taper_psd` used in order to calculate
psd/csd, in which case additional optional inputs (and default
values) are:
BW=0.01
Fs=2pi
sides = 'onesided'
Returns
-------
f : float array
The central frequencies for the frequency bands for which the spectra
are estimated
fxy : float array
A semi-filled matrix with the cross-spectra of the signals. The csd of
signal i and signal j is in f[j][i], but not in f[i][j] (which will be
filled with zeros). For i=j fxy[i][j] is the psd of signal i.
"""
if method is None:
method = {'this_method': 'welch'} # The default
# If no choice of method was explicitly set, but other parameters were
# passed, assume that the method is mlab:
this_method = method.get('this_method', 'welch')
if this_method == 'welch':
NFFT = method.get('NFFT', default_nfft)
Fs = method.get('Fs', 2 * np.pi)
detrend = method.get('detrend', mlab.detrend_none)
window = method.get('window', mlab.window_hanning)
n_overlap = method.get('n_overlap', int(np.ceil(NFFT / 2)))
# The length of the spectrum depends on how many sides are taken, which
# depends on whether or not this is a complex object:
if np.iscomplexobj(time_series):
fxy_len = NFFT
else:
fxy_len = NFFT // 2 + 1
# If there is only 1 channel in the time-series:
if len(time_series.shape) == 1 or time_series.shape[0] == 1:
temp, f = mlab.csd(time_series, time_series,
NFFT, Fs, detrend, window, n_overlap,
scale_by_freq=True)
fxy = temp.squeeze() # the output of mlab.csd has a weird shape
else:
fxy = np.zeros((time_series.shape[0],
time_series.shape[0],
fxy_len), dtype=complex) # Make sure it's complex
for i in range(time_series.shape[0]):
for j in range(i, time_series.shape[0]):
#Notice funny indexing, in order to conform to the
#conventions of the other methods:
temp, f = mlab.csd(time_series[j], time_series[i],
NFFT, Fs, detrend, window, n_overlap,
scale_by_freq=True)
fxy[i][j] = temp.squeeze() # the output of mlab.csd has a
# weird shape
elif this_method in ('multi_taper_csd', 'periodogram_csd'):
# these methods should work with similar signatures
mdict = method.copy()
func = eval(mdict.pop('this_method'))
freqs, fxy = func(time_series, **mdict)
f = utils.circle_to_hz(freqs, mdict.get('Fs', 2 * np.pi))
else:
raise ValueError("Unknown method provided")
return f, fxy.squeeze()
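# A minimal usage sketch (not part of nitime); the shapes and method parameters
# below are illustrative assumptions:
#
#   ts = np.random.randn(2, 1024)               # two channels, 1024 samples
#   f, fxy = get_spectra(ts, method={'this_method': 'welch',
#                                    'NFFT': 128, 'Fs': 1000.0})
#   # the diagonal of fxy holds each channel's PSD; the upper triangle (j >= i)
#   # holds the cross-spectra, while the lower triangle is left as zeros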
def get_spectra_bi(x, y, method=None):
r"""
Computes the spectra of two timeseries and the cross-spectrum between them
Parameters
----------
x,y : float arrays
Time-series data
method : dict, optional
See :func:`get_spectra` documentation for details
Returns
-------
f : float array
The central frequencies for the frequency
bands for which the spectra are estimated
fxx : float array
The psd of the first signal
fyy : float array
The psd of the second signal
fxy : float array
The cross-spectral density of the two signals
"""
f, fij = get_spectra(np.vstack((x, y)), method=method)
fxx = fij[0, 0].real
fyy = fij[1, 1].real
fxy = fij[0, 1]
return f, fxx, fyy, fxy
# The following spectrum estimates are normalized to the convention
# adopted by MATLAB (or at least spectrum.psd)
# By definition, Sxx(f) = DTFT{Rxx(n)}, where Rxx(n) is the autocovariance
# function of x(n). Therefore the integral from
# [-Fs/2, Fs/2] of Sxx(f)*df is Rxx(0).
# And from the definition of Rxx(n),
# Rxx(0) = Expected-Value{x(n)x*(n)} = Expected-Value{ |x|^2 },
# which is estimated as (x*x.conj()).mean()
# In other words, sum(Sxx) * Fs / NFFT ~ var(x)
def periodogram(s, Fs=2 * np.pi, Sk=None, N=None,
sides='default', normalize=True):
"""Takes an N-point periodogram estimate of the PSD function. The
number of points N, or a precomputed FFT Sk may be provided. By default,
the PSD function returned is normalized so that the integral of the PSD
is equal to the mean squared amplitude (mean energy) of s (see Notes).
Parameters
----------
s : ndarray
Signal(s) for which to estimate the PSD, time dimension in the last
axis
Fs : float (optional)
The sampling rate. Defaults to 2*pi
Sk : ndarray (optional)
Precomputed FFT of s
N : int (optional)
Indicates an N-point FFT where N != s.shape[-1]
sides : str (optional) [ 'default' | 'onesided' | 'twosided' ]
This determines which sides of the spectrum to return.
        For complex-valued inputs, the default is two-sided; for real-valued
        inputs, the default is one-sided.
    normalize : boolean (optional, default=True)
        Normalizes the PSD
Returns
-------
    (f, psd) : tuple
        f : The central frequencies for the frequency bands
        psd : The PSD estimate for each row of s
"""
if Sk is not None:
N = Sk.shape[-1]
else:
N = s.shape[-1] if not N else N
Sk = fftpack.fft(s, n=N)
pshape = list(Sk.shape)
# if the time series is a complex vector, a one sided PSD is invalid:
if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
sides = 'twosided'
elif sides in ('default', 'onesided'):
sides = 'onesided'
if sides == 'onesided':
# putative Nyquist freq
Fn = N // 2 + 1
# last duplicate freq
Fl = (N + 1) // 2
pshape[-1] = Fn
P = np.zeros(pshape, 'd')
        freqs = np.linspace(0, Fs / 2, Fn)
P[..., 0] = (Sk[..., 0] * Sk[..., 0].conj()).real
P[..., 1:Fl] = 2 * (Sk[..., 1:Fl] * Sk[..., 1:Fl].conj()).real
if Fn > Fl:
P[..., Fn - 1] = (Sk[..., Fn - 1] * Sk[..., Fn - 1].conj()).real
else:
P = (Sk * Sk.conj()).real
freqs = np.linspace(0, Fs, N, endpoint=False)
if normalize:
P /= (Fs * s.shape[-1])
return freqs, P
def periodogram_csd(s, Fs=2 * np.pi, Sk=None, NFFT=None, sides='default',
normalize=True):
"""Takes an N-point periodogram estimate of all the cross spectral
density functions between rows of s.
The number of points N, or a precomputed FFT Sk may be provided. By
default, the CSD function returned is normalized so that the integral of
the PSD is equal to the mean squared amplitude (mean energy) of s (see
Notes).
Parameters
    ----------
s : ndarray
Signals for which to estimate the CSD, time dimension in the last axis
Fs : float (optional)
The sampling rate. Defaults to 2*pi
Sk : ndarray (optional)
Precomputed FFT of rows of s
NFFT : int (optional)
Indicates an N-point FFT where N != s.shape[-1]
sides : str (optional) [ 'default' | 'onesided' | 'twosided' ]
This determines which sides of the spectrum to return.
        For complex-valued inputs, the default is two-sided; for real-valued
        inputs, the default is one-sided.
normalize : boolean (optional)
Normalizes the PSD
Returns
-------
freqs, csd_est : ndarrays
        The frequency points vector and the estimated CSD.
The CSD{i,j}(f) are returned in a square "matrix" of vectors
holding Sij(f). For an input array that is reshaped to (M,N),
the output is (M,M,N)
"""
s_shape = s.shape
s.shape = (-1, s_shape[-1])
# defining an Sk_loc is a little opaque, but it avoids having to
# reset the shape of any user-given Sk later on
if Sk is not None:
Sk_shape = Sk.shape
N = Sk.shape[-1]
Sk_loc = Sk.reshape(np.prod(Sk_shape[:-1]), N)
else:
if NFFT is not None:
N = NFFT
else:
N = s.shape[-1]
Sk_loc = fftpack.fft(s, n=N)
# reset s.shape
s.shape = s_shape
M = Sk_loc.shape[0]
# if the time series is a complex vector, a one sided PSD is invalid:
if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
sides = 'twosided'
elif sides in ('default', 'onesided'):
sides = 'onesided'
if sides == 'onesided':
# putative Nyquist freq
Fn = N // 2 + 1
# last duplicate freq
Fl = (N + 1) // 2
csd_pairs = np.zeros((M, M, Fn), 'D')
freqs = np.linspace(0, Fs / 2, Fn)
for i in range(M):
for j in range(i + 1):
csd_pairs[i, j, 0] = Sk_loc[i, 0] * Sk_loc[j, 0].conj()
csd_pairs[i, j, 1:Fl] = 2 * (Sk_loc[i, 1:Fl] *
Sk_loc[j, 1:Fl].conj())
if Fn > Fl:
csd_pairs[i, j, Fn - 1] = (Sk_loc[i, Fn - 1] *
Sk_loc[j, Fn - 1].conj())
else:
csd_pairs = np.zeros((M, M, N), 'D')
        freqs = np.linspace(0, Fs, N, endpoint=False)
for i in range(M):
for j in range(i + 1):
csd_pairs[i, j] = Sk_loc[i] * Sk_loc[j].conj()
if normalize:
csd_pairs /= (Fs*N)
csd_mat = csd_pairs.transpose(1,0,2).conj()
csd_mat += csd_pairs
diag_idc = (np.arange(M), np.arange(M))
csd_mat[diag_idc] /= 2
return freqs, csd_mat
def mtm_cross_spectrum(tx, ty, weights, sides='twosided'):
r"""
The cross-spectrum between two tapered time-series, derived from a
multi-taper spectral estimation.
Parameters
----------
tx, ty : ndarray (K, ..., N)
The complex DFTs of the tapered sequence
weights : ndarray, or 2-tuple or list
Weights can be specified as a length-2 list of weights for spectra tx
and ty respectively. Alternatively, if tx is ty and this function is
computing the spectral density function of a single sequence, the
weights can be given as an ndarray of weights for the spectrum.
Weights may be
* scalars, if the shape of the array is (K, ..., 1)
* vectors, with the shape of the array being the same as tx or ty
sides : str in {'onesided', 'twosided'}
For the symmetric spectra of a real sequence, optionally combine half
of the frequencies and scale the duplicate frequencies in the range
(0, F_nyquist).
Notes
-----
spectral densities are always computed as
:math:`S_{xy}^{mt}(f) = \frac{\sum_k
[d_k^x(f)s_k^x(f)][d_k^y(f)(s_k^y(f))^{*}]}{[\sum_k
d_k^x(f)^2]^{\frac{1}{2}}[\sum_k d_k^y(f)^2]^{\frac{1}{2}}}`
"""
N = tx.shape[-1]
if ty.shape != tx.shape:
raise ValueError('shape mismatch between tx, ty')
# pshape = list(tx.shape)
if isinstance(weights, (list, tuple)):
autospectrum = False
weights_x = weights[0]
weights_y = weights[1]
denom = (np.abs(weights_x) ** 2).sum(axis=0) ** 0.5
denom *= (np.abs(weights_y) ** 2).sum(axis=0) ** 0.5
else:
autospectrum = True
weights_x = weights
weights_y = weights
denom = (np.abs(weights) ** 2).sum(axis=0)
if sides == 'onesided':
# where the nyq freq should be
Fn = N // 2 + 1
truncated_slice = [slice(None)] * len(tx.shape)
truncated_slice[-1] = slice(0, Fn)
tsl = tuple(truncated_slice)
tx = tx[tsl]
ty = ty[tsl]
# if weights.shape[-1] > 1 then make sure weights are truncated too
if weights_x.shape[-1] > 1:
weights_x = weights_x[tsl]
weights_y = weights_y[tsl]
denom = denom[tsl[1:]]
sf = weights_x * tx
sf *= (weights_y * ty).conj()
sf = sf.sum(axis=0)
sf /= denom
if sides == 'onesided':
# dbl power at duplicated freqs
Fl = (N + 1) // 2
sub_slice = [slice(None)] * len(sf.shape)
sub_slice[-1] = slice(1, Fl)
sf[tuple(sub_slice)] *= 2
if autospectrum:
return sf.real
return sf
def multi_taper_psd(
s, Fs=2 * np.pi, NW=None, BW=None, adaptive=False,
jackknife=True, low_bias=True, sides='default', NFFT=None
):
"""Returns an estimate of the PSD function of s using the multitaper
method. If the NW product, or the BW and Fs in Hz are not specified
by the user, a bandwidth of 4 times the fundamental frequency,
corresponding to NW = 4 will be used.
Parameters
----------
s : ndarray
An array of sampled random processes, where the time axis is assumed to
be on the last axis
Fs : float
Sampling rate of the signal
NW : float
The normalized half-bandwidth of the data tapers, indicating a
multiple of the fundamental frequency of the DFT (Fs/N).
Common choices are n/2, for n >= 4. This parameter is unitless
and more MATLAB compatible. As an alternative, set the BW
parameter in Hz. See Notes on bandwidth.
BW : float
The sampling-relative bandwidth of the data tapers, in Hz.
adaptive : {True/False}
Use an adaptive weighting routine to combine the PSD estimates of
different tapers.
jackknife : {True/False}
Use the jackknife method to make an estimate of the PSD variance
at each point.
low_bias : {True/False}
Rather than use 2NW tapers, only use the tapers that have better than
90% spectral concentration within the bandwidth (still using
a maximum of 2NW tapers)
sides : str (optional) [ 'default' | 'onesided' | 'twosided' ]
This determines which sides of the spectrum to return.
        For complex-valued inputs, the default is two-sided; for real-valued
        inputs, the default is one-sided.
Returns
-------
(freqs, psd_est, var_or_nu) : ndarrays
The first two arrays are the frequency points vector and the
estimated PSD. The last returned array differs depending on whether
the jackknife was used. It is either
* The jackknife estimated variance of the log-psd, OR
* The degrees of freedom in a chi2 model of how the estimated
PSD is distributed about the true log-PSD (this is either
2*floor(2*NW), or calculated from adaptive weights)
Notes
-----
    The bandwidth of the windowing function will determine the number of
    tapers to use. This parameter represents a trade-off between frequency
resolution (lower main lobe BW for the taper) and variance reduction
(higher BW and number of averaged estimates). Typically, the number of
tapers is calculated as 2x the bandwidth-to-fundamental-frequency
ratio, as these eigenfunctions have the best energy concentration.
"""
# have last axis be time series for now
N = s.shape[-1]
    M = int(np.prod(s.shape[:-1]))
if BW is not None:
# BW wins in a contest (since it was the original implementation)
norm_BW = np.round(BW * N / Fs)
NW = norm_BW / 2.0
elif NW is None:
# default NW
NW = 4
# (else BW is None and NW is not None) ... all set
Kmax = int(2 * NW)
# if the time series is a complex vector, a one sided PSD is invalid:
if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
sides = 'twosided'
elif sides in ('default', 'onesided'):
sides = 'onesided'
# Find the direct spectral estimators S_k(f) for k tapered signals..
# don't normalize the periodograms by 1/N as normal.. since the taper
# windows are orthonormal, they effectively scale the signal by 1/N
spectra, eigvals = tapered_spectra(
s, (NW, Kmax), NFFT=NFFT, low_bias=low_bias
)
NFFT = spectra.shape[-1]
K = len(eigvals)
# collapse spectra's shape back down to 3 dimensions
spectra.shape = (M, K, NFFT)
last_freq = NFFT // 2 + 1 if sides == 'onesided' else NFFT
# degrees of freedom at each timeseries, at each freq
nu = np.empty((M, last_freq))
if adaptive:
weights = np.empty((M, K, last_freq))
for i in range(M):
weights[i], nu[i] = utils.adaptive_weights(
spectra[i], eigvals, sides=sides
)
else:
# let the weights simply be the square-root of the eigenvalues.
# repeat these values across all n_chan channels of data
weights = np.tile(np.sqrt(eigvals), M).reshape(M, K, 1)
nu.fill(2 * K)
if jackknife:
jk_var = np.empty_like(nu)
for i in range(M):
jk_var[i] = utils.jackknifed_sdf_variance(
spectra[i], eigvals, sides=sides, adaptive=adaptive
)
# Compute the unbiased spectral estimator for S(f) as the sum of
# the S_k(f) weighted by the function w_k(f)**2, all divided by the
# sum of the w_k(f)**2 over k
# 1st, roll the tapers axis forward
spectra = np.rollaxis(spectra, 1, start=0)
weights = np.rollaxis(weights, 1, start=0)
sdf_est = mtm_cross_spectrum(
spectra, spectra, weights, sides=sides
)
sdf_est /= Fs
if sides == 'onesided':
        freqs = np.linspace(0, Fs / 2, NFFT // 2 + 1)
else:
freqs = np.linspace(0, Fs, NFFT, endpoint=False)
out_shape = s.shape[:-1] + (len(freqs),)
sdf_est.shape = out_shape
if jackknife:
jk_var.shape = out_shape
return freqs, sdf_est, jk_var
else:
nu.shape = out_shape
return freqs, sdf_est, nu
def multi_taper_csd(s, Fs=2 * np.pi, NW=None, BW=None, low_bias=True,
adaptive=False, sides='default', NFFT=None):
"""Returns an estimate of the Cross Spectral Density (CSD) function
between all (N choose 2) pairs of timeseries in s, using the multitaper
method. If the NW product, or the BW and Fs in Hz are not specified by
the user, a bandwidth of 4 times the fundamental frequency, corresponding
to NW = 4 will be used.
Parameters
----------
s : ndarray
An array of sampled random processes, where the time axis is
assumed to be on the last axis. If ndim > 2, the number of time
series to compare will still be taken as prod(s.shape[:-1])
    Fs : float
        Sampling rate of the signal
NW : float
The normalized half-bandwidth of the data tapers, indicating a
multiple of the fundamental frequency of the DFT (Fs/N).
Common choices are n/2, for n >= 4. This parameter is unitless
and more MATLAB compatible. As an alternative, set the BW
parameter in Hz. See Notes on bandwidth.
BW : float
The sampling-relative bandwidth of the data tapers, in Hz.
adaptive : {True, False}
Use adaptive weighting to combine spectra
low_bias : {True, False}
Rather than use 2NW tapers, only use the tapers that have better than
90% spectral concentration within the bandwidth (still using
a maximum of 2NW tapers)
sides : str (optional) [ 'default' | 'onesided' | 'twosided' ]
This determines which sides of the spectrum to return. For
        complex-valued inputs, the default is two-sided; for real-valued
        inputs, the default is one-sided.
Returns
-------
(freqs, csd_est) : ndarrays
        The frequency points vector and the estimated CSD.
The CSD{i,j}(f) are returned in a square "matrix" of vectors
holding Sij(f). For an input array of (M,N), the output is (M,M,N)
Notes
-----
    The bandwidth of the windowing function will determine the number of
    tapers to use. This parameter represents a trade-off between frequency
resolution (lower main lobe BW for the taper) and variance reduction
(higher BW and number of averaged estimates). Typically, the number of
tapers is calculated as 2x the bandwidth-to-fundamental-frequency
ratio, as these eigenfunctions have the best energy concentration.
"""
# have last axis be time series for now
N = s.shape[-1]
    M = int(np.prod(s.shape[:-1]))
if BW is not None:
# BW wins in a contest (since it was the original implementation)
norm_BW = np.round(BW * N / Fs)
NW = norm_BW / 2.0
elif NW is None:
# default NW
NW = 4
# (else BW is None and NW is not None) ... all set
Kmax = int(2 * NW)
# if the time series is a complex vector, a one sided PSD is invalid:
if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':
sides = 'twosided'
elif sides in ('default', 'onesided'):
sides = 'onesided'
# Find the direct spectral estimators S_k(f) for k tapered signals..
# don't normalize the periodograms by 1/N as normal.. since the taper
# windows are orthonormal, they effectively scale the signal by 1/N
spectra, eigvals = tapered_spectra(
s, (NW, Kmax), NFFT=NFFT, low_bias=low_bias
)
NFFT = spectra.shape[-1]
K = len(eigvals)
# collapse spectra's shape back down to 3 dimensions
spectra.shape = (M, K, NFFT)
# compute the cross-spectral density functions
last_freq = NFFT // 2 + 1 if sides == 'onesided' else NFFT
if adaptive:
w = np.empty((M, K, last_freq))
nu = np.empty((M, last_freq))
for i in range(M):
w[i], nu[i] = utils.adaptive_weights(
spectra[i], eigvals, sides=sides
)
else:
weights = np.sqrt(eigvals).reshape(K, 1)
csd_pairs = np.zeros((M, M, last_freq), 'D')
for i in range(M):
if adaptive:
wi = w[i]
else:
wi = weights
for j in range(i + 1):
if adaptive:
wj = w[j]
else:
wj = weights
ti = spectra[i]
tj = spectra[j]
csd_pairs[i, j] = mtm_cross_spectrum(ti, tj, (wi, wj), sides=sides)
csdfs = csd_pairs.transpose(1,0,2).conj()
csdfs += csd_pairs
diag_idc = (np.arange(M), np.arange(M))
csdfs[diag_idc] /= 2
csdfs /= Fs
if sides == 'onesided':
        freqs = np.linspace(0, Fs / 2, NFFT // 2 + 1)
else:
freqs = np.linspace(0, Fs, NFFT, endpoint=False)
return freqs, csdfs
def freq_response(b, a=1., n_freqs=1024, sides='onesided'):
"""
Returns the frequency response of the IIR or FIR filter described
by beta and alpha coefficients.
Parameters
----------
b : beta sequence (moving average component)
a : alpha sequence (autoregressive component)
n_freqs : size of frequency grid
sides : {'onesided', 'twosided'}
compute frequencies between [-PI,PI), or from [0, PI]
Returns
-------
fgrid, H(e^jw)
Notes
-----
For a description of the linear constant-coefficient difference equation,
see
http://en.wikipedia.org/wiki/Z-transform
"""
# transitioning to scipy freqz
real_n = n_freqs // 2 + 1 if sides == 'onesided' else n_freqs
return sig.freqz(b, a=a, worN=real_n, whole=sides != 'onesided')
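
# ----------------------------------------------------------------------------
# Illustrative usage (hedged addition, not part of the original module): a
# quick sanity check of the normalization convention noted above
# (sum(Sxx) * Fs / NFFT ~ var(x)) plus a minimal get_spectra_bi call. It only
# uses functions defined in this file and assumes the module-level imports
# (np, mlab, fftpack, ...) are in place as used above.
if __name__ == "__main__":
    np.random.seed(0)
    Fs, N = 128.0, 1024
    x = np.random.randn(N)  # zero-mean white noise
    f, P = periodogram(x, Fs=Fs, sides='twosided')
    print("sum(P) * Fs / N =", P.sum() * Fs / N, " var(x) =", x.var())
    # Cross-spectra between two related signals using the default Welch path
    y = np.roll(x, 5) + 0.1 * np.random.randn(N)
    f2, fxx, fyy, fxy = get_spectra_bi(
        x, y, method={'this_method': 'welch', 'NFFT': 256, 'Fs': Fs})
    print("psd/csd shapes:", fxx.shape, fyy.shape, fxy.shape)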
|
import numpy as np
from numpy.testing import assert_array_equal
import sys, os
my_path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, my_path + '/../')
import pyidi
def test_multiprocessing():
video = pyidi.pyIDI(cih_file='./data/data_synthetic.cih')
video.set_method(method='lk', int_order=1, roi_size=(9, 9))
points = np.array([
[ 31, 35],
[ 31, 215],
[ 31, 126],
[ 95, 71],
])
video.set_points(points)
video.method.configure(pbar_type='tqdm', multi_type='multiprocessing')
res_1 = video.get_displacements(processes=2, resume_analysis=False, autosave=False)
video.method.configure(pbar_type='atpbar', multi_type='mantichora')
res_2 = video.get_displacements(processes=2, resume_analysis=False, autosave=False)
assert_array_equal(res_1, res_2)
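
# Hedged convenience addition: allow running this test directly as a script.
if __name__ == '__main__':
    test_multiprocessing()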
|
import datetime
import random
import hashlib
basic = {
'一元微积分',
'多元微积分',
'高等微积分',
'几何与代数',
'随机数学方法',
'概率论与数理统计',
'线性代数',
'复变函数引论',
'大学物理',
'数理方程引',
'数值分析',
'离散数学',
'离散数学(Ⅱ)',
'随机过程',
'应用随机过程',
'泛函分析',
'代数编码理论',
'初等数论与多项式',
'应用统计',
'工程图学基础',
'电路原理',
'电子基础',
'电子技术实验',
'电路分析',
'C语言程序设计',
'C++程序设计',
'Java程序设计',
'编译原理',
'操作系统',
'计算机网络',
'数据库原理',
'软件工程',
'软件系统设计',
'Python',
'Java面向对象程序设计',
'计算机系统结构',
'计算机网络及应用',
'计算机组成原理',
'计算机导论',
'大学计算机基础实践',
'数据结构',
'计算机网络与通信',
'微机原理及应用',
'电子技术基础',
'模拟电子技术基础',
'数据库原理',
'多媒体技术',
'计算机接口技术',
'电路与电子技术基础',
'Linux系统及应用'
}
advance = {
'微计算机技术',
'数字系统设计自动化',
'VLSI设计导论',
'网络编程与计算技术',
'通信电路',
'通信原理课组',
'网络安全',
'网格计算',
'高性能计算前沿技术',
'模式识别',
'数字图象处理',
'多媒体技术基础及应用',
'计算机图形学基础',
'计算机实时图形和动画技术',
'系统仿真与虚拟现实',
'现代控制技术',
'信息检索',
'电子商务平台及核心技术',
'数据挖掘',
'机器学习概论',
'人机交互理论与技术',
'人工神经网络',
'信号处理原理',
'系统分析与控制',
'媒体计算',
'形式语言与自动机',
'分布式数据库系统',
'算法分析与设计基础',
'面向对象技术及其应用',
'软件项目管理',
'信息检索技术',
'人工智能导论',
'高级数据结构',
'计算机动画的算法与技术',
'嵌入式系统',
'C++高级编程',
'单片机和嵌入式系统',
'数字系统集成化设计与综合',
'移动通信与卫星通信',
'遥感原理',
'图象处理系统',
'图象压缩',
'Windows操作系统原理与应用',
'专业英语',
'光纤应用技术A',
'光电子技术及其应用',
'微电子学概论',
'付立叶光学',
'激光与光电子技术实验',
'信号处理实验与设计',
'激光光谱',
'光检测技术',
'光传感技术',
'光电子CAD与仿真',
'量子电子学',
'非线性光学',
'真空技术',
'光通信技术',
'微电子新器件',
'微电子系统集成概论',
'量子信息学引论',
'语音信号处理',
'无线信号的光纤传输技术',
'初等数论',
'互联网信息处理',
'机器人智能控制',
'电子测量',
'电力电子系统设计',
'现代检测技术基础',
'智能仪表设计',
'生产系统计划与控制',
'过程控制',
'计算机图象处理与多媒体',
'现代电子技术',
'智能控制',
'过程控制系统',
'UNIX系统基础',
'离散时间信号处理',
'系统的可靠性及容错',
'电力电子电路的微机控制',
'非线性控制理论',
'电子商务概论',
'虚拟现实技术及其应用',
'智能优化算法及其应用',
'随机控制',
'控制专题',
'现场总线技术及其应用',
'数字视频基础与应用',
'嵌入式系统设计与应用',
'多维空间分布系统控制及信号处理杂谈',
'集成传感器',
'RF-CMOS电路设计',
'数/模混合集成电路',
'分子生物电子学',
'集成电路测试',
'纳电子材料',
'半导体材料与器件的表征和测量',
'模式识别与机器学习导论',
'自动控制原理',
'调节器与执行器',
'计算机控制系统',
'电机及电力拖动基础',
'集散控制系统实验',
'过程控制综合实践',
'电气控制技术',
'系统工程导论',
'先进控制理论与技术',
'自动化工程设计',
'企业供电',
'自动化导论',
'数字信号处理',
'电子信息技术导论',
'计算机图形学',
'信息安全',
'高级语言程序设计',
'Android移动终端开发',
'网页设计与网站建设',
'信号与系统',
'传感器与检测技术',
'单片机课程设计',
'数字电子技术基础',
'大学计算机基础与实践',
'大数据基础概论',
'Web系统与技术',
'强化学习',
'智能计算导论',
'并行程序设计',
'数据结构与算法分析',
'人工智能原理',
'机器学习',
'虚拟现实和数字孪生技术',
'智能传感器与控制系统',
'人工智能系统平台实训',
'智能系统综合设计',
'自动控制概论'
}
def course(user_id: int) -> str:
    # Seed the RNG from the QQ user id and today's date (currently disabled):
# random_seed_str = str([user_id, datetime.date.today()])
# md5 = hashlib.md5()
# md5.update(random_seed_str.encode('utf-8'))
# random_seed = md5.hexdigest()
# random.seed(random_seed)
    # random.sample() requires a sequence; sampling directly from a set was
    # removed in Python 3.11, so convert the sets first
    course_day = random.sample(tuple(basic), k=1)
    course_day.extend(random.sample(tuple(advance), k=3))
    course_t = '》\n《'.join(course_day)
result = f"今天要修行的课有:\n《{course_t}》"
return result
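
# Illustrative usage (hedged addition): the user id below is an arbitrary
# placeholder, since the seeding code above is currently commented out.
if __name__ == '__main__':
    print(course(123456789))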
|
import deepspeed
import torch
from apex.optimizers import FusedAdam as Adam
from torch import distributed as dist
import mpu
from fp16 import FP16_Module, FP16_Optimizer, DynamicLossScaler
from learning_rates import AnnealingLR
from model import GLMModel, glm_get_params_for_weight_decay_optimization
from model import GLMForMultiTokenCloze, GLMForMultiTokenClozeFast, GLMForSingleTokenCloze, GLMForSequenceClassification
from model import PyTorchDistributedDataParallel as TorchDDP, DistributedDataParallel as LocalDDP
from model.modeling_bert import BertForMultipleChoice, BertForSequenceClassification
from utils import print_rank_0, get_checkpoint_name, get_checkpoint_iteration
def load_pretrained(model, checkpoint_path, args, task_tokens=None):
load_dir, tag, release, success = get_checkpoint_iteration(checkpoint_path)
checkpoint_name = get_checkpoint_name(load_dir, tag, release)
if mpu.get_data_parallel_rank() == 0:
print('global rank {} is loading pretrained model {}'.format(
torch.distributed.get_rank(), checkpoint_name))
# Load the checkpoint.
sd = torch.load(checkpoint_name, map_location='cpu')
if args.deepspeed:
model = model.module
if isinstance(model, TorchDDP):
model = model.module
if isinstance(model, FP16_Module):
model = model.module
if hasattr(model, "model"):
model = model.model
# Model.
def extend_embedding_weights(state_weights, model_weights):
original_length = state_weights.shape[0]
assert original_length <= args.max_position_embeddings + 1
new_weights = model_weights.clone()
new_weights[:original_length] = state_weights
return new_weights
if args.block_lm:
if "transformer.block_position_embeddings.weight" in sd["module"]:
position_weights = sd['module']["transformer.position_embeddings.weight"]
if args.max_position_embeddings + 1 > position_weights.shape[0]:
sd['module']["transformer.position_embeddings.weight"] = extend_embedding_weights(
position_weights, model.state_dict()["transformer.position_embeddings.weight"].data)
print_rank_0(f"Extend position embedding to {args.max_position_embeddings + 1}")
if "transformer.block_position_embeddings.weight" in sd["module"]:
block_position_weights = sd['module']["transformer.block_position_embeddings.weight"]
if args.max_position_embeddings + 1 > block_position_weights.shape[0]:
sd['module']["transformer.block_position_embeddings.weight"] = extend_embedding_weights(
block_position_weights,
model.state_dict()["transformer.block_position_embeddings.weight"].data)
print_rank_0(f"Extend block position embedding to {args.max_position_embeddings + 1}")
missing_keys, unexpected_keys = model.load_state_dict(sd['module'], strict=False)
if missing_keys or unexpected_keys:
print_rank_0(f"Missing keys {missing_keys}, unexpected keys {unexpected_keys}")
if args.continuous_prompt and args.prompt_init:
model.prompt_spell.init_embedding(model.word_embeddings.weight.data, task_tokens)
def get_model(args, model_type=None, multi_token=True, num_labels=None, spell_length=None):
"""Build the model."""
print_rank_0('building GPT2 model ...')
if args.pretrained_bert:
if model_type == "multiple_choice":
model = BertForMultipleChoice.from_pretrained(args.tokenizer_model_type,
cache_dir=args.cache_dir,
fp32_layernorm=args.fp32_layernorm,
fp32_embedding=args.fp32_embedding,
layernorm_epsilon=args.layernorm_epsilon)
elif model_type == "classification":
model = BertForSequenceClassification.from_pretrained(args.tokenizer_model_type,
cache_dir=args.cache_dir,
fp32_layernorm=args.fp32_layernorm,
fp32_embedding=args.fp32_embedding,
layernorm_epsilon=args.layernorm_epsilon,
num_labels=num_labels)
else:
raise NotImplementedError
else:
        output_predict, parallel_output = True, True
if (model_type == "multiple_choice" or model_type == "classification") and not args.cloze_eval:
output_predict = False
if model_type is not None:
            parallel_output = False
if spell_length is not None:
print_rank_0(f"Continuous spell length {spell_length}")
model = GLMModel(num_layers=args.num_layers,
vocab_size=args.vocab_size,
hidden_size=args.hidden_size,
num_attention_heads=args.num_attention_heads,
embedding_dropout_prob=args.hidden_dropout,
attention_dropout_prob=args.attention_dropout,
output_dropout_prob=args.hidden_dropout,
max_sequence_length=args.max_position_embeddings,
max_memory_length=args.mem_length,
checkpoint_activations=args.checkpoint_activations,
checkpoint_num_layers=args.checkpoint_num_layers,
                         parallel_output=parallel_output,
relative_encoding=args.transformer_xl,
block_position_encoding=args.block_lm and not args.masked_lm,
output_predict=output_predict,
spell_length=spell_length,
spell_func=args.prompt_func,
attention_scale=args.attention_scale)
if args.freeze_transformer:
model.freeze_transformer(tune_prefix_layers=args.tune_prefix_layers)
if model_type is not None:
if model_type == 'multiple_choice':
if args.cloze_eval:
if multi_token:
if args.fast_decode:
model = GLMForMultiTokenClozeFast(model, length_penalty=args.length_penalty)
else:
model = GLMForMultiTokenCloze(model, length_penalty=args.length_penalty)
else:
model = GLMForSingleTokenCloze(model, take_softmax=args.adapet)
else:
model = GLMForSequenceClassification(model, args.hidden_size, args.output_dropout, args.pool_token,
num_class=num_labels)
elif model_type == 'classification':
model = GLMForSequenceClassification(model, args.hidden_size, args.output_dropout, args.pool_token,
num_class=num_labels)
elif model_type == 'generation':
pass
else:
raise NotImplementedError(model_type)
if mpu.get_data_parallel_rank() == 0:
print(' > number of parameters on model parallel rank {}: {}'.format(
mpu.get_model_parallel_rank(),
sum([p.nelement() for p in model.parameters()])), flush=True)
# To prevent OOM for model sizes that cannot fit in GPU memory in full precision
if args.fp16:
model.half()
# GPU allocation.
model.cuda(torch.cuda.current_device())
# Fp16 conversion.
if args.fp16:
model = FP16_Module(model)
# Wrap model for distributed training.
if not args.deepspeed and (args.train_iters or args.epochs):
if args.DDP_impl == 'torch':
i = torch.cuda.current_device()
model = TorchDDP(model, device_ids=[i], output_device=i,
process_group=mpu.get_data_parallel_group())
elif args.DDP_impl == 'local':
model = LocalDDP(model)
else:
print_rank_0("Skip DDP model")
return model
def get_optimizer_param_groups(model):
# Build parameter groups (weight decay and non-decay).
while isinstance(model, (LocalDDP, TorchDDP, FP16_Module)):
model = model.module
param_groups = glm_get_params_for_weight_decay_optimization(model)
# Add model parallel attribute if it is not set.
for param_group in param_groups:
# print('## param_group', len(param_group['params']))
for param in param_group['params']:
if not hasattr(param, 'model_parallel'):
param.model_parallel = False
return param_groups
def get_optimizer(param_groups, args):
"""Set up the optimizer."""
if args.cpu_optimizer:
# Apex FusedAdam uses decoupled weight decay so use the same here
if args.cpu_torch_adam:
cpu_adam_optimizer = torch.optim.AdamW
else:
from deepspeed.ops.adam import DeepSpeedCPUAdam
cpu_adam_optimizer = DeepSpeedCPUAdam
optimizer = cpu_adam_optimizer(param_groups,
lr=args.lr, weight_decay=args.weight_decay)
else:
# Use FusedAdam.
if args.optimizer == 'adam':
optimizer = Adam(param_groups,
lr=args.lr,
weight_decay=args.weight_decay,
betas=(args.adam_beta1, args.adam_beta2),
eps=args.adam_eps)
elif args.optimizer == 'adafactor':
from transformers import Adafactor
optimizer = Adafactor(param_groups, lr=args.lr, relative_step=False, warmup_init=False)
else:
raise NotImplementedError
print(f'Optimizer = {optimizer.__class__.__name__}')
if hasattr(args, "deepspeed") and args.deepspeed:
raise NotImplementedError
# fp16 wrapper is not required for DeepSpeed.
# return optimizer
# Wrap into fp16 optimizer.
if args.fp16:
optimizer = FP16_Optimizer(optimizer,
static_loss_scale=args.loss_scale,
dynamic_loss_scale=args.dynamic_loss_scale,
dynamic_loss_args={
'scale_window': args.loss_scale_window,
'min_scale': args.min_scale,
'delayed_shift': args.hysteresis})
return optimizer
def get_learning_rate_scheduler(optimizer, args):
"""Build the learning rate scheduler."""
# Add linear learning rate scheduler.
if args.lr_decay_iters is not None:
num_iters = args.lr_decay_iters
else:
num_iters = args.train_iters
if args.finetune:
num_iters = num_iters // args.gradient_accumulation_steps
num_iters = max(1, num_iters)
init_step = -1
warmup_iter = args.warmup * num_iters
lr_scheduler = AnnealingLR(optimizer,
start_lr=args.lr,
warmup_iter=warmup_iter,
num_iters=num_iters - warmup_iter,
decay_style=args.lr_decay_style,
last_iter=init_step,
decay_ratio=args.lr_decay_ratio)
return lr_scheduler
def setup_model_and_optimizer(args, model_type=None, multi_token=True, num_labels=None, spell_length=None):
"""Setup model and optimizer."""
model = get_model(args, model_type=model_type, multi_token=multi_token, num_labels=num_labels,
spell_length=spell_length)
param_groups = get_optimizer_param_groups(model)
    if (args.train_data is not None or args.data_dir is not None) and (args.epochs > 0 or args.train_iters > 0):
if args.deepspeed:
print_rank_0("DeepSpeed is enabled.")
model, optimizer, _, _ = deepspeed.initialize(
model=model,
model_parameters=param_groups,
args=args,
mpu=mpu,
dist_init_required=False
)
else:
optimizer = get_optimizer(param_groups, args)
lr_scheduler = get_learning_rate_scheduler(optimizer, args)
else:
optimizer, lr_scheduler = None, None
return model, optimizer, lr_scheduler
def backward_step(optimizer, model, lm_loss, args, timers):
"""Backward step."""
# Total loss.
loss = lm_loss
# Backward pass.
if args.deepspeed:
model.backward(loss)
else:
# optimizer.zero_grad()
if args.fp16:
optimizer.backward(loss, update_master_grads=False)
else:
loss.backward()
if args.deepspeed or args.DDP_impl == 'torch':
        # DeepSpeed's backward pass already handles the all-reduce communication.
# Reset the timer to avoid breaking timer logs below.
timers('allreduce').reset()
else:
timers('allreduce').start()
model.allreduce_params(reduce_after=False, fp32_allreduce=args.fp32_allreduce)
timers('allreduce').stop()
# Update master gradients.
if not args.deepspeed:
if args.fp16:
optimizer.update_master_grads()
# Clipping gradients helps prevent the exploding gradient.
if args.clip_grad > 0:
if not args.fp16:
mpu.clip_grad_norm(model.parameters(), args.clip_grad)
else:
optimizer.clip_master_grads(args.clip_grad)
return lm_loss
def see_memory_usage(message, force=False):
if not force:
return
dist.barrier()
if dist.get_rank() == 0:
print(message)
print("Memory Allocated ", torch.cuda.memory_allocated() / (1024 * 1024 * 1024), "GigaBytes")
print("Max Memory Allocated ", torch.cuda.max_memory_allocated() / (1024 * 1024 * 1024), "GigaBytes")
print("Cache Allocated ", torch.cuda.memory_cached() / (1024 * 1024 * 1024), "GigaBytes")
print("Max cache Allocated ", torch.cuda.max_memory_cached() / (1024 * 1024 * 1024), "GigaBytes")
print(" ")
# input("Press Any Key To Continue ..")
def train_step(data_iterator, model, optimizer, lr_scheduler, args, timers, forward_step_func, mems=None,
single_step=False):
"""Single training step."""
lm_loss_total, count = 0.0, 0
mems = [] if mems is None else mems
if not args.deepspeed:
optimizer.zero_grad()
while True:
skipped_iter, complete = 0, False
# Forward model for one step.
timers('forward').start()
lm_loss, mems, _ = forward_step_func(data_iterator, model, args, timers, mems)
timers('forward').stop()
# print_rank_0("Forward step")
if not args.deepspeed:
lm_loss /= args.gradient_accumulation_steps
reduced_loss = lm_loss.detach().clone().view(1)
torch.distributed.all_reduce(reduced_loss.data, group=mpu.get_data_parallel_group())
reduced_loss.data = reduced_loss.data / (args.world_size / args.model_parallel_size)
if not DynamicLossScaler._has_inf_or_nan(reduced_loss):
lm_loss_total += reduced_loss
count += 1
# Calculate gradients, reduce across processes, and clip.
timers('backward').start()
backward_step(optimizer, model, lm_loss, args, timers)
timers('backward').stop()
# print_rank_0("Backward step")
# Update parameters.
timers('optimizer').start()
if args.deepspeed:
if model.is_gradient_accumulation_boundary():
model.step()
complete = True
if not (args.fp16 and optimizer.overflow):
lr_scheduler.step()
else:
skipped_iter = 1
else:
model.step()
else:
if count == args.gradient_accumulation_steps:
optimizer.step()
complete = True
# Update learning rate.
if not (args.fp16 and optimizer.overflow):
lr_scheduler.step()
else:
skipped_iter = 1
# print_rank_0("Optimizer step")
timers('optimizer').stop()
if complete:
break
else:
print_rank_0("Found NaN loss, skip backward")
del lm_loss, reduced_loss
mems = []
if single_step:
break
if args.deepspeed:
lm_loss_total = lm_loss_total / count
return lm_loss_total, skipped_iter, mems
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-02-18 10:01
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0430_briefofevidencedocument'),
('wildlifecompliance', '0427_merge_20200214_1614'),
]
operations = [
]
|
import os
from typing import Optional, Mapping, Callable, Union, Tuple, List
from functools import lru_cache
from torch.utils.data import Dataset
import torch
import numpy as np
import h5py
from scipy.integrate import cumtrapz
import matplotlib.pyplot as plt
Sample = Mapping[str, Union[np.ndarray, torch.Tensor, List]]
Transform = Callable[[Sample], Sample]
class TimeSeriesDataset(Dataset):
"""
Dataset for generic time series
"""
def __init__(self,
dataset_path: str,
nb_groups: Optional[int] = None,
nb_parameters: Optional[int] = 1,
transform: Optional[Transform] = None,
group_offset = 0):
"""
Constructor
:param dataset_path: path to the hdf5 dataset file.
:param nb_groups: number of groups to be used in the dataset
:param nb_parameters: number of parameter (i.e. number of time series for a single sample)
:param transform: transform to be applied on a sample.
:param group_offset: groups will only be read from the specified offset (starting at 0 by default)
"""
# User-defined parameters
if os.path.exists(dataset_path):
self.dataset_path = dataset_path
else:
raise OSError(f"File {dataset_path} has not been found.")
if nb_parameters not in [1, 2, 3, 4, 5, 6]:
raise ValueError("The number of parameters shall be between 1 and 6.")
else:
self.nb_parameters = nb_parameters
self.nb_groups = nb_groups
self.group_offset = group_offset
self.transform = transform
# Internal parameters
self.idx_to_address = []
self.dataloader_mode = True
# Automatic configuration
self.array_per_group = {}
self.load_dataset()
def load_dataset(self):
"""
This method is called once at dataset initialization and sets internal parameters of the dataset.
"""
print("loding dataset from disk...")
total_samples = 0
read_groups = 0
with h5py.File(self.dataset_path, "r") as fobj:
for igroup, group in enumerate(fobj.keys()):
                if igroup < self.group_offset:
continue
if self.nb_groups is not None and read_groups > self.nb_groups - 1:
break
read_groups += 1
array = np.array(fobj[group]["data"])
nb_samples = array.shape[0] // self.nb_parameters
self.array_per_group[group] = array
# Set the address for this range of index
self.idx_to_address.append((total_samples + nb_samples - 1, group, nb_samples))
# Update for the next loop
total_samples += nb_samples
def dataloader(self, value=True):
"""
Set dataloader mode.
"""
self.dataloader_mode = value
return self
# @lru_cache(maxsize=128)
def get_data_array(self, group):
"""
Get data array from specified group.
        The arrays are preloaded in memory by load_dataset(); an lru_cache-based
        variant reading from disk is kept commented out.
"""
# print("read group: {}".format(group))
# with h5py.File(self.dataset_path, "r") as fobj:
# array = np.array(fobj[group]["data"])
# if np.any(np.isnan(array)):
# raise ValueError(f"NaN in dataset: dataset = {self.dataset_path} and group = {group}")
# return array
return self.array_per_group[group]
def __len__(self) -> int:
"""
Return number of samples in dataset.
"""
total_count = 0
for _, _, nb_samples in self.idx_to_address:
total_count += nb_samples
return total_count
def __getitem__(self, idx: int) -> Sample:
"""
Return the sample at specified index.
"""
# Get the group and index
offset = 0
idx_in_group = None
for (idx_high, group, nb_samples) in self.idx_to_address:
if idx <= idx_high:
idx_in_group = (idx + offset) * self.nb_parameters
break
else:
offset -= nb_samples
if idx_in_group is None:
raise IndexError(f"Request index {idx} of a dataset with {len(self)} items.")
# Create sample
array = self.get_data_array(group)
# if np.any(np.isnan(array)):
# print(group, self.dataset_path)
# assert False
sample = {
"data": array[idx_in_group:idx_in_group+self.nb_parameters],
"metrics": None, "meta": None, "series_at_order": {},
"address": [[group, idx_in_group, self.dataset_path]]}
# Transform
if self.transform:
sample = self.transform(sample)
if self.dataloader_mode:
sample.pop("address")
sample.pop("meta")
sample.pop("series_at_order")
# Basic assertions
# if np.any(np.isnan(sample["data"].numpy())):
# print(sample)
# assert False
# assert ~np.any(np.isnan(sample["metrics"].numpy()))
return sample
class Normalize():
def __call__(self, sample):
sample["data"] = (sample["data"] + 1.) / 2.
return sample
class RandomTimeWindow():
def __init__(self, output_size, low=None):
self.output_size = output_size
self.low = low
def __call__(self, sample):
if self.low is None:
low = 0
high = sample["data"].shape[1] - self.output_size - 1
if low >= high:
idx = 0
else:
idx = np.random.randint(low, high)
else:
idx = self.low
sample["data"] = sample["data"][:, idx:idx + self.output_size]
return sample
class Smoothing():
def __init__(self, kernel):
self._kernel = kernel
def __call__(self, sample):
kernel = np.ones(self._kernel) / self._kernel
for i in range(sample["data"].shape[0]):
sample["data"][i] = np.convolve(sample["data"][i], kernel ,mode="same")
return sample
class AddScaledChannel():
def __call__(self, sample):
data = sample["data"]
nb_channels = data.shape[0]
nb_parameters = data.shape[1]
nb_time_steps = data.shape[2]
scaled = AddScaledChannel.scale(data, channel=0)
sample["data"] = np.concatenate((data, scaled))
return sample
@staticmethod
def scale(data, channel=0):
local_data = data[channel]
nb_parameters = local_data.shape[0]
nb_time_steps = local_data.shape[1]
min_ = local_data.min(axis=1)
max_ = local_data.max(axis=1)
den = max_ - min_
den[np.abs(den) <= 1.e-8] = 1.0
den = np.tile(den, (nb_time_steps, 1)).T
min_ = np.tile(min_, (nb_time_steps, 1)).T
scaled = (local_data - min_) / den
return scaled.reshape(1, nb_parameters, nb_time_steps)
class AddDerivativeChannel():
def __init__(self, from_channel=0, scaling=True, replace=False):
self.scaling = scaling
self.from_channel = from_channel
self.replace = replace
def __call__(self, sample):
data = sample["data"]
unscaled_derivatives = AddDerivativeChannel.derivative(data, channel=self.from_channel, scaling=False)
if self.scaling:
scaled_derivatives = AddDerivativeChannel.derivative(data, channel=self.from_channel, scaling=True)
if self.replace:
data = np.concatenate((unscaled_derivatives, scaled_derivatives), axis=0)
else:
data = np.concatenate((data, unscaled_derivatives, scaled_derivatives), axis=0)
else:
if self.replace:
data = unscaled_derivatives
else:
data = np.concatenate((data, unscaled_derivatives), axis=0)
sample["data"] = data
return sample
@staticmethod
def derivative(data, channel=0, scaling=True):
unscaled = data[channel]
nb_parameters = data.shape[1]
nb_time_steps = data.shape[2]
derivatives = []
for i in range(nb_parameters):
derivative = np.gradient(unscaled[i], np.linspace(0., 1., nb_time_steps), edge_order=2)
derivative = derivative.reshape(1, 1, -1)
derivatives.append(derivative)
derivatives = np.concatenate(derivatives, axis=1)
if scaling:
derivatives = AddScaledChannel.scale(derivatives, channel=channel)
return derivatives
def time_condition_first_met(sample, t, condition):
    # the purpose of the `a` array is to store 1.0 where the condition is true
a = np.zeros(sample.shape)
a[np.where(condition)] = 1.0
# we select the index at which condition is first met with argmax and then
# calculate the corresponding time
ta = t[a.argmax(axis=1)]
# If condition is never met, we set time to a negative value (instant is out of the window)
ta[~np.any(condition, axis=1)] = -1.0
return ta
def compute_metrics(array, t, metrics=None):
"""
The shape of input array is (N, M) where N is the number of parameters and M the number of time samples
Returns:
- a numpy array with all computed metrics
- a list of metadata for the metric (for plotting purposes)
"""
if metrics is None:
metrics = ["min", "max", "mean", "std", "argmin", "argmax", ">0.5", "<0.5", ">mean", "<mean", "@t=0.5"]
all_metrics = []
meta = []
# Loop on metrics
for metric in metrics:
if metric == "min":
min_ = array.min(axis=1)
all_metrics.append(min_)
meta.append({"label": "min","axis": "y", "color": "blue"})
elif metric == "mean":
mean = array.mean(axis=1)
all_metrics.append(mean)
meta.append({"label": "mean","axis": "y", "color": "green"})
elif metric == "max":
max_ = array.max(axis=1)
all_metrics.append(max_)
meta.append({"label": "max","axis": "y", "color": "red"})
elif metric == "std":
std = array.std(axis=1)
all_metrics.append(std)
meta.append({"label": "std","axis": "y", "color": "magenta"})
elif metric == "argmin":
argmin = t[array.argmin(axis=1)]
all_metrics.append(argmin)
meta.append({"label": "argmin","axis": "x", "color": "blue"})
elif metric == "argmax":
argmax = t[array.argmax(axis=1)]
all_metrics.append(argmax)
meta.append({"label": "argmax","axis": "x", "color": "red"})
elif metric == ">0.5":
t_above_0 = time_condition_first_met(array, t, array > 0.5)
all_metrics.append(t_above_0)
meta.append({"label": ">0.5","axis": "x", "color": "cyan"})
elif metric == "<0.5":
t_below_0 = time_condition_first_met(array, t, array < 0.5)
all_metrics.append(t_below_0)
meta.append({"label": "<0.5","axis": "x", "color": "purple"})
elif metric == ">0":
t_above_0 = time_condition_first_met(array, t, array > 0)
all_metrics.append(t_above_0)
meta.append({"label": ">0","axis": "x", "color": "cyan"})
elif metric == "<0":
t_below_0 = time_condition_first_met(array, t, array < 0)
all_metrics.append(t_below_0)
meta.append({"label": "<0","axis": "x", "color": "purple"})
elif metric == ">mean":
mean_ = np.tile(array.mean(axis=1), array.shape[1]).reshape(array.shape[1], -1).T
t_above_mean = time_condition_first_met(array, t, array > mean_)
all_metrics.append(t_above_mean)
meta.append({"label": ">mean","axis": "x", "color": "orange"})
elif metric == "<mean":
mean_ = np.tile(array.mean(axis=1), array.shape[1]).reshape(array.shape[1], -1).T
t_below_mean = time_condition_first_met(array, t, array < mean_)
all_metrics.append(t_below_mean)
meta.append({"label": "<mean","axis": "x", "color": "green"})
elif metric == "@t=0.5":
y_at_05 = 0.5 * (array[:, array.shape[1] // 2 - 1] + array[:, array.shape[1] // 2])
all_metrics.append(y_at_05)
meta.append({"label": "@t=0.5","axis": "y", "color": "lime"})
# Return
return np.concatenate(all_metrics), meta
class Metrics():
def __init__(self, metrics=None, orders=None, kernel_size=11):
if orders is None:
self.orders = [0]
else:
self.orders = orders
self.metrics = metrics
self.kernel_size = kernel_size
def __call__(self, sample):
all_metrics = []
all_meta = []
n, m = sample["data"].shape
t = np.linspace(0., 1., m)
sample_orders = {0: sample["data"]}
series_at_order = {}
if self.kernel_size > 0:
shift = self.kernel_size // 2
else:
shift = 0
metrics = []
# We apply a rolling average using a convolution to smooth the sample before derivating
convolved = []
for i in range(n):
if self.kernel_size > 0:
convolved.append(np.convolve(sample_orders[0][i], np.ones(self.kernel_size)/self.kernel_size, 'same'))
else:
convolved.append(sample_orders[0][i])
if self.kernel_size > 0:
convolved = np.vstack(convolved)[:, shift:-shift]
series_at_order[0] = (t[shift:-shift], convolved)
else:
convolved = np.vstack(convolved)
series_at_order[0] = (t, convolved)
if 0 in self.orders:
metrics.append((0, sample["data"], t))
if 1 in self.orders or 2 in self.orders:
# Calculate first derivative
gradient = []
for i in range(n):
if self.kernel_size > 0:
gradient.append(np.gradient(convolved[i], t[shift:-shift], edge_order=2))
else:
gradient.append(np.gradient(convolved[i], t, edge_order=2))
den = gradient[-1].max() - gradient[-1].min()
if den <= 1.e-8:
den = 1.0
gradient[-1] = (gradient[-1] - gradient[-1].min()) / den
sample_orders[1] = np.vstack(gradient)
if self.kernel_size > 0:
series_at_order[1] = (t[shift:-shift], sample_orders[1])
if 1 in self.orders:
metrics.append((1, sample_orders[1], t[shift:-shift]))
else:
series_at_order[1] = (t, sample_orders[1])
if 1 in self.orders:
metrics.append((1, sample_orders[1], t))
if 2 in self.orders:
# Calculate second derivative
gradient = []
for i in range(n):
if self.kernel_size > 0:
gradient.append(np.gradient(sample_orders[1][i], t[shift:-shift], edge_order=2))
else:
gradient.append(np.gradient(sample_orders[1][i], t, edge_order=2))
den = gradient[-1].max() - gradient[-1].min()
if den <= 1.e-8:
den = 1.0
gradient[-1] = (gradient[-1] - gradient[-1].min()) / den
sample_orders[2] = np.vstack(gradient)
if self.kernel_size > 0:
series_at_order[2] = (t[shift:-shift], sample_orders[2])
metrics.append((2, sample_orders[2], t[shift:-shift]))
else:
series_at_order[2] = (t, sample_orders[2])
metrics.append((2, sample_orders[2], t))
if -1 in self.orders or -2 in self.orders:
# Calculate first integral
integral = [ ]
for i in range(n):
if self.kernel_size > 0:
integral.append(cumtrapz(convolved[i], t[shift:-shift], initial=0.))
else:
integral.append(cumtrapz(convolved[i], t, initial=0.))
den = integral[-1].max() - integral[-1].min()
if den <= 1.e-8:
den = 1.0
integral[-1] = (integral[-1] - integral[-1].min()) / den
sample_orders[-1] = np.vstack(integral)
if self.kernel_size > 0:
series_at_order[-1] = (t[shift:-shift], sample_orders[-1])
if -1 in self.orders:
metrics.append((-1, sample_orders[-1], t[shift:-shift]))
else:
series_at_order[-1] = (t, sample_orders[-1])
if -1 in self.orders:
metrics.append((-1, sample_orders[-1], t))
if -2 in self.orders:
# Calculate second integral
integral = [ ]
for i in range(n):
if self.kernel_size > 0:
integral.append(cumtrapz(sample_orders[-1][i], t[shift:-shift], initial=0.))
else:
integral.append(cumtrapz(sample_orders[-1][i], t, initial=0.))
den = integral[-1].max() - integral[-1].min()
if den <= 1.e-8:
den = 1.0
integral[-1] = (integral[-1] - integral[-1].min()) / den
sample_orders[-2] = np.vstack(integral)
if self.kernel_size > 0:
series_at_order[-2] = (t[shift:-shift], sample_orders[-2])
metrics.append((-2, sample_orders[-2], t[shift:-shift]))
else:
series_at_order[-2] = (t, sample_orders[-2])
metrics.append((-2, sample_orders[-2], t))
# Compute the metrics for each order
for order, v_array, t_array in metrics:
results, meta = compute_metrics(v_array, t_array, self.metrics)
all_metrics.append(results)
for m in meta:
m["order"] = order
all_meta.extend(meta)
sample["metrics"] = np.concatenate(all_metrics)
sample["meta"] = all_meta
sample["series_at_order"] = series_at_order
sample["data"] = sample["data"].reshape(1, sample["data"].shape[0], sample["data"].shape[1])
return sample
class ToTensor():
def __call__(self, sample):
has_channels = (len(sample["data"].shape) == 3)
if not has_channels:
sample["data"] = torch.from_numpy(sample["data"].astype(np.float32)).view(1, sample["data"].shape[0], -1)
else:
sample["data"] = torch.from_numpy(sample["data"].astype(np.float32))
# print(sample["data"].shape)
sample["metrics"] = torch.from_numpy(sample["metrics"].astype(np.float32))
return sample
class Compose():
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, sample):
for transform in self.transforms:
sample = transform(sample)
return sample
def plot_sample(sample, order=0, ncols=3, fig_width=28, prediction=None, row_height=4, legend_size="x-small"):
# Number of series
n = sample["data"].shape[1]
if n < ncols:
ncols = n
nrows = int(np.ceil(float(n) / float(ncols)))
# print(prediction.shape)
# print(sample["metrics"].shape)
# Generate a figure with a grid layout. The number of columns is fixed, the number of rows depends
# on the number of series
fig = plt.figure(figsize=(fig_width, row_height * nrows))
x = np.linspace(0., 1., sample["data"].shape[2])
if order == 0:
y = sample["data"][0]
else:
x = sample["series_at_order"][order][0]
y = sample["series_at_order"][order][1]
for i in range(n):
ax = fig.add_subplot(nrows, ncols, i+1)
ax.plot(x, y[i], label="series", c="k")
# ax.plot(np.linspace(0., 1., sample["data"].shape[2]), sample["data"][2][i], c="k", ls=":")
# if order == 0:
# ax.plot(sample["series_at_order"][0][0], sample["series_at_order"][0][1][i], c="k", ls="--", label="convolved")
for j, meta in enumerate(sample["meta"]):
if meta["order"] != order:
continue
if meta["axis"] == "y":
ax.axhline(sample["metrics"][j*n+i], label=meta["label"], c=meta["color"])
if prediction is not None:
ax.axhline(prediction[j*n+i], label=meta["label"] + " predicted", c=meta["color"], ls=":")
elif meta["axis"] == "x":
ax.axvline(sample["metrics"][j*n+i], label=meta["label"], c=meta["color"], ls="--")
if prediction is not None:
ax.axvline(prediction[j*n+i], label=meta["label"] + " predicted", c=meta["color"], ls="-.")
ax.legend(fontsize=legend_size, bbox_to_anchor=(1.04,0.5), loc="center left")
#ax.set_ylim((-0.1, 1.1))
ax.grid()
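
# ----------------------------------------------------------------------------
# Minimal usage sketch (hedged addition, not part of the original module): it
# builds a synthetic in-memory sample instead of reading an HDF5 file, so it
# only relies on numpy/torch and the transforms defined above.
if __name__ == "__main__":
    t_demo = np.linspace(0., 1., 200)
    demo_sample = {
        "data": np.vstack((np.sin(2 * np.pi * 3 * t_demo),
                           np.cos(2 * np.pi * 5 * t_demo))),
        "metrics": None, "meta": None, "series_at_order": {},
        "address": [["demo_group", 0, "in-memory"]],
    }
    pipeline = Compose([Metrics(orders=[0, 1], kernel_size=11), ToTensor()])
    out = pipeline(demo_sample)
    print("data tensor shape:", tuple(out["data"].shape))      # (1, 2, 200)
    print("metrics tensor shape:", tuple(out["metrics"].shape))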
|
#
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from task import Task
from twisted.internet.defer import inlineCallbacks, TimeoutError, failure, returnValue
from twisted.internet import reactor
from common.utils.asleep import asleep
from voltha.extensions.omci.database.mib_db_dict import *
from voltha.extensions.omci.omci_entities import OntData
from voltha.extensions.omci.omci_defs import AttributeAccess
AA = AttributeAccess
class AlarmCopyException(Exception):
pass
class AlarmDownloadException(Exception):
pass
class AlarmResyncException(Exception):
pass
class AlarmResyncTask(Task):
"""
OpenOMCI ALARM resynchronization Task
    This task should get a copy of the ALARM table and compare it to a
    copy of the database. When the ALARM Upload command is sent to the ONU,
    it should make a copy and source the data requested from this database.
    The ONU can still source AVCs and the OLT can still send config
    commands to the actual ONU.
"""
task_priority = 240
name = "ALARM Resynchronization Task"
max_retries = 3
retry_delay = 7
max_alarm_upload_next_retries = 3
alarm_upload_next_delay = 10 # Max * delay < 60 seconds
def __init__(self, omci_agent, device_id):
"""
Class initialization
:param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
:param device_id: (str) ONU Device ID
"""
super(AlarmResyncTask, self).__init__(AlarmResyncTask.name,
omci_agent,
device_id,
priority=AlarmResyncTask.task_priority,
exclusive=False)
self._local_deferred = None
self._device = omci_agent.get_device(device_id)
self._db_active = MibDbVolatileDict(omci_agent)
self._db_active.start()
def cancel_deferred(self):
super(AlarmResyncTask, self).cancel_deferred()
d, self._local_deferred = self._local_deferred, None
try:
if d is not None and not d.called:
d.cancel()
except:
pass
def start(self):
"""
Start ALARM Re-Synchronization task
"""
super(AlarmResyncTask, self).start()
self._local_deferred = reactor.callLater(0, self.perform_alarm_resync)
self._db_active.start()
self._db_active.add(self.device_id)
def stop(self):
"""
Shutdown ALARM Re-Synchronization task
"""
self.log.debug('stopping')
self.cancel_deferred()
self._device = None
self._db_active.stop()
self._db_active = None
super(AlarmResyncTask, self).stop()
def stop_if_not_running(self):
if not self.running:
raise AlarmResyncException('Resync Task was cancelled')
@inlineCallbacks
def perform_alarm_resync(self):
"""
Perform the ALARM Resynchronization sequence
The sequence to be performed is:
- get a copy of the current ALARM database
- perform ALARM upload commands to get ONU's database and save this
to a local DB.
During the alarm upload process, the maximum time between alarm upload next
requests is 1 minute.
"""
self.log.info('perform-alarm-resync')
try:
command_sequence_number = yield self.snapshot_alarm()
# Start the ALARM upload sequence, save alarms to the table
commands_retrieved, alarm_table = yield self.upload_alarm(command_sequence_number)
if commands_retrieved < command_sequence_number:
e = AlarmDownloadException('Only retrieved {} of {} instances'.
format(commands_retrieved, command_sequence_number))
self.deferred.errback(failure.Failure(e))
self.deferred.callback(
{
'commands_retrieved': commands_retrieved,
'alarm_table': alarm_table
})
except Exception as e:
self.log.exception('resync', e=e)
self.deferred.errback(failure.Failure(e))
@inlineCallbacks
def snapshot_alarm(self):
"""
Snapshot the ALARM on the ONU and create a copy of our local ALARM database
        :return: (int) command_sequence_number
"""
command_sequence_number = None
try:
max_tries = AlarmResyncTask.max_retries - 1
for retries in xrange(0, max_tries + 1):
# Send ALARM Upload so ONU snapshots its ALARM
try:
command_sequence_number = yield self.send_alarm_upload()
self.stop_if_not_running()
if command_sequence_number is None:
if retries >= max_tries:
break
except TimeoutError as e:
self.log.warn('timeout', e=e)
if retries >= max_tries:
raise
yield asleep(AlarmResyncTask.retry_delay)
self.stop_if_not_running()
continue
except Exception as e:
self.log.exception('alarm-resync', e=e)
raise
# Handle initial failures
if command_sequence_number is None:
raise AlarmCopyException('Failed to snapshot ALARM copy after {} retries'.
format(AlarmResyncTask.max_retries))
returnValue(command_sequence_number)
@inlineCallbacks
def send_alarm_upload(self):
"""
Perform ALARM upload command and get the number of entries to retrieve
:return: (int) Number of commands to execute or None on error
"""
########################################
# Begin ALARM Upload
try:
results = yield self._device.omci_cc.send_get_all_alarm()
self.stop_if_not_running()
command_sequence_number = results.fields['omci_message'].fields['number_of_commands']
if command_sequence_number is None or command_sequence_number <= 0:
raise ValueError('Number of commands was {}'.format(command_sequence_number))
returnValue(command_sequence_number)
except TimeoutError as e:
self.log.warn('alarm-resync-get-timeout', e=e)
raise
@inlineCallbacks
def upload_alarm(self, command_sequence_number):
########################################
# Begin ALARM Upload
seq_no = None
for seq_no in xrange(command_sequence_number):
max_tries = AlarmResyncTask.max_alarm_upload_next_retries
alarm_class_id = {}
alarm_entity_id = {}
attributes = {}
for retries in xrange(0, max_tries):
try:
response = yield self._device.omci_cc.get_all_alarm_next(seq_no)
self.stop_if_not_running()
omci_msg = response.fields['omci_message'].fields
alarm_class_id[seq_no] = omci_msg['alarmed_entity_class']
alarm_entity_id[seq_no] = omci_msg['alarmed_entity_id']
# Filter out the 'alarm_data_sync' from the database. We save that at
# the device level and do not want it showing up during a re-sync
# during data comparison
if alarm_class_id[seq_no] == OntData.class_id:
break
attributes[seq_no] = omci_msg['alarm_bit_map']
# Save to the database
self._db_active.set(self.device_id, alarm_class_id[seq_no],
alarm_entity_id[seq_no], attributes[seq_no])
break
except TimeoutError:
self.log.warn('alarm-resync-timeout', seq_no=seq_no,
command_sequence_number=command_sequence_number)
if retries < max_tries - 1:
yield asleep(AlarmResyncTask.alarm_upload_next_delay)
else:
raise
except Exception as e:
self.log.exception('resync', e=e, seq_no=seq_no,
command_sequence_number=command_sequence_number)
        returnValue((seq_no + 1, (alarm_class_id, alarm_entity_id, attributes)))  # seq_no is zero-based; the alarm table groups class ids, entity ids and bit maps
|
import requests
payload = {'lat': '12.2334', 'lon': '39.23467' , 'crime_id' : '3'}
req = requests.post("http://192.168.1.2/AutomatedDrone/api/location.php", data=payload)
print(req.text)
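
# Hedged addition: the same POST with a timeout and basic error handling; the
# endpoint and payload are taken as-is from the script above.
try:
    resp = requests.post("http://192.168.1.2/AutomatedDrone/api/location.php",
                         data=payload, timeout=5)
    resp.raise_for_status()
    print(resp.status_code, resp.text)
except requests.RequestException as exc:
    print("request failed:", exc)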
|
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test import RequestFactory, TestCase
from vs_listener.views import ListenerView
class ListenerViewTest(TestCase):
def test_missing_secret(self):
request = RequestFactory().post('/listener')
response = ListenerView.as_view()(request)
self.assertEqual(response.status_code, 403)
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import unittest
import sys
from tests.op_test import OpTest
import paddle
import paddle.fluid as fluid
paddle.enable_static()
SEED = 2021
class TestReduceSum(OpTest):
def setUp(self):
np.random.seed(SEED)
self.set_npu()
self.init_dtype()
self.place = paddle.CustomPlace('ascend', 0)
self.init_op_type()
self.initTestCase()
self.use_mkldnn = False
self.attrs = {
'dim': self.axis,
'keep_dim': self.keep_dim,
'reduce_all': self.reduce_all
}
self.inputs = {'X': np.random.random(self.shape).astype(self.dtype)}
if self.attrs['reduce_all']:
self.outputs = {'Out': self.inputs['X'].sum()}
else:
self.outputs = {
'Out': self.inputs['X'].sum(axis=self.axis,
keepdims=self.attrs['keep_dim'])
}
def set_npu(self):
self.__class__.use_custom_device = True
def init_dtype(self):
self.dtype = np.float32
def init_op_type(self):
self.op_type = "reduce_sum"
self.use_mkldnn = False
self.keep_dim = False
self.reduce_all = False
def initTestCase(self):
self.shape = (5, 6)
        self.axis = (0, )  # using self.axis = (0) would fail
def test_check_output(self):
self.check_output_with_place(self.place)
# TODO(ascendrc): Add grad test
# def test_check_grad(self):
# if self.dtype == np.float16:
# return
# self.check_grad(['X'], 'Out')
#
class TestReduceSum2(OpTest):
def init_dtype(self):
self.dtype = np.int32
# TODO(windstamp)
@unittest.skipIf(True, "Right now failed maybe caused by other reasons")
class TestReduceSumNet(unittest.TestCase):
def set_reduce_sum_function(self, x):
# keep_dim = False
return paddle.fluid.layers.reduce_sum(x, dim=-1)
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
np.random.seed(SEED)
a_np = np.random.random(size=(2, 3, 4)).astype('float32')
b_np = np.random.random(size=(2, 3, 4)).astype('float32')
label_np = np.random.randint(2, size=(2, 1)).astype('int64')
with paddle.static.program_guard(main_prog, startup_prog):
a = paddle.static.data(name="a", shape=[2, 3, 4], dtype='float32')
b = paddle.static.data(name="b", shape=[2, 3, 4], dtype='float32')
label = paddle.static.data(
name="label", shape=[2, 1], dtype='int64')
a_1 = fluid.layers.fc(input=a, size=4, num_flatten_dims=2, act=None)
b_1 = fluid.layers.fc(input=b, size=4, num_flatten_dims=2, act=None)
z = paddle.add(a_1, b_1)
z_1 = self.set_reduce_sum_function(z)
prediction = fluid.layers.fc(input=z_1, size=2, act='softmax')
cost = fluid.layers.cross_entropy(input=prediction, label=label)
loss = fluid.layers.reduce_mean(cost)
sgd = fluid.optimizer.SGD(learning_rate=0.01)
sgd.minimize(loss)
if run_npu:
place = paddle.CustomPlace('ascend', 0)
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
print("Start run on {}".format(place))
for epoch in range(100):
pred_res, loss_res = exe.run(
main_prog,
feed={"a": a_np,
"b": b_np,
"label": label_np},
fetch_list=[prediction, loss])
if epoch % 10 == 0:
print("Epoch {} | Prediction[0]: {}, Loss: {}".format(
epoch, pred_res[0], loss_res))
return pred_res, loss_res
def test_npu(self):
cpu_pred, cpu_loss = self._test(False)
npu_pred, npu_loss = self._test(True)
self.assertTrue(np.allclose(npu_pred, cpu_pred))
self.assertTrue(np.allclose(npu_loss, cpu_loss))
# TODO(windstamp)
@unittest.skipIf(True, "Right now failed maybe caused by other reasons")
class TestReduceSumNet2(TestReduceSumNet):
def set_reduce_sum_function(self, x):
# keep_dim = True
return paddle.fluid.layers.reduce_sum(x, dim=-1, keep_dim=True)
# TODO(windstamp)
@unittest.skipIf(True, "Right now failed maybe caused by other reasons")
class TestReduceSumNet3(TestReduceSumNet):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
np.random.seed(SEED)
a_np = np.random.random(size=(2, 3, 4)).astype('float32')
b_np = np.random.random(size=(2, 3, 4)).astype('float32')
with paddle.static.program_guard(main_prog, startup_prog):
a = paddle.static.data(name="a", shape=[2, 3, 4], dtype='float32')
b = paddle.static.data(name="b", shape=[2, 3, 4], dtype='float32')
z = paddle.add(a, b)
loss = fluid.layers.reduce_sum(z)
sgd = fluid.optimizer.SGD(learning_rate=0.01)
sgd.minimize(loss)
if run_npu:
place = paddle.CustomPlace('ascend', 0)
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
print("Start run on {}".format(place))
for epoch in range(100):
loss_res = exe.run(main_prog,
feed={"a": a_np,
"b": b_np},
fetch_list=[loss])
if epoch % 10 == 0:
print("Epoch {} | Loss: {}".format(epoch, loss_res))
return loss_res, loss_res
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pyramid.view import view_config, view_defaults
from pyramid.httpexceptions import HTTPFound, HTTPOk
from pyramid.httpexceptions import HTTPNotFound, HTTPBadRequest, HTTPRequestTimeout
#
# Get Network Interface Card information
#
import netifaces
from netaddr import IPAddress
#
# BACnet connection driver
#
from driver.bacnet import BACnetd
from driver.bacnet import BACnetSimpleClient
#
# Start BACnetd
#
@view_config(route_name='api::bacnetd:start', renderer='json')
def start(request):
#
    # Get NIC information
#
interface = 'lo0'
if 'interface' in request.GET:
interface = request.GET['interface']
#
    # Get NIC information
#
bacnet_address = None
try:
#
        # Get the IPv4 address from the NIC
#
iface_data = netifaces.ifaddresses(interface)
ipv4 = iface_data.get(netifaces.AF_INET)
        if ipv4 is not None:
prefix = IPAddress(ipv4[0]['netmask']).netmask_bits()
bacnet_address = '%s/%d' %(ipv4[0]['addr'], prefix)
#
    # Handle the case where no NIC information was found
#
except ValueError:
return HTTPBadRequest()
#
    # If no BACnet address is defined
#
    if bacnet_address is None:
return HTTPBadRequest()
#
    # Check whether the BACnet daemon is already running
#
    if request.registry.bacnetd is None:
#
        # Start the BACnet daemon
#
request.registry.bacnetd = BACnetd(bacnet_address)
request.registry.bacnetd.start()
#
    # Return the BACnet daemon start result
#
return HTTPOk()
|
import datetime
class Employee:
num_of_employees = 0
raise_amount = 1.04
def __init__(self, first, last, pay):
self.first = first
self.last = last
self.pay = pay
self.email = f"{first}.{last}@company.com"
Employee.num_of_employees += 1
def fullname(self):
return f"{self.first} {self.last}"
def apply_raise(self):
self.pay = int(self.pay * self.raise_amount)
@classmethod
def set_raise_amount(cls, amount):
cls.raise_amount = amount
# NOTE: Sometimes, classmethod can also be used as a constructor.
@classmethod
def from_string(cls, emp_str):
first, last, pay = emp_str.split('-')
return cls(first, last, pay)
# NOTE: Simply put, if we are not using any class variable, we set it
# as a static method.
@staticmethod
def is_workday(day):
# NOTE:
# Monday, ... , Saturday, Sunday
# 0, ... , 5, 6
if day.weekday() >= 5:
return False
return True
emp_1 = Employee("Corey", "Schafer", 100)
emp_2 = Employee("Anton", "Michael", 100)
emp_str_3 = 'John-Rascal-200'
emp_3 = Employee.from_string(emp_str_3)
Employee.set_raise_amount(1.05)
print(Employee.raise_amount)
print(emp_1.raise_amount)
print(emp_2.raise_amount)
print(emp_3.raise_amount)
dt = datetime.date(2016, 7, 10)
print(Employee.is_workday(dt))
dt = datetime.date(2016, 7, 11)
print(Employee.is_workday(dt))
|
#!/usr/bin/env python3
# Day 1: Single Number
#
# Given a non-empty array of integers, every element appears twice except for one. Find that single one.
class Solution:
def singleNumber(self, nums: [int]) -> int:
seen = []
for num in nums:
if num in seen:
seen.remove(num)
else:
seen.append(num)
return seen[0]
# Tests
assert Solution().singleNumber([2,2,1]) == 1
assert Solution().singleNumber([4,1,2,1,2]) == 4
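# A hedged alternative sketch (an addition, not part of the original solution):
# XOR-ing all values cancels the paired elements (x ^ x == 0), leaving the single
# number in O(n) time and O(1) extra space, versus the list-based approach above.
def single_number_xor(nums: [int]) -> int:
    result = 0
    for num in nums:
        result ^= num  # pairs cancel; the lone element survives
    return result

assert single_number_xor([2,2,1]) == 1
assert single_number_xor([4,1,2,1,2]) == 4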
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, Pureport, Inc.
# All Rights Reserved
from __future__ import absolute_import
from click import argument
from pureport_client.commands import (
CommandBase,
AccountsMixin
)
from pureport_client.util import JSON
class Command(AccountsMixin, CommandBase):
"""Manage Pureport account invites
"""
def list(self):
"""Get all invites for the provided account.
\f
:returns: a list of AccountInvite objects
:rtype: list
"""
return self.client.find_account_invites()
@argument('invite_id')
def get(self, invite_id):
"""Get an account's invite with the provided account invite id.
\f
:param invite_id: the ID of the invite to retrieve
:type invite_id: str
:returns: an AccountInvite object
:rtype: dict
"""
return self.__call__('get', 'invites/{}'.format(invite_id))
@argument('invite', type=JSON)
def create(self, invite):
"""Create an account invite using the provided account.
\f
:param invite: an AccountInvite object
        :type invite: dict
:returns: an AccountInvite object
:rtype: dict
"""
return self.__call__('post', 'invites', json=invite)
@argument('invite', type=JSON)
def update(self, invite):
"""Update an account invite using the provided account.
\f
:param invite: an AccountInvite object
:type invite: dict
:returns: an AccountInvite object
:rtype: dict
"""
return self.__call__('put', 'invites/{id}'.format(**invite), json=invite)
@argument('invite_id')
def delete(self, invite_id):
"""Delete an account invite using the provided account.
\f
:param invite_id: the ID of the invite to retrieve
:type invite_id: str
:returns: None
"""
self.__call__('delete', 'invites/{}'.format(invite_id))
|
from random import randrange
from discord import Member,Role,utils
from .errors import *
class Games:
"""
A game class with different games.
Parameters
-----------
    debugnum : Optional[int], default None
This is for debugging purposes.
"""
def __init__(self,debugnum=None):
self.debugnum = debugnum
self.numheads = 0
self.numtails = 0
def flipCoin(self):
"""
Flips a coin.
Returns
--------
**Returns** string ('heads'/'tails')
"""
        val = randrange(0,2) if self.debugnum is None else self.debugnum
if(val == 0):
return 'heads'
else:
return 'tails'
def flipCoinGuess(self,guess):
"""
Flips a coin, and compares it against a guess
Parameters
-----------
guess : (str)
A string indicating the guess ('heads'/'tails')
Raises
-------
BadInput
This is if the guess is not correctly formatted.
Returns
-------
**Returns** A tuple (bool,str,str), the bool indicating winning or
losing. The first string to generate a win/lose message. The last
string with what the result was.
"""
side = self.flipCoin()
if('s' not in guess.lower()): #head->heads, tail->tails
guess += 's'
if(guess.lower() not in ['heads','tails']):
raise BadInput("Please use heads/tails")
if(side == guess.lower()):
return (True, "It was {}! You won.".format(side),side)
return (False,"It was {}! You lost.".format(side),side)
def rollDice(self):
"""
Rolls a dice.
Returns
--------
**Returns** An int between 1 and 6.
"""
return randrange(1,7)
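# A minimal usage sketch (hedged: the calling bot code below is illustrative,
# not part of this module; it assumes guesses are passed through as plain strings):
#
#   games = Games()
#   games.flipCoin()                            # -> 'heads' or 'tails'
#   won, message, side = games.flipCoinGuess('heads')
#   games.rollDice()                            # -> int between 1 and 6, inclusive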
|
n = "Hello"
# Your function here!
def string_function(s):
return s + 'world'
print(string_function(n))
|
# Generated by Django 3.0.3 on 2020-02-24 14:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('core', '0008_auto_20200223_1339'),
]
operations = [
migrations.CreateModel(
name='AttestationProducer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('period', models.CharField(max_length=7)),
('deadline', models.DateField()),
('producer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Entity')),
],
options={
'verbose_name': 'Attestation de Durabilité',
'verbose_name_plural': 'Attestations de Durabilité',
'db_table': 'producer_attestations',
},
),
]
|
import numpy as np
import tensorflow.keras as keras
def BCDU_net_D3(input_size = (384,384,3)):
N = input_size[0]
#inputs1 = keras.layers.Input((256, 256, 1))
inputs1 = keras.layers.Input(input_size)
conv1 = keras.layers.Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs1)
conv1 = keras.layers.Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
pool1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = keras.layers.Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
conv2 = keras.layers.Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
pool2 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = keras.layers.Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
conv3 = keras.layers.Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
drop3 = keras.layers.Dropout(0.5)(conv3)
pool3 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv3)
# D1
conv4 = keras.layers.Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
conv4_1 = keras.layers.Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
drop4_1 = keras.layers.Dropout(0.5)(conv4_1)
# D2
conv4_2 = keras.layers.Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(drop4_1)
conv4_2 = keras.layers.Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4_2)
conv4_2 = keras.layers.Dropout(0.5)(conv4_2)
# D3
merge_dense = keras.layers.concatenate([conv4_2,drop4_1], axis = 3)
conv4_3 = keras.layers.Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge_dense)
conv4_3 = keras.layers.Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4_3)
drop4_3 = keras.layers.Dropout(0.5)(conv4_3)
up6 = keras.layers.Conv2DTranspose(256, kernel_size=2, strides=2, padding='same',kernel_initializer = 'he_normal')(drop4_3)
up6 = keras.layers.BatchNormalization(axis=3)(up6)
up6 = keras.layers.Activation('relu')(up6)
x1 = keras.layers.Reshape(target_shape=(1, np.int32(N/4), np.int32(N/4), 256))(drop3)
x2 = keras.layers.Reshape(target_shape=(1, np.int32(N/4), np.int32(N/4), 256))(up6)
merge6 = keras.layers.concatenate([x1,x2], axis = 1)
merge6 = keras.layers.ConvLSTM2D(filters = 128, kernel_size=(3, 3), padding='same', return_sequences = False, go_backwards = True,kernel_initializer = 'he_normal' )(merge6)
conv6 = keras.layers.Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
conv6 = keras.layers.Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
up7 = keras.layers.Conv2DTranspose(128, kernel_size=2, strides=2, padding='same',kernel_initializer = 'he_normal')(conv6)
up7 = keras.layers.BatchNormalization(axis=3)(up7)
up7 = keras.layers.Activation('relu')(up7)
x1 = keras.layers.Reshape(target_shape=(1, np.int32(N/2), np.int32(N/2), 128))(conv2)
x2 = keras.layers.Reshape(target_shape=(1, np.int32(N/2), np.int32(N/2), 128))(up7)
merge7 = keras.layers.concatenate([x1,x2], axis = 1)
merge7 = keras.layers.ConvLSTM2D(filters = 64, kernel_size=(3, 3), padding='same', return_sequences = False, go_backwards = True,kernel_initializer = 'he_normal' )(merge7)
conv7 = keras.layers.Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
conv7 = keras.layers.Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
up8 = keras.layers.Conv2DTranspose(64, kernel_size=2, strides=2, padding='same',kernel_initializer = 'he_normal')(conv7)
up8 = keras.layers.BatchNormalization(axis=3)(up8)
up8 = keras.layers.Activation('relu')(up8)
x1 = keras.layers.Reshape(target_shape=(1, N, N, 64))(conv1)
x2 = keras.layers.Reshape(target_shape=(1, N, N, 64))(up8)
merge8 = keras.layers.concatenate([x1,x2], axis = 1)
merge8 = keras.layers.ConvLSTM2D(filters = 32, kernel_size=(3, 3), padding='same', return_sequences = False, go_backwards = True,kernel_initializer = 'he_normal' )(merge8)
conv8 = keras.layers.Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
conv8 = keras.layers.Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
conv8 = keras.layers.Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
conv9 = keras.layers.Conv2D(1, 1, activation = 'sigmoid')(conv8)
    model = keras.models.Model(inputs = inputs1, outputs = conv9)
return model
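# A minimal build-and-compile sketch (hedged: the optimizer/loss below are assumptions
# for a binary segmentation setup, not taken from the original source). The spatial
# size must be divisible by 4 so the N/2 and N/4 Reshape layers match the pooled maps.
if __name__ == '__main__':
    net = BCDU_net_D3(input_size=(384, 384, 3))
    net.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    net.summary()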
|
"""Ensure that sort functions handle reasonably large test cases including
random, sorted, reversed, and identical data"""
from random import randint
import pytest
import algorithms.src.merge_sort as merge_sort
import algorithms.src.quick_sort as quick_sort
sorts = [merge_sort.out_of_place, merge_sort.in_place,
quick_sort.out_of_place, quick_sort.in_place]
@pytest.fixture
def random_sample():
return [randint(-100, 100) for _ in range(100)]
@pytest.fixture
def sorted_sample(random_sample):
return list(sorted(random_sample))
@pytest.fixture
def reversed_sample(random_sample):
return list(reversed(sorted(random_sample)))
@pytest.fixture
def identical_sample():
sample = randint(-100, 100)
return [sample for _ in range(100)]
def test_simple_sort():
input = [1, 5, 2, 5, 7, 3]
output = [1, 2, 3, 5, 5, 7]
for sort_func in sorts:
assert sort_func(input) == output
def test_large_random_sample(random_sample):
result = sorted(random_sample)
for sort_func in sorts:
assert sort_func(random_sample) == result
def test_large_sorted_sample(sorted_sample):
for sort_func in sorts:
assert sort_func([s for s in sorted_sample]) == sorted_sample
def test_large_reversed_sample(reversed_sample):
result = sorted(reversed_sample)
for sort_func in sorts:
assert sort_func(reversed_sample) == result
def test_large_identical_sample(identical_sample):
for sort_func in sorts:
assert sort_func([i for i in identical_sample]) == identical_sample
|
#!/usr/bin/env python3
import fire
import json
import os
import sys
import numpy as np
import tensorflow as tf
import codecs
import base64
import model, sample, encoder
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
def interact_model(
model_name='',
seed=None,
length=40,
temperature=0.5,
top_k=15,
models_dir='models',
conversation='subject: hello'
):
enc = encoder.get_encoder(model_name, models_dir)
hparams = model.default_hparams()
with open(os.path.join('models', model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
if length > hparams.n_ctx:
raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
np.random.seed(seed)
tf.compat.v1.set_random_seed(seed)
context = tf.compat.v1.placeholder(tf.int32, [1, None])
output = sample.sample_sequence(
hparams=hparams, length=length,
context=context,
batch_size=1,
temperature=temperature, top_k=top_k
)
print(conversation)
while True:
saver = tf.compat.v1.train.Saver()
ckpt = tf.train.latest_checkpoint(os.path.join('models', model_name))
saver.restore(sess, ckpt)
message = None
while not message:
message = input("other: ")
conversation = base64.b64decode(message).decode("utf-8")
sys.stdout.write("subject: ")
sys.stdout.flush()
encoded_conversation = enc.encode(conversation)
result = sess.run(output, feed_dict={
context: [encoded_conversation]
})[:, len(encoded_conversation):]
text = enc.decode(result[0])
splits = text.split('\n')
reply = splits[0]
sys.stdout.write(base64.b64encode(reply.encode("utf-8")).decode("utf-8"))
sys.stdout.write('\n')
sys.stdout.flush()
if __name__ == '__main__':
fire.Fire(interact_model)
|
import re
def process_token(token: str):
"""Makes sure a given token is valid for usage within the Discord API.
Args:
token (:class:`str`): The token to be processed.
Returns:
:class:`str`: The modified, adapted token.
Examples:
.. testsetup::
from serpcord.utils import process_token
.. doctest::
>>> process_token("Bot XXX") # normal Bot token
'Bot XXX'
>>> process_token("Bearer XXX") # normal Bearer token
'Bearer XXX'
>>> process_token("bot XXX") # corrects case
'Bot XXX'
>>> process_token("beaREr XXX") # corrects case
'Bearer XXX'
>>> process_token("XXX") # defaults to Bot token
'Bot XXX'
"""
if token.startswith("Bot ") or token.startswith("Bearer "):
return token
elif re.match(r"^bot ", token, flags=re.I):
return re.sub("^bot ", "Bot ", token, flags=re.I)
elif re.match(r"^bearer ", token, flags=re.I):
return re.sub("^bearer ", "Bearer ", token, flags=re.I)
else:
return f"Bot {token}"
|
import sympy.physics.mechanics as me
import sympy as sm
import math as m
import numpy as np
x1, x2 = me.dynamicsymbols('x1 x2')
f1 = x1*x2+3*x1**2
f2 = x1*me.dynamicsymbols._t+x2*me.dynamicsymbols._t**2
x, y = me.dynamicsymbols('x y')
xd, yd = me.dynamicsymbols('x y', 1)
yd2 = me.dynamicsymbols('y', 2)
q1, q2, q3, u1, u2 = me.dynamicsymbols('q1 q2 q3 u1 u2')
p1, p2 = me.dynamicsymbols('p1 p2')
p1d, p2d = me.dynamicsymbols('p1 p2', 1)
w1, w2, w3, r1, r2 = me.dynamicsymbols('w1 w2 w3 r1 r2')
w1d, w2d, w3d, r1d, r2d = me.dynamicsymbols('w1 w2 w3 r1 r2', 1)
r1d2, r2d2 = me.dynamicsymbols('r1 r2', 2)
c11, c12, c21, c22 = me.dynamicsymbols('c11 c12 c21 c22')
d11, d12, d13 = me.dynamicsymbols('d11 d12 d13')
j1, j2 = me.dynamicsymbols('j1 j2')
n = sm.symbols('n')
n = sm.I
|
# Copyright 2021 Tony Wu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import logging
import re
import time
import uuid
from abc import ABC, abstractmethod
from contextlib import contextmanager
from datetime import date, datetime, timezone
from functools import wraps
from operator import attrgetter, itemgetter
from sqlite3 import Connection as SQLite3Connection
from typing import (Any, Callable, Dict, List, Optional, Set, Tuple, Type,
TypeVar, Union, overload)
from urllib.parse import urlencode
import simplejson as json
import udatetime
import unidecode
from sqlalchemy import MetaData, Table, create_engine, event, types
from sqlalchemy.engine import Connection, Engine, Result
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.associationproxy import (AssociationProxy,
AssociationProxyInstance)
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.inspection import inspect
from sqlalchemy.orm import (Mapper, Query, RelationshipProperty, Session,
aliased, declarative_base, declared_attr,
relationship, scoped_session, sessionmaker,
with_polymorphic)
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm.properties import ColumnProperty
from sqlalchemy.schema import (DDL, Column, ForeignKey, Index,
PrimaryKeyConstraint, UniqueConstraint)
from sqlalchemy.sql.expression import FunctionElement, column, select, table
from sqlalchemy.sql.functions import count
from sqlalchemy.sql.selectable import \
LABEL_STYLE_TABLENAME_PLUS_COL as LS_TABLE_COL
from sqlalchemy.types import CHAR, INTEGER, TypeDecorator
metadata = MetaData(
naming_convention={
'ix': 'ix_%(table_name)s_%(column_0_N_name)s',
'uq': 'uq_%(table_name)s_%(column_0_N_name)s',
'ck': 'ck_%(table_name)s_%(column_0_N_name)s',
'fk': 'fk_%(table_name)s_%(column_0_N_name)s_%(referred_table_name)s',
'pk': 'pk_%(table_name)s',
},
)
RESTRICT = 'RESTRICT'
CASCADE = 'CASCADE'
R = TypeVar('R')
T = TypeVar('T', bound='Identity')
U = TypeVar('U', bound='Identity')
InferrableSelectable = Union[Type[T], InstrumentedAttribute]
MappedSelectable = Union[Type[T], Mapper]
SessionFactory = None
__version__ = Table('__version__', metadata, Column('version', types.String(), primary_key=True))
def exclusive_to(classname: str, default=()):
def wrapper(f):
@wraps(f)
def wrapped(cls):
if cls.__name__ == classname:
return f(cls)
return default
return wrapped
return wrapper
class UUIDType(TypeDecorator):
impl = CHAR
@property
def python_type(self):
return uuid.UUID
def load_dialect_impl(self, dialect):
return dialect.type_descriptor(CHAR(36))
def process_bind_param(self, value: Optional[uuid.UUID | str], dialect) -> Optional[str]:
if value is None:
return None
return str(value)
def process_result_value(self, value: Optional[str], dialect) -> Optional[uuid.UUID]:
if value is None:
return None
if not isinstance(value, uuid.UUID):
return uuid.UUID(value)
return value
class unixepoch(FunctionElement):
type = INTEGER
@compiles(unixepoch, 'sqlite')
def sqlite_utcnow(element, compiler, **kwargs):
return "CAST((julianday('now') - 2440587.5) * 86400000 AS INTEGER)"
def ensure_datetime(o):
if o is None:
return None
if isinstance(o, datetime):
return o
if isinstance(o, date):
return datetime.combine(o, datetime.min.time(), tzinfo=timezone.utc)
if isinstance(o, (int, float)):
return udatetime.fromtimestamp(o, tz=udatetime.TZFixedOffset(0))
if isinstance(o, date):
o = datetime.combine(o, datetime.min.time())
try:
return udatetime.from_string(o)
except Exception:
return o
class TimestampType(TypeDecorator):
impl = INTEGER
@property
def python_type(self):
return datetime
def process_bind_param(self, value: Optional[int | float | datetime], dialect) -> Optional[float]:
value = ensure_datetime(value)
if value is None:
return None
if isinstance(value, (int, float)):
return int(value * 1000)
try:
return int(value.astimezone(timezone.utc).timestamp() * 1000)
except AttributeError:
pass
raise TypeError(f'expected datetime.datetime object, not {type(value)}')
def process_result_value(self, value: Optional[int], dialect) -> Optional[datetime]:
if value is None:
return None
return udatetime.utcfromtimestamp(value / 1000)
@event.listens_for(Engine, 'connect')
def sqlite_features(conn, conn_record):
if isinstance(conn, SQLite3Connection):
with conn:
conn.execute('PRAGMA foreign_keys=ON;')
conn.execute('PRAGMA journal_mode=WAL;')
Base = declarative_base(metadata=metadata)
class Identity(Base):
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not getattr(self, 'uuid4', None):
self.uuid4 = uuid.uuid4()
@classmethod
def _parent_mapper(cls):
for c in cls.mro()[1:]:
if issubclass(c, Identity):
return c
@classmethod
def __init_subclass__(cls, **kwargs) -> None:
super().__init_subclass__(**kwargs)
supercls = cls._parent_mapper()
cls.id: int = Column(types.Integer(), ForeignKey(supercls.id, ondelete=CASCADE, onupdate=RESTRICT), primary_key=True)
id: int = Column(types.Integer(), primary_key=True, autoincrement=True)
uuid4: uuid.UUID = Column(UUIDType())
model: str = Column(types.String())
ctime: datetime = Column(TimestampType(), nullable=False, server_default=unixepoch())
@declared_attr
def __mapper_args__(cls):
args = {
'polymorphic_identity': cls.__name__,
}
if cls.__name__ == 'Identity':
args['polymorphic_on'] = cls.model
return args
@declared_attr
@exclusive_to('Identity')
def __table_args__(cls):
return (
Index(None, 'id', 'uuid4', unique=True),
Index(None, 'uuid4', unique=True),
)
@property
def ident(self) -> Identity:
pk = self.id
if not pk and not self.uuid4:
raise ValueError
if not pk:
return self.uuid4
return pk
def discriminator(self, sep='#') -> str:
return f'{type(self).__name__}{sep}{self.ident}'
def to_dict(self) -> Dict[str, Any]:
return {k: getattr(self, k) for k in self.__reflection__.columns}
def __str__(self):
return self.discriminator()
def __repr__(self) -> str:
return f'<{self.discriminator()} at {hex(id(self))}>'
__reflection__: Reflection
@classmethod
def _init_reflection(cls):
cls.__reflection__ = Reflection(cls)
class BundleABC(ABC):
def __init__(self, path, *, echo=False, readonly=False, thread_safe=True):
db_url = f'sqlite:///{path}'
params = {}
if readonly:
params['mode'] = 'ro'
if not thread_safe:
params['check_same_thread'] = False
if params:
db_url = f'{db_url}?{urlencode(params)}'
engine = create_engine(db_url, echo=echo, json_serializer=JSON_ENCODER.encode)
self._metadata: MetaData = metadata
self._engine: Engine = engine
set_session(scoped_session(sessionmaker(bind=engine, autoflush=False, future=True)))
self._init_logging()
self._init_events()
self._verify_version()
metadata.create_all(engine)
self._init_version()
def _init_logging(self):
self.log = logging.getLogger('rdb.bundle')
self.log_timing = logging.getLogger('rdb.bundle.timing')
logging.getLogger('sqlalchemy.engine.Engine').handlers.clear()
@property
@abstractmethod
def version(self) -> str:
raise NotImplementedError
def _verify_version(self):
stmt = __version__.select()
try:
row = self.execute(stmt).fetchone()
ver = row and row[0]
except OperationalError:
ver = None
if not ver:
stmt = select([count(column('name'))]).select_from(table('sqlite_master')).where(column('type') == 'table')
table_count = self.execute(stmt).fetchone()[0]
if table_count:
raise DatabaseNotEmptyError()
return
if ver != self.version:
raise DatabaseVersionError(ver, self.version)
def _init_version(self):
self.execute('INSERT OR REPLACE INTO __version__ (version) VALUES (?);', (self.version,))
self.commit()
def _init_events(self):
event.listen(Engine, 'before_cursor_execute', self._on_before_cursor_execute)
event.listen(Engine, 'after_cursor_execute', self._on_after_cursor_execute)
@overload
def __getitem__(self, key: int | str | uuid.UUID) -> Optional[T]:
...
@overload
def __getitem__(self, key: Tuple[Type[U], int | str | uuid.UUID]) -> Optional[U]:
...
def __getitem__(self, key):
if isinstance(key, tuple):
model, ident = key
else:
model = None
ident = key
if ident is None:
return None
if isinstance(ident, str):
ident = uuid.UUID(ident)
if model is None:
model = Identity
if isinstance(ident, uuid.UUID):
return self.session.query(model).filter_by(uuid4=ident).one()
return self.session.get(model, ident)
def id(self, uid: uuid.UUID | str) -> Optional[int]:
uid = str(uid)
return self.query(Identity.id).filter(Identity.uuid4 == uid).scalar()
    def uid(self, id_: int) -> Optional[uuid.UUID]:
return self.query(Identity.uuid4).filter(Identity.id == id_).scalar()
def ids(self, uids: List[uuid.UUID]) -> Dict[uuid.UUID, int]:
uids = [str(u) for u in uids]
return dict(self.query(Identity.uuid4, Identity.id).filter(Identity.uuid4.in_(uids)).all())
def uids(self, ids: List[int]) -> Dict[int, uuid.UUID]:
return dict(self.query(Identity.id, Identity.uuid4).filter(Identity.id.in_(ids)).all())
def _on_before_cursor_execute(self, conn, cursor, statement,
parameters, context, executemany):
conn.info.setdefault('query_start_time', []).append(time.time())
def _on_after_cursor_execute(self, conn, cursor, statement,
parameters, context, executemany):
total = time.time() - conn.info['query_start_time'].pop(-1)
self.log_timing.debug(f'Total Time: {total}')
@property
def session(self) -> Session:
return get_session()
@property
def conn(self) -> Connection:
return self.session.connection()
def execute(self, stmt, *args, **kwargs) -> Result:
return self.conn.execute(stmt, *args, **kwargs)
@property
def query(self):
return self.session.query
@property
def flush(self):
return self.session.flush
@property
def commit(self):
return self.session.commit
@property
def rollback(self):
return self.session.rollback
@property
def __contains__(self):
return self.session.__contains__
@property
def __iter__(self):
return self.session.__iter__
def get_session(**kwargs) -> Session:
return SessionFactory(**kwargs)
def set_session(scs):
global SessionFactory
SessionFactory = scs
def del_session():
SessionFactory.remove()
class DatabaseVersionError(ValueError):
def __init__(self, identified, expected):
super().__init__(f'Database version "{identified}" is different from supported version "{expected}".')
class DatabaseNotEmptyError(ValueError):
def __init__(self):
super().__init__('Database is not empty.')
def json_format(o):
if isinstance(o, datetime):
return o.isoformat()
return str(o)
JSON_ENCODER = json.JSONEncoder(for_json=True, iterable_as_array=True, default=json_format)
class Relationship:
cached = {}
    lookup: Dict[Tuple[Table, str], int] = {}
@classmethod
def get_table_name(cls, src: str, dst: str) -> str:
return f'_g_{src}_{dst}'.lower()
@classmethod
def register(cls, fwd: str, rev: str, join: Table | Type[T]):
cls.cached[join.name] = join
if not isinstance(join, Table):
join = join.__table__
cls.lookup[join, fwd] = 0
cls.lookup[join, rev] = 1
@classmethod
def table(cls, rel: InferrableSelectable) -> Table:
if isinstance(rel, InstrumentedAttribute):
return rel.parent.get_property(rel.key).secondary
@classmethod
def direction(cls, rel: InferrableSelectable) -> int:
if isinstance(rel, InstrumentedAttribute):
return cls.lookup[cls.table(rel), rel.key]
@classmethod
def create_table(cls, name):
return Table(
name, metadata,
Column('id', types.Integer(), primary_key=True, autoincrement=True),
Column('src', types.Integer(), ForeignKey(Identity.id, ondelete=CASCADE, onupdate=RESTRICT), nullable=False),
Column('dst', types.Integer(), ForeignKey(Identity.id, ondelete=CASCADE, onupdate=RESTRICT), nullable=False),
Index(None, 'src', 'dst', unique=True),
)
@classmethod
def two_way(cls, tables: Dict[str, str | Type[T]], **kwargs):
(src_attr, src_model), (dst_attr, dst_model) = sorted(tables.items())
table_name = cls.get_table_name(src_attr, dst_attr)
join_table = cls.cached.get(table_name)
if join_table is None:
join_table = cls.create_table(table_name)
cls.register(src_attr, dst_attr, join_table)
return {
dst_attr: relationship(
dst_model, back_populates=src_attr, secondary=join_table,
primaryjoin=Identity.id == join_table.c.src,
secondaryjoin=Identity.id == join_table.c.dst,
cascade_backrefs=False, **kwargs,
),
src_attr: relationship(
src_model, back_populates=dst_attr, secondary=join_table,
primaryjoin=Identity.id == join_table.c.dst,
secondaryjoin=Identity.id == join_table.c.src,
cascade_backrefs=False, **kwargs,
),
}
class FTS5:
def __init__(self):
self.ident = Identity.__reflection__
self.polymorph = with_polymorphic(Identity, '*')
self.sessionmaker: Callable[[], Session]
self.selectable = Query(self.polymorph).statement.set_label_style(LS_TABLE_COL)
self.columns = self.indexed_columns()
self.rowid_c = self.translated(self.ident.mapper.c.id)
self.model_c = self.translated(self.ident.mapper.c.model)
self.idx_t = self.idx_table(self.ident.mapper)
self.idx_p = aliased(Identity, self.idx_t, adapt_on_names=True)
@property
def session(self) -> Session:
return self.sessionmaker()
@property
def initialized(self) -> bool:
return hasattr(self, 'sessionmaker')
@property
def view_name(self):
return 'identity_view'
@property
def idx_name(self):
return 'identity_idx'
def indexed_columns(self) -> List[Column]:
return [c for c in self.selectable.subquery().c]
def translated(self, target: Column) -> Column:
for col in self.selectable.subquery().c:
if col.base_columns == target.base_columns:
return col
def idx_table(self, mapper: Mapper) -> Table:
columns = []
for c in mapper.columns:
translated = self.translated(c)
args = [translated.key, c.type]
if translated.foreign_keys:
for foreign_key in translated.foreign_keys:
args.append(ForeignKey(foreign_key.column))
columns.append(Column(*args, key=c.key, primary_key=c.primary_key))
return Table(
self.idx_name, metadata,
Column('identity_idx', types.String(), key='master'),
*columns,
keep_existing=True,
)
def polymorphic_view(self) -> DDL:
template = """
CREATE VIEW IF NOT EXISTS %(name)s
AS %(select)s
"""
info = {
'name': self.view_name,
'select': self.selectable.compile(),
}
return DDL(template % info)
def fts_virtual_table(self) -> DDL:
template = """
CREATE VIRTUAL TABLE IF NOT EXISTS %(name)s
USING fts5(%(columns)s, content=%(view_name)s, content_rowid=%(rowid_name)s)
"""
info = {
'name': self.idx_name,
'columns': ', '.join([c.key for c in self.columns]),
'view_name': self.view_name,
'rowid_name': self.rowid_c.key,
}
return DDL(template % info)
def init(self, sessionmaker: Callable[[], Session]):
self.sessionmaker = sessionmaker
session = self.session
view = self.polymorphic_view()
fts = self.fts_virtual_table()
view.execute(session.bind)
fts.execute(session.bind)
event.listen(session, 'before_flush', self.preflush_delete)
event.listen(session, 'after_flush', self.postflush_update)
session.commit()
def preflush_delete(self, session: Session, context, instances):
ids = [str(item.id) for item in [*session.dirty, *session.deleted]]
stmt = """
INSERT INTO %(name)s(%(name)s, rowid, %(columns)s)
SELECT 'delete', %(rowid_name)s, * FROM %(view_name)s
WHERE %(rowid_name)s IN (%(ids)s)
"""
info = {
'name': self.idx_name,
'columns': ', '.join([c.key for c in self.columns]),
'view_name': self.view_name,
'rowid_name': self.rowid_c.key,
'ids': ', '.join(ids),
}
session.execute(stmt % info)
def postflush_update(self, session: Session, context):
ids = [str(item.id) for item in [*session.new, *session.dirty]]
stmt = """
INSERT INTO %(name)s(rowid, %(columns)s)
SELECT %(rowid_name)s, * FROM %(view_name)s
WHERE %(rowid_name)s IN (%(ids)s)
"""
info = {
'name': self.idx_name,
'columns': ', '.join([c.key for c in self.columns]),
'view_name': self.view_name,
'rowid_name': self.rowid_c.key,
'ids': ', '.join(ids),
}
session.execute(stmt % info)
def destroy(self, session: Optional[Session] = None):
session = session or self.session
session.execute(f'DROP TABLE IF EXISTS {self.idx_name}')
session.execute(f'DROP VIEW IF EXISTS {self.view_name}')
session.commit()
def rebuild(self):
session = self.session
session.execute(f"INSERT INTO {self.idx_name}({self.idx_name}) VALUES('rebuild');")
session.commit()
def query(self, q: Optional[str] = None) -> Query:
clause = self.idx_t.c.id.isnot(None)
if q is not None:
clause = clause & self.idx_t.c.master.op('match')(q)
return self.session.query(self.idx_p).filter(clause)
def tokenized(self, q: Optional[str] = None) -> str:
if q is None:
return None
return slugify(q, sep='* ') + '*'
def search(self, q: Optional[str] = None) -> Query:
return self.query(self.tokenized(q))
def instanceof(self, model: Type[T], q: Optional[str] = None) -> Query:
desc = [m.entity.__name__ for m in model.__mapper__.self_and_descendants]
targets = ' OR '.join([f'{self.model_c.key}:{d}' for d in desc])
if q is not None:
query = f'({targets}) AND {slugify(q, sep="* ")}*'
else:
query = f'({targets})'
return self.query(query)
@contextmanager
def using_mapper(self, model: Type[T]):
try:
metadata.remove(self.idx_t)
self.idx_t = self.idx_table(inspect(model))
self.idx_p = aliased(model, self.idx_t, adapt_on_names=True)
yield self.idx_p
finally:
metadata.remove(self.idx_t)
self.idx_t = self.idx_table(inspect(Identity))
self.idx_p = aliased(Identity, self.idx_t, adapt_on_names=True)
def ids(self, q: Optional[str] = None, raw_query=False) -> Query:
if not raw_query:
q = self.tokenized(q)
return self.session.query(self.idx_t.c.id).filter(self.idx_t.c.master.op('match')(q))
def all(self, model: Type[T], q: Optional[str] = None) -> List[T]:
return self.instanceof(model, q).all()
def lookup(self, model: Type[T], q: Optional[str] = None) -> Query:
return self.session.query(model).filter(model.id.in_(self.ids(q)))
def slugify(name: str, sep='-', *, limit=0) -> str:
t = re.sub(r'[\W_]+', sep, str(unidecode.unidecode(name))).strip(sep).lower()
if limit > 0:
t = sep.join(t.split(sep)[:limit])
return t
class Reflection:
mapper: Mapper
local_table: Table
mapped_table: Table
attributes: Dict[str, InferrableSelectable]
relationships: Dict[str, RelationshipProperty]
proxies: Dict[str, AssociationProxy]
atypes: Dict[str, Type]
ctypes: Dict[str, Type]
dtypes: Dict[str, Type]
autoincrement: Tuple[str, ...]
primary_key: Tuple[str, ...]
unique_columns: Tuple[Tuple[str, ...], ...]
polymorphic_on: Optional[Column]
polymorphic_ident: Optional[Any]
ancestral_columns: List[Column]
ancestral_identity: List[Column]
lineage: List[Mapper]
@property
def columns(self) -> Dict[str, Column]:
return {c.name: c for c in self.mapper.columns}
def get_unique_attrs(self, obj):
values = []
for cols in self.unique_columns:
values.append(attrgetter(*cols)(obj))
return tuple(values)
def get_unique_items(self, info):
values = []
for cols in self.unique_columns:
values.append(itemgetter(*cols)(info))
return tuple(values)
@staticmethod
def _find_unique_columns(table: Table) -> Tuple[Set[Tuple[Tuple[str, ...], ...]], Optional[Tuple[str, ...]]]:
unique = set()
primary_key = None
for c in table.constraints:
cols = tuple(sorted(c.name for c in c.columns))
if isinstance(c, PrimaryKeyConstraint):
primary_key = cols
unique.add(cols)
if isinstance(c, UniqueConstraint):
unique.add(cols)
for i in table.indexes:
if i.unique:
cols = tuple(sorted(c.name for c in i.columns))
unique.add(cols)
return unique, primary_key
@staticmethod
def _find_lineage(mapper: Mapper) -> List[Mapper]:
line = []
while mapper is not None:
line.append(mapper)
mapper = mapper.inherits
return line
def __init__(self, model: Type):
table: Table = model.__table__
mapper: Mapper = inspect(model)
self.mapper = mapper
self.local_table = mapper.local_table
self.mapped_table = mapper.persist_selectable
self.ancestral_columns = []
self.ancestral_identity = []
self.lineage = []
self.polymorphic_on = None
self.polymorphic_ident = None
unique, primary_key = self._find_unique_columns(table)
self.primary_key = primary_key
self.unique_columns = tuple(sorted(unique))
self.polymorphic_on = mapper.polymorphic_on
self.polymorphic_ident = mapper.polymorphic_identity
self.lineage = self._find_lineage(mapper)
for t in mapper.tables:
if t is table:
continue
self.ancestral_columns.extend(t.columns)
self.ancestral_identity.extend([c for c in t.columns if c.primary_key])
self.relationships = {r.key: r for r in mapper.relationships}
proxies = {r.info.get('key'): r for r in mapper.all_orm_descriptors
if isinstance(r, AssociationProxy)}
self.proxies = {k: v.for_class(model) for k, v in proxies.items() if k}
target_attr_types = (InstrumentedAttribute, AssociationProxy)
self.attributes = {**{c.key: c for c in self.mapper.all_orm_descriptors
if isinstance(c, target_attr_types) and c.key[0] != '_'},
**self.proxies}
self.atypes = atypes = {}
self.ctypes = ctypes = {}
self.dtypes = dtypes = {}
cls_annotations = getattr(model, '__annotations__')
for c, col in self.columns.items():
atypes[c] = ColumnProperty
ctypes[c] = col.type
if cls_annotations:
dt = cls_annotations.get(c)
if isinstance(dt, type):
dtypes[c] = dt
continue
dtypes[c] = object
for c, r in self.relationships.items():
atypes[c] = RelationshipProperty
dtypes[c] = self._detect_collection(r.collection_class)
for c, p in self.proxies.items():
atypes[c] = AssociationProxyInstance
dtypes[c] = self._detect_collection(p.local_attr.property.collection_class)
@classmethod
def _detect_collection(cls, type_):
type_ = type_ or list
try:
return type(type_())
except Exception:
return type_
@classmethod
def is_column(cls, attr: InferrableSelectable):
if isinstance(attr, InstrumentedAttribute):
attr = attr.prop
return isinstance(attr, ColumnProperty)
@classmethod
def is_relationship(cls, attr: InferrableSelectable):
if isinstance(attr, InstrumentedAttribute):
attr = attr.prop
return isinstance(attr, RelationshipProperty)
@classmethod
def is_proxy(cls, attr: InferrableSelectable):
return isinstance(attr, (AssociationProxy, AssociationProxyInstance))
@classmethod
def owning_class(cls, attr: InferrableSelectable):
if cls.is_column(attr) or cls.is_relationship(attr):
return attr.parent.entity
if cls.is_proxy(attr):
return attr.owning_class
@classmethod
def join_target(cls, attr: InferrableSelectable) -> Mapper | None:
if cls.is_relationship(attr):
return attr.property.entity
if cls.is_proxy(attr):
remote_prop = attr.remote_attr.property
if isinstance(remote_prop, RelationshipProperty):
return remote_prop.entity
@event.listens_for(metadata, 'after_create')
def find_models(*args, **kwargs):
for k, v in Base.registry._class_registry.items():
if isinstance(v, type) and issubclass(v, Identity):
v._init_reflection()
|
import MapReduce
import sys
"""
Unique Trims Example in the Simple Python MapReduce Framework
"""
mr = MapReduce.MapReduce()
# =============================
# Do not modify above this line
def mapper(record):
# key: sequence identifier
    # value: nucleotide sequence
key = record[0]
value = record[1][:-10]
mr.emit_intermediate(value, key)
def reducer(key, list_of_values):
# key: nucleotide sequence
# value: list of ids
mr.emit(key)
# Do not modify below this line
# =============================
if __name__ == '__main__':
inputdata = open(sys.argv[1])
mr.execute(inputdata, mapper, reducer)
"""
Consider a set of key-value pairs where each key is sequence id and each value is a string of nucleotides, e.g., GCTTCCGAAATGCTCGAA....
Write a MapReduce query to remove the last 10 characters from each string of nucleotides, then remove any duplicates generated.
Map Input
Each input record is a 2 element list [sequence id, nucleotides] where sequence id is a string representing a unique identifier for the sequence and nucleotides is a string representing a sequence of nucleotides
Reduce Output
The output from the reduce function should be the unique trimmed nucleotide strings.
You can test your solution to this problem using dna.json:
$ python unique_trims.py dna.json
You can verify your solution by comparing your result with the file unique_trims.json.
"""
|