content stringlengths 5 1.05M |
|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import copy
import argparse
import numpy as np
import matplotlib.pyplot as plt
import gdal
import ogr
from schimpy.schism_mesh import read_mesh, write_mesh
from schimpy.schism_polygon import read_polygons, Polygon, Point
from scipy.ndimage import gaussian_filter as gfilt
def create_arg_parse():
    """Build the command-line parser for this script.

    Returns
    -------
    argparse.ArgumentParser
        Parser accepting --mesh, --density (required), --target, --output.
    """
    parser = argparse.ArgumentParser()
    specs = (
        (("--mesh",), dict(type=str, default="hgrid.gr3",
                           help="mesh file for the horizontal mesh")),
        (("--density",), dict(required=True,
                              help="tiff file for density")),
        (("--target",), dict(type=str,
                             help="Target polygons to calculate density")),
        (("--output",), dict(type=str, default="sav_D.gr3",
                             help="output file name")),
    )
    for flags, options in specs:
        parser.add_argument(*flags, **options)
    return parser
def read_density_tiff(fpath_densitiy_tiff):
    """ Read geotiff values for density

    The raster is assumed to be on a projected, regular (rectilinear) grid.

    Parameters
    ----------
    fpath_densitiy_tiff: str
        filename for the SAV density GeoTIFF

    Returns
    -------
    tuple, numpy.ndarray
        The six-element GDAL geotransform (upper-left x, x pixel size,
        x rotation, upper-left y, y rotation, y pixel size) and the first
        raster band read as a 2-D array.
    """
    ds = gdal.Open(fpath_densitiy_tiff)
    (upper_left_x, x_size, x_rotation, upper_left_y,
     y_rotation, y_size) = ds.GetGeoTransform()
    band = ds.GetRasterBand(1)
    array = band.ReadAsArray()
    # Cell-center coordinates could be reconstructed like this if needed:
    # xv, yv = np.meshgrid(xrange(array.shape[1]), xrange(array.shape[0]))
    # x = xv * x_size + upper_left_x + x_size / 2.
    # y = yv * y_size + upper_left_y + y_size / 2.
    return (upper_left_x, x_size, x_rotation, upper_left_y,
            y_rotation, y_size), array
def main():
    """Classify an RGB density GeoTIFF into SAV density classes 0-10 and
    write a Gaussian-smoothed single-band GeoTIFF ('ndvi_adj2.tif').

    Each pixel's RGB triple is matched exactly against a fixed color legend
    and mapped to its class value; unmatched pixels trigger the assert.
    """
    parser = create_arg_parse()
    args = parser.parse_args()
    tiff = gdal.Open(args.density)
    (upper_left_x, x_size, x_rotation, upper_left_y,
     y_rotation, y_size) = tiff.GetGeoTransform()
    proj = tiff.GetProjection()
    red = tiff.GetRasterBand(1).ReadAsArray()
    green = tiff.GetRasterBand(2).ReadAsArray()
    blue = tiff.GetRasterBand(3).ReadAsArray()
    allband = np.dstack((red, green, blue))
    # Color legend: each RGB triple corresponds to one density class.
    classes = [{'color': (0, 0, 0), 'class': 0.},
               {'color': (0, 80, 255), 'class': 1.},
               {'color': (0, 150, 255), 'class': 2.},
               {'color': (0, 255, 255), 'class': 3.},
               {'color': (0, 255, 150), 'class': 4.},
               {'color': (0, 255, 80), 'class': 5.},
               {'color': (0, 200, 0), 'class': 6.},
               {'color': (150, 255, 0), 'class': 7.},
               {'color': (255, 255, 0), 'class': 8.},
               {'color': (255, 150, 0), 'class': 9.},
               {'color': (255, 0, 0), 'class': 10.},
               ]
    colors = np.array([c['color'] for c in classes],
                      dtype=[('R', '<i4'), ('G', '<i4'), ('B', '<i4')])
    # BUG FIX: np.int was removed from NumPy (1.24+); use np.int32 so the
    # view matches the '<i4' record fields exactly.
    colarr = colors.view(np.int32).reshape(colors.shape + (-1,))
    classval = np.array([c['class'] for c in classes], dtype='d')
    order = np.argsort(colors, axis=0, order=('R', 'G', 'B'))
    refsort = colarr[order, :]
    valorder = classval[order]
    vals = allband.reshape(-1, 3)
    # BUG FIX: initialize with a sentinel below -1 so the assert below
    # reliably catches unmatched pixels. The original used -np.empty(...),
    # whose contents are uninitialized and not guaranteed to be < -1.
    ndvi = np.full(vals.shape[0], -2., dtype="d")
    nclass = len(refsort)
    for iref in range(nclass):
        print("Class: %s/%s" % (iref, nclass - 1))
        vo = valorder[iref]
        imatch = np.where((vals == refsort[iref, :]).all(axis=1))
        ndvi[imatch] = vo
    assert np.all(ndvi > -1.)
    ndvi = ndvi.reshape(red.shape)
    # Smooth class boundaries with a Gaussian filter.
    ndvi = gfilt(ndvi, sigma=3, order=0)
    gtiff_driver = gdal.GetDriverByName('GTiff')
    if gtiff_driver is None:
        raise ValueError("GTiff driver is not available")
    fpath_out = 'ndvi_adj2.tif'
    ds = gtiff_driver.Create(fpath_out,
                             ndvi.shape[1],
                             ndvi.shape[0],
                             1,
                             gdal.GDT_Float32,)
    ds.SetGeoTransform([upper_left_x, x_size, x_rotation, upper_left_y,
                        y_rotation, y_size])
    ds.SetProjection(proj)
    ds.GetRasterBand(1).WriteArray(ndvi)
    ds.FlushCache()
    # mesh = read_mesh(args.mesh)
    # polygons = read_polygons(args.target)
if __name__ == '__main__':
    import sys
    # NOTE(review): hard-coded debug arguments are appended below, so the
    # real command line is effectively overridden -- presumably leftover
    # test scaffolding; confirm before using this script in production.
    sys.argv.extend(
        ["--density", "delta_2016_20_28_mosaic_NDVI_tif.tif"])
    sys.argv.extend(["--mesh", "hgrid.gr3"])
    sys.argv.extend(["--target", "test/testdata/sav/frankstract.yaml"])
    main()
|
__author__ = 'hvishwanath'
import click
import os
import sys
import base64
import json
from db import *
from libpaas import settings
from libpaas.drivers.manager import DriverManager
from config import Config
from libpaas.camp import pdpparser
@click.group(help="Manage your applications across PaaS providers")
def paascli():
    # Runs before every subcommand: load (or create) the configuration
    # singleton and ensure the local database is initialized.
    Config.getInstance()
    db_init()
@paascli.group(help="Manage paas providers")
def provider():
    # Container group for the provider subcommands (add/list/delete).
    pass
@paascli.command(help="Reset configuration and local cache")
def reset():
    """Wipe the local configuration and cache after user confirmation."""
    answer = raw_input("This will delete local cache and configuration. All local data will be lost. Proceed [N/y]? : ")
    if answer and answer.upper() == "Y":
        Config.getInstance().reset()
        db_reset()
        click.echo("Removed all configuration and local cache information")
        return
    click.echo("Command aborted.")
@paascli.command(help="Refresh local cache from cloud")
def refresh():
    """Rebuild the local application cache from every configured provider.

    Prompts for confirmation, drops all cached Application rows, then asks
    each provider's driver for its app list and re-inserts the results.
    """
    c = raw_input("This will delete local app cache. It will be refreshed from providers in the cloud. Proceed [N/y]? : ")
    # raw_input returns '' (never None) on an empty line, so test truthiness
    # instead of the original dead `is None` check.
    if not c:
        click.echo("Command aborted.")
        return
    if c.upper() != "Y":
        click.echo("Command aborted.")
        return
    if Provider.select().count() <= 0:
        click.echo("There are no configured providers. Try <paascli providers add>")
    try:
        click.echo("Removing local cache..")
        if Application.select().count() > 0:
            for app in Application.select():
                # recursive=True also removes dependent rows.
                app.delete_instance(recursive=True)
        click.echo("Refreshing app cache from configured Providers\n----------------------------------------")
        for p in Provider.select():
            click.echo("Contacting %s....." % p.providername)
            d = DriverManager.getInstance()
            driver = d.find_driver(p.providername)
            if driver is None:
                click.echo("No driver available for %s" % p.providername)
                continue
            # BUG FIX: this previously interpolated the `provider` click
            # group function object instead of the provider's name.
            click.echo("Driver for %s is %s" % (p.providername, str(driver)))
            di = driver(p.username, p.password)
            r = di.list_apps()
            if r is None:
                click.echo("Error retrieving app details from %s" % p.providername)
                continue
            # Make a database Entry for each app reported by the driver.
            for a in r:
                a = Application(appid=a["appid"], giturl=a["giturl"], weburl=a["weburl"], provider=p)
                a.save()
                click.echo("Added entry. %s" % str(a))
        click.echo("Refresh complete")
    except Exception as ex:
        click.echo("Error during refresh: %s. If you find cache/config data inconsistent, try <paascli reset>" % str(ex))
@provider.command(help="Add a paas provider")
@click.option('--name', required=True, prompt=True)
@click.option('--user', required=True, prompt=True)
@click.password_option()
def add(name, user, password):
    """Register a new provider; the password is stored base64-obfuscated."""
    encoded = base64.b64encode(password)
    record = Provider(providername=name, username=user, password=encoded)
    try:
        record.save()
        click.echo('Added provider %s successfully' % name)
    except Exception as ex:
        click.echo("Error adding provider : %s" % str(ex), color="red")
@provider.command(help="List configured paas providers")
def list():
    """Print every provider stored in the local database."""
    providers = Provider.select()
    if providers.count() <= 0:
        click.echo("No providers are configured yet.", color="red")
        return
    click.echo("Configured Providers\n-----------------------")
    for entry in providers:
        click.echo("%s - Username: %s" %(entry.providername, entry.username))
@provider.command(help="Delete a configured provider")
@click.option('--name', required=True, prompt=True)
def delete(name):
    """Remove the provider whose name matches exactly."""
    try:
        Provider.get(Provider.providername == name).delete_instance()
    except DoesNotExist:
        click.echo("No provider entry with name : %s" % name)
    except Exception as ex:
        click.echo("Error completing command : %s" % str(ex))
    else:
        click.echo("Deleted provider %s" % name)
@paascli.group(help="Manage applications")
def app():
    # Container group for the application subcommands.
    pass
@app.command(help="Install an app on a configured paas platform")
@click.option('--provider', required=True, prompt=True)
@click.option('--appid', required=True, prompt=True)
@click.option('--pdparchive', required=True, prompt=True)
def install(provider, appid, pdparchive):
    """Parse a CAMP PDP archive and deploy it through the provider's driver.

    On success the new app's id/git/web URLs are cached in the local DB.
    """
    try:
        pdr = Provider.get(Provider.providername == provider)
    except Exception as ex:
        click.echo("Error getting provider info for %s" % provider)
        pdr = None
    if pdr is None:
        return
    try:
        p = pdpparser.PDPParser(pdparchive)
        click.echo("Parsed Plan: \n%s" % str(p.plan))
        d = DriverManager.getInstance()
        driver = d.find_driver(provider)
        if driver is None:
            click.echo("No driver available for %s" % provider)
            return
        click.echo("Driver for %s is %s" % (provider, str(driver)))
        # Driver classes are instantiated with the stored credentials.
        di = driver(pdr.username, pdr.password)
        r = di.install_app(appid, p)
        if r is None:
            click.echo("Error during app installation")
            return
        # Make a database Entry from the (appid, giturl, weburl) result.
        appid, giturl, weburl = r
        a = Application(appid=appid, giturl=giturl, weburl=weburl, provider=pdr)
        a.save()
    except Exception as ex:
        click.echo("Error: %s" % str(ex))
@app.command(help="Uninstall an app from a configured paas platform")
@click.option('--appid', required=True, prompt=True)
def uninstall(appid):
    """Uninstall an app via its provider's driver and drop the cached row."""
    try:
        app = Application.get(Application.appid == appid)
    except Exception as ex:
        click.echo("Error getting application info for %s" % appid)
        click.echo("Try refreshing your config database <paascli refresh>")
        return
    try:
        d = DriverManager.getInstance()
        provider = app.provider.providername
        driver = d.find_driver(provider)
        if driver is None:
            click.echo("No driver available for %s" % provider)
            return
        click.echo("Driver for %s is %s" % (provider, str(driver)))
        di = driver(app.provider.username, app.provider.password)
        r = di.uninstall_app(appid)
        if r is None:
            click.echo("Error during app uninstall")
            return
        # BUG FIX: the original called app.save() right after
        # delete_instance(), which re-inserted the just-deleted row.
        app.delete_instance()
    except Exception as ex:
        click.echo("Error: %s" % str(ex))
@app.command(help="Get application information")
@click.option('--appid', required=True, prompt=True)
def info(appid):
    """Fetch live app details from the provider and pretty-print as JSON."""
    try:
        app = Application.get(Application.appid == appid)
    except Exception as ex:
        click.echo("Error getting application info for %s" % appid)
        click.echo("Try refreshing your config database <paascli refresh>")
        return
    try:
        d = DriverManager.getInstance()
        provider = app.provider.providername
        driver = d.find_driver(provider)
        if driver is None:
            # BUG FIX: `provider` was undefined in this branch and raised
            # NameError instead of printing the message.
            click.echo("No driver available for %s" % provider)
            return
        click.echo("Driver for %s is %s" % (provider, str(driver)))
        di = driver(app.provider.username, app.provider.password)
        r = di.get_app_info(appid)
        if r is None:
            click.echo("Error retrieving app info")
            return
        click.echo(json.dumps(r.json(), indent=2))
    except Exception as ex:
        click.echo("Error: %s" % str(ex))
@app.command(help="Start application")
@click.option('--appid', required=True, prompt=True)
def start(appid):
    """Start a deployed application through its provider's driver."""
    try:
        app = Application.get(Application.appid == appid)
    except Exception as ex:
        click.echo("Error getting application info for %s" % appid)
        click.echo("Try refreshing your config database <paascli refresh>")
        return
    try:
        d = DriverManager.getInstance()
        provider = app.provider.providername
        driver = d.find_driver(provider)
        if driver is None:
            # BUG FIX: `provider` was undefined in this branch and raised
            # NameError instead of printing the message.
            click.echo("No driver available for %s" % provider)
            return
        click.echo("Driver for %s is %s" % (provider, str(driver)))
        di = driver(app.provider.username, app.provider.password)
        r = di.start_app(appid)
        if r is None:
            click.echo("Error retrieving app info")
            return
        click.echo("Successfully started")
    except Exception as ex:
        click.echo("Error: %s" % str(ex))
@app.command(help="Stop application")
@click.option('--appid', required=True, prompt=True)
def stop(appid):
    """Stop a running application through its provider's driver."""
    try:
        app = Application.get(Application.appid == appid)
    except Exception as ex:
        click.echo("Error getting application info for %s" % appid)
        click.echo("Try refreshing your config database <paascli refresh>")
        return
    try:
        manager = DriverManager.getInstance()
        provider = app.provider.providername
        driver = manager.find_driver(provider)
        if driver is None:
            click.echo("No driver available for %s" % provider)
            return
        click.echo("Driver for %s is %s" % (provider, str(driver)))
        instance = driver(app.provider.username, app.provider.password)
        if instance.stop_app(appid) is None:
            click.echo("Error retrieving app info")
            return
        click.echo("Successfully stopped")
    except Exception as ex:
        click.echo("Error: %s" % str(ex))
@app.command(help="List all installed applications")
@click.option('--provider')
def list(provider):
    """Print cached applications, optionally filtered by provider name."""
    click.echo("Listing all applications")
    if provider is None:
        query = Application.select()
    else:
        query = Application.select().join(Provider).where(
            Provider.providername == provider)
    for entry in query:
        click.echo("%s" % str(entry))
|
import time
import board
from busio import I2C
from adafruit_bitmap_font import bitmap_font
import adafruit_is31fl3741
from adafruit_is31fl3741.adafruit_ledglasses import LED_Glasses
import IS31Framebuffer
#font_file = "tom_thumb.bdf" # 3x5 font
font_file = "tfont.bdf"
font = bitmap_font.load_font(font_file)
message = "CIRCUITPYTHON!"
# Manually declare I2C (not board.I2C() directly) to access 1 MHz speed...
i2c = I2C(board.SCL, board.SDA, frequency=1000000)
# Initialize the IS31 LED driver, buffered for smoother animation
glasses = LED_Glasses(i2c, allocate=adafruit_is31fl3741.MUST_BUFFER)
glasses.show()
glasses.global_current = 20
# Framebuffer is 3x the physical 18x5 LED grid with scale=True; presumably
# the oversampled buffer is downscaled for smoother scrolling -- confirm in
# IS31Framebuffer.
fb = IS31Framebuffer.IS31Framebuffer(glasses, 18*3, 5*3, glasses_width=18, glasses_height=5, scale=True)
#fb = IS31Framebuffer.IS31Framebuffer(glasses, 18, 5)
fb._font = IS31Framebuffer.BDFFont(font)
width = fb._font.width(message)  # pixel width of the message, for wrap-around
x=54  # start position: just off the right edge of the framebuffer
t = time.monotonic()
length = len(fb.buf)
# Scroll the message leftward forever, printing the frame rate each pass.
while True:
    # rather then framebuffer.fill this seemed slightly faster
    for i in range(length):
        fb.buf[i] = 0x00
    fb.text(message, x, 0, 0xA000A0)
    fb.display()
    print(1/(time.monotonic()-t))
    t = time.monotonic()
    x = x - 1
    if x < -width:
        x = 54
while True:
pass |
from typing import Dict, List, Optional
from pydantic import BaseModel # pylint: disable=no-name-in-module
from ...utils import kubernetes
class UserDeployment(BaseModel):
    # Schema for one entry of the user-code deployments list in the Helm
    # values file. Field names are camelCase to match the values.yaml keys,
    # so pydantic validates the raw YAML mapping directly.
    name: str
    image: kubernetes.Image
    # Arguments for the gRPC server that serves this code location.
    dagsterApiGrpcArgs: List[str]
    port: int
    replicaCount: Optional[int] = 1
    env: Optional[Dict[str, str]]
    envConfigMaps: Optional[List[kubernetes.ConfigMapEnvSource]]
    envSecrets: Optional[List[kubernetes.SecretEnvSource]]
    annotations: Optional[kubernetes.Annotations]
    nodeSelector: Optional[kubernetes.NodeSelector]
    affinity: Optional[kubernetes.Affinity]
    tolerations: Optional[kubernetes.Tolerations]
    podSecurityContext: Optional[kubernetes.PodSecurityContext]
    securityContext: Optional[kubernetes.SecurityContext]
    resources: Optional[kubernetes.Resources]
    livenessProbe: Optional[kubernetes.LivenessProbe]
    startupProbe: Optional[kubernetes.StartupProbe]
    labels: Optional[Dict[str, str]]
class UserDeployments(BaseModel):
    # Top-level user-deployments values section: a feature toggle, a
    # subchart toggle, and the list of individual deployments.
    enabled: bool
    enableSubchart: bool
    deployments: List[UserDeployment]
|
"""
[True] hello
[True] vstack [!<class 'tuple_iterator'>]
[True] bstack []
[False] world
[False] vstack [!<class 'tuple_iterator'>]
[False] bstack []
"""
from pyteleport import tp_dummy
from pyteleport.tests.helpers import setup_verbose_logging, print_stack_here, print_, get_tp_args
class SomeClass:
    """Fixture that exercises teleporting from inside a bound method."""

    def __init__(self):
        # Two messages: the teleport fires after the first one, so each
        # message is printed on a different side of the teleport.
        self.messages = "hello", "world"

    def teleport(self):
        for m in self.messages:
            print_(m)
            print_stack_here(print_)
            # Teleport exactly once, on the first message; identity check
            # (is) against the tuple element, not string equality.
            if m is self.messages[0]:
                tp_dummy(**get_tp_args())
setup_verbose_logging()
# Run the scenario at import time; output is checked against the expected
# transcript in the module docstring.
instance = SomeClass()
instance.teleport()
|
#! /usr/bin/env python
import redis
# from time import strftime
# import time
# import random
def monitor():
    """Continuously write test keys to a fixed Redis instance.

    NOTE(review): the loop writes the same constant value forever and the
    last two set() calls duplicate the first two -- presumably deliberate
    load/keep-alive scaffolding; confirm intent before reuse.
    """
    redis_host = "10.201.67.22"
    redis_port = 6363
    redis_client = redis.StrictRedis(host=redis_host, port=redis_port, db=0)
    while True:
        x = 1
        redis_client.set("Key:" + 'x', x)
        redis_client.set("KeyYU:" + 'x', x)
        redis_client.set("Key:" + 'x', x)
        redis_client.set("KeyYU:" + 'x', x)


if __name__ == '__main__':
    monitor()
|
from dataclasses import dataclass, field, asdict
from typing import List, Union
import marshmallow as ma
from .base import EntityCondensedSchema, EntityCondensed, EntitySchema, Entity, AuditingSchema, Auditing
from .tag import TagCondensedSchema, TagCondensed
from .custom_property import CustomPropertyValueSchema, CustomPropertyValue
@dataclass(unsafe_hash=True)
class UserCondensed(EntityCondensed):
    """
    Represents a Qlik Sense User with limited attribution
    """
    # Identity fields participate in the hash so instances can be
    # deduplicated on (user_name, user_directory, user_directory_connector).
    user_name: str = field(default=None, hash=True)
    user_directory: str = field(default=None, hash=True)
    user_directory_connector: str = field(default=None, hash=True)
class UserCondensedSchema(EntityCondensedSchema):
    """
    A marshmallow schema corresponding to a Qlik Sense User object with limited attribution

    data_key maps the snake_case dataclass attributes to the camelCase names
    used by the Qlik Sense API payloads.
    """
    user_name: str = ma.fields.Str(required=True, data_key='userId')
    user_directory: str = ma.fields.Str(required=True, data_key='userDirectory')
    user_directory_connector: str = ma.fields.Str(
        required=True, data_key='userDirectoryConnectorName'
    )

    @ma.pre_dump()
    def pre_dump(self, data: 'Union[UserCondensed, dict]', **kwargs) -> dict:
        # Accept either the dataclass or an already-plain dict on dump.
        if isinstance(data, UserCondensed):
            return asdict(data)
        return data

    @ma.post_load()
    def post_load(self, data: dict, **kwargs) -> 'UserCondensed':
        # Deserialize straight into the dataclass.
        return UserCondensed(**data)
@dataclass(unsafe_hash=True)
class UserAttribute(Auditing):
    """
    Represents a Qlik Sense User Attribute
    """
    # Only id and attribute_type contribute to the hash; the value and
    # external id are excluded.
    id: str = field(default=None, hash=True)
    attribute_type: str = field(default=None, hash=True)
    attribute_value: str = field(default=None, hash=False)
    external_id: str = field(default=None, hash=False)
class UserAttributeSchema(AuditingSchema):
    """
    A marshmallow schema corresponding to a Qlik Sense User Attribute object
    """
    id = ma.fields.UUID(required=False)
    attribute_type = ma.fields.Str(required=True, data_key='attributeType')
    attribute_value = ma.fields.Str(required=False, data_key='attributeValue')
    external_id = ma.fields.Str(required=False, data_key='externalId')

    @ma.pre_dump()
    def pre_dump(self, data: 'Union[UserAttribute, dict]', **kwargs) -> dict:
        # Accept either the dataclass or an already-plain dict on dump.
        if isinstance(data, UserAttribute):
            return asdict(data)
        return data

    @ma.post_load()
    def post_load(self, data: dict, **kwargs) -> 'UserAttribute':
        # Deserialize straight into the dataclass.
        return UserAttribute(**data)
@dataclass(unsafe_hash=True)
class User(UserCondensed, Entity):
    """
    Represents a Qlik Sense User with full attribution

    Inherits the hashed identity fields from UserCondensed; the collection
    and status fields below are excluded from the hash.
    """
    custom_properties: List[CustomPropertyValue] = field(default_factory=list, hash=False)
    roles: List[str] = field(default_factory=list, hash=False)
    attributes: List[UserAttribute] = field(default_factory=list, hash=False)
    is_inactive: bool = field(default=False, hash=False)
    is_removed_externally: bool = field(default=False, hash=False)
    is_blacklisted: bool = field(default=False, hash=False)
    delete_is_prohibited: bool = field(default=False, hash=False)
    tags: List[TagCondensed] = field(default_factory=list, hash=False)
class UserSchema(UserCondensedSchema, EntitySchema):
    """
    A marshmallow schema corresponding to a Qlik Sense User object with full attribution
    """
    custom_properties = ma.fields.Nested(nested=CustomPropertyValueSchema, many=True, required=False,
                                         data_key='customProperties')
    roles = ma.fields.List(cls_or_instance=ma.fields.Str, required=False, allow_none=True)
    attributes = ma.fields.Nested(UserAttributeSchema, many=True, required=False)
    is_inactive = ma.fields.Bool(required=False, data_key='inactive')
    is_removed_externally = ma.fields.Bool(required=True, data_key='removedExternally')
    is_blacklisted = ma.fields.Bool(required=True, data_key='blacklisted')
    delete_is_prohibited = ma.fields.Bool(required=False, data_key='deleteProhibited')
    tags = ma.fields.Nested(TagCondensedSchema, many=True, required=False)

    @ma.pre_dump()
    def pre_dump(self, data: 'Union[User, dict]', **kwargs) -> dict:
        # Accept either the dataclass or an already-plain dict on dump.
        if isinstance(data, User):
            return asdict(data)
        return data

    @ma.post_load()
    def post_load(self, data: dict, **kwargs) -> 'User':
        # Deserialize straight into the dataclass.
        return User(**data)
|
# Generated by Django 3.1.13 on 2021-10-25 22:21
from django.db import migrations
import modelcluster.fields
class Migration(migrations.Migration):
    """Replace componentaartisticapage.mobiliere with mobiliere_new.

    NOTE(review): the old field is removed and a ParentalManyToManyField is
    added under a new name; no data migration happens here -- presumably
    handled (or intentionally dropped) elsewhere. Confirm before squashing.
    """

    dependencies = [
        ('nomenclatoare', '0024_historicalmaterialmobilier_materialmobilier'),
        ('app', '0082_auto_20211026_0119'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='componentaartisticapage',
            name='mobiliere',
        ),
        migrations.AddField(
            model_name='componentaartisticapage',
            name='mobiliere_new',
            field=modelcluster.fields.ParentalManyToManyField(blank=True, to='nomenclatoare.MaterialMobilier', verbose_name='Mobilier'),
        ),
    ]
|
#!/Users/robertpoenaru/.pyenv/shims/python
import numpy as np
import gordan as cg
print(cg.RESULT)
# calculates the Clebsch-Gordan coefficient for a given set of parameters j1,j2,m1,m2,j,m=m1+m2
# Checks whether a spin quantum number is half-integer or not
def HalfInteger(x):
    """Return 1 when x is a half-integer (..., -0.5, 0.5, 1.5, ...), else 0."""
    shifted = x + 1 / 2
    return 1 if shifted.is_integer() else 0
def QuantumNumber(x):
    """Format a quantum number j or m: half-integers as 'n/2' strings,
    everything else as a plain int."""
    # Inlined half-integer test (same check HalfInteger performs).
    if (x + 1 / 2).is_integer():
        return f'{int(2*x)}/2'
    return int(x)
def ShowJM_States(j1, j2):
    """Return the coupled |J,M> states of j1 (x) j2 as (j, m) tuples.

    J runs from |j1 - j2| to j1 + j2 in unit steps; for each J, M runs
    from -J to +J (only M = 0 when J = 0).
    """
    if not HalfInteger(j1):
        j1 = int(j1)
    if not HalfInteger(j2):
        j2 = int(j2)
    coupled_js = np.arange(abs(j1 - j2), j1 + j2 + 1, 1)
    pretty = []
    states = []
    for j in coupled_js:
        m_values = np.arange(0, 1, 1) if j == 0 else np.arange(-j, j + 1, 1)
        for m in m_values:
            states.append((j, m))
            pretty.append((QuantumNumber(j), QuantumNumber(m)))
    return states
# shows all the states that form the basis {s=|j1,j2;m1,m2>=s1+s2}
# where s1,s2 are the two subspaces which correspond to each of the two angular momenta
# i.e., s1=|j1,m1> and s2=|j2,m2>
def ShowJ1J2M1M2_States(j1, j2):
    """Return the uncoupled basis states |j1,j2;m1,m2> as 4-tuples.

    m1 runs from -j1 to +j1 and m2 from -j2 to +j2 in unit steps.
    """
    if(not HalfInteger(j1)):
        j1 = int(j1)
    if(not HalfInteger(j2)):
        # BUG FIX: the original assigned int(j2) to j1 here, clobbering j1
        # and leaving j2 unconverted.
        j2 = int(j2)
    m1_vals = np.arange(-j1, j1 + 1, 1)
    m2_vals = np.arange(-j2, j2 + 1, 1)
    J12M12_printer = []
    J12M12 = []
    for m1 in m1_vals:
        for m2 in m2_vals:
            state = (j1, j2, m1, m2)
            state_printer = (QuantumNumber(j1), QuantumNumber(j2),
                             QuantumNumber(m1), QuantumNumber(m2))
            J12M12.append(state)
            J12M12_printer.append(state_printer)
    return J12M12
# for state in J12M12:
# print(f'j1,j2;m1,m2> = |{state[0]},{state[1]};{state[2]},{state[3]}>')
def GenerateSpinStates(j1, j2):
    """Print the uncoupled-basis states of j1 (x) j2, one row per state."""
    JM = ShowJM_States(j1, j2)
    uncoupled = ShowJ1J2M1M2_States(j1, j2)
    clebsch_matrix = np.array([np.array(state) for state in uncoupled])
    for row in clebsch_matrix:
        print(row)
# Demo: print the uncoupled basis states for j1 = j2 = 1.
GenerateSpinStates(1, 1)
def GenerateQuantumNumbers(j1, j2):
    """Print the uncoupled |j1,j2;m1,m2> kets and the coupled |J,M> pairs.

    Returns -1 for negative inputs; otherwise returns None (results are
    printed, and the JM list is printed at the end, not returned).
    """
    if(j1 < 0 or j2 < 0):
        return -1
    # Pretty-print j1/j2 as 'n/2' strings when half-integer.
    if(HalfInteger(j1)):
        J1 = f'{int(2*j1)}/2'
    else:
        J1 = j1
    if(HalfInteger(j2)):
        J2 = f'{int(2*j2)}/2'
    else:
        J2 = j2
    m1 = np.arange(-j1, j1 + 1, 1)
    m2 = np.arange(-j2, j2 + 1, 1)
    # Coupled J ranges from |j1-j2| to j1+j2.
    j12 = np.arange(abs(j1 - j2), j1 + j2 + 1, 1)
    count_j = 1
    JM = []
    J1J2_M1M1 = []
    # Enumerate and print every uncoupled ket.
    for m11 in m1:
        if(HalfInteger(m11)):
            M1 = f'{int(2*m11)}/2'
        else:
            M1 = m11
        for m21 in m2:
            if(HalfInteger(m21)):
                M2 = f'{int(m21*2)}/2'
            else:
                M2 = m21
            print(f'|j1,j2;m1,m2> = |{J1},{J2},{M1},{M2}>')
    # Collect the coupled (j, m) pairs.
    for j in j12:
        m12 = np.arange(-j, j + 1, 1)
        # print(f'j_{count_j}={j}')
        if(HalfInteger(j)):
            J = f'{int(j*2)}/2'
        else:
            J = j
        for m in m12:
            if(HalfInteger(m)):
                M = f'{int(2*m)}/2'
            else:
                M = m
            # print(f'|j,m> = |{J},{M}>')
            JM.append((j, m))
        count_j = count_j + 1
    print(f'The |J,M> states')
    print(JM)
# NOTE(review): binds the function object itself (no parentheses), so no
# states are generated here -- presumably a missing call; confirm intent.
qn = GenerateQuantumNumbers
|
import unittest
from case_sensitive import case_sensitive, case_sensitive2
def test_get_age(benchmark):
    # pytest-benchmark fixture: benchmark(fn, *args) times fn and returns
    # its result, so these also assert correctness of both implementations.
    # NOTE(review): pytest-benchmark normally allows one benchmark() call
    # per test -- confirm this double call actually runs as intended.
    assert benchmark(case_sensitive,'asd') == [True, []]
    assert benchmark(case_sensitive2,'cellS') == [False, ['S']]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 5 00:04:41 2020
@author: shashanknigam
web parser for amazon:
Things to be extracted: 1. Title of the product span id = "productTitle"
2. Number of rating : span id = acrCustomerReviewText
3. Average rating given:span class a-icon-alt
4. Description: div id = featurebullets_feature_div.text
5. Product description: heading description format h3:a-spacing-mini :- neighboring text p class="a-spacing-base"
6. Other features if any h4 class="a-spacing-mini" p : afterwards.
-- later consideration 6.5: Comparison id=HLCXComparisonTable
item heading: tr class="comparison_table_image_row"
img.src :Name
class="a-row a-spacing-top-small"
7. Product information div id = "productDetails_detailBullets_sections1"
1. Product dimensions th label td value
2. Item weight
3. Shipping weight
4. Manufacturer
5. ASIN
6. Model Number
7. Customer reviews
8. Best sellers rank
            9. Warranty if any
8. Question answers: div =class="a-section a-spacing-none askBtfTopQuestionsContainer" ; span class = "a-text-bold" next sibling id (class="a-declarative")the child question next span class= askLongText class="a-color-tertiary a-nowrap" for r the next teritory wrap
9. Customer reviews: all if possible : - class="cr-lighthouse-term " (terms)
1. data-hook="review-star-rating" user rating
2. data-hook="review-title"
3. class="a-row a-spacing-small review-data" detailed review
4. data-hook="see-all-reviews-link-foot"
5. class="a-last"
10. Price: span id = priceblock_ourprice
Hanumanji
a-section celwidget
cr-dp-lighthut
["a-fixed-left-grid","a-spacing-base"]
['a-fixed-left-grid-col', 'a-col-right']
reviews-medley-footer
id="cr-dp-desktop-lighthut"
["a-fixed-right-grid-col","cm_cr_grid_center_right"]
"""
"""
Getting each details out:
"""
from selenium import webdriver
import pandas as pd
from bs4 import BeautifulSoup as soup
import bs4
import sys
import traceback
product_dict={"ASIN":[],"Name":[]}
productDetails = {"ASIN":[],"Name":[],"Average Rating":[],"TotalRating":[],"Price":[],"Features":[]}
Description = {"ASIN":[],"ShortDescription":[],"LongDescription":[]}
productReview = {"ASIN":[],"Date":[],"Rating":[],"Title":[],"Detail":[]}
productQA = {"ASIN":[],"Question":[],"Answer":[]}
productInformation={"ASIN":[]} #Rest of the fields are optional
productRating={"ASIN":[],"5":[],"4":[],"3":[],"2":[],"1":[]}
ASIN=""
#QA= {"Question":[],"Answers":[],"ASIN":[]}
#customerReviews = {"ASIN":[],"UserRating":[],"Title":[],"detailedReview":[]}
def readWebpage(url, driver_path='/Users/shashanknigam/Downloads/Beautiful Soup/chromedriver'):
    """Fetch *url* with Selenium Chrome and return the rendered page source.

    Parameters
    ----------
    url: str
        Page to load.
    driver_path: str
        Path to the chromedriver binary. Parameterized (with the original
        hard-coded location as the default) so the scraper is usable on
        other machines.
    """
    browser = webdriver.Chrome(driver_path)
    try:
        browser.get(url)
        return browser.page_source
    finally:
        # BUG FIX: always release the browser, even when the page load
        # raises; the original leaked the Chrome process on error.
        browser.close()
def getSoup(url):
    """Fetch *url* and return it parsed as a BeautifulSoup tree."""
    return soup(readWebpage(url), 'html.parser')
def get(s, tag, attr=None):
    """Find all *tag* elements in *s*; when *attr* is given, keep only the
    elements that define that attribute."""
    matches = s.find_all(tag)
    if attr is None:
        return matches
    return [element for element in matches if attr in element.attrs.keys()]
def getNextSibling(tag):
    """Return the next 'real' sibling of *tag*, skipping newlines, NBSPs
    and <br> tags; None when the siblings are exhausted."""
    current = tag
    while True:
        sibling = current.next_sibling
        if sibling == '' or sibling is None:
            return None
        if sibling in ['\n', '\xa0'] or sibling.name == 'br':
            current = sibling
        else:
            return sibling
def getNextSiblingText(tag):
    """Like getNextSibling, but return the sibling's text ('' when none).

    Also skips single-space siblings; bare strings are returned via str().
    """
    current = tag
    while True:
        sibling = current.next_sibling
        if sibling == '' or sibling is None:
            return ''
        if sibling in ['\n', '\xa0'] or sibling.name == 'br' or sibling == ' ':
            current = sibling
        else:
            if isinstance(sibling, bs4.element.Tag):
                return sibling.text
            return str(sibling)
def parseQA(url,QA,ASIN):
    """Scrape one page of Amazon Q&A into the QA dict.

    Appends to the parallel lists QA['ASIN'/'Question'/'Answer'] and returns
    the updated dict plus the next page's URL ('' on the last page).
    """
    s=getSoup(url)
    s_div = get(s,'div','class')
    # The Q&A widget is the second 'celwidget' div when more than one exists.
    qa_div = [q for q in s_div if q['class']==['celwidget']]
    if len(qa_div)>1:
        qa_div = qa_div[1]
    else:
        qa_div = qa_div[0]
    qa=get(qa_div,'div','class')
    qa_inner = [q for q in qa if q['class']==['a-fixed-left-grid-col', 'a-col-right']]
    #print("qa_inner",len(qa_inner))
    for i in qa_inner:
        qa_inner_temp=get(i,'div','class')
        qa_inner_inner=[q for q in qa_inner_temp if q['class']==['a-fixed-left-grid-col', 'a-col-right']]
        #print(len(qa_inner_inner))
        # First inner column holds the question, second (when present) the answer.
        if len(qa_inner_inner)>1:
            QA['ASIN'].append(ASIN)
            QA['Question'].append(qa_inner_inner[0].text.strip())
            QA['Answer'].append(qa_inner_inner[1].span.text.strip())
        elif len(qa_inner_inner)==1:
            # Unanswered question: keep the lists aligned with ''.
            QA['ASIN'].append(ASIN)
            QA['Question'].append(qa_inner_inner[0].text.strip())
            QA['Answer'].append('')
    # Pagination: the 'a-last' list item links to the next page of questions.
    li = get(s,'li','class')
    li_last = [l for l in li if l['class']==['a-last']]
    next_url = ""
    if len(li_last)!=0:
        next_url='https://www.amazon.com/'+li_last[0].a['href']
    return QA,next_url
def parseReview(url, review, ASIN):
    """Scrape one page of customer reviews into the *review* dict.

    Appends to the parallel lists ASIN/Date/Rating/Title/Detail and returns
    (next_page_url_or_None, review).
    """
    s = getSoup(url)
    s_div = get(s, 'div', 'id')
    div_reviews = [d for d in s_div if d['id'] == "cm_cr-review_list"][0]
    div_review = get(div_reviews, "div", "data-hook")
    div_r = [r for r in div_review if r['data-hook'] == 'review']
    for i in div_r:
        try:
            # Gather all pieces before touching the output dict so a missing
            # element cannot leave the parallel lists out of sync.
            rating_i = get(i, 'i', 'data-hook')
            rating = [r for r in rating_i if r['data-hook'] == "review-star-rating"]
            rating = rating[0].text.strip()
            span_d = get(i, 'span', 'data-hook')
            date = [d for d in span_d if d['data-hook'] == "review-date"]
            date = date[0].text.strip()
            review_t = get(i, 'a', 'data-hook')
            review_title = [t for t in review_t if t['data-hook'] == "review-title"]
            review_title = review_title[0].text.strip()
            review_b = [b for b in span_d if b['data-hook'] == "review-body"]
            review_b = review_b[0].text.strip()
            review["ASIN"].append(ASIN)
            review["Rating"].append(rating)
            review["Date"].append(date)
            review["Title"].append(review_title)
            # BUG FIX: the body was appended under a "Body" key that the
            # module-level productReview dict does not define, so every
            # review raised KeyError here after the other four lists had
            # already been extended, silently corrupting the table. The
            # dict's key is "Detail".
            review["Detail"].append(review_b)
        except Exception:
            # Skip reviews missing any expected element.
            pass
    li = get(s, 'li', 'class')
    next_url = [l for l in li if l['class'] == ["a-last"]]
    if len(next_url) > 0:
        url = 'https://www.amazon.com' + next_url[0].a['href']
    else:
        url = None
    return url, review
# NOTE(review): stray expression -- builds a tuple and discards it; has no
# effect. Presumably leftover scratch notes; safe to delete.
"a-section","celwidget"
def parseAmazon(url):
    """Scrape an Amazon product page into the module-level accumulator dicts.

    Fills productInformation, Description, productQA, productRating and
    productReview, all keyed by the ASIN found in the product-details table.
    Any scraping failure is caught at the bottom and printed; data collected
    up to that point is kept.
    """
    global product_dict,productDetails,Description,productQA,productInformation,ASIN,productReview
    ASIN=""
    s=getSoup(url)
    s_span = get(s,'span','id')
    try:
        # Headline fields (title, rating counts, price).
        title = [t for t in s_span if t['id']=="productTitle"]
        title = title[0].text.strip()
        numberOfRating = [r for r in s_span if r['id']=="acrCustomerReviewText"]
        numberOfRating = numberOfRating[0].text.strip()
        averageRating = [i for i in s_span if i['id']=="acrPopover"]
        averageRating = averageRating[0].text.strip()
        productPrice = [p for p in s_span if (p['id']=="priceblock_ourprice" or p['id']=="priceblock_saleprice")]
        productPrice = productPrice[0].text
        s_div = get(s,'div','id')
        features = [f for f in s_div if f['id']=="feature-bullets"]
        features = features[0].text.strip().replace('\n','').replace('\t','')
        # Product-details table: th cells are labels, td cells are values.
        product_Information =[pi for pi in s_div if pi['id']=='prodDetails']
        pi_th = get(product_Information[0],'th')
        pi_td = get(product_Information[0],'td')
        pi_th_text = [t.text.strip() for t in pi_th if t.text.strip()!='']
        pi_td_text = [t.text.strip().replace('\n','').replace('\t','') for t in pi_td if t.text.strip()!='']
        label_col = []
        for i in range(len(pi_th_text)):
            if pi_th_text[i]!="Customer Reviews":
                if pi_th_text[i]=="ASIN":
                    ASIN = pi_td_text[i]
                label_col.append(pi_th_text[i])
                if pi_th_text[i] not in productInformation.keys():
                    productInformation[pi_th_text[i]]=[]
                productInformation[pi_th_text[i]].append(pi_td_text[i])
        # Pad columns this product did not provide so all lists stay aligned.
        for i in productInformation.keys():
            if i not in label_col:
                productInformation[i].append("")
        # Rich "aplus" description when present, else the plain description.
        productDescription = [p for p in s_div if p['id']=="aplus"]
        if len(productDescription)!=0:
            h3_title = get(productDescription[0],'h3')
            h4_title = get(productDescription[0],'h4')
            p_description = get(productDescription[0],'p')
            h3_title_text = [text.text.strip() for text in h3_title if text.text!="" and text.text.strip()!='']
            p_description_text = [text.text.strip() for text in p_description if text.text!="" and text.text is not None and text.text.strip()!='']
            h4_title_text =[text.text.strip() for text in h4_title if text.text!="" and text.text.strip()!='']
            # j walks the paragraph list in parallel with the h3/h4 headings.
            j=0
            for i in range(len(h3_title_text)):
                if h3_title_text[i]!="OTHER FEATURES":
                    Description['ASIN'].append(ASIN)
                    Description['ShortDescription'].append(h3_title_text[i])
                    Description['LongDescription'].append(p_description_text[j])
                    j+=1
            for i in range(len(h4_title_text)):
                Description['ASIN'].append(ASIN)
                Description['ShortDescription'].append(h4_title_text[i])
                Description['LongDescription'].append(p_description_text[j])
                j+=1
        else:
            productDescription = [p for p in s_div if p['id']=="productDescription"]
            productDescription_b = get(productDescription[0],'b')
            for i in productDescription_b:
                # <b> holds the heading; the following sibling holds the text.
                if getNextSiblingText(i).strip()!='':
                    Description['ASIN'].append(ASIN)
                    Description['ShortDescription'].append(i.text.strip())
                    Description['LongDescription'].append(getNextSiblingText(i).strip())
        # Follow the Q&A link and page through all questions.
        qa_desc = [q for q in s_div if q['id']=='ask_lazy_load_div']
        qa_url = qa_desc[0].a['href']
        while qa_url!='':
            productQA,qa_url=parseQA(qa_url,productQA,ASIN)
        # Star-rating histogram from the reviews medley: spans alternate
        # label ("N star") / percentage, hence the stride of 2 below.
        review_summary = [d for d in s_div if d['id']=='reviewsMedley'][0]
        rev_span = get(review_summary,'span','class')
        global productRating
        rev_span = [r for r in rev_span if r['class']==["a-size-base"]]
        for i in [0,2,4,6,8]:
            productRating['ASIN'].append(ASIN)
            if "1" in rev_span[i].text.strip():
                productRating["1"].append(rev_span[i+1].text.strip())
            elif "2" in rev_span[i].text.strip():
                productRating["2"].append(rev_span[i+1].text.strip())
            elif "3" in rev_span[i].text.strip():
                productRating["3"].append(rev_span[i+1].text.strip())
            elif "4" in rev_span[i].text.strip():
                productRating["4"].append(rev_span[i+1].text.strip())
            else:
                productRating["5"].append(rev_span[i+1].text.strip())
        # Follow "see all reviews" and page through every review page.
        rev_div = get(review_summary,'div','id')
        rev_div_footer = [r for r in rev_div if r['id']=="reviews-medley-footer"]
        rating_url = 'https://www.amazon.com'+rev_div_footer[0].a['href']
        while rating_url is not None:
            rating_url,productReview=parseReview(rating_url,productReview,ASIN)
    except:
        # NOTE(review): bare except keeps the scraper alive across layout
        # changes; failures are reported with a traceback below.
        print(sys.exc_info())
        print(traceback.format_exc())
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2013 dhilipsiva <dhilipsiva@gmail.com>
#
# http://dhilipsiva.github.io/django-shorturl/
#
# Distributed under terms of the MIT license.
"""
There is just one simple table here.
The KeyPath table which has a key an a path.
"""
from django.db import models
class KeyPath(models.Model):
    """Maps a short URL key to the full path it redirects to."""
    key = models.CharField('Key', max_length=10)
    path = models.CharField('Path', max_length=255)

    def as_dict(self):
        """Return a plain-dict view of this KeyPath (handy for JSON responses)."""
        return dict(key=self.key, path=self.path)
|
from __future__ import print_function
import argparse
import base64
import mimetypes
import os.path
import pickle
import sys
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from google.auth.transport.requests import Request
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/gmail.send']
def parse_arguments():
    """Parse the command line for the Gmail sender.

    Positionals: subject, sender, one or more recipients.
    Options: -m/-M message text or file (mutually exclusive), -a attachment,
    -c content id, -i inline, -d dry-run, --html.
    Exits with status 2 when both -m and -M are supplied.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument("subject", type=str, help="message subject")
    cli.add_argument("sender", type=str, help="email to send message from")
    cli.add_argument("recipient", type=str, nargs="+", help="addressee(s) of the message")
    cli.add_argument("-m", "--message", type=str, help="message to send")
    cli.add_argument("-M", "--message-file", type=str, help="message to send from a file")
    cli.add_argument("-a", "--attach", type=str, help="path to file to attach")
    cli.add_argument("-c", "--content-id", type=str, default="<image>",
                     help="content id to use for attachment")
    cli.add_argument("-i", "--inline", help="inline the attachment", action="store_true")
    cli.add_argument("-d", "--dry-run", help="don't actually send the email", action="store_true")
    cli.add_argument("--html", help="treat message as html", action="store_true")
    parsed = cli.parse_args()
    # Exclusivity is enforced by hand (an argparse group would change the
    # error text/exit behavior callers may rely on).
    if parsed.message and parsed.message_file:
        print("-m/--message and -M/--message-file are mutually exclusive")
        sys.exit(2)
    return parsed
def get_message_body(args):
    """Return the message text from -m, the contents of the -M file, or ''."""
    if args.message:
        return args.message
    if not args.message_file:
        return ""
    with open(args.message_file, "r") as handle:
        return handle.read()
def create_message(args, recipient):
    """Create a base64url-encoded email object for the Gmail API.

    Args:
        args: parsed CLI arguments (sender, subject, html flag, optional
            attachment settings).
        recipient: destination address for this copy of the message.

    Returns:
        dict with a 'raw' key holding the base64url-encoded RFC 2822 message,
        the shape expected by the Gmail users.messages.send call.
    """
    # BUG FIX: the subtype was 'us-ascii', which is a charset, not a MIME
    # subtype — it produced the invalid content type 'text/us-ascii'.
    # Plain text mail is 'text/plain'.
    subtype = 'html' if args.html else 'plain'
    message_text = get_message_body(args)
    message = MIMEMultipart() if args.attach else MIMEText(message_text, subtype)
    if args.attach:
        # With an attachment the body becomes the first part of a multipart.
        message.attach(MIMEText(message_text, subtype))
        message.attach(prepare_attachment(args))
    message['to'] = recipient
    message['from'] = args.sender
    message['subject'] = args.subject
    return {'raw': bytes.decode(base64.urlsafe_b64encode(message.as_string().encode()))}
def prepare_attachment(args):
    """Build a MIME part for args.attach, guessing its content type.

    Text files become MIMEText, images MIMEImage, audio MIMEAudio; anything
    else (or encoded/unknown types) falls back to application/octet-stream.
    The part gets a Content-Disposition header (inline when --inline was
    given, attachment otherwise) and, when inline, a Content-ID so an HTML
    body can reference it.
    """
    file_to_attach = args.attach
    content_type, encoding = mimetypes.guess_type(file_to_attach)
    # A non-None encoding (e.g. gzip) means the payload is not the plain type
    # the extension suggests, so treat it as opaque bytes.
    if content_type is None or encoding is not None:
        content_type = 'application/octet-stream'
    main_type, sub_type = content_type.split('/', 1)
    if main_type == 'text':
        # BUG FIX: this branch used to open the file in 'rb' and hand bytes
        # to MIMEText, which requires str on Python 3 (TypeError). Read text
        # files in text mode. Context managers close the file even on error.
        with open(file_to_attach, 'r') as fp:
            msg = MIMEText(fp.read(), _subtype=sub_type)
    elif main_type == 'image':
        with open(file_to_attach, 'rb') as fp:
            msg = MIMEImage(fp.read(), _subtype=sub_type)
    elif main_type == 'audio':
        with open(file_to_attach, 'rb') as fp:
            msg = MIMEAudio(fp.read(), _subtype=sub_type)
    else:
        with open(file_to_attach, 'rb') as fp:
            msg = MIMEBase(main_type, sub_type)
            msg.set_payload(fp.read())
    filename = os.path.basename(file_to_attach)
    disposition = 'inline' if args.inline else 'attachment'
    msg.add_header('Content-Disposition', disposition, filename=filename)
    if args.inline:
        msg.add_header('Content-ID', args.content_id)
    return msg
def send_message(service, user_id, message):
    """Send an email message through the Gmail API.

    Args:
        service: Authorized Gmail API service instance.
        user_id: User's email address; the special value "me" means the
            authenticated user.
        message: dict with the base64url 'raw' payload to send.

    Returns:
        The sent message resource returned by the API.
    """
    sent = service.users().messages().send(userId=user_id, body=message).execute()
    print('Message Id: {}'.format(sent['id']))
    return sent
def main():
    """Authorize against Gmail, then send the message to every recipient."""
    args = parse_arguments()
    creds = None
    # token.pickle caches the user's access and refresh tokens; it is written
    # automatically the first time the authorization flow completes.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token_file:
            creds = pickle.load(token_file)
    # Without valid cached credentials, refresh them or run the OAuth flow.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            auth_flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)
            creds = auth_flow.run_local_server()
        # Persist the (possibly refreshed) credentials for the next run.
        with open('token.pickle', 'wb') as token_file:
            pickle.dump(creds, token_file)
    service = build('gmail', 'v1', credentials=creds)
    # Build and send one individual copy per recipient.
    for recipient in args.recipient:
        outgoing = create_message(args, recipient)
        if args.dry_run:
            print(outgoing)
        else:
            send_message(service, 'me', outgoing)

if __name__ == '__main__':
    main()
|
#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2018/11/4 下午2:27
# 3.5 Understanding loss functions in linear regression
# 1. Apart from the loss function, the program starts as usual: import the
#    libraries, create a session, load the data set, create the placeholders,
#    and define the variables and the model.
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
sess = tf.Session()
iris = datasets.load_iris()
# x: 4th iris feature (petal width); y: 1st feature (sepal length) — both
# taken from iris.data.
x_vals = np.array([x[3] for x in iris.data])
y_vals = np.array([y[0] for y in iris.data])
batch_size = 25
learning_rate = 0.1 # Will not converage with learning rate at 0.4
iterations = 50
x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# Linear model: output = x * A + b
A = tf.Variable(tf.random_normal(shape=[1,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))
model_output = tf.add(tf.matmul(x_data, A), b)
# 2. Change the loss function to the L1 loss
loss_l1 = tf.reduce_mean(tf.abs(y_target - model_output))
"""
注意,通过代入式子tf.reduce_mean(tf.square(y_target - model_output)) 可以改加L2正则损失函数。
"""
# (Translation of the note above: substituting
# tf.reduce_mean(tf.square(y_target - model_output)) switches to the L2 loss.)
# 3. Now initialize the variables
init = tf.global_variables_initializer()
sess.run(init)
my_opt_l1 = tf.train.GradientDescentOptimizer(learning_rate)
# NOTE(review): 'setp' is a typo for 'step'; used consistently, so behavior
# is unaffected — kept as-is in this doc-only pass.
train_setp_l1 = my_opt_l1.minimize(loss_l1)
loss_vec_l1 = []
for i in range(iterations):
    # Stochastic training: one random batch of 25 samples per iteration.
    rand_index = np.random.choice(len(x_vals), size=batch_size)
    rand_x = np.transpose([x_vals[rand_index]])
    rand_y = np.transpose([y_vals[rand_index]])
    sess.run(train_setp_l1, feed_dict={x_data: rand_x, y_target: rand_y})
    temp_loss_l1 = sess.run(loss_l1, feed_dict={x_data: rand_x, y_target: rand_y})
    loss_vec_l1.append(temp_loss_l1)
    if (i+1)%25 == 0:
        print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b)))
"""
Step #25 A = [[1.6843317]] b = [[3.279116]]
Step #50 A = [[1.3079315]] b = [[3.9551158]]
"""
plt.plot(loss_vec_l1, 'k-', label='L1 Loss')
# plt.plot(loss_vec_l2, 'r--', label='L2 Loss')
plt.title('L1 and L2 Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('L1 Loss')
plt.legend(loc='upper right')
plt.show()
import uuid
import botocore
from botocore.stub import Stubber, ANY
from db_cluster_endpoint_provider import DBClusterEndpointProvider
provider = DBClusterEndpointProvider()
# Shrink the status-poll interval so the wait loops finish quickly in tests.
provider.sleep_period_in_seconds = 0.1
def test_create_endpoint():
    """Create succeeds once the stubbed endpoint status reaches 'available'."""
    request = Request(
        "Create",
        properties = {
            "DBClusterIdentifier": "aurora",
            "DBClusterEndpointIdentifier": "readers",
            "EndpointType": "READER",
            "StaticMembers": ["instance1"],
        },
    )
    rds = botocore.session.get_session().create_client(
        "rds", region_name="eu-central-1"
    )
    stubber = Stubber(rds)
    # Stubbed responses are consumed in order: one create call, then two
    # status polls ('creating' -> 'available').
    stubber.add_response(
        "create_db_cluster_endpoint",
        CreateDbClusterEndpointReponse(request),
        request["ResourceProperties"],
    )
    stubber.add_response(
        "describe_db_cluster_endpoints",
        DescribeDBClusterEndpointReponse(request, "creating"),
        {"DBClusterEndpointIdentifier": "readers"},
    )
    stubber.add_response(
        "describe_db_cluster_endpoints",
        DescribeDBClusterEndpointReponse(request, "available"),
        {"DBClusterEndpointIdentifier": "readers"},
    )
    stubber.activate()
    provider.rds = rds
    response = provider.handle(request, ())
    assert response["Status"] == "SUCCESS", response["Reason"]
    stubber.assert_no_pending_responses()
def test_update_endpoint():
    """Changing StaticMembers triggers modify and waits for 'available'."""
    request = Request(
        "Update",
        properties = {
            "DBClusterIdentifier": "aurora",
            "DBClusterEndpointIdentifier": "readers",
            "EndpointType": "READER",
            "StaticMembers": ["instance2"],
        },
        old_properties = {
            "DBClusterIdentifier": "aurora",
            "DBClusterEndpointIdentifier": "readers",
            "EndpointType": "READER",
            "StaticMembers": ["instance1"],
        },
    )
    rds = botocore.session.get_session().create_client(
        "rds", region_name="eu-central-1"
    )
    stubber = Stubber(rds)
    # Order matters: modify call, then two polls ('modifying' -> 'available').
    # The expected modify params show only the changed member list is sent.
    stubber.add_response(
        "modify_db_cluster_endpoint",
        CreateDbClusterEndpointReponse(request),
        {'DBClusterEndpointIdentifier': 'readers', 'StaticMembers': ['instance2']},
    )
    stubber.add_response(
        "describe_db_cluster_endpoints",
        DescribeDBClusterEndpointReponse(request, "modifying"),
        {"DBClusterEndpointIdentifier": "readers"},
    )
    stubber.add_response(
        "describe_db_cluster_endpoints",
        DescribeDBClusterEndpointReponse(request, "available"),
        {"DBClusterEndpointIdentifier": "readers"},
    )
    stubber.activate()
    provider.rds = rds
    response = provider.handle(request, ())
    assert response["Status"] == "SUCCESS", response["Reason"]
    stubber.assert_no_pending_responses()
def test_invalid_update_endpoint():
    """Updating immutable properties fails without any RDS API call."""
    request = Request(
        "Update",
        properties = {
            "DBClusterIdentifier": "aurora-2",
            "DBClusterEndpointIdentifier": "readers-1",
            "EndpointType": "WRITER",
            "StaticMembers": ["instance2"],
            "Tags": [{"Key": "Name", "Value": "writer"}]
        },
        old_properties = {
            "DBClusterIdentifier": "aurora",
            "DBClusterEndpointIdentifier": "readers",
            "EndpointType": "READER",
            "StaticMembers": ["instance1"],
        },
    )
    rds = botocore.session.get_session().create_client(
        "rds", region_name="eu-central-1"
    )
    stubber = Stubber(rds)
    # No stubbed responses: the provider must reject the request before
    # touching the API (assert_no_pending_responses would also catch a call).
    stubber.activate()
    provider.rds = rds
    response = provider.handle(request, ())
    assert response["Status"] == "FAILED", response["Reason"]
    assert response["Reason"] == 'these properties cannot be updated: DBClusterEndpointIdentifier, DBClusterIdentifier, Tags'
    stubber.assert_no_pending_responses()
def test_delete_endpoint():
    """Delete succeeds once the endpoint disappears from describe results."""
    request = Request(
        "Delete",
        properties = {
            "DBClusterIdentifier": "aurora",
            "DBClusterEndpointIdentifier": "readers",
            "EndpointType": "READER",
            "StaticMembers": ["instance2"],
        },
    )
    rds = botocore.session.get_session().create_client(
        "rds", region_name="eu-central-1"
    )
    stubber = Stubber(rds)
    # Order matters: delete call, then a 'deleting' poll, then an empty
    # describe result (status=None) meaning the endpoint is gone.
    stubber.add_response(
        "delete_db_cluster_endpoint",
        CreateDbClusterEndpointReponse(request),
        {'DBClusterEndpointIdentifier': 'readers'},
    )
    stubber.add_response(
        "describe_db_cluster_endpoints",
        DescribeDBClusterEndpointReponse(request, "deleting"),
        {"DBClusterEndpointIdentifier": "readers"},
    )
    stubber.add_response(
        "describe_db_cluster_endpoints",
        DescribeDBClusterEndpointReponse(request, status=None),
        {"DBClusterEndpointIdentifier": "readers"},
    )
    stubber.activate()
    provider.rds = rds
    response = provider.handle(request, ())
    assert response["Status"] == "SUCCESS", response["Reason"]
    stubber.assert_no_pending_responses()
class Request(dict):
    """Dict shaped like a CloudFormation custom-resource request.

    Builds the standard request envelope around *properties*. For non-Create
    requests, a PhysicalResourceId mimicking the RDS endpoint ARN is derived
    from the cluster/endpoint identifiers unless one is passed explicitly.
    Update requests additionally carry OldResourceProperties.
    """

    def __init__(self, request_type, properties: dict, old_properties: dict = None, physical_resource_id=None):
        # BUG FIX: old_properties previously defaulted to a mutable {} that
        # is shared across calls; None is the safe sentinel and the
        # `old_properties if old_properties else {}` line below keeps the
        # observable behavior identical.
        request_id = "request-%s" % uuid.uuid4()
        self.update(
            {
                "RequestType": request_type,
                "ResponseURL": "https://httpbin.org/put",
                "StackId": "arn:aws:cloudformation:us-west-2:EXAMPLE/stack-name/guid",
                "RequestId": request_id,
                "ResourceType": "Custom::DBClusterEndpoint",
                "LogicalResourceId": "Endpoint",
                "ResourceProperties": properties,
            }
        )
        if physical_resource_id:
            self["PhysicalResourceId"] = physical_resource_id
        elif request_type != "Create":
            # Mirror the ARN format RDS uses for cluster endpoints.
            self["PhysicalResourceId"] = f"arn:aws:rds:eu-central-1:123456789012:{properties['DBClusterIdentifier']}:{properties['DBClusterEndpointIdentifier']}"
        if request_type == "Update":
            self["OldResourceProperties"] = old_properties if old_properties else {}
class CreateDbClusterEndpointReponse(dict):
    """Stubbed rds create/modify/delete_db_cluster_endpoint response.

    The endpoint Status is derived from the request type (Create ->
    'creating', Update -> 'modifying', Delete -> 'deleting').
    """

    def __init__(self, request):
        status = {"Create": "creating", "Update": "modifying", "Delete": "deleting"}
        self["ResponseMetadata"] = {
            "RequestId": request["RequestId"],
            "HTTPStatusCode": 200,
            "HTTPHeaders": {
                "x-amzn-requestid": "2c7bd3fe-730c-4d24-b9a5-1942193a091a",
                "content-type": "text/xml",
                "content-length": "275",
                "date": "Sat, 16 Nov 2019 17:58:29 GMT",
            },
            "RetryAttempts": 0,
        }
        properties = request["ResourceProperties"]
        self.update(
            {
                "DBClusterEndpointIdentifier": properties["DBClusterEndpointIdentifier"],
                "DBClusterIdentifier": properties["DBClusterIdentifier"],
                # BUG FIX: the f-string had no placeholder braces, so the
                # literal text "request['DBClusterEndpointIdentifier']-..."
                # was emitted instead of the endpoint identifier.
                "DBClusterEndpointResourceIdentifier": f"{properties['DBClusterEndpointIdentifier']}-ANPAJ4AE5446DAEXAMPLE",
                "Endpoint": f"{properties['DBClusterEndpointIdentifier']}.{properties['DBClusterIdentifier']}.eu-central-1.rds.amazonaws.com",
                "Status": status[request["RequestType"]],
                "EndpointType": "CUSTOM",
                "CustomEndpointType": properties["EndpointType"],
                "StaticMembers": properties.get("StaticMembers", []),
                "ExcludedMembers": properties.get("ExcludedMembers", []),
                "DBClusterEndpointArn": f"arn:aws:rds:eu-central-1:123456789012:{properties['DBClusterIdentifier']}:{properties['DBClusterEndpointIdentifier']}",
            }
        )
class DescribeDBClusterEndpointReponse(dict):
    """Stubbed rds.describe_db_cluster_endpoints response.

    With a truthy *status*, returns one endpoint record derived from the
    request's ResourceProperties; with status=None, returns an empty list
    (the endpoint no longer exists).
    """

    def __init__(self, request, status=None):
        self["ResponseMetadata"] = {
            "RequestId": request["RequestId"],
            "HTTPStatusCode": 200,
            "HTTPHeaders": {
                "x-amzn-requestid": "2c7bd3fe-730c-4d24-b9a5-1942193a091a",
                "content-type": "text/xml",
                "content-length": "275",
                "date": "Sat, 16 Nov 2019 17:58:29 GMT",
            },
            "RetryAttempts": 0,
        }
        properties = request["ResourceProperties"]
        endpoints = []
        if status:
            host = "{0}.{1}.eu-central-1.rds.amazonaws.com".format(
                properties["DBClusterEndpointIdentifier"],
                properties["DBClusterIdentifier"],
            )
            endpoints.append(
                {
                    "DBClusterIdentifier": properties["DBClusterIdentifier"],
                    "Endpoint": host,
                    "Status": status,
                    "EndpointType": properties["EndpointType"],
                }
            )
        self.update({"DBClusterEndpoints": endpoints})
|
import re
import sys
"""
A quoi sert ce fichier ?
- a trouver tt les transactions auquelles une addresse X a participé
- a trouver une transaction avec son hash
"""
class GetTransactions:
    """Parse plain-text blocks into structured transaction dicts.

    Accepts a list of blocks that are either raw strings or dicts holding
    the string under the "block" key; `get()` walks them and extracts every
    transaction using regular expressions over the fixed text layout.
    """

    def __init__(self, blocks):
        self.blocks = blocks
        blocks = []
        # Normalize: unwrap {"block": "..."} dicts so self.blocks holds only
        # the raw block strings.
        for i in self.blocks:
            if isinstance(i, dict) == True:
                blocks.append(i["block"])
            else:
                blocks.append(i)
        self.blocks = blocks
        # self.blocks is a list of str blocks
        # We will assume that the blocks are correct.
        # Maybe the blocks should be checked in a real blockchain
    # get() will return a list containing all the transactions which are in all the blocks
    # This is the format of a transaction :
    """
    {
        "block_headers":{"height":height, "previous_block_hash":, "nonce":},
        "transaction_headers":{"transaction_hash":},
        "inputs":[{"addr":addr, "amount":amount, "last_tx_hash":last_tx_hash}, ...],
        "outputs":[{"addr":addr, "amount":amount}, ...]
    }
    """
    def get(self):
        """Return ("Ok", [transaction dicts]) or ("ERROR", reason).

        Parsing is tightly coupled to the exact text layout (blank lines
        separate header/transactions; fixed "Block/Previous Block Hash/
        Nonce/Tx Hash/..." labels). Any mismatch is caught by the broad
        except and reported as an ("ERROR", ...) tuple.
        """
        list_transactions = []
        for block in self.blocks:
            # The genesis block carries no transactions.
            if block == "GENESIS BLOCK":
                continue
            try:
                header, transactions = block.split("\n\n", maxsplit=1)
                ## Header :
                block_number, previous_block_hash, nonce = header.split("\n")
                # Block number
                block_number = re.findall(r"Block ([0-9]*)", block_number)
                block_number = block_number[0]
                # Previous block hash
                previous_block_hash = re.findall(r"Previous Block Hash : ([0-9A-z]*)", previous_block_hash)
                previous_block_hash = previous_block_hash[0]
                # Nonce
                nonce = re.findall(r"Nonce : ([0-9]*)", nonce)
                nonce = nonce[0]
                ## Transactions :
                transactions = transactions.split("\n\n")
                # NOTE(review): the inner parsing loops below reuse the loop
                # variable name `i`; harmless in Python (the outer `for`
                # reassigns from its range iterator) but easy to misread.
                for i in range(len(transactions)):
                    transaction = {
                        "block_headers":{"height":block_number, "previous_block_hash":previous_block_hash, "nonce":nonce}
                    }
                    tx_number, tx_hash, tx_input_ouput = transactions[i].split("\n",maxsplit=2)
                    # Transaction Number
                    tx_number = re.findall(r"Transaction ([0-9]+):", tx_number)
                    tx_number = tx_number[0]
                    # Transaction Hash
                    tx_hash = re.findall(r"Tx Hash : ([A-z0-9]+)", tx_hash)
                    tx_hash = tx_hash[0]
                    transaction["transaction_headers"] = {"transaction_hash":tx_hash}
                    # Split inputs and outputs from a transaction
                    tx_inputs = re.findall(r"Input :\n([A-z0-9\n\s:]*)\nOutput", tx_input_ouput)
                    tx_inputs = tx_inputs[0]
                    tx_outputs = re.findall(r"Output :\n([A-z0-9\n\s:]*)", tx_input_ouput)
                    tx_outputs = tx_outputs[0]
                    list_inputs = []
                    list_outputs = []
                    if i == 0: # The first transaction is the COINBASE
                        # Check input
                        regex_input_coinbase = r"0\nPrevious tx : Coinbase\nAmount : ([0-9]+)"
                        search_coinbase_input = re.search(regex_input_coinbase, tx_inputs)
                        amount_coinbase = search_coinbase_input.group(1)
                        # Check output
                        regex_output_coinbase = r"0\nTo : ([A-z0-9]+)\nAmount : ([0-9]+)"
                        search_coinbase_output = re.search(regex_output_coinbase, tx_outputs)
                        addr_receiver = search_coinbase_output.group(1)
                        amount_sent = search_coinbase_output.group(2)
                        list_inputs.append({
                            "addr":"COINBASE", "amount":str(amount_coinbase), "previous_tx":"COINBASE"
                        })
                        list_outputs.append({
                            "addr":addr_receiver, "amount":str(amount_sent)
                        })
                    else: # Normal transaction
                        # Output
                        sum_output = 0
                        regex_output_tx = r"[0-9]+\nTo : ([A-z0-9]+)\nAmount : ([0-9]+)"
                        search_output_tx = re.findall(regex_output_tx, tx_outputs)
                        for i in range(len(search_output_tx)):
                            amount_sent = int(search_output_tx[i][1])
                            sum_output += amount_sent
                            address = search_output_tx[i][0]
                            list_outputs.append({
                                "addr":address, "amount":str(amount_sent)
                            })
                        # Input
                        sum_input = 0
                        regex_input_tx = r"[0-9]+\nPrevious tx : ([A-z0-9]+)\nFrom : ([A-z0-9]+)\nAmount : ([0-9]+)\nPublic Key : ([A-z0-9]+)\nSignature : ([A-z0-9]+)"
                        search_input_tx = re.findall(regex_input_tx, tx_inputs)
                        for i in range(len(search_input_tx)):
                            amount = int(search_input_tx[i][2])
                            sum_input += amount
                            signature = search_input_tx[i][4]
                            public_key = search_input_tx[i][3]
                            address = search_input_tx[i][1]
                            previous_tx = search_input_tx[i][0]
                            list_inputs.append({
                                "addr":address, "amount":str(amount), "previous_tx":previous_tx
                            })
                    transaction["inputs"] = list_inputs
                    transaction["outputs"] = list_outputs
                    list_transactions.append(transaction)
            except Exception as e:
                # raise e
                # print(e)
                # continue
                return "ERROR", f"Error while searching in block ({e})"
        return "Ok",list_transactions
def get_transactions_for_addr(blocks, address):
    """Return every transaction whose inputs or outputs mention *address*.

    On a parse failure, the ("ERROR", reason) tuple from GetTransactions
    is propagated unchanged.
    """
    result = GetTransactions(blocks).get()
    if result[0] != "Ok":
        return result  # propagate ("ERROR", reason)
    matching = []
    for tx in result[1]:
        involved = any(entry["addr"] == address for entry in tx["inputs"])
        if not involved:
            involved = any(entry["addr"] == address for entry in tx["outputs"])
        if involved:
            matching.append(tx)
    return matching
def get_transactions_with_hash(blocks, hash_):
    """Look up a single transaction by its hash.

    Returns ["Found", transaction] or ["Not Found"]; parse errors from
    GetTransactions are propagated unchanged.
    """
    result = GetTransactions(blocks).get()
    if result[0] != "Ok":
        return result
    for tx in result[1]:
        if tx["transaction_headers"]["transaction_hash"] == hash_:
            return ["Found", tx]
    return ["Not Found"]
def get_transactions_with_hash_in_previous_tx(blocks, hash_, address):
    """Find a transaction that spends *hash_* from *address*.

    Returns ["Found", transaction] when some input references the given
    previous-transaction hash for that address, else ["Not Found"]; parse
    errors from GetTransactions are propagated unchanged.
    """
    result = GetTransactions(blocks).get()
    if result[0] != "Ok":
        return result
    for tx in result[1]:
        for entry in tx["inputs"]:
            if entry["previous_tx"] == hash_ and entry["addr"] == address:
                return ["Found", tx]
    return ["Not Found"]
if __name__ == "__main__":
    # BUG FIX: corrected the typo "lauch" -> "launch" in the user-facing hint.
    print("Do not launch this file. Use exemple.py instead")
#!/usr/bin/python3
import simpy
from modules.process.production_line import ProductionLine
from modules.process.fas_instance import FASInstance
from modules.faults.wear_and_tear import WearAndTear
from modules.components.clock import Clock
from simulator.logger import Logger
# Initializing the simulation environment, logger, production line and clock.
env = simpy.Environment()
logger = Logger(env)
production_line = ProductionLine(env, logger)
clock = Clock(logger, env)
clock.spawn()

# Adding a fault immediately to the CONVEYOR5
def fault():
    """simpy process that injects a WearAndTear fault into conveyor5 at t=0."""
    yield env.timeout(0)
    print("FAULT")
    production_line.conveyor5.add_fault(
        WearAndTear(env, production_line.conveyor5))

env.process(fault())

# Putting in 30 items, waiting for them to be done.
last_item = None
for i in range(0, 30):
    fas_instance = FASInstance(env, production_line, logger)
    last_item = fas_instance.spawn()
env.run(last_item)
print("Done.")

# BUG FIX: write the log through a context manager so the file handle is
# closed even if getLoglines() or the write itself raises.
with open("data/output_easy_with_wear_and_tear_fault.json", "w") as f:
    f.write(logger.getLoglines())
|
def hello11():
    """Print this module's identification line (module 1, script 11)."""
    print("hello from module 1, script11")
|
import os
# t_l = [(1, 2, 3,), (1, 2, 3,), (1, 2, 3,), (1, 2, 3,)]
#
# for i, j, k in t_l:
# print("i:" + str(i))
# print("j:" + str(j))
# print("k:" + str(k))
# Walk the spider's result directory and dump every file line by line,
# stripping the trailing newline so lines are not double-spaced.
path = r"D:\pycharm\PyCharm 2020.1.1\workplace\zhihu_user_info_spider\zhihu_user_info\result"
for root, _dirs, files in os.walk(path):
    for name in files:
        with open(os.path.join(root, name), mode="r", encoding="utf-8") as handle:
            for line in handle:
                print(line.strip("\n"))
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold(a, b):
    """Greatest common divisor of a and b via Euclid's algorithm (iterative)."""
    while a != 0:
        a, b = b % a, a
    return b
#TOFILL
if __name__ == '__main__':
    # Reference parameter sets for comparing f_filled (candidate, pasted in
    # at #TOFILL) against f_gold (reference implementation).
    param = [
        (46,89,),
        (26,82,),
        (40,12,),
        (58,4,),
        (25,44,),
        (2,87,),
        (8,65,),
        (21,87,),
        (82,10,),
        (17,61,)
    ]
    n_success = sum(
        1 for parameters_set in param
        if f_filled(*parameters_set) == f_gold(*parameters_set)
    )
    print("#Results: %i, %i" % (n_success, len(param)))
import re
#------------------ searching within a string -----------------#
re.search(r"abc", "abcdef")
# returns <_sre.SRE_Match object at 0x00AC1640>, or None if there is no match
# NOTE(review): 'expression' and 'chaine' are only defined further down, so
# running this file top-to-bottom raises NameError here — these lines read
# as illustrative snippets rather than runnable code.
if re.match(expression, chaine) is not None:
    # If the expression is in the string
    # Or, more intuitively
    pass
if re.match(expression, chaine):
    pass
# phone number
# ^0[0-9]([ .-]?[0-9]{2}){4}$
# ^ look for '0' at the start of the string
# then one digit between 0 and 9
# then optionally a space, a dot or a dash
# then 2 adjacent digits between 0 and 9
# the group of 2 separated digits must appear 4 times
chaine = ""
expression = r"^0[0-9]([ .-]?[0-9]{2}){4}$"
while re.search(expression, chaine) is None:
    chaine = input("Saisissez un numéro de téléphone (valide) :")
#--------------------- replacing in a string ------------------------#
# re.sub() takes 3 parameters: the expression to search for,
#                              what to replace it with,
#                              the original string.
re.sub(r"(ab)", r" \1 ", "abcdef") # groups are referenced with '\<group number>'
' ab cdef'
#--------------------- compilation ------------------------#
# keep a regular expression as a compiled object
chn_mdp = r"^[A-Za-z0-9]{6,}$"
exp_mdp = re.compile(chn_mdp)
mot_de_passe = ""
while exp_mdp.search(mot_de_passe) is None:
    mot_de_passe = input("Tapez votre mot de passe : ")
"""
config_dir
----------
Just stores a variable with the name of the current directory, that
is the base directory of the entire filesystem.
"""
import os
# Absolute path of the directory containing this module — the base directory
# of the entire filesystem layout.
_here = os.path.dirname(__file__)
base_dir = os.path.abspath(_here)
__all__ = [
    'Measurement',
    'make_counter',
    'make_rater',
    'make_timer',
]
import collections
import functools
import time
# A single metric sample: `when` is a wall-clock timestamp, `value` the
# measured quantity (None for pure timings), `duration` an elapsed time in
# seconds (None for counters).
Measurement = collections.namedtuple('Measurement', 'when value duration')
def make_counter(metry, name):
    """Return a callable that records count increments under *name*."""
    return functools.partial(count, metry.measure, name)

def make_rater(metry, name):
    """Return a callable that, given a value, yields a timing context for it."""
    return functools.partial(rate, metry.measure, name)

def make_timer(metry, name):
    """Return a Timer usable as a decorator or a context-manager factory."""
    return Timer(metry.measure, name)

def count(measure, name, value=1):
    """Record a count Measurement (no duration) at the current time."""
    measure(name, Measurement(time.time(), value, None))

def rate(measure, name, value):
    """Return a context manager that times a block and attaches *value*."""
    return MeasureContext(measure, name, value)
class Timer:
    """Times code either as a function decorator or via ``with t.time():``."""

    def __init__(self, measure, name):
        self.measure = measure
        self.name = name

    def __call__(self, func):
        """Decorate *func* so every call is timed and reported."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with self.time():
                return func(*args, **kwargs)
        return wrapper

    def time(self):
        """Return a fresh MeasureContext that times a with-block."""
        return MeasureContext(self.measure, self.name, None)
class MeasureContext:
    """Context manager that times a block and reports one Measurement."""

    def __init__(self, measure, measure_name, value):
        self.measure = measure
        self.measure_name = measure_name
        self.value = value
        self._time = None   # wall-clock time captured at start()
        self._start = None  # perf_counter at start(); None means inactive

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *exc_info):
        self.stop()

    def start(self):
        """Arm the context: remember wall-clock and monotonic start times."""
        self._time = time.time()
        self._start = time.perf_counter()

    def stop(self):
        """Report the elapsed time once; repeated stop() calls are no-ops."""
        if self._start is None:
            return
        elapsed = time.perf_counter() - self._start
        self.measure(self.measure_name, Measurement(self._time, self.value, elapsed))
        self._start = None  # Disable context.
|
# -*- coding: utf-8 -*-
"""
Folium Features Tests
---------------------
"""
import os
from folium import Map, Popup
from folium import features
from folium.six import text_type
from folium.element import Element
tmpl = """
<!DOCTYPE html>
<head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
<script src="https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.3/leaflet.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/js/bootstrap.min.js"></script>
<script src="https://rawgithub.com/lvoogdt/Leaflet.awesome-markers/2.0/develop/dist/leaflet.awesome-markers.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/0.4.0/leaflet.markercluster-src.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/0.4.0/leaflet.markercluster.js"></script>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.3/leaflet.css" />
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css" />
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap-theme.min.css" />
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/font-awesome/4.1.0/css/font-awesome.min.css" />
<link rel="stylesheet" href="https://rawgit.com/lvoogdt/Leaflet.awesome-markers/2.0/develop/dist/leaflet.awesome-markers.css" />
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/0.4.0/MarkerCluster.Default.css" />
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/0.4.0/MarkerCluster.css" />
<link rel="stylesheet" href="https://raw.githubusercontent.com/python-visualization/folium/master/folium/templates/leaflet.awesome.rotate.css" />
<style>
html, body {
width: 100%;
height: 100%;
margin: 0;
padding: 0;
}
#map {
position:absolute;
top:0;
bottom:0;
right:0;
left:0;
}
</style>
</head>
<body>
</body>
<script>
</script>
""" # noqa
# Figure
def test_figure_creation():
    """A bare Figure is an Element whose bounds are still undefined."""
    fig = features.Figure()
    assert isinstance(fig, Element)
    computed = fig.get_bounds()
    assert computed == [[None, None], [None, None]], computed
def test_figure_rendering():
    """Rendering an empty Figure yields text and leaves bounds undefined."""
    fig = features.Figure()
    html = fig.render()
    assert type(html) is text_type
    computed = fig.get_bounds()
    assert computed == [[None, None], [None, None]], computed
def test_figure_html():
    """An empty Figure renders exactly the reference template (tmpl)."""
    fig = features.Figure()
    rendered = fig.render()
    # Drop blank lines so the comparison ignores vertical whitespace.
    rendered = os.linesep.join(line for line in rendered.splitlines() if line.strip())
    print(rendered)
    assert rendered.strip() == tmpl.strip()
    computed = fig.get_bounds()
    assert computed == [[None, None], [None, None]], computed
def test_figure_double_rendering():
    """render() is idempotent: two calls produce identical output."""
    fig = features.Figure()
    first = fig.render()
    second = fig.render()
    assert first == second
    computed = fig.get_bounds()
    assert computed == [[None, None], [None, None]], computed
def test_marker_popups():
    """Every marker flavour accepts both str and Popup popup arguments."""
    m = Map()
    specs = [
        (features.Marker, [45, -180], '-180'),
        (features.Marker, [45, -120], Popup('-120')),
        (features.RegularPolygonMarker, [45, -60], '-60'),
        (features.RegularPolygonMarker, [45, 0], Popup('0')),
        (features.CircleMarker, [45, 60], '60'),
        (features.CircleMarker, [45, 120], Popup('120')),
    ]
    for factory, location, popup in specs:
        factory(location, popup=popup).add_to(m)
    m._repr_html_()
    bounds = m.get_bounds()
    assert bounds == [[45, -180], [45, 120]], bounds
def test_polyline_popups():
    """PolyLine/MultiPolyLine accept str and Popup popups; bounds cover all."""
    m = Map([43, -100], zoom_start=4)
    lines = [
        (features.PolyLine, [[40, -80], [45, -80]], "PolyLine"),
        (features.PolyLine, [[40, -90], [45, -90]], Popup("PolyLine")),
        (features.MultiPolyLine, [[[40, -110], [45, -110]]], "MultiPolyLine"),
        (features.MultiPolyLine, [[[40, -120], [45, -120]]], Popup("MultiPolyLine")),
    ]
    for factory, locations, popup in lines:
        factory(locations, popup=popup).add_to(m)
    m._repr_html_()
    bounds = m.get_bounds()
    assert bounds == [[40, -120], [45, -80]], bounds
# DivIcon.
def test_divicon():
    """DivIcon stores the given html verbatim and defaults className to 'empty'."""
    svg = """<svg height="100" width="100">
    <circle cx="50" cy="50" r="40" stroke="black" stroke-width="3" fill="red" />
    </svg>"""  # noqa
    div = features.DivIcon(html=svg)
    assert isinstance(div, Element)
    assert div.className == 'empty'
    assert div.html == svg
# WmsTileLayer.
def test_wms_service():
    """A WmsTileLayer adds to a Map without affecting the Map's bounds."""
    m = Map([40, -100], zoom_start=4)
    url = 'http://mesonet.agron.iastate.edu/cgi-bin/wms/nexrad/n0r.cgi'
    wms = features.WmsTileLayer(url,
                                name='test',
                                format='image/png',
                                layers='nexrad-n0r-900913',
                                attr=u"Weather data © 2012 IEM Nexrad",
                                transparent=True)
    wms.add_to(m)
    m._repr_html_()
    bounds = m.get_bounds()
    assert bounds == [[None, None], [None, None]], bounds
|
# -*- coding: utf-8 -*-
"""
lytics.settings
~~~~~~~~~~~~~~~
Settings file for lytics app.
Usage:
from lytics import settings
settings.DATABASE_PATH
>>> /home/lytics/db/db.sqlite3
:copyright: (c) 2016 by Patrick Spencer.
:license: Apache 2.0, see LICENSE for more details.
"""
from os import path
# Absolute directory containing this settings module.
BASE_PATH = path.dirname(path.abspath(__file__))
# SQLite database files live under <package>/db.
DATABASE_DIR = path.join(BASE_PATH,'db')
DATABASE_URI = "sqlite:///%s" % path.join(DATABASE_DIR,'db.sqlite3')
# Separate database file used by the test suite.
TEST_DATABASE_URI = "sqlite:///%s" % path.join(DATABASE_DIR,'test_db.sqlite3')
class Config(object):
    """Base configuration: safe defaults shared by all environments."""
    DEBUG = False
    TESTING = False
class ProductionConfig(Config):
    """Production configuration (debug explicitly off).

    Usage:
        from lytics import app
        app.config.from_object('lytics.settings.ProductionConfig')
    """
    DEBUG = False
class DevelopmentConfig(Config):
    """Development configuration; turns on the DEBUG flag."""
    DEBUG = True
class TestingConfig(Config):
    """Test-suite configuration; turns on the TESTING flag."""
    TESTING = True
|
class Solution:
    """Fibonacci computation (0-indexed: F(0)=0, F(1)=1).

    Bug fix: the original defined three methods all named ``Fibonacci``.
    In Python, later definitions in a class body silently shadow earlier
    ones, so only the final (iterative) version was ever callable.  The
    public ``Fibonacci`` keeps that surviving behavior; the alternative
    implementations are preserved under distinct private names.
    """

    def Fibonacci(self, n):
        """Return F(n) iteratively in O(n) time and O(1) space."""
        if n <= 1:
            return n
        a, b = 0, 1
        for _ in range(2, n + 1):
            a, b = b, a + b
        return b

    def _fibonacci_recursive(self, n):
        """Naive recursion; exponential time, kept for reference."""
        def dfs(k):
            if k <= 1:
                return k
            return dfs(k - 1) + dfs(k - 2)
        return dfs(n)

    def _fibonacci_dp(self, n):
        """Bottom-up table; O(n) time and O(n) space."""
        dp = [0, 1] + [0] * (n - 1)
        for i in range(2, n + 1):
            dp[i] = dp[i - 1] + dp[i - 2]
        return dp[n]
|
import json

file_path = "sample.json"
questionnaire_dic = {}

# Load the survey definition from disk.
with open(file_path, 'r', encoding="utf8") as j:
    contents = json.loads(j.read())

# Print the name of every page not already recorded.
# NOTE(review): questionnaire_dic is never populated, so the membership
# test is currently always true — confirm whether entries should be added.
if contents is not None and "pages" in contents:
    for page in contents['pages']:
        if page['name'] not in questionnaire_dic:
            print(page['name'])
|
"""
Author: Zahra Gharaee.
This code is written for the 3D-Human-Action-Recognition Project, started March 14 2014.
"""
import numpy as np
from Preprocessing.Nomalization import make_normalization
from Preprocessing.Attection import make_attention
from Preprocessing.Ego_Transfromation import make_egoCenteredCoordinateT
from Preprocessing.Dynamics import get_dynamics
from InputData.read_files import read_MSR
class DATA:
    """
    This class generates preprocessed input data.

    Depending on the selected dataset it picks the action-label set and
    which preprocessing stages (normalization, ego-centered transform,
    scaling, attention, dynamics) are enabled.
    """

    def __init__(self, input_dim=60, mainpath=None, dataset=None):
        # Root path of the input data files.
        self.mainpath = mainpath
        # Dataset identifier: 'MSR_Action3D_1', 'MSR_Action3D_2' or
        # 'MSR_Action3D_all'.
        self.Dataset = dataset
        # Dimensionality of one preprocessed frame vector.
        self.input_dim = input_dim
        self.actionSet = []
        if self.Dataset == 'MSR_Action3D_1':
            self.actionSet = ['High-Wave',
                              'Front-Wave',
                              'Using-Hammer',
                              'Hand-Catch',
                              'Forward-Punch',
                              'High-Throw',
                              'Draw-Xsign',
                              'Draw-TickSign',
                              'Draw-Circle',
                              'Tennis-Swing',
                              ]
            # Preprocessing switches for action set 1.
            self.prepro_attention = True
            self.prepro_ego = True
            self.prepro_norm = False
            self.prepro_scaling = True
            self.prepro_dyn = True
        elif self.Dataset == 'MSR_Action3D_2':
            self.actionSet = ['Hand_Clap',
                              'Two-Hand-Wave',
                              'Side-Boxing',
                              'Forward-Bend',
                              'Forward-Kick',
                              'Side-Kick',
                              'Still-Jogging',
                              'Tennis-Serve',
                              'Golf-Swing',
                              'PickUp-Throw',
                              ]
            # NOTE: unlike the other sets, scaling is disabled here.
            self.prepro_attention = True
            self.prepro_ego = True
            self.prepro_norm = False
            self.prepro_scaling = False
            self.prepro_dyn = True
        elif self.Dataset == 'MSR_Action3D_all':
            # Union of both MSR Action3D subsets (20 labels).
            self.actionSet = ['High-Wave',
                              'Front-Wave',
                              'Using-Hammer',
                              'Hand-Catch',
                              'Forward-Punch',
                              'High-Throw',
                              'Draw-Xsign',
                              'Draw-TickSign',
                              'Draw-Circle',
                              'Tennis-Swing',
                              'Hand_Clap',
                              'Two-Hand-Wave',
                              'Side-Boxing',
                              'Forward-Bend',
                              'Forward-Kick',
                              'Side-Kick',
                              'Still-Jogging',
                              'Tennis-Serve',
                              'Golf-Swing',
                              'PickUp-Throw',
                              ]
            self.prepro_attention = True
            self.prepro_ego = True
            self.prepro_norm = False
            self.prepro_scaling = True
            self.prepro_dyn = True
        # Accumulators filled by read_data() / make_preprocessing().
        # NOTE(review): prepro_scaling is set above but never consulted in
        # make_preprocessing — confirm whether scaling is applied elsewhere.
        self.pos_all = []
        self.pos_all_n = []
        self.vel_all = []
        self.acc_all = []
        self.class_all = []

    def read_data(self):
        """
        This function reads data from files.
        """
        if self.Dataset == 'MSR_Action3D_1':
            self.pos_all, self.class_all = read_MSR(self.mainpath, self.pos_all, self.class_all, set=1)
        elif self.Dataset == 'MSR_Action3D_2':
            self.pos_all, self.class_all = read_MSR(self.mainpath, self.pos_all, self.class_all, set=2)
        elif self.Dataset == 'MSR_Action3D_all':
            # Load both subsets, then add per-sequence offsets so the two
            # label spaces do not collide (first 276 sequences belong to
            # set 1; the remainder are shifted by [276, 0, 10, 0]).
            self.pos_all, self.class_all = read_MSR(self.mainpath, self.pos_all, self.class_all, set=1)
            self.pos_all, self.class_all = read_MSR(self.mainpath, self.pos_all, self.class_all, set=2)
            l_act = []
            [l_act.append(np.array([0, 0, 0, 0])) for k in range(276)]
            [l_act.append(np.array([276, 0, 10, 0])) for k in range(276, len(self.class_all))]
            self.class_all = [self.class_all[k] + l_act[k] for k in range(len(self.class_all))]

    def make_preprocessing(self):
        """
        This function runs pre-processing module consists of:

        (1) Normalization
        (2) Ego-Centered Coordinate Transformation
        (3) Scaling Transformation
        (4) Attention Mechanisms
        (5) Dynamics Extraction
        """
        for nseq in range(len(self.pos_all)):  # sequence
            data_seq = self.pos_all[nseq]
            class_seq = self.class_all[nseq]
            # class_seq[2] is treated as the action count passed to the
            # attention mechanism.
            n_act = class_seq[2]
            data_seq_n = np.zeros((np.size(data_seq, 0), self.input_dim))
            for nfr in range(np.size(data_seq, 0)):  # frame
                # Order matters: normalize, then re-center, then attend.
                if self.prepro_norm:
                    data_seq[nfr, :] = make_normalization(data_seq[nfr, :])
                if self.prepro_ego:
                    data_seq[nfr, :] = make_egoCenteredCoordinateT(data_seq[nfr, :], self.Dataset)
                if self.prepro_attention:
                    vec = make_attention(data_seq[nfr, :], n_act, self.Dataset)
                    data_seq_n[nfr, :] = vec[0, :]
            if self.prepro_attention:
                self.pos_all_n.append(data_seq_n)
            else:
                self.pos_all_n.append(data_seq)
        if self.prepro_dyn:
            # Velocity/acceleration sequences derived from positions.
            self.vel_all, self.acc_all = get_dynamics(self.pos_all_n)

    def get_input(self):
        """Read the raw data and run the preprocessing pipeline."""
        # Read data from files
        self.read_data()
        # Do the pre-processing
        self.make_preprocessing()
|
class Node:
    """A single binary-search-tree node."""

    def __init__(self, data=None, left=None, right=None):
        self.data = data    # payload / key
        self.left = left    # left child (values < data)
        self.right = right  # right child (values > data)

    def __repr__(self):
        return f"Node({self.data!r})"


class Binary_Search_Tree:
    """A simple binary search tree that rejects duplicate values."""

    def __init__(self):
        self.root = None

    def _insert(self, data, current_node):
        """Recursively place *data* below *current_node*; duplicates rejected."""
        if current_node.data == data:
            print(f"Insertion Error: Value {data} already present.")
            return
        elif current_node.data > data:
            # add node to left side
            if current_node.left is None:
                current_node.left = Node(data)
            else:
                self._insert(data, current_node.left)
        else:
            # add node to right side
            if current_node.right is None:
                current_node.right = Node(data)
            else:
                self._insert(data, current_node.right)

    def insert(self, data):
        """Insert *data* into the tree."""
        if self.root is None:
            self.root = Node(data)
            return
        else:
            self._insert(data, self.root)

    def _search_tree(self, data, current_node):
        """Return True if *data* exists in the subtree at *current_node*.

        Bug fix: the original discarded the results of the recursive calls
        (and contained unreachable debug prints), so any match below the
        root fell through to ``return False``.  Recursive results are now
        propagated with ``return``.
        """
        if current_node.data == data:
            print(f'Found element {data}.')
            return True
        if current_node.data > data and current_node.left is not None:
            # search to the left
            return self._search_tree(data, current_node.left)
        if current_node.data < data and current_node.right is not None:
            # search to the right
            return self._search_tree(data, current_node.right)
        return False

    def search_tree(self, data):
        """Return True if *data* is present in the tree, else False."""
        if self.root is None:
            return False
        return self._search_tree(data, self.root)

    def delete_node(self, data):
        """Not implemented yet."""
        pass

    def _print_tree_in_order_traversal(self, current_node):
        # left, node, right
        if current_node is not None:
            self._print_tree_in_order_traversal(current_node.left)
            print(current_node.data, end=" ")
            self._print_tree_in_order_traversal(current_node.right)

    def print_tree_in_order_traversal(self):
        """Print the tree's values in ascending (in-order) order."""
        if self.root is None:
            print("Tree is Empty.")
            return
        else:
            self._print_tree_in_order_traversal(self.root)
            print("\b")

    def _print_tree_pre_order_traversal(self, current_node):
        # node, left, right
        if current_node is not None:
            print(current_node.data, end=" ")
            self._print_tree_pre_order_traversal(current_node.left)
            self._print_tree_pre_order_traversal(current_node.right)

    def print_tree_pre_order_traversal(self):
        """Print the tree's values in pre-order."""
        if self.root is None:
            print("Tree is Empty.")
            return
        else:
            self._print_tree_pre_order_traversal(self.root)
            print("\b")

    def _print_tree_post_order_traversal(self, current_node):
        # left, right, node
        if current_node is not None:
            self._print_tree_post_order_traversal(current_node.left)
            self._print_tree_post_order_traversal(current_node.right)
            print(current_node.data, end=" ")

    def print_tree_post_order_traversal(self):
        """Print the tree's values in post-order."""
        if self.root is None:
            print("Tree is Empty.")
            return
        else:
            self._print_tree_post_order_traversal(self.root)
            print("\b")
if __name__ == '__main__':
    # Build a small sample tree and exercise the public API.
    bst = Binary_Search_Tree()
    for value in (15, 27, 12, 7, 14, 20, 88, 23):
        bst.insert(value)
    bst.print_tree_in_order_traversal()
    bst.print_tree_pre_order_traversal()
    bst.print_tree_post_order_traversal()
    bst.search_tree(88)
|
import json
import pytest
from hypothesis import HealthCheck, given, settings
from hypothesis import strategies as st
from hypothesis_jsonschema import from_schema
from jigu.core.proposal import (
CommunityPoolSpendProposal,
ParameterChangeProposal,
RewardWeightUpdateProposal,
TaxRateUpdateProposal,
TextProposal,
)
from testtools import assert_serdes_consistent, assert_serdes_exact
class TestTextProposal:
    """Serde round-trip and metadata checks for TextProposal."""

    @pytest.mark.serdes
    @pytest.mark.slow
    @settings(suppress_health_check=[HealthCheck.too_slow, HealthCheck.filter_too_much])
    @given(m=from_schema(TextProposal.__schema__))
    def test_serdes_consistent(self, m):
        # Property-based: any instance matching the JSON schema must
        # survive a serialize/deserialize round trip unchanged.
        assert_serdes_consistent(TextProposal, m)

    def test_matches_meta(self):
        assert TextProposal.type == "gov/TextProposal"
class TestTaxRateUpdateProposal:
    """Serde round-trip and metadata checks for TaxRateUpdateProposal."""

    # Consistency fix: added the `serdes` mark used on the equivalent
    # test in TestTextProposal so mark-based selection covers this too.
    @pytest.mark.serdes
    @pytest.mark.slow
    @settings(suppress_health_check=[HealthCheck.too_slow, HealthCheck.filter_too_much])
    @given(m=from_schema(TaxRateUpdateProposal.__schema__))
    def test_serdes_consistent(self, m):
        # Round trip any schema-conforming instance.
        assert_serdes_consistent(TaxRateUpdateProposal, m)

    def test_matches_meta(self):
        assert TaxRateUpdateProposal.type == "treasury/TaxRateUpdateProposal"
class TestRewardWeightUpdateProposal:
    """Serde round-trip and metadata checks for RewardWeightUpdateProposal."""

    # Consistency fix: added the `serdes` mark used on the equivalent
    # test in TestTextProposal so mark-based selection covers this too.
    @pytest.mark.serdes
    @pytest.mark.slow
    @settings(suppress_health_check=[HealthCheck.too_slow, HealthCheck.filter_too_much])
    @given(m=from_schema(RewardWeightUpdateProposal.__schema__))
    def test_serdes_consistent(self, m):
        # Round trip any schema-conforming instance.
        assert_serdes_consistent(RewardWeightUpdateProposal, m)

    def test_matches_meta(self):
        assert RewardWeightUpdateProposal.type == "treasury/RewardWeightUpdateProposal"
class TestCommunityPoolSpendProposal:
    """Serde round-trip and metadata checks for CommunityPoolSpendProposal."""

    # Consistency fix: added the `serdes` mark used on the equivalent
    # test in TestTextProposal so mark-based selection covers this too.
    @pytest.mark.serdes
    @pytest.mark.slow
    @settings(suppress_health_check=[HealthCheck.too_slow, HealthCheck.filter_too_much])
    @given(m=from_schema(CommunityPoolSpendProposal.__schema__))
    def test_serdes_consistent(self, m):
        # Round trip any schema-conforming instance.
        assert_serdes_consistent(CommunityPoolSpendProposal, m)

    def test_matches_meta(self):
        assert (
            CommunityPoolSpendProposal.type == "distribution/CommunityPoolSpendProposal"
        )
class TestParameterChangeProposal:
    """Serde round-trip and metadata checks for ParameterChangeProposal."""

    # Consistency fix: added the `serdes` mark used on the equivalent
    # test in TestTextProposal so mark-based selection covers this too.
    @pytest.mark.serdes
    @pytest.mark.slow
    @settings(suppress_health_check=[HealthCheck.too_slow, HealthCheck.filter_too_much])
    @given(m=from_schema(ParameterChangeProposal.__schema__))
    def test_serdes_consistent(self, m):
        # Round trip any schema-conforming instance.
        assert_serdes_consistent(ParameterChangeProposal, m)

    def test_matches_meta(self):
        assert ParameterChangeProposal.type == "params/ParameterChangeProposal"
|
import uuid
from PyQt5 import QtCore
from PyQt5.QtCore import QMimeData
from PyQt5.QtGui import QDrag
from PyQt5.QtWidgets import QPushButton
class FuncWidget(QPushButton):
    """A draggable button that also accepts text drops.

    A left-button press starts a drag carrying the text 'coucou';
    dropping any text onto the widget replaces its label.
    """

    def __init__(self):
        super().__init__()
        # Random label so individual widgets are distinguishable.
        self.setText(str(uuid.uuid4()))
        self.setAcceptDrops(True)

    def mousePressEvent(self, event):
        """Start a text drag on a left-button press inside the widget."""
        print(f'{event}: {event.pos()} : {self.geometry()} {self.geometry().contains(event.pos())}')
        # NOTE(review): geometry() is in parent coordinates while
        # event.pos() is widget-local — this looks like it was meant to be
        # rect().contains(event.pos()); confirm before changing.
        if event.button() == QtCore.Qt.LeftButton and self.geometry().contains(event.pos()):
            print('drag')
            drag = QDrag(self)
            mime_data = QMimeData()
            mime_data.setText('coucou')
            drag.setMimeData(mime_data)
            # Blocks until the drag finishes; the result code is unused
            # (the original bound it to an unused local).
            drag.exec()

    def dragEnterEvent(self, e):
        """Accept only drags that carry text."""
        if e.mimeData().hasText():
            e.accept()
        else:
            e.ignore()

    def dropEvent(self, e):
        """Replace the button label with the dropped text."""
        self.setText(e.mimeData().text())
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .testenv import testenv # pylint: disable=unused-import
from .. import helpers
def test_nodecellar(testenv):
    """End-to-end dry-run of the node-cellar service template.

    Installs the service in dry mode, verifies the stored model, dry-runs
    the custom maintenance workflows, then uninstalls and checks that
    storage is left clean.
    """
    nodecellar_template_uri = helpers.get_service_template_uri(
        'tosca-simple-1.0', 'node-cellar', 'node-cellar.yaml')

    service_name = testenv.install_service(nodecellar_template_uri, dry=True)
    _verify_deployed_service_in_storage(service_name, testenv.model_storage)

    # testing dry execution of custom workflows
    testenv.execute_workflow(service_name, 'maintenance_on', dry=True)
    testenv.execute_workflow(service_name, 'maintenance_off', dry=True)

    testenv.uninstall_service(dry=True)
    testenv.verify_clean_storage()
def _verify_deployed_service_in_storage(service_name, model_storage):
    """Assert that exactly one template/service pair exists in storage.

    Checks the service under *service_name*, that dry execution recorded
    nothing, and that all 10 node-cellar nodes were created.
    """
    service_templates = model_storage.service_template.list()
    assert len(service_templates) == 1
    assert len(service_templates[0].services) == 1
    service = service_templates[0].services[service_name]
    assert service.name == service_name
    assert len(service.executions) == 0  # dry executions leave no traces
    assert len(service.nodes) == 10
|
#!/usr/bin/env python3
"""Frog jump DP: minimum cost to reach the last stone.

From stone i the frog may jump to stone i+1 or i+2, paying |h[i] - h[j]|.

Bug fixes versus the original:
  * dp was initialized to 0 and then ``min``-ed, so every entry stayed 0;
  * ``dp[i+1]`` overflowed the list on the final iteration (IndexError);
  * the two-step transition paired the wrong heights (``h[i] - h[i-2]``
    was attributed to ``dp[i+1]``).
"""


def min_total_cost(h):
    """Return the minimum total jump cost from stone 0 to stone len(h)-1."""
    n = len(h)
    INF = float('inf')
    dp = [INF] * n
    dp[0] = 0
    for i in range(1, n):
        # one-step jump from the previous stone
        dp[i] = dp[i - 1] + abs(h[i] - h[i - 1])
        if i >= 2:
            # two-step jump skipping one stone
            dp[i] = min(dp[i], dp[i - 2] + abs(h[i] - h[i - 2]))
    return dp[n - 1]


if __name__ == "__main__":
    # Guarded so importing this module does not block on stdin.
    n, *h = map(int, open(0).read().split())
    print(min_total_cost(h))
|
import logging
from typing import List
from PIL import Image
import numpy as np
try:
import face_recognition
_FACE_RECOGNITION_LOADED = True
except ImportError:
_FACE_RECOGNITION_LOADED = False
class FaceEmbedder:
    """Computes face-embedding vectors using the `face_recognition` package."""

    def __init__(self, model: str = "large", num_jitters: int = 5):
        logging.info("Loading FaceEmbedder")
        self.model = model            # face_recognition model name
        self.num_jitters = num_jitters  # re-sampling passes per encoding

    def embed_image(self, image: Image.Image) -> List:
        """Return one encoding per face found in *image*.

        Returns an empty list when the optional `face_recognition`
        dependency is not installed.
        """
        if not _FACE_RECOGNITION_LOADED:
            return []
        pixels = np.array(image)
        return face_recognition.face_encodings(
            pixels, model=self.model, num_jitters=self.num_jitters
        )
|
import os
import collections
import subprocess
import warnings
import lamnfyc.context_managers
import lamnfyc.settings
import lamnfyc.packages.base
def installer(package, temp, env):
    """Configure, build and install FreeTDS from an unpacked source tree.

    package: the TarPacket being installed (provides .version)
    temp:    directory where the tarball was extracted
    env:     target environment (unused here; part of the installer signature)
    """
    warnings.warn("This is VERY untested")  # typo fixed: "unteseted"
    with lamnfyc.context_managers.chdir(os.path.join(temp, 'freetds-{}'.format(package.version))):
        command = ('./configure --prefix={0} '
                   '--with-unixodbc={0} '
                   '--mandir={0}/man '
                   '--with-tdsver=7.3 '
                   '--with-openssl={0} && make install')
        # shell=True is needed for the '&&' chaining; the interpolated path
        # comes from local settings, not user input.
        subprocess.call(command.format(lamnfyc.settings.environment_path), shell=True)
# Registry of installable FreeTDS versions.
VERSIONS = collections.OrderedDict()
VERSIONS['1.00.27'] = lamnfyc.packages.base.TarPacket('ftp://ftp.freetds.org/pub/freetds/stable/freetds-1.00.27.tar.gz',  # noqa
                                                      installer=installer,
                                                      md5_signature='093b1e7d1411a84f4264d3111aeead32',
                                                      depends_on=[
                                                          lamnfyc.packages.base.RequiredPacket(name='unixodbc', version='2.3.4'),  # noqa
                                                          lamnfyc.packages.base.RequiredPacket(name='openssl', version='1.0.2g'),  # noqa
                                                      ])

# Stamp each packet with its registry name and version.
# Bug fix: dict.iteritems() is Python 2 only (AttributeError on Python 3);
# .items() behaves identically for this use on both versions.
for version, item in VERSIONS.items():
    # NOTE(review): 'freedts' looks like a typo for 'freetds', but other
    # code may look packages up under this exact name — confirm before
    # renaming the string.
    item.name = 'freedts'
    item.version = version
|
"""A set of stories-pytest' exceptions."""
from _stories_pytest.exceptions import StoryPytestError
__all__ = ["StoryPytestError"]
|
# -*- coding: UTF-8 -*-
# ------------------------(max to 80 columns)-----------------------------------
# author by : (student ID)
# created: 2019.11
# Description:
#     Introductory WinForm-style GUI programming with tkinter (buttons, message boxes)
# ------------------------(max to 80 columns)-----------------------------------
import tkinter as tk
from PIL import Image, ImageTk

# create top_win window
top_win = tk.Tk()
# naming top_win window
top_win.title('Hello World Window')
# resize root window
top_win.geometry('800x600')

# A button with no callback does nothing when pressed.  Such a button can
# still be useful during development, e.g. as a placeholder that does not
# disturb beta testers.
btn_help = tk.Button(top_win, text="Help", command=None)
btn_help.pack()

# Button relief styles with foreground/background colors.
button_relieves = ('flat', 'groove', 'raised', 'ridge', 'solid', 'sunken')
for r in button_relieves:
    tk.Button(top_win, text=r, relief=r, fg='white', bg='blue').pack()

# Button border widths.
for b in [0, 1, 2, 3, 4]:
    text = 'Button border = %d' % b
    tk.Button(top_win, text=text, bd=b).pack()

# Button states.
# Bug fix: 'norma' is not a valid tk state and raises a TclError at
# widget creation; the valid values are 'normal', 'active', 'disabled'.
for st in ['normal', 'active', 'disabled']:
    tk.Button(top_win, text=st, state=st).pack()

# Image button.
image = Image.open(r'btn1_shutdown.jpg')
bk_img = ImageTk.PhotoImage(image)
tk.Button(top_win, text='try pic', compound='center', image=bk_img).pack()

# show window and waiting for event
top_win.mainloop()
|
from babbage.query.parser import Parser
from babbage.model.binding import Binding
from babbage.exc import QueryException
class Aggregates(Parser):
    """ Handle parser output for aggregate/drilldown specifications. """
    start = "aggregates"

    def aggregate(self, ast):
        """Validate one parsed aggregate reference against the cube model."""
        refs = [a.ref for a in self.cube.model.aggregates]
        if ast not in refs:
            raise QueryException('Invalid aggregate: %r' % ast)
        self.results.append(ast)

    def apply(self, q, bindings, aggregates):
        """Attach the requested aggregate columns to the query *q*.

        Returns a tuple ``(info, q, bindings)`` where *info* lists the
        aggregate refs that were applied.
        """
        info = []
        for aggregate in self.parse(aggregates):
            info.append(aggregate)
            table, column = self.cube.model[aggregate].bind(self.cube)
            bindings.append(Binding(table, aggregate))
            q = q.column(column)

        if not len(self.results):
            # If no aggregates are specified, aggregate on all.
            for aggregate in self.cube.model.aggregates:
                info.append(aggregate.ref)
                table, column = aggregate.bind(self.cube)
                bindings.append(Binding(table, aggregate.ref))
                q = q.column(column)
        return info, q, bindings
|
"""Marsha storage modules."""
from django.conf import settings
from django.utils.module_loading import import_string
# pylint: disable=unused-import
from . import dummy, s3 # noqa isort:skip
def get_initiate_backend():
    """Select and return the selected storage backend.

    Resolves the dotted path in ``settings.STORAGE_BACKEND`` via Django's
    ``import_string``.
    """
    return import_string(settings.STORAGE_BACKEND)
|
"""
Run process using 'python TwitterProcess.py'
"""
import TwitterProcessModule as tpmod
import datetime as dt
def start_database():
    """Open (creating if needed) the social-network SQLite database.

    Returns the ``tpmod.Sqlite3Db`` connection with the ``TweetDates``
    and ``Tweets`` tables ensured to exist.
    """
    generalDbHelper = tpmod.GeneralDbHelper(version = 1)
    #Connecting to sqlite3 database
    social_network_path_file = generalDbHelper.get_store_dir(store_dir_name="data", file_name="social_network.db")
    dbConn = tpmod.Sqlite3Db(social_network_path_file)
    #Creating tweet Dates table
    dbConn.query('''CREATE TABLE IF NOT EXISTS TweetDates(firstdate timestamp, lastdate timestamp)''')
    #Creating tweets table
    dbConn.query('''CREATE TABLE IF NOT EXISTS Tweets(insert_timestamp timestamp, tweet_timestamp timestamp, tweet_term TEXT, tweet TEXT, place_type TEXT, place_name TEXT, place_full_name TEXT, place_country_code TEXT, place_country TEXT)''')
    return dbConn
def twitter_authentication():
    """Authenticate against the Twitter API and return the API client."""
    twitterAuthentication = tpmod.TwitterAuthentication(version = "1.0")
    api = twitterAuthentication.connect()
    return api
def download_tweets(api, dbConn):
    """Fetch tweets for the stored date window and insert them into the DB.

    Exits the process immediately when the API rate limit is exhausted.
    Closes *dbConn* when done.
    """
    twitterProcess = tpmod.TwitterProcess(api=api, dbConn=dbConn)
    limit, remaining, next_reset_time = twitterProcess.get_api_limits()
    print(limit, remaining, next_reset_time)

    # Exit when we cannot continue (no API calls remaining).
    if remaining == 0:
        quit()

    # Work out the date window already covered by previous runs.
    dbConn.query('''SELECT count(1) cant FROM TweetDates; ''')
    sql_stm_res = dbConn.cursor.fetchall()
    sqlite3DbHelper = tpmod.Sqlite3DbHelper(dbConn=dbConn)
    query_stm_res = sqlite3DbHelper.get_count_stm(sql_stm_res)
    # (removed: a bare `query_stm_res` expression statement here was a
    # no-op left over from notebook-style development)
    firstdate, lastdate = sqlite3DbHelper.get_process_dates(query_stm_res)
    noOfSearch = 10
    print(firstdate)
    print(lastdate)
    print(noOfSearch)
    twitterProcess.ins_twitter_tweets_loop(firstdate=firstdate, lastdate=lastdate, noOfSearch=noOfSearch)
    dbConn.close()
def main():
    """Run the full pipeline: open the DB, authenticate, download tweets."""
    print(f"Started new execution at: {dt.datetime.now()}")
    connection = start_database()
    client = twitter_authentication()
    download_tweets(api=client, dbConn=connection)
    print(f"Terminated new execution at: {dt.datetime.now()}")


if __name__ == "__main__":
    main()
|
from tkinter import *

# Root window setup.
window = Tk()
window.title("Special Midterm Exam in OOP")
# width x height + x-offset + y-offset
window.geometry('600x600+20+10')


def bgchange():
    """Button callback: turn the button's background yellow."""
    btn.configure(bg="yellow")


btn = Button(window, text="Click to Change Color", command=bgchange)
btn.place(relx=0.5, rely=0.5, anchor="center")

# Enter the event loop.
window.mainloop()
|
#!/usr/bin/env python
# Checks that all commits have been signed off and fails if not.
# (Adapted from l33t Docker script for checking for their DCO)
# Requires Python 2.7
import re
import os
import sys
import yaml
import subprocess
# This script only makes sense inside a Travis CI build.
if 'TRAVIS' not in os.environ:
    print('TRAVIS is not defined; this should run in TRAVIS. Sorry.')
    sys.exit(127)

# Work out which commit range to inspect.
if os.environ['TRAVIS_PULL_REQUEST'] != 'false':
    commit_range = ['upstream/' + os.environ['TRAVIS_BRANCH'], 'FETCH_HEAD']
else:
    try:
        subprocess.check_call([
            'git', 'log', '-1', '--format=format:',
            os.environ['TRAVIS_COMMIT_RANGE'], '--',
        ])
        commit_range = os.environ['TRAVIS_COMMIT_RANGE'].split('...')

        # if it didn't split, it must have been separated by '..' instead
        if len(commit_range) == 1:
            commit_range = commit_range[0].split('..')
    except subprocess.CalledProcessError:
        print('TRAVIS_COMMIT_RANGE is invalid. This seems to be a force '
              'push. We will just assume it must be against upstream '
              'master and compare all commits in between.')
        commit_range = ['upstream/master', 'HEAD']

# Emit each commit as one YAML list item so the whole log parses at once.
commit_format = '-%n hash: "%h"%n author: %aN <%aE>%n message: |%n%w(0,2,2).%B'

gitlog = subprocess.check_output([
    'git', 'log', '--reverse',
    '--format=format:'+commit_format,
    '..'.join(commit_range), '--',
])

# Security/compat fix: commit messages are untrusted input, and bare
# yaml.load() both permits arbitrary object construction and requires an
# explicit Loader argument on modern PyYAML. safe_load parses plain data
# only, which is all this document contains.
commits = yaml.safe_load(gitlog)

# what? how can we have no commits?
if not commits:
    sys.exit()

p = re.compile(r'^Signed-off-by: ([^<]+) <([^<>@]+@[^<>]+)>$',
               re.MULTILINE | re.UNICODE)

failed_commits = []

for commit in commits:
    # trim off our '.' that exists just to prevent fun YAML parsing issues
    # see https://github.com/dotcloud/docker/pull/3836#issuecomment-33723094
    # and https://travis-ci.org/dotcloud/docker/builds/17926783
    commit['message'] = commit['message'][1:]

    commit['stat'] = subprocess.check_output([
        'git', 'log', '--format=format:', '--max-count=1',
        '--name-status', commit['hash'], '--',
    ])
    if commit['stat'] == '':
        print('Commit {0} has no changed content, '
              'skipping.'.format(commit['hash']))
        continue

    m = p.search(commit['message'])
    if not m:
        failed_commits.append(commit['hash'])
        continue

# print all failed commits
if failed_commits:
    print('{0} commit(s) have not been signed off:'
          .format(len(failed_commits)))
    print('\n'.join(failed_commits))
    sys.exit(1)

print('All commits have been signed off.')
|
from abc import ABC
from datetime import datetime
from typing import Optional, Iterable
from gather_vision.process import item as app_items
from gather_vision import models as app_models
class PlaylistSource(ABC):
    """A service that provides playlists.

    Concrete subclasses must override every method; the stubs here raise
    NotImplementedError.
    """

    @property
    def collections(self) -> list:
        """
        The internal codes for the playlists available from this service.
        """
        raise NotImplementedError()

    def get_playlist(
        self, identifier: str, name: str, title: str
    ) -> app_items.Playlist:
        """Get a playlist."""
        raise NotImplementedError()

    def get_playlist_tracks(
        self,
        identifier: str,
        name: str,
        title: str,
        start_date: Optional[datetime] = None,
        end_date: Optional[datetime] = None,
        limit: Optional[int] = None,
    ) -> app_items.Playlist:
        """Get a playlist and tracks.

        start_date/end_date bound the tracks returned; limit caps the
        number of tracks.
        """
        raise NotImplementedError()

    def get_model_track(
        self,
        info: app_models.InformationSource,
        track: app_items.Track,
    ) -> app_models.PlaylistTrack:
        """Convert from a track from this source to the model used to store a track."""
        raise NotImplementedError()
class PlaylistTarget(ABC):
    """A service that can store playlists.

    Concrete subclasses must override every method; the stubs here raise
    NotImplementedError.
    """

    def set_playlist_tracks(
        self,
        identifier: str,
        new_tracks: Iterable[app_items.Track],
        old_tracks: Iterable[app_items.Track],
    ) -> bool:
        """Set the tracks for a playlist."""
        raise NotImplementedError()

    def set_playlist_details(
        self,
        collection_name: str,
        playlist_id: str,
        title: str = None,
        description: str = None,
        is_public: bool = None,
    ) -> bool:
        """Set playlist details."""
        raise NotImplementedError()

    def search_tracks(
        self,
        playlist_name: str,
        track: str,
        artists: list[str],
        limit: int = 5,
    ) -> Iterable[app_items.Track]:
        """Search the tracks available from a service."""
        raise NotImplementedError()
class AuthRequiredService(ABC):
    """A service that requires authentication.

    The two-step shape supports flows where a second exchange (e.g. a
    token refresh or verification step) follows the initial login.
    """

    def login_init(self, *args, **kwargs) -> None:
        """Get the initial set of login details."""
        raise NotImplementedError()

    def login_next(self, *args, **kwargs) -> None:
        """Get the next set of login details."""
        raise NotImplementedError()
class PlaylistDetails(ABC):
    """Provides basic information about a playlist service."""

    @property
    def code(self) -> str:
        """The internal code for the service."""
        raise NotImplementedError()

    @property
    def title(self) -> str:
        """The displayed title for the service."""
        raise NotImplementedError()
|
"""IR interpreter."""
from __future__ import print_function, division, absolute_import
import ctypes
try:
import exceptions
except ImportError:
import builtins as exceptions
from itertools import chain
from collections import namedtuple
from functools import partial
from . import defs, ops, tracing, types
from .ir import Function
from .traversal import ArgLoader
from .utils import linearize
#===------------------------------------------------------------------===
# Interpreter
#===------------------------------------------------------------------===
Undef = "Undef" # Undefined/uninitialized value
State = namedtuple('State', ['refs']) # State shared by stack frames
class Reference(object):
    """
    Models a reference to an object
    """

    def __init__(self, obj, refcount, producer):
        self.obj = obj            # the referenced object
        self.refcount = refcount  # current reference count
        self.producer = producer  # operation that produced the reference
class UncaughtException(Exception):
    """
    Raised by the interpreter when code raises an exception that isn't caught.
    Carries the interpreter-level exception object as its sole argument.
    """
class Interp(object):
    """
    Interpret the function given as a ir.Function. See the run() function
    below.

        func:           The ir.Function we interpret
        exc_model:      ExceptionModel that knows how to deal with exceptions
        argloader:      InterpArgloader: knows how pykit Values are associated
                        with runtime (stack) values (loads from the store)
        ops:            Flat list of instruction targets (['%0'])
        blockstarts:    Dict mapping block labels to address offsets
        prevblock:      Previously executing basic block
        pc:             Program Counter
        lastpc:         Last value of Program Counter
        exc_handlers:   List of exception target blocks to try
        exception:      Currently raised exception
        refs:           { id(obj) : Reference }

    Opcode dispatch: each IR opcode name maps to a same-named method (or
    staticmethod attached below from `defs`); `run()` looks them up with
    getattr.
    """

    def __init__(self, func, env, exc_model, argloader, tracer):
        self.func = func
        self.env = env
        self.exc_model = exc_model
        self.argloader = argloader
        # Bundled keyword arguments forwarded to recursive run() calls.
        self.state = {
            'env': env,
            'exc_model': exc_model,
            'tracer': tracer,
        }

        # Flatten the CFG into a linear instruction list plus a map from
        # block label to the offset of its first instruction.
        self.ops, self.blockstarts = linearize(func)
        self.lastpc = 0
        self._pc = 0
        self.prevblock = None
        self.exc_handlers = None
        self.exception = None

    # __________________________________________________________________
    # Utils

    def incr_pc(self):
        """Increment program counter"""
        self.pc += 1

    def decr_pc(self):
        """Decrement program counter"""
        self.pc -= 1

    def halt(self):
        """Stop interpreting"""
        # pc == -1 is the sentinel run() checks to detect a return.
        self.pc = -1

    @property
    def op(self):
        """Return the current operation"""
        return self.getop(self.pc)

    def getop(self, pc):
        """PC -> Op"""
        return self.ops[pc]

    def setpc(self, newpc):
        # Remember where we came from so phi resolution can work.
        self.lastpc = self.pc
        self._pc = newpc

    pc = property(lambda self: self._pc, setpc, doc="Program Counter")

    def blockswitch(self, oldblock, newblock, valuemap):
        """Called by run() whenever control passes into a new basic block."""
        self.prevblock = oldblock
        # Entering a new block invalidates the previous exc_setup targets.
        self.exc_handlers = []

        self.execute_phis(newblock, valuemap)

    def execute_phis(self, block, valuemap):
        """
        Execute all phis in parallel, i.e. execute them before updating the
        store.
        """
        new_values = {}
        for op in block.leaders:
            if op.opcode == 'phi':
                new_values[op.result] = self.execute_phi(op)
        valuemap.update(new_values)

    def execute_phi(self, op):
        # Pick the incoming value whose predecessor block we arrived from.
        for i, block in enumerate(op.args[0]):
            if block == self.prevblock:
                values = op.args[1]
                return self.argloader.load_op(values[i])

        raise RuntimeError("Previous block %r not a predecessor of %r!" %
                           (self.prevblock.name, op.block.name))

    noop = lambda *args: None

    # __________________________________________________________________
    # Core operations

    # unary, binary and compare operations set below

    def convert(self, arg):
        return types.convert(arg, self.op.type)

    # __________________________________________________________________
    # Var

    def alloca(self, numitems=None):
        # Stack slots are modelled as {'value': ..., 'type': ...} cells.
        return { 'value': Undef, 'type': self.op.type }

    def load(self, var):
        #assert var['value'] is not Undef, self.op
        return var['value']

    def store(self, value, var):
        # Storing one stack cell into another copies the cell's payload.
        if isinstance(value, dict) and set(value) == set(['type', 'value']):
            value = value['value']
        var['value'] = value

    def phi(self):
        "See execute_phis"
        return self.argloader.load_op(self.op)

    # __________________________________________________________________
    # Functions

    def function(self, funcname):
        return self.func.module.get_function(funcname)

    def call(self, func, args):
        if isinstance(func, Function):
            # We're calling another known pykit function,
            try:
                return run(func, args=args, **self.state)
            except UncaughtException as e:
                # make sure to handle any uncaught exceptions properly
                self.exception, = e.args
                self._propagate_exc()
        else:
            # Foreign (Python-level) callable.
            return func(*args)

    def call_math(self, fname, *args):
        return defs.math_funcs[fname](*args)

    # __________________________________________________________________
    # Attributes

    def getfield(self, obj, attr):
        if obj['value'] is Undef:
            return Undef
        return obj['value'][attr] # structs are dicts

    def setfield(self, obj, attr, value):
        # Lazily materialize the struct dict on first field write.
        if obj['value'] is Undef:
            obj['value'] = {}
        obj['value'][attr] = value

    # __________________________________________________________________

    # The 'print' opcode maps directly to the builtin print function.
    print = print

    # __________________________________________________________________
    # Pointer

    def ptradd(self, ptr, addend):
        # Pointer arithmetic on ctypes pointers, scaled by element size.
        value = ctypes.cast(ptr, ctypes.c_void_p).value
        itemsize = ctypes.sizeof(type(ptr)._type_)
        return ctypes.cast(value + itemsize * addend, type(ptr))

    def ptrload(self, ptr):
        return ptr[0]

    def ptrstore(self, value, ptr):
        ptr[0] = value

    def ptr_isnull(self, ptr):
        return ctypes.cast(ptr, ctypes.c_void_p).value == 0

    def func_from_addr(self, ptr):
        type = self.op.type
        return ctypes.cast(ptr, types.to_ctypes(type))

    # __________________________________________________________________
    # Control flow

    def ret(self, arg):
        self.halt()
        # Void functions return None implicitly.
        if self.func.type.restype != types.Void:
            return arg

    def cbranch(self, test, true, false):
        if test:
            self.pc = self.blockstarts[true.name]
        else:
            self.pc = self.blockstarts[false.name]

    def jump(self, block):
        self.pc = self.blockstarts[block.name]

    # __________________________________________________________________
    # Exceptions

    def new_exc(self, exc_name, exc_args):
        return self.exc_model.exc_instantiate(exc_name, *exc_args)

    def exc_catch(self, types):
        self.exception = None # We caught it!

    def exc_setup(self, exc_handlers):
        self.exc_handlers = exc_handlers

    def exc_throw(self, exc):
        self.exception = exc
        self._propagate_exc() # Find exception handler

    def _exc_match(self, exc_types):
        """
        See whether the current exception matches any of the exception types
        """
        return any(self.exc_model.exc_match(self.exception, exc_type)
                   for exc_type in exc_types)

    def _propagate_exc(self):
        """Propagate installed exception (`self.exception`)"""
        catch_op = self._find_handler()
        if catch_op:
            # Exception caught! Transfer control to block
            catch_block = catch_op.parent
            self.pc = self.blockstarts[catch_block.name]
        else:
            # No exception handler!
            raise UncaughtException(self.exception)

    def _find_handler(self):
        """Find a handler for an active exception"""
        exc = self.exception

        for block in self.exc_handlers:
            for leader in block.leaders:
                if leader.opcode != ops.exc_catch:
                    continue
                args = [arg.const for arg in leader.args[0]]
                if self._exc_match(args):
                    return leader

    # __________________________________________________________________
    # Generators

    def yieldfrom(self, op):
        pass # TODO:

    def yieldval(self, op):
        pass # TODO:
# Set unary, binary and compare operators.
# Each evaluator from `defs` is attached as a staticmethod so that the
# opcode dispatch in run() (getattr(interp, op.opcode)) finds it like any
# hand-written handler.
for opname, evaluator in chain(defs.unary.items(), defs.binary.items(),
                               defs.compare.items()):
    setattr(Interp, opname, staticmethod(evaluator))
#===------------------------------------------------------------------===
# Exceptions
#===------------------------------------------------------------------===
class ExceptionModel(object):
    """
    Model that governs the exception hierarchy
    """

    def exc_op_match(self, exc_type, op):
        """
        See whether `exception` matches `exc_type`
        """
        assert exc_type.opcode == 'constant'
        if op.opcode == 'constant':
            return self.exc_match(exc_type.const, op.const)
        raise NotImplementedError("Dynamic exception checks")

    def exc_match(self, exc_type, exception):
        """
        See whether `exception` matches `exc_type`

        NOTE(review): at the call site (Interp._exc_match) the first
        argument is the *live* exception and the second a candidate type,
        i.e. the parameter names appear swapped. isinstance covers the
        instance case, issubclass the case where the raised value is
        itself a class — confirm before renaming.
        """
        return (isinstance(exc_type, exception) or
                issubclass(exception, exc_type))

    def exc_instantiate(self, exc_name, *args):
        """
        Instantiate an exception
        """
        # `exceptions` is the py2 module, falling back to builtins on py3.
        exc_type = getattr(exceptions, exc_name)
        return exc_type(*args)
#===------------------------------------------------------------------===
# Run
#===------------------------------------------------------------------===
class InterpArgLoader(ArgLoader):
    """ArgLoader specialization that resolves IR values for the interpreter."""

    def load_GlobalValue(self, arg):
        """Resolve a non-external global to its constant value."""
        assert not arg.external, "Not supported yet"
        return arg.value.const

    def load_Undef(self, arg):
        """Undefined IR values map to the Undef sentinel."""
        return Undef
def run(func, env=None, exc_model=None, _state=None, args=(),
        tracer=tracing.DummyTracer()):
    """
    Interpret `func` with concrete argument values `args`.

    Parameters
    ----------
    func : IR function to interpret (one value per func.args required)
    env : optional environment dict; key "interp.handlers" may map opcode
        names to override handlers invoked as handler(interp, *loaded_args)
    exc_model : optional exception model (defaults to ExceptionModel())
    tracer : receives tracing events (Call, Op, Res, Exc, Ret)

    Raises UncaughtException(exc) for uncaught exceptions.

    NOTE(review): the default `tracer` is a single instance created at
    definition time and shared across calls -- safe only while DummyTracer
    is stateless; confirm before adding state to it.
    """
    assert len(func.args) == len(args)
    tracer.push(tracing.Call(func, args))
    # -------------------------------------------------
    # Set up interpreter
    valuemap = dict(zip(func.argnames, args)) # { '%0' : pyval }
    argloader = InterpArgLoader(valuemap)
    interp = Interp(func, env, exc_model or ExceptionModel(),
                    argloader, tracer)
    if env:
        handlers = env.get("interp.handlers") or {}
    else:
        handlers = {}
    # -------------------------------------------------
    # Eval loop
    curblock = None
    while True:
        # -------------------------------------------------
        # Block transitioning: notify the interp whenever control crosses
        # into a new basic block (needed e.g. for phi resolution).
        op = interp.op
        if op.block != curblock:
            interp.blockswitch(curblock, op.block, valuemap)
            curblock = op.block
        # -------------------------------------------------
        # Find handler: env-provided override first, else interp method.
        if op.opcode in handlers:
            fn = partial(handlers[op.opcode], interp)
        else:
            fn = getattr(interp, op.opcode)
        # -------------------------------------------------
        # Load arguments
        args = argloader.load_args(op)
        # -------------------------------------------------
        # Execute...
        tracer.push(tracing.Op(op, args))
        oldpc = interp.pc
        try:
            result = fn(*args)
        except UncaughtException as e:
            tracer.push(tracing.Exc(e))
            raise
        valuemap[op.result] = result
        tracer.push(tracing.Res(op, args, result))
        # -------------------------------------------------
        # Advance PC: an op that changed the pc (jump/exception) already
        # points at the next instruction; pc == -1 signals a return.
        if oldpc == interp.pc:
            interp.incr_pc()
        elif interp.pc == -1:
            # Returning...
            tracer.push(tracing.Ret(result))
            return result
|
from curator import es_repo_mgr
from mock import patch, Mock
from . import CuratorTestCase
class TestRepoMgr(CuratorTestCase):
    def test_repository_will_be_created_and_listed_and_deleted(self):
        """Round-trip a snapshot repository: create, read back, delete."""
        repo_name = self.args['repository']
        es_repo_mgr.create_repository(
            self.client, repository=repo_name, repo_type='fs',
            location=self.args['location'])
        created = es_repo_mgr.get_repository(self.client, repo_name)
        self.assertEqual('fs', created[repo_name]['type'])
        self.assertEqual(repo_name, list(created.keys())[0])
        es_repo_mgr.delete_repository(self.client, repository=repo_name)
        deleted = es_repo_mgr.get_repository(self.client, repo_name)
        self.assertEqual(None, deleted)
|
import enum
import re
from marshmallow import ValidationError
from webargs.flaskparser import abort, parser
@enum.unique
class Status(enum.Enum):
    """API body response statuses."""

    # Values are the literal strings serialized into JSON response bodies.
    CREATED = "created"
    SUCCESS = "success"
    EXPIRED = "expired"
    INVALID = "invalid"
    ERROR = "error"
@parser.error_handler
def handle_parsing_error(err, req, schema, *, error_status_code, error_headers):
    """Abort the request with a JSON payload describing the parsing errors."""
    payload = {"details": err.messages, "status": Status.ERROR.value}
    abort(error_status_code, response=payload)
def strength(passphrase):
    """Passphrase strength validation handler.

    Minimum 8 characters containing at least one number, one lowercase
    and one uppercase letter. Empty/None passphrases are skipped here
    (presence is checked by the `passphrase` validator).

    Raises:
        ValidationError: if the passphrase does not meet the policy.
    """
    if passphrase:
        # Lookaheads: >=1 uppercase, >=1 lowercase, >=1 digit; length >= 8.
        regex = re.compile(r"^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9]).{8,}$")
        # Fix: the original `if not regex.search(...) is not None` was a
        # confusing double negative; this is the equivalent direct test.
        if regex.search(passphrase) is None:
            raise ValidationError(
                "Passphrase too weak. Minimum 8 characters, including "
                "1 number and 1 uppercase.")
def secret(secret):
    """Validate that a secret is present and short enough to store."""
    if not secret:
        raise ValidationError("Missing a secret to encrypt.")
    length = len(secret)
    if length > 150:
        raise ValidationError(
            "The secret needs to have less than 150 characters.")
def passphrase(passphrase):
    """Validate that a passphrase was supplied."""
    if passphrase:
        return
    raise ValidationError("Missing a passphrase.")
def days(days):
    """Validate the requested expiration period (at most one week)."""
    if days <= 7:
        return
    raise ValidationError(
        "The maximum number of days to keep the secret alive is 7.")
def slug(slug):
    """Validate that a secret link slug was supplied."""
    if slug:
        return
    raise ValidationError("Missing a secret link.")
|
#!/usr/bin/python -Wall
# ================================================================
# This is a utility module for helping to parse command-line arguments of
# the form "N=10", "eps=1e-6", etc.
#
# John Kerl
# kerl.john.r@gmail.com
# 2009-09-16
# ================================================================
import sys
import re
# ----------------------------------------------------------------
# arg_eq_match: if name_eq_value_pair is of the form "{name}={value}",
# scans the value with value_scanner, stores it in value_list[0], and
# returns True; otherwise returns False. E.g. called with
# name_eq_value_pair "N=10", name "N", and value_scanner int, it stores 10.
def arg_eq_match(name_eq_value_pair, name, value_scanner, value_list):
    """If name_eq_value_pair is of the form "{name}={value}", scans the value
    with value_scanner (e.g. int, float), stores the result in value_list[0],
    and returns True. Returns False when the name does not match, or when the
    value cannot be scanned (an error is printed to stderr in that case)."""
    name_eq = name + '='
    # Fix: a literal prefix test instead of an unescaped regex -- the old
    # re.match('^' + name_eq, ...) misbehaved when `name` contained regex
    # metacharacters such as '.' or '+'.
    if not name_eq_value_pair.startswith(name_eq):
        return False
    value_string = name_eq_value_pair[len(name_eq):]
    try:
        value = value_scanner(value_string)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt.
        print >> sys.stderr, 'Couldn\'t parse RHS of \'%s\' as %s.' \
            % (name_eq_value_pair, value_scanner.__name__)
        return False
    value_list[0] = value
    return True
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/enums/share_ex_raid_pass_result.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File descriptor reconstructed from the serialized .proto definition.
# (Generated code -- do not hand-edit anything but comments.)
DESCRIPTOR = _descriptor.FileDescriptor(
  name='pogoprotos/enums/share_ex_raid_pass_result.proto',
  package='pogoprotos.enums',
  syntax='proto3',
  serialized_pb=_b('\n0pogoprotos/enums/share_ex_raid_pass_result.proto\x12\x10pogoprotos.enums*\x98\x03\n\x15ShareExRaidPassResult\x12\x1c\n\x18SHARE_EX_RAID_PASS_UNSET\x10\x00\x12\x1e\n\x1aSHARE_EX_RAID_PASS_SUCCESS\x10\x01\x12-\n)ERROR_FRIEND_ALREADY_INVITED_TO_SAME_RAID\x10\x02\x12%\n!ERROR_EX_RAID_PASS_ALREADY_SHARED\x10\x03\x12\x38\n4ERROR_FRIEND_ALREADY_HAS_SHARED_EX_PASS_IN_INVENTORY\x10\x04\x12\"\n\x1e\x45RROR_TOO_LOW_FRIENDSHIP_LEVEL\x10\x05\x12\x1a\n\x16\x45RROR_FRIEND_NOT_FOUND\x10\x06\x12!\n\x1d\x45RROR_EX_RAID_ALREADY_STARTED\x10\x07\x12\x19\n\x15\x45RROR_EX_RAID_INVALID\x10\x08\x12 \n\x1c\x45RROR_EX_RAID_PASS_NOT_FOUND\x10\t\x12\x11\n\rERROR_UNKNOWN\x10\nb\x06proto3')
)
# Enum descriptor for ShareExRaidPassResult (values mirror the .proto file).
_SHAREEXRAIDPASSRESULT = _descriptor.EnumDescriptor(
  name='ShareExRaidPassResult',
  full_name='pogoprotos.enums.ShareExRaidPassResult',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='SHARE_EX_RAID_PASS_UNSET', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='SHARE_EX_RAID_PASS_SUCCESS', index=1, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ERROR_FRIEND_ALREADY_INVITED_TO_SAME_RAID', index=2, number=2,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ERROR_EX_RAID_PASS_ALREADY_SHARED', index=3, number=3,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ERROR_FRIEND_ALREADY_HAS_SHARED_EX_PASS_IN_INVENTORY', index=4, number=4,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ERROR_TOO_LOW_FRIENDSHIP_LEVEL', index=5, number=5,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ERROR_FRIEND_NOT_FOUND', index=6, number=6,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ERROR_EX_RAID_ALREADY_STARTED', index=7, number=7,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ERROR_EX_RAID_INVALID', index=8, number=8,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ERROR_EX_RAID_PASS_NOT_FOUND', index=9, number=9,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ERROR_UNKNOWN', index=10, number=10,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=71,
  serialized_end=479,
)
_sym_db.RegisterEnumDescriptor(_SHAREEXRAIDPASSRESULT)
ShareExRaidPassResult = enum_type_wrapper.EnumTypeWrapper(_SHAREEXRAIDPASSRESULT)
# Module-level aliases for each enum value, as emitted by protoc.
SHARE_EX_RAID_PASS_UNSET = 0
SHARE_EX_RAID_PASS_SUCCESS = 1
ERROR_FRIEND_ALREADY_INVITED_TO_SAME_RAID = 2
ERROR_EX_RAID_PASS_ALREADY_SHARED = 3
ERROR_FRIEND_ALREADY_HAS_SHARED_EX_PASS_IN_INVENTORY = 4
ERROR_TOO_LOW_FRIENDSHIP_LEVEL = 5
ERROR_FRIEND_NOT_FOUND = 6
ERROR_EX_RAID_ALREADY_STARTED = 7
ERROR_EX_RAID_INVALID = 8
ERROR_EX_RAID_PASS_NOT_FOUND = 9
ERROR_UNKNOWN = 10
DESCRIPTOR.enum_types_by_name['ShareExRaidPassResult'] = _SHAREEXRAIDPASSRESULT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# @@protoc_insertion_point(module_scope)
|
from django.db import models
from apps.carrito.models import carrito
from apps.producto.models import producto
from django.contrib import admin
# Create your models here.
class repartidor(models.Model):
    # Delivery person. NOTE(review): lowercase class names contradict
    # PEP 8/Django convention but are kept -- migrations reference them.
    id_repartidor = models.AutoField(primary_key=True)
    nombre = models.CharField(max_length=45)
    apellido = models.CharField(max_length=45)
class factura(models.Model):
    # Invoice: total price, date, and links to the cart and delivery person.
    id_factura = models.AutoField(primary_key=True)
    precio_total = models.IntegerField()
    fecha = models.DateField()
    id_carrito_carrito = models.ForeignKey(carrito, on_delete=models.CASCADE)
    id_repartidor_repartidor = models.ForeignKey(repartidor, on_delete=models.CASCADE)
class factura_producto(models.Model):
    # Invoice line item: join table between factura and producto.
    class Meta:
        # Each product appears at most once per invoice.
        unique_together = (('id_factura_factura', 'id_producto_producto'),)
    id_factura_factura = models.ForeignKey(factura, on_delete=models.CASCADE)
    id_producto_producto = models.ForeignKey(producto, on_delete=models.CASCADE)
    precio_total = models.IntegerField()
    cantidad = models.IntegerField()
class metodo_pago(models.Model):
    # Payment method used for an invoice.
    id_metodo = models.AutoField(primary_key=True)
    nombre_metodo = models.CharField(max_length=45)
    id_factura_factura = models.ForeignKey(factura, on_delete=models.CASCADE)
# NOTE(review): registering models inside models.py works, but the Django
# convention is to do this in the app's admin.py.
admin.site.register(repartidor)
admin.site.register(factura)
admin.site.register(factura_producto)
admin.site.register(metodo_pago)
|
import string
def get_free(used):
    """
    Returns the first lowercase ASCII letter not used yet.

    Args:
        used: iterable of used variable names

    Returns:
        some unused single-letter variable name

    Raises:
        ValueError: if all 26 lowercase letters are already in use
    """
    for candidate in string.ascii_lowercase:
        # Idiom fix: `x not in used` instead of `not x in used`.
        if candidate not in used:
            return candidate
    raise ValueError("no free variable name available")
class Var:
    """Variable term."""

    def __init__(self, name):
        self.name = name

    def replace(self, var, subst):
        """Return *subst* if this variable is named *var*, else self."""
        return subst if self.name == var else self

    def normalize(self):
        """A bare variable is already in normal form."""
        return self

    def __str__(self):
        return self.name

    def rename(self, old, new):
        """Rename this variable from *old* to *new*, mutating in place."""
        if self.name == old:
            self.name = new

    def safe(self, used=None):
        """Record this variable's name in *used* (the set is mutated)."""
        if used is None:
            used = set()
        used.add(self.name)
class Lambda:
    """
    Lambda abstraction term, representing (λx.A) where
    x - var (the bound variable name)
    A - body (a term)
    """

    def __init__(self, var, body):
        self.var = var
        self.body = body

    def replace(self, var, subst):
        """Return a new Lambda with *var* substituted inside the body."""
        return Lambda(self.var, self.body.replace(var, subst))

    def call(self, arg):
        """Beta-reduce: substitute *arg* for the bound variable in the body."""
        return self.body.replace(self.var, arg)

    def normalize(self):
        """Normalize the body; the abstraction itself is kept."""
        self.body.normalize()
        return self

    def __str__(self):
        return f"(λ{self.var}.{self.body})"

    def rename(self, old, new):
        """Rename a free variable; stop if *old* is shadowed by the binder."""
        if self.var != old:
            self.body.rename(old, new)

    def safe(self, used=None):
        """Alpha-rename the binder if it collides with a name in *used*."""
        if used is None:
            used = set()
        if self.var in used:
            previous = self.var
            self.var = get_free(used)
            self.body.rename(previous, self.var)
        used.add(self.var)
        self.body.safe(used)
class Call:
    """
    Function call (application) term, representing (A B) where
    A - func
    B - arg
    """

    def __init__(self, func, arg):
        self.func = func
        self.arg = arg

    def replace(self, var, subst):
        """Return a new Call with *var* substituted in both children."""
        return Call(self.func.replace(var, subst), self.arg.replace(var, subst))

    def normalize(self):
        """Normalize children, then beta-reduce if func became a Lambda."""
        self.func = self.func.normalize()
        self.arg = self.arg.normalize()
        if type(self.func) is Lambda:
            return self.func.call(self.arg).normalize()
        return self

    def __str__(self):
        return "({} {})".format(self.func, self.arg)

    def safe(self, used=None):
        """Propagate collision-avoiding renames into both children."""
        # Bug fix: this previously tested `if not used:`, which also replaced
        # an explicitly-passed *empty* set with a fresh one, so the caller's
        # set never received the names collected below. Var and Lambda test
        # for None only; match them.
        if used is None:
            used = set()
        self.func.safe(used)
        self.arg.safe(used)

    def rename(self, old, new):
        """Rename a variable in both children (in place)."""
        self.func.rename(old, new)
        self.arg.rename(old, new)
|
#
# This file is part of do-mpc
#
# do-mpc: An environment for the easy, modular and efficient implementation of
# robust nonlinear model predictive control
#
# Copyright (c) 2014-2019 Sergio Lucia, Alexandru Tatulea-Codrean
# TU Dortmund. All rights reserved
#
# do-mpc is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.
#
# do-mpc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with do-mpc. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from casadi import *
from casadi.tools import *
import pdb
import sys
sys.path.append('../../')
import do_mpc
import math
def xytri(t):
    """Point at time t on the unit-radius reference circle (period 1).

    NOTE: sin/cos come from the casadi star-import, so symbolic inputs
    would also work, though callers here pass plain floats.
    """
    radius = 1.0
    period = 1
    omega = 2 * math.pi / period
    # print(x, y)
    return radius * sin(omega * t), radius * cos(omega * t)
def template_mpc(model):
    """
    --------------------------------------------------------------------------
    template_mpc: tuning parameters

    Builds and configures the do-mpc MPC controller for `model`: quadratic
    tracking cost on (x_b, y_b) against the time-varying circular reference
    from xytri(), box bounds on states and inputs, and a tvp function that
    fills the reference over the prediction horizon.
    --------------------------------------------------------------------------
    """
    mpc = do_mpc.controller.MPC(model)
    setup_mpc = {
        'n_horizon': 100,
        'n_robust': 0,
        'open_loop': 0,
        't_step': 0.04,
        'state_discretization': 'collocation',
        'collocation_type': 'radau',
        'collocation_deg': 3,
        'collocation_ni': 1,
        'store_full_solution': True,
        # Use MA27 linear solver in ipopt for faster calculations:
        #'nlpsol_opts': {'ipopt.linear_solver': 'ma27'}
    }
    mpc.set_param(**setup_mpc)
    # mterm = 100*(model.aux['E_kin'] - model.aux['E_pot'])
    # lterm = (model.aux['E_kin'] - model.aux['E_pot'])+10*(model.x['pos']-model.tvp['pos_set'])**2 # stage cost
    # Tracking weights (q1, q2) and input weight r (r is currently unused;
    # input penalties are set via set_rterm below).
    q1 = 1000
    q2 = 1000
    r = 0.001
    lterm = q1 * (model.x['x_b'] - model.tvp['xtraj']) ** 2 + q2 * (model.x['y_b'] - model.tvp['ytraj']) ** 2
    mterm = lterm
    mpc.set_objective(mterm=mterm, lterm=lterm)
    mpc.set_rterm(ux=1e-4, uy=1e-4)
    mpc.bounds['lower', '_x', 'x_b'] = -1000
    mpc.bounds['upper', '_x', 'x_b'] = 1000
    mpc.bounds['lower', '_x', 'y_b'] = -1000
    mpc.bounds['upper', '_x', 'y_b'] = 1000
    mpc.bounds['lower', '_u', 'ux'] = -1000
    mpc.bounds['upper', '_u', 'ux'] = 1000
    mpc.bounds['lower', '_u', 'uy'] = -1000
    mpc.bounds['upper', '_u', 'uy'] = 1000
    tvp_template = mpc.get_tvp_template()
    # When to switch setpoint:
    t_switch = 4  # seconds
    ind_switch = t_switch // setup_mpc['t_step']
    def tvp_fun(t_ind):
        # NOTE(review): `ind` and `ind_switch` are left over from the
        # commented-out setpoint-switching logic and are currently unused.
        ind = t_ind // setup_mpc['t_step']
        # if ind <= ind_switch:
        #     tvp_template['_tvp',:, 'pos_set'] = -0.8
        # else:
        #     tvp_template['_tvp',:, 'pos_set'] = 0.8
        # Fill the circular reference for every step of the horizon.
        for k in range(setup_mpc['n_horizon'] + 1):
            t_pre = t_ind + k * setup_mpc['t_step']
            # tvp_template['_tvp',k, 'xtraj'] = -0.8
            tvp_template['_tvp', k, 'xtraj'], tvp_template['_tvp', k, 'ytraj'] = xytri(t_pre)
        return tvp_template
    mpc.set_tvp_fun(tvp_fun)
    mpc.setup()
    return mpc
|
# -*- encoding: utf-8 -*-
# @file: appstore
# @author: theol
# @Date: 2018/1/23 16:38
# @Updated: 2018/1/2316:38
import json
import requests
from requests.exceptions import RequestException
class InAppPurchaseValidationError(Exception):
    """Raised when an App Store receipt fails validation.

    The raw JSON response from Apple, when available, is kept on
    `raw_response` for the caller to inspect.
    """

    raw_response = None

    def __init__(self, raw_response=None):
        super(InAppPurchaseValidationError, self).__init__()
        self.raw_response = raw_response
# Status code Apple returns for a successful validation.
api_ok = 0
# NOTE(review): these are *shared* exception instances; validate() assigns
# raw_response onto them before raising, so concurrent use is unsafe.
api_result_errors = {
    21000: InAppPurchaseValidationError('Bad Json'),
    21002: InAppPurchaseValidationError('Bad Receipt data'),
    21003: InAppPurchaseValidationError('Unauthenticated receipt'),
    21004: InAppPurchaseValidationError('Unmatch shared secret'),
    21005: InAppPurchaseValidationError('Recipt Server not available'),
    21006: InAppPurchaseValidationError('Subscription has expired'),
    21007: InAppPurchaseValidationError('Test receipt sent to production env'),
    21008: InAppPurchaseValidationError('Production receipt sent to test env'),
    21010: InAppPurchaseValidationError('Unauthorized receipt')
}
# Apple's sandbox and production receipt-verification endpoints.
TEST_URL = 'https://sandbox.itunes.apple.com/verifyReceipt'
PROD_URL = 'https://buy.itunes.apple.com/verifyReceipt'
class AppStoreValidator(object):
    """
    Validates App Store in-app purchase receipts against Apple's
    verifyReceipt endpoints (sandbox or production).
    """

    bundle_id = None
    validate_url = None
    sandbox = None
    auto_retry_wrong_env_request = False

    def __init__(self, bundle_id, sandbox=False, auto_retry_wrong_env_request=False):
        self.bundle_id = bundle_id
        self.sandbox = sandbox
        if not self.bundle_id:
            raise InAppPurchaseValidationError('`bundle_id` can not be empty')
        self.auto_retry_wrong_env_request = auto_retry_wrong_env_request
        self._adjust_url_by_sandbox()

    def validate(self, receipt, is_final=False, shared_secret=None):
        """
        Validate a user's in-app purchase receipt.

        :param receipt: receipt data (JSON string or parsed payload)
        :param is_final: whether `receipt` is already the final request body
        :param shared_secret: optional shared secret
        :return: Apple's response dict; raises InAppPurchaseValidationError
            (with `raw_response` set) on any non-OK status
        """
        # Pick the verification URL from the receipt's environment field.
        if not is_final:
            if isinstance(receipt, (str,)):
                receipt = json.loads(receipt)
            env = receipt.get('environment', 'prod').lower()
            self.sandbox = True if env == 'sandbox' else False
            receipt_json = {'receipt-data': receipt}
        else:
            receipt_json = receipt
        if shared_secret:
            receipt_json['password'] = shared_secret
        api_response = self.post_json(receipt_json)
        status = api_response['status']
        # 21007/21008 mean the receipt hit the wrong environment; optionally
        # flip sandbox/production and retry once.
        if status in [21007, 21008] and self.auto_retry_wrong_env_request:
            self.sandbox = not self.sandbox
            api_response = self.post_json(receipt_json)
            status = api_response['status']
        if status != api_ok:
            # NOTE(review): mutates a shared module-level error instance from
            # api_result_errors -- not safe under concurrent use.
            error = api_result_errors.get(status, InAppPurchaseValidationError('Unknown API status'))
            error.raw_response = api_response
            raise error
        return api_response

    def post_json(self, request_json):
        # Re-resolve the URL in case self.sandbox was flipped by validate().
        self._adjust_url_by_sandbox()
        try:
            return requests.post(self.url, json=request_json).json()
        except (ValueError, RequestException):
            raise InAppPurchaseValidationError('HTTP error')

    def _adjust_url_by_sandbox(self):
        """
        Select the App Store verification URL according to `self.sandbox`.
        """
        self.url = TEST_URL if self.sandbox else PROD_URL
if __name__ == '__main__':
    # Smoke test: validate a hand-written sandbox receipt. This performs a
    # real HTTP request to Apple's sandbox endpoint.
    bundle_id = 'com.focusonecc.test'
    validator = AppStoreValidator(bundle_id)
    receipt_str = '''
        {
            "signature" : "XXXX",
            "purchase-info" : "XXXX",
            "environment" : "Sandbox",
            "pod" : "100",
            "signing-status" : "0"
        }
        '''
    try:
        validate_result = validator.validate(receipt_str)
    except InAppPurchaseValidationError as ex:
        # Expected for dummy data: print Apple's raw error response.
        response_from_apple = ex.raw_response
        print(response_from_apple)
        pass
|
/home/runner/.cache/pip/pool/62/11/8e/0308778093ea17b7a6e57034ae6a51e36cf56cb87cd28a049730f252f9 |
# Copyright 2019 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A client that manages Cheeps Virtual Device on compute engine.
** CheepsComputeClient **
CheepsComputeClient derives from AndroidComputeClient. It manges a google
compute engine project that is setup for running Cheeps Virtual Devices.
It knows how to create a host instance from a Cheeps Stable Host Image, fetch
Android build, and start Android within the host instance.
** Class hierarchy **
base_cloud_client.BaseCloudApiClient
^
|
gcompute_client.ComputeClient
^
|
android_compute_client.AndroidComputeClient
^
|
cheeps_compute_client.CheepsComputeClient
"""
import logging
from acloud import errors
from acloud.internal import constants
from acloud.internal.lib import android_compute_client
from acloud.internal.lib import gcompute_client
logger = logging.getLogger(__name__)
class CheepsComputeClient(android_compute_client.AndroidComputeClient):
    """Client that manages Cheeps based Android Virtual Device.

    Cheeps is a VM that run Chrome OS which runs on GCE.
    """
    # This is the timeout for betty to start.
    BOOT_TIMEOUT_SECS = 15*60
    # This is printed by betty.sh.
    BOOT_COMPLETED_MSG = "VM successfully started"
    # systemd prints this if betty.sh returns nonzero status code.
    BOOT_FAILED_MSG = "betty.service: Failed with result 'exit-code'"

    def CheckBootFailure(self, serial_out, instance):
        """Overrides superclass. Determines if there's a boot failure."""
        if self.BOOT_FAILED_MSG in serial_out:
            raise errors.DeviceBootError("Betty failed to start")

    # pylint: disable=too-many-locals,arguments-differ
    def CreateInstance(self, instance, image_name, image_project, avd_spec):
        """ Creates a cheeps instance in GCE.

        Args:
            instance: name of the VM
            image_name: the GCE image to use
            image_project: project the GCE image is in
            avd_spec: An AVDSpec instance.
        """
        # Start from the base metadata and layer AVD-specific values on top.
        metadata = self._metadata.copy()
        metadata[constants.INS_KEY_AVD_TYPE] = constants.TYPE_CHEEPS
        # Update metadata by avd_spec
        if avd_spec:
            metadata["cvd_01_x_res"] = avd_spec.hw_property[constants.HW_X_RES]
            metadata["cvd_01_y_res"] = avd_spec.hw_property[constants.HW_Y_RES]
            metadata["cvd_01_dpi"] = avd_spec.hw_property[constants.HW_ALIAS_DPI]
            metadata[constants.INS_KEY_DISPLAY] = ("%sx%s (%s)" % (
                avd_spec.hw_property[constants.HW_X_RES],
                avd_spec.hw_property[constants.HW_Y_RES],
                avd_spec.hw_property[constants.HW_ALIAS_DPI]))
            # Optional credentials and Android build pinning.
            if avd_spec.username:
                metadata["user"] = avd_spec.username
                metadata["password"] = avd_spec.password
            if avd_spec.remote_image[constants.BUILD_ID]:
                metadata['android_build_id'] = avd_spec.remote_image[constants.BUILD_ID]
            if avd_spec.remote_image[constants.BUILD_TARGET]:
                metadata['android_build_target'] = avd_spec.remote_image[constants.BUILD_TARGET]
        # Delegate the actual GCE instance creation to the grandparent class.
        gcompute_client.ComputeClient.CreateInstance(
            self,
            instance=instance,
            image_name=image_name,
            image_project=image_project,
            disk_args=None,
            metadata=metadata,
            machine_type=self._machine_type,
            network=self._network,
            zone=self._zone)
|
import numpy as np
from scipy.spatial import Voronoi
from ..graph import Graph
from ..dual import DualGraph
from .voronoi import VoronoiGraph
from ...utils.jaggedarray import JaggedArray
# from .voronoi_helpers import (get_nodes, get_nodes_at_link,
# get_links_at_patch, get_corner_at_patch,
# get_corners_at_link)
from .voronoi_helpers import VoronoiConverter
class DualVoronoiGraph(DualGraph, VoronoiGraph):
    """Dual graph of a voronoi grid."""

    def __init__(self, nodes, min_cell_size=3, **kwds):
        """Create a voronoi grid.

        Parameters
        ----------
        nodes : tuple of array_like
            Coordinates of every node. First *y*, then *x*.
        min_cell_size : int, optional
            Minimum number of links for a patch to become a cell.

        Examples
        --------
        >>> from landlab.graph import DualVoronoiGraph
        >>> node_x = [0, 1, 2, 3,
        ...           0.2, 1.2, 2.2, 3.2,
        ...           0.4, 1.4, 2.4, 3.4]
        >>> node_y = [0, 0, 0, 0,
        ...           1, 1, 1, 1,
        ...           2, 2, 2, 2]
        >>> graph = DualVoronoiGraph((node_y, node_x))
        >>> graph.x_of_corner
        array([ 1.5,  2.5,  0.7,  1.7,  2.7,  0.7,  1.7,  2.7,  0.9,  1.9])
        >>> graph.y_of_corner # doctest: +NORMALIZE_WHITESPACE
        array([ 0.42, 0.42, 0.58, 0.58, 0.58, 1.42, 1.42, 1.42, 1.58,
                1.58])
        >>> graph.corners_at_face # doctest: +NORMALIZE_WHITESPACE
        array([[2, 0], [0, 3], [3, 1], [1, 4],
               [2, 5], [3, 6], [4, 7],
               [5, 8], [8, 6], [6, 9], [9, 7]])
        >>> graph.faces_at_corner # doctest: +NORMALIZE_WHITESPACE
        array([[ 1, 0, -1], [ 3, 2, -1],
               [ 4, 0, -1], [ 5, 1, 2], [ 6, 3, -1],
               [ 7, 4, -1], [ 9, 8, 5], [10, 6, -1],
               [ 7, 8, -1], [ 9, 10, -1]])
        >>> graph.node_at_cell
        array([5, 6])
        """
        # scipy's Voronoi wants (x, y) pairs; `nodes` arrives as (y, x).
        voronoi = Voronoi(list(zip(nodes[1], nodes[0])))
        converter = VoronoiConverter(voronoi, min_patch_size=min_cell_size)
        corners = converter.get_nodes()
        # Swap back to the (y, x) ordering used throughout landlab.
        corners = (corners[:, 1], corners[:, 0])
        faces = converter.get_nodes_at_link()
        cells = converter.get_links_at_patch()
        cells = [cell for cell in JaggedArray(cells)]
        node_at_cell = converter.get_corner_at_patch()
        nodes_at_face = converter.get_corners_at_link()
        # The dual graph is built from corners/faces/cells; sorting of the
        # primal grid is suppressed so the two stay aligned.
        self._dual = Graph(corners, links=faces,
                           patches=cells, sorting={'xy': False, 'ne': True,
                                                   'ccw': True})
        super(DualVoronoiGraph, self).__init__(
            nodes, cells=cells, node_at_cell=node_at_cell,
            nodes_at_face=nodes_at_face, sorting=False, **kwds)
|
def swap(a, x, y):
    """Exchange the elements of *a* at indices *x* and *y* in place."""
    tmp = a[x]
    a[x] = a[y]
    a[y] = tmp
def SelectionSort(a):
    """Sort list *a* in place using selection sort."""
    for i in range(len(a)):
        # Find the index of the smallest element in the unsorted suffix a[i:].
        smallest = i
        for j in range(i + 1, len(a)):
            if a[j] < a[smallest]:
                smallest = j
        # Inlined swap of a[i] and a[smallest].
        a[i], a[smallest] = a[smallest], a[i]
# main
# Read n integers interactively, sort them in place, and print the result.
n = int(input('Enter no of elements:'))
a = []
for i in range(0, n, +1):
    a.append(int(input("Enter an element:")))
print('Initial array:', a)
SelectionSort(a)
print('##########PERFORMING SELECTION SORT##########')
print('Selection sorted array:', a)
|
# Generated by Django 2.0.3 on 2018-04-07 15:36
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the `estudiante` field from the
    `certificados` model."""

    dependencies = [
        ('certificados', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='certificados',
            name='estudiante',
        ),
    ]
|
# Author: Martin McBride
# Created: 2022-01-09
# Copyright (C) 2022, Martin McBride
# License: MIT
from generativepy.drawing import make_image, setup
from generativepy.color import Color
from generativepy.geometry import Text, Transform, Line
def draw(ctx, pixel_width, pixel_height, frame_no, frame_count):
    """Draw two letters alongside their mirror images (x-flip and y-flip),
    with green lines marking the mirror axes."""
    setup(ctx, pixel_width, pixel_height, background=Color(0.8))
    blue = Color('blue')
    red = Color('red')
    green = Color('green')
    thickness = 4
    # 'F' mirrored horizontally about the vertical line x=100.
    Text(ctx).of('F', (40, 100)).size(100).fill(blue)
    Line(ctx).of_start_end((100, 20), (100, 110)).stroke(green, thickness)
    with Transform(ctx).scale(-1, 1, (100, 0)):
        Text(ctx).of('F', (40, 100)).size(100).fill(red)
    # 'W' mirrored vertically about the horizontal line y=60; the mirror
    # copy is drawn semi-transparent.
    Text(ctx).of('W', (240, 100)).size(100).fill(blue)
    Line(ctx).of_start_end((240, 70), (340, 70)).stroke(green, thickness)
    with Transform(ctx).scale(1, -1, (0, 60)):
        Text(ctx).of('W', (240, 100)).size(100).fill(red.with_a(0.6))

# Render a single 450x150 frame to mirror-tutorial.png.
make_image("mirror-tutorial.png", draw, 450, 150)
import math
import torch
from torch import nn
class ResidualBlock(nn.Module):
    """Conv-BN-PReLU-Conv-BN block with an identity skip connection."""

    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        # Two 3x3 convolutions; padding=1 preserves the spatial size.
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(channels)
        self.prelu = nn.PReLU(channels)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(channels)

    def forward(self, x):
        identity = x
        out = self.prelu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return out + identity
class UpsampleBLock(nn.Module):
    """Upsample spatially by *up_scale* via sub-pixel convolution."""

    def __init__(self, in_channels, up_scale):
        super(UpsampleBLock, self).__init__()
        # Expand channels by up_scale**2 so PixelShuffle can trade them
        # for spatial resolution.
        self.conv = nn.Conv2d(in_channels, in_channels * up_scale ** 2,
                              kernel_size=3, padding=1)
        self.pixel_shuffle = nn.PixelShuffle(up_scale)
        self.prelu = nn.PReLU(in_channels)

    def forward(self, x):
        return self.prelu(self.pixel_shuffle(self.conv(x)))
class Generator(nn.Module):
    """SRGAN generator: input conv, residual trunk, and x2 upsampling stages."""

    def __init__(self, scale_factor, num_residual=16):
        # Number of x2 sub-pixel upsampling stages needed for scale_factor.
        upsample_block_num = int(math.log(scale_factor, 2))
        super(Generator, self).__init__()
        self.block_in = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=9, padding=4),
            nn.PReLU(64)
        )
        self.blocks = nn.Sequential(
            *[ResidualBlock(64) for _ in range(num_residual)])
        self.block_out = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64)
        )
        stages = [UpsampleBLock(64, 2) for _ in range(upsample_block_num)]
        stages.append(nn.Conv2d(64, 3, kernel_size=9, padding=4))
        self.upsample = nn.Sequential(*stages)

    def forward(self, x):
        features = self.block_in(x)
        trunk = self.block_out(self.blocks(features))
        # Long skip connection around the residual trunk, then upsample.
        return torch.tanh(self.upsample(trunk + features))
class Discriminator(nn.Module):
    """SRGAN discriminator: VGG-style conv stack ending in a scalar score."""

    def __init__(self):
        super(Discriminator, self).__init__()
        layers = [
            nn.Conv2d(3, 64, kernel_size=3, padding=1),
            nn.LeakyReLU(0.2),
        ]
        # Alternating stride-2 / stride-1 conv blocks; channels double at
        # each stride-1 step. Order matches the original nn.Sequential, so
        # state_dict keys (net.0, net.1, ...) are unchanged.
        plan = [(64, 64, 2), (64, 128, 1), (128, 128, 2), (128, 256, 1),
                (256, 256, 2), (256, 512, 1), (512, 512, 2)]
        for cin, cout, step in plan:
            layers.extend([
                nn.Conv2d(cin, cout, kernel_size=3, stride=step, padding=1),
                nn.BatchNorm2d(cout),
                nn.LeakyReLU(0.2),
            ])
        layers.extend([
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(512, 1024, kernel_size=1),
            nn.LeakyReLU(0.2),
            nn.Conv2d(1024, 1, kernel_size=1),
        ])
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        scores = self.net(x)
        return torch.sigmoid(scores.view(x.size(0)))
if __name__ == "__main__":
    from torchsummary import summary
    # Use `device` to choose whether the network runs on GPU or CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Print a layer-by-layer summary of the x4 generator for a 56x56 input.
    model = Generator(4).to(device)
    summary(model, input_size=(3,56,56))
|
from math import gcd
# Read N, H and the N integers A from stdin (competitive-programming format).
N, H, *A = map(int, open(0).read().split())
# Strip from H the factors it shares with each a; if all of H's prime-power
# content is covered by the gcds in one pass, H collapses to 1 -> "YES".
for a in A:
    t = gcd(a, H)
    H //= t
    if H == 1:
        break
if H == 1:
    print('YES')
else:
    print('NO')
|
from .UnsupervisedRanker import MasseyRanker, ColleyRanker, KeenerRanker, MarkovRanker, ODRanker, DifferenceRanker
from .TimeSeriesRanker import EloRanker, TrueSkillRanker, GlickoRanker

# Public API of the package: all rankers re-exported above.
__all__ = ["MasseyRanker", "ColleyRanker", "KeenerRanker", "MarkovRanker", "ODRanker", "DifferenceRanker", "EloRanker", "TrueSkillRanker", "GlickoRanker"]
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
def LoadSupport(input_api):
  """Lazily import telemetry's cloud_storage module, caching it in globals().

  Subsequent calls return the cached module without touching sys.path again.
  """
  if 'cloud_storage' not in globals():
    # Avoid leaking changes to global sys.path.
    _old_sys_path = sys.path
    try:
      telemetry_path = os.path.join(os.path.dirname(os.path.dirname(
          input_api.PresubmitLocalPath())), 'telemetry')
      sys.path = [telemetry_path] + sys.path
      from telemetry.page import cloud_storage
      globals()['cloud_storage'] = cloud_storage
    finally:
      sys.path = _old_sys_path
  return globals()['cloud_storage']
def _GetFilesNotInCloud(input_api):
  """Finds affected .sha1 hash files whose data is absent from Cloud Storage.

  (The previous docstring was copy-pasted from the uploader; this function
  uploads nothing.)

  Returns:
    A list of (hash_path, file_hash) pairs needing upload.
  """
  hash_paths = []
  for affected_file in input_api.AffectedFiles(include_deletes=False):
    hash_path = affected_file.AbsoluteLocalPath()
    _, extension = os.path.splitext(hash_path)
    if extension == '.sha1':
      hash_paths.append(hash_path)
  if not hash_paths:
    return []
  cloud_storage = LoadSupport(input_api)
  # Look in both buckets, in case the user uploaded the file manually. But this
  # script focuses on WPR archives, so it only uploads to the internal bucket.
  hashes_in_cloud_storage = cloud_storage.List(cloud_storage.PUBLIC_BUCKET)
  try:
    hashes_in_cloud_storage += cloud_storage.List(cloud_storage.INTERNAL_BUCKET)
  except (cloud_storage.PermissionError, cloud_storage.CredentialsError):
    pass
  files = []
  for hash_path in hash_paths:
    file_hash = cloud_storage.ReadHash(hash_path)
    if file_hash not in hashes_in_cloud_storage:
      files.append((hash_path, file_hash))
  return files
def _SyncFilesToCloud(input_api, output_api):
  """Uploads new data files to Cloud Storage.

  For every affected .sha1 file whose hash is not already in Cloud Storage,
  validates the hash file and the local data file, asks the user which
  bucket to use, and uploads the data file. Returns a list of presubmit
  results (errors and notifications).
  """
  cloud_storage = LoadSupport(input_api)
  results = []
  for hash_path, file_hash in _GetFilesNotInCloud(input_api):
    file_path, _ = os.path.splitext(hash_path)
    # A SHA-1 digest is exactly 40 hex characters. The previous pattern
    # ([A-Za-z0-9]) also accepted non-hex letters such as 'z'.
    if not re.match('^([A-Fa-f0-9]{40})$', file_hash):
      results.append(output_api.PresubmitError(
          'Hash file does not contain a valid SHA-1 hash: %s' % hash_path))
      continue
    if not os.path.exists(file_path):
      results.append(output_api.PresubmitError(
          'Hash file exists, but file not found: %s' % hash_path))
      continue
    if cloud_storage.CalculateHash(file_path) != file_hash:
      results.append(output_api.PresubmitError(
          'Hash file does not match file\'s actual hash: %s' % hash_path))
      continue
    try:
      # NOTE: an empty answer matches 'public' via startswith.
      bucket_input = raw_input('Uploading to Cloud Storage: %s\n'
                               'Is this file [P]ublic or Google-[i]nternal?'
                               % file_path).lower()
      if 'public'.startswith(bucket_input):
        bucket = cloud_storage.PUBLIC_BUCKET
      elif ('internal'.startswith(bucket_input) or
            'google-internal'.startswith(bucket_input)):
        bucket = cloud_storage.INTERNAL_BUCKET
      else:
        results.append(output_api.PresubmitError(
            'Response was neither "public" nor "internal": %s' % bucket_input))
        return results
      cloud_storage.Insert(bucket, file_hash, file_path)
      results.append(output_api.PresubmitNotifyResult(
          'Uploaded file to Cloud Storage: %s' % file_path))
    # 'except X as e' works on Python 2.6+ and Python 3; the old
    # 'except X, e' form is Python-2-only syntax.
    except cloud_storage.CloudStorageError as e:
      results.append(output_api.PresubmitError(
          'Unable to upload to Cloud Storage: %s\n\n%s' % (file_path, e)))
  return results
def _VerifyFilesInCloud(input_api, output_api):
  """Fails the check if any .sha1 file's data file is missing from the cloud.

  Returns a PresubmitError for every affected hash file whose corresponding
  data file has not been uploaded to Cloud Storage.
  """
  results = []
  for hash_path, _ in _GetFilesNotInCloud(input_api):
    results.append(output_api.PresubmitError(
        'Attemping to commit hash file, but corresponding '
        'data file is not in Cloud Storage: %s' % hash_path))
  return results
def _IsNewJsonPageSet(affected_file):
return (affected_file.Action() == 'A' and
'page_sets/data/' not in affected_file.AbsoluteLocalPath()
and affected_file.AbsoluteLocalPath().endswith('.json'))
def _GetNewJsonPageSets(input_api):
  """Returns the affected files that are newly added JSON page sets."""
  return input_api.AffectedFiles(file_filter=_IsNewJsonPageSet)
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point on upload: sync new data files to Cloud Storage."""
  return _SyncFilesToCloud(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point on commit: verify data files are in Cloud Storage."""
  return _VerifyFilesInCloud(input_api, output_api)
|
"""Calculate the errors on the fitted quantities."""
import logging
from pathlib import Path
import numpy as np
import scipy.stats
import astropy.units as u
from astropy.table import Table
from .utils import load_yaml
from .fit_models import Log10Parabola
from .conf import config
log = logging.getLogger(__name__)
def main():
    """Compute an errorband for every configured dataset plus the joint fit."""
    output_dir = Path("results/errorbands")
    output_dir.mkdir(exist_ok=True, parents=True)
    for name in list(config.all_datasets) + ["joint"]:
        errorband_for_dataset(name)
def errorband_for_dataset(dataset, size=500, seed=0):
    """Sample the fitted model parameters and write flux error bands.

    Reads ``results/fit/fit_{dataset}.yaml``, draws ``size`` samples from
    the multivariate normal defined by the best-fit values and covariance,
    evaluates the Log10Parabola flux for each sample, and writes both the
    raw samples (FITS) and the derived SED error band (ECSV).

    Parameters
    ----------
    dataset : str
        Dataset name used in the input/output file paths.
    size : int
        Number of parameter samples to draw.
    seed : int
        RNG seed, fixed for reproducible sampling.
    """
    log.info(f"Computing errorband for {dataset}")
    fit = load_yaml(f"results/fit/fit_{dataset}.yaml")
    # Best-fit values, parameter names/units, and covariance from the fit file
    mean = np.array([_["value"] for _ in fit["parameters"]])
    names = [_["name"] for _ in fit["parameters"]]
    units = [u.Unit(_["unit"]) for _ in fit["parameters"]]
    cov = np.array(fit["covariance"])
    # Sample parameters
    rng = np.random.RandomState(seed=seed)
    parameter_samples = rng.multivariate_normal(mean, cov, size)
    table = Table(parameter_samples, names=names)
    # Compute fluxes
    energy = config.energy_bins
    dnde = []
    for parameter_sample in parameter_samples:
        # assumes the fit file lists the four Log10Parabola parameters in
        # constructor order -- TODO confirm against fit_models.Log10Parabola
        model = Log10Parabola(
            parameter_sample[0] * units[0],
            parameter_sample[1] * units[1],
            parameter_sample[2] * units[2],
            parameter_sample[3] * units[3],
        )
        dnde.append(model(energy))
    dnde = np.array(dnde) * u.Unit("cm-2 s-1 TeV-1")
    # energy[None, :] broadcasts the energy grid to one row per sample
    table["energy"] = energy[None, :]
    table["dnde"] = dnde
    path = f"results/errorbands/samples_{dataset}.fits.gz"
    log.info(f"Writing {path}")
    table.write(path, overwrite=True)
    # Compute error band as flux quantiles
    sigma = 1
    sed = Table()
    sed["energy"] = energy
    sed["dnde_mean"] = np.mean(dnde, axis=0)
    sed["dnde_median"] = np.percentile(dnde.value, 50, axis=0) * dnde.unit
    # +/- 1 sigma band via the normal CDF (~15.9% and ~84.1% percentiles)
    q = 100 * scipy.stats.norm.cdf(-sigma)
    sed["dnde_lo"] = np.percentile(dnde.value, q, axis=0) * dnde.unit
    q = 100 * scipy.stats.norm.cdf(+sigma)
    sed["dnde_hi"] = np.percentile(dnde.value, q, axis=0) * dnde.unit
    # Compute and add best-fit SED curve
    model = Log10Parabola(
        mean[0] * units[0], mean[1] * units[1], mean[2] * units[2], mean[3] * units[3]
    )
    # attach the covariance to the model -- presumably consumed by
    # downstream plotting code; verify against callers
    model.parameters.covariance = cov
    sed["dnde_fit"] = model(energy)
    # Add E^2 dN/dE columns in erg cm-2 s-1 alongside the dnde columns
    for name in ["fit", "mean", "median", "lo", "hi"]:
        value = (energy ** 2) * sed[f"dnde_{name}"]
        sed[f"e2dnde_{name}"] = value.to("erg cm-2 s-1")
    for name in sed.columns:
        sed[name].format = "3g"
    path = f"results/errorbands/sed_{dataset}.ecsv"
    log.info(f"Writing {path}")
    sed.write(path, overwrite=True)
|
#!/usr/bin/env python
"""
This file takes in a list of gerrit changes to build into the supplied OBS
project.
"""
import argparse
import contextlib
import glob
import os
import re
import shutil
import sys
import tempfile
import time
import urllib
import sh
try:
from xml.etree import cElementTree as ET
except ImportError:
import cElementTree as ET
sys.path.append(os.path.dirname(__file__))
from gerrit_settings import gerrit_project_map, obs_project_settings # noqa: E402
from gerrit import GERRIT_URL, GerritChange, GerritApiCaller # noqa: E402
@contextlib.contextmanager
def cd(dir):
    """Context manager: temporarily change the working directory to *dir*."""
    original_cwd = os.getcwd()
    try:
        os.chdir(dir)
        yield
    finally:
        # Always restore the previous working directory, even on error.
        os.chdir(original_cwd)
def cleanup_path(path):
    """Recursively delete *path* if it exists; a missing path is a no-op."""
    if not os.path.exists(path):
        return
    shutil.rmtree(path)
class OBSPackage:
    """
    Manage the workspace of a package.

    Maintains a local git clone of a gerrit project inside
    ``source_workspace`` and merges candidate gerrit changes onto a
    throwaway test branch based on ``target_branch``.
    """
    def __init__(self, gerrit_project, url, target_branch, source_workspace):
        # Name of the project in gerrit
        self.gerrit_project = gerrit_project
        # OBS package name mapped from the gerrit project name
        self.name = gerrit_project_map()[gerrit_project]
        self.url = url
        self.target_branch = target_branch
        # Local branch onto which candidate changes get merged
        self.test_branch = 'test-merge'
        self.source_workspace = source_workspace
        self.source_dir = os.path.join(
            self.source_workspace, '%s.git' % self.gerrit_project)
        self._workspace_ready = False
        self._applied_changes = set()
    def prep_workspace(self):
        """Clone the gerrit project (once) and check out the test branch."""
        if self._workspace_ready:
            return
        with cd(self.source_workspace):
            if not os.path.exists('%s.git/.git' % self.gerrit_project):
                print("Cloning gerrit project %s" % self.gerrit_project)
                sh.git('clone', self.url, '%s.git' % self.gerrit_project)
            with cd(self.source_dir):
                # If another change is already checked out on this branch,
                # don't clobber it. This shouldn't happen when building in a clean
                # workspace so long as there is only one Package per
                # gerrit_project.
                try:
                    sh.git('checkout', self.test_branch)
                except sh.ErrorReturnCode_1:
                    # Branch does not exist yet: create it from the target
                    sh.git('checkout', '-b', self.test_branch,
                           'origin/%s' % self.target_branch)
        self._workspace_ready = True
    def add_change(self, change):
        """
        Merge a given GerritChange into the git source_workspace if possible
        """
        print("Attempting to add %s to %s" % (change, self))
        if change in self._applied_changes:
            print("Change %s has already been applied" % change)
            return
        # All changes applied to one package must target the same branch.
        if change.branch != self.target_branch:
            raise Exception(
                "Cannot merge change %s from branch %s onto target branch %s "
                "in package %s" %
                (change, change.branch, self.target_branch, self))
        # Check change isn't already merged.
        if change.status == "MERGED":
            print("Change %s has already been merged in gerrit" % change)
            return
        elif change.status == "ABANDONED":
            raise Exception("Can not merge abandoned change %s" % change)
        self.prep_workspace()
        with cd(self.source_dir):
            # If another change has already applied this change by having it as
            # one of its ancestry commits then the following merge will do a
            # harmless null operation
            print("Fetching ref %s" % change.ref)
            sh.git('fetch', self.url, change.ref)
            sh.git('merge', '--no-edit', 'FETCH_HEAD')
        self._applied_changes.add(change)
    def applied_change_numbers(self):
        """Return the applied change ids as a comma-separated string."""
        return ", ".join([change.id for change in self._applied_changes])
    def has_applied_changes(self):
        """True when at least one gerrit change has been merged locally."""
        return bool(self._applied_changes)
    def __repr__(self):
        return "<OBSPackage %s>" % self.name
def find_in_osc_file(description):
    """Decorator factory for OBSProject helpers that parse OBS files.

    The wrapped method may be called either with ``osc_data`` (already
    fetched file contents, parsed directly) or with ``osc_filename`` and
    ``package``, in which case the file is first fetched from the linked
    OBS project via ``osc cat``. When parsing yields nothing, a ValueError
    naming *description* and the file's web URL is raised.

    Note: the decorated functions are methods, so the first wrapped
    argument (``project``) receives ``self``.
    """
    def wrapper(find_func):
        def wrapped_f(project, osc_filename=None,
                      package=None, osc_data=None):
            # Caller already has the file contents: parse directly.
            if osc_data:
                return find_func(project, osc_data)
            # Otherwise fetch the file from the linked OBS project first.
            osc_data = sh.osc(
                '-A', 'https://api.suse.de', 'cat',
                project.obs_linked_project,
                package.name,
                osc_filename)
            osc_data_item = find_func(project, str(osc_data))
            if not osc_data_item:
                raise ValueError(
                    "Could not find a %s in "
                    "https://build.suse.de/package/view_file/%s/%s/%s"
                    % (description, project.obs_linked_project,
                       package.name, osc_filename))
            return osc_data_item
        return wrapped_f
    return wrapper
class OBSProject(GerritApiCaller):
    """
    Manage the OBS Project

    Wraps an auto-generated OBS test project that links against an
    existing OBS project and receives test builds of packages.
    """
    def __init__(self, obs_test_project_name, obs_linked_project,
                 obs_repository, obs_project_description):
        self.obs_test_project_name = obs_test_project_name
        self.obs_linked_project = obs_linked_project
        self.obs_repository = obs_repository
        self.obs_project_description = obs_project_description
        self._create_test_project()
        # OBSPackage instances added for building
        self.packages = set()
    def _create_test_project(self):
        """Create the linked OBS test project and schedule its auto-deletion."""
        repo_metadata = """
<project name="%(obs_test_project_name)s">
  <title>Autogenerated CI project</title>
  <description>
    %(obs_project_description)s
  </description>
  <link project="%(obs_linked_project)s"/>
  <person userid="opensuseapibmw" role="maintainer"/>
  <publish>
    <enable repository="standard"/>
  </publish>
  <repository name="standard" rebuild="direct" block="local"
              linkedbuild="localdep">
    <path project="%(obs_linked_project)s" repository="%(obs_repository)s"/>
    <arch>x86_64</arch>
  </repository>
</project>
""" % {
            'obs_test_project_name': self.obs_test_project_name,
            'obs_linked_project': self.obs_linked_project,
            'obs_repository': self.obs_repository,
            'obs_project_description': self.obs_project_description
        }
        # NOTE(review): meta.write() is handed a str; NamedTemporaryFile
        # defaults to binary mode, which only accepts this on Python 2 --
        # this script appears to target Python 2 (see urllib.quote_plus).
        with tempfile.NamedTemporaryFile() as meta:
            meta.write(repo_metadata)
            meta.flush()
            print("Creating test project %s linked to project %s" %
                  (self.obs_test_project_name, self.obs_linked_project))
            # Upload the project metadata through the OBS API
            sh.osc('-A', 'https://api.suse.de', 'api', '-T', meta.name,
                   '/source/%s/_meta' % self.obs_test_project_name)
            # File a delete request so the test project is removed after 30 days
            sh.osc('-A', 'https://api.suse.de', 'deleterequest',
                   self.obs_test_project_name, '--accept-in-hours', 720,
                   '-m', 'Auto delete after 30 days.')
    @find_in_osc_file('obs_scm filename')
    def _get_obsinfo_basename(self, service_def):
        """Extract the obs_scm 'filename' parameter from a _service file."""
        root = ET.fromstring(service_def)
        nodes = root.findall(
            './service[@name="obs_scm"]/param[@name="filename"]')
        if len(nodes) != 1 or not nodes[0].text:
            return None
        return nodes[0].text
    @find_in_osc_file('obsinfo commit value')
    def _get_obsinfo_commit(self, obsinfo):
        """Extract the single 'commit:' value from an .obsinfo file."""
        matches = re.findall('^commit: (\S+)$', obsinfo, re.MULTILINE)
        if len(matches) != 1:
            return None
        return matches[0]
    def get_target_branch_head(self, package):
        """Return the head commit of the package's target branch in gerrit.

        NOTE(review): urllib.quote_plus is Python-2-only (it moved to
        urllib.parse in Python 3).
        """
        gerrit_query = "/projects/{}/branches/{}".format(
            urllib.quote_plus('ardana/{}'.format(package.gerrit_project)),
            urllib.quote_plus(package.target_branch))
        head_commit = self._query_gerrit(gerrit_query)['revision']
        return head_commit
    def is_current(self, package):
        """True when the inherited OBS package already matches the gerrit
        branch head and no changes were applied locally."""
        if package.has_applied_changes():
            return False
        obsinfo_basename = self._get_obsinfo_basename('_service', package)
        ibs_package_commit = self._get_obsinfo_commit(
            '%s.obsinfo' % obsinfo_basename, package)
        gerrit_branch_commit = self.get_target_branch_head(package)
        return ibs_package_commit == gerrit_branch_commit
    def add_test_package(self, package):
        """
        Create a package in the OBS Project
        - Copy the given package into the OBS Project
        - Update the service file to use the local git checkout of the package
          source
        - Grab the local source
        - Commit the package to be built into the project
        """
        if self.is_current(package):
            print(
                "Skipping %s as the inherited package is the same."
                % package.name)
            return
        print("Creating test package %s" % package.name)
        package.prep_workspace()
        # Clean up any checkouts from previous builds
        cleanup_path(os.path.join(self.obs_test_project_name, package.name))
        # Copy the package from the upstream project into our teste project
        sh.osc('-A', 'https://api.suse.de', 'copypac', '--keep-link',
               self.obs_linked_project, package.name,
               self.obs_test_project_name)
        # Checkout the package from obs
        sh.osc('-A', 'https://api.suse.de', 'checkout',
               self.obs_test_project_name, package.name)
        # cd into the checked out package
        with cd(os.path.join(self.obs_test_project_name, package.name)):
            with open('_service', 'r+') as service_file:
                # Update the service file to use the git state in our workspace
                service_def = service_file.read()
                obsinfo_basename = self._get_obsinfo_basename(
                    osc_data=service_def)
                service_def = re.sub(
                    r'<param name="url">.*</param>',
                    '<param name="url">%s</param>' % package.source_dir,
                    service_def)
                service_def = re.sub(
                    r'<param name="revision">.*</param>',
                    '<param name="revision">%s</param>' % package.test_branch,
                    service_def)
                service_file.seek(0)
                service_file.write(service_def)
                service_file.truncate()
            # Run the osc service and commit the changes to OBS
            sh.osc('rm', glob.glob('%s*.obscpio' % obsinfo_basename))
            env = os.environ.copy()
            # TODO use proper api, once available from:
            # https://github.com/openSUSE/obs-service-tar_scm/issues/258
            # Workaround to make obs_scm work with a local path.
            # Otherwise it only works with remote URLs.
            env['TAR_SCM_TESTMODE'] = '1'
            sh.osc('service', 'disabledrun', _env=env)
            sh.osc('add', glob.glob('%s*.obscpio' % obsinfo_basename))
            sh.osc('commit', '-m',
                   'Testing gerrit changes applied to %s'
                   % package.applied_change_numbers())
        self.packages.add(package)
    def wait_for_package(self, package):
        """
        Wait for a particular package to complete building
        """
        print("Waiting for %s to build" % package.name)
        # cd into the checked out package
        with cd(os.path.join(self.obs_test_project_name, package.name)):
            # Poll until the build has been scheduled at all
            while 'unknown' in sh.osc('results'):
                print("Waiting for build to be scheduled")
                time.sleep(3)
            print("Waiting for build results")
            for attempt in range(3):
                results = sh.osc('results', '--watch')
                print("Build results: %s" % results)
                if 'broken' in results:
                    # Sometimes results --watch ends too soon, give it a few
                    # retries before actually failing
                    print("Sleeping for 10s before rechecking")
                    time.sleep(10)
                    continue
                else:
                    break
            if 'succeeded' not in results:
                print("Package build failed.")
                return False
        return True
    def wait_for_all_results(self):
        """
        Wait for all the packages to complete building
        """
        # Check all packages are built
        # NOTE(jhesketh): this could be optimised to check packages in
        # parallel. However, the worst case scenario at the moment is
        # "time for longest package" + "time for num_of_package checks" which
        # isn't too much more than the minimum
        # ("time for longest package" + "time for one check")
        for package in self.packages:
            result = self.wait_for_package(package)
            if not result:
                return False
        return True
    def cleanup_test_packages(self):
        """
        Removes from disk the osc copies of any packages
        """
        for package in self.packages:
            cleanup_path(
                os.path.join(self.obs_test_project_name, package.name))
def test_project_name(home_project, build_number):
    """Return the OBS test project name for this CI build,
    e.g. 'home:user:ardana-ci-42'."""
    return '%s:ardana-ci-%s' % \
        (home_project, build_number)
def build_test_packages(change_ids, obs_linked_project, home_project,
                        obs_repository, build_number):
    """Build OBS packages for the given gerrit changes and their dependencies.

    Creates a throwaway OBS test project under *home_project*, merges each
    change into its package's git workspace, adds every known package to
    the project, and waits for all builds to finish.

    Returns True when all packages built successfully, False otherwise.
    """
    print('Attempting to build packages for changes {}'.format(
        ', '.join(change_ids)))
    # The target branch associated with the first change is used for
    # all changes
    branch = None
    # Grab each change for the supplied change_ids
    changes = []
    for id in change_ids:
        c = GerritChange(id, branch=branch)
        branch = branch or c.branch
        changes.append(c)
        # Add the dependent changes to the changes list to process
        changes.extend(c.get_dependencies())
    # Use the default OBS linked project and repository configured for
    # the target branch, if not supplied as arguments
    project_settings = obs_project_settings()[branch]
    obs_linked_project = obs_linked_project or \
        project_settings['develproject']
    obs_repository = obs_repository or project_settings['repository']
    # The Jenkins workspace we are building in
    workspace = os.getcwd()
    # The location for package sources
    source_workspace = os.path.join(workspace, 'source')
    cleanup_path(source_workspace)
    if not os.path.exists(source_workspace):
        os.mkdir(source_workspace)
    obs_test_project_name = test_project_name(home_project, build_number)
    obs_test_project_description = "Packages built with gerrit changes: %s" % \
        (', '.join(change_ids).replace('/', '-'))
    obs_project = OBSProject(
        obs_test_project_name, obs_linked_project, obs_repository,
        obs_test_project_description)
    # Keep track of processed changes
    processed_changes = []
    # Keep track of the packages to build as a dict of
    # 'gerrit_project': Package()
    packages = {}
    # We process the supplied changes, as well as their dependencies.
    # If a change has already been processed we skip it to avoid circular
    # dependencies.
    for c in changes:
        if c in processed_changes:
            # Duplicate dependency, skipping..
            continue
        processed_changes.append(c)
        # skip packages that don't have asssociated RPMs
        if c.gerrit_project not in gerrit_project_map():
            print("Warning: Project %s has no RPM, Skipping"
                  % c.gerrit_project)
        else:
            # Create the package if it doesn't exist already
            if c.gerrit_project not in packages:
                # NOTE: The first change processed for a package determines
                # the target branch for that package. All subsquent
                # changes must match the target branch.
                packages[c.gerrit_project] = OBSPackage(
                    c.gerrit_project, c.url, c.branch, source_workspace)
            # Merge the change into the package
            packages[c.gerrit_project].add_change(c)
    # Also create packages for every other known gerrit project so the
    # test project contains a full package set built from the target branch
    for project_name, package in gerrit_project_map().items():
        if project_name in packages:
            continue
        url = GERRIT_URL + "/ardana/" + project_name
        packages[project_name] = OBSPackage(
            project_name, url, branch, source_workspace)
    # Add the packages into the obs project and begin building them
    for project_name, package in packages.items():
        obs_project.add_test_package(package)
    # Wait for, and grab, the obs results
    results = obs_project.wait_for_all_results()
    # Cleanup created files
    obs_project.cleanup_test_packages()
    cleanup_path(source_workspace)
    return results
def main():
    """Parse the command line and build the requested OBS test packages.

    Exits with status 0 on success and 1 when any package failed to build.
    """
    parser = argparse.ArgumentParser(
        description='Build OBS packages corresponding to one or more '
                    'Gerrit changes and their dependencies. '
                    'If --develproject or --repository are not supplied, they '
                    'will be determined automatically based on the Gerrit '
                    'change target branch and the gerrit-settings.json file')
    parser.add_argument('-c', '--changes', action='append', required=True,
                        help='Gerrit change number (e.g. 1234) or change '
                             'number and patchset number (e.g. 1234/2)')
    parser.add_argument('--homeproject', default=None, required=True,
                        help='Project in OBS that will act as the parent '
                             'project for the newly generated test project '
                             '(e.g. home:username)')
    parser.add_argument('--buildnumber', default='NA', required=False,
                        help='A unique number used for the build homeproject. '
                             'When ran from Jenkins this is the job build '
                             'number.')
    parser.add_argument('--develproject', default=None,
                        help='The OBS development project that will be linked '
                             'against (e.g. Devel:Cloud:9:Staging)')
    parser.add_argument('--repository', default=None,
                        help='Name of the repository in OBS against which to '
                             'build the test packages (e.g. SLE_12_SP4)')
    args = parser.parse_args()
    succeeded = build_test_packages(
        args.changes, args.develproject, args.homeproject, args.repository,
        args.buildnumber)
    sys.exit(0 if succeeded else 1)
if __name__ == '__main__':
    main()
|
# Copyright 2014-2015 Zuercher Hochschule fuer Angewandte Wissenschaften
# Copyright (c) 2013-2015, Intel Performance Learning Solutions Ltd, Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from distutils import dir_util
import json
from mako.template import Template # XXX unneeded
import os
import random
import shutil
import tempfile
import time
from threading import Thread
from urlparse import urlparse
import uuid
from occi.core_model import Resource, Link
from sm.config import CONFIG
from sm.log import LOG
from sm.retry_http import http_retriable_request
__author__ = 'andy'
# Scheme prefix used when building app URLs.
HTTP = 'http://'
# Polling/retry configuration for talking to the cloud controller.
# NOTE(review): CONFIG.get is called with a third positional argument that
# looks like a default value -- confirm sm.config.CONFIG supports this
# signature (stdlib ConfigParser.get does not take a positional default).
WAIT = int(CONFIG.get('cloud_controller', 'wait_time', 2000))
ATTEMPTS = int(CONFIG.get('cloud_controller', 'max_attempts', 5))
class ServiceParameters:
    """Loads and serves per-lifecycle-phase service parameters.

    Internal parameters are read from the JSON file referenced by the
    'service_params' option in the 'service_manager' config section.
    Client-supplied parameters can be merged in via add_client_params()
    and are rendered together with the internal ones by
    service_parameters().
    """
    def __init__(self):
        # parameters keyed by lifecycle state, plus 'client_params'
        self.service_params = {}
        service_params_file_path = CONFIG.get('service_manager', 'service_params', '')
        if len(service_params_file_path) > 0:
            try:
                # the with-block closes the file; no explicit close() needed
                with open(service_params_file_path) as svc_params_content:
                    self.service_params = json.load(svc_params_content)
            except ValueError:  # as e:
                LOG.error("Invalid JSON sent as service config file")
            except IOError:  # as e:
                LOG.error('Cannot find the specified parameters file: ' + service_params_file_path)
        else:
            LOG.warn("No service parameters file found in config file, setting internal params to empty.")
    def service_parameters(self, state='', content_type='text/occi'):
        """Render the parameters for *state* plus client params as a header.

        Returns a 'name=value, name=value' string for 'text/occi';
        unsupported content types are logged and yield None (unchanged
        existing behaviour).
        """
        # takes the internal parameters defined for the lifecycle phase...
        # and combines them with the client supplied parameters
        if content_type == 'text/occi':
            params = []
            # get the state specific internal parameters
            try:
                # Bug fix: copy the stored list -- appending client params
                # directly to self.service_params[state] accumulated them
                # into the internal state across calls.
                params = list(self.service_params[state])
            except KeyError:  # as err:
                LOG.warn('The requested states parameters are not available: "' + state + '"')
            # get the client supplied parameters if any
            try:
                for param in self.service_params['client_params']:
                    params.append(param)
            except KeyError:  # as err:
                LOG.info('No client params')
            header = ''
            for param in params:
                # string values are quoted; everything else rendered verbatim
                if param['type'] == 'string':
                    value = '"' + param['value'] + '"'
                else:
                    value = str(param['value'])
                header = header + param['name'] + '=' + value + ', '
            # drop the trailing ', '
            return header[0:-2]
        else:
            LOG.error('Content type not supported: ' + content_type)
    def add_client_params(self, params=None):
        """Store user-supplied parameters from an instantiation request.

        Values wrapped in single or double quotes are typed 'string'
        (quotes stripped); everything else is typed 'number'.
        """
        # Bug fix: the original used a mutable default argument ({}),
        # which is shared between calls; use None as the sentinel.
        if params is None:
            params = {}
        # adds user supplied parameters from the instantiation request of a service
        client_params = []
        for k, v in params.items():
            param_type = 'number'
            if (v.startswith('"') or v.startswith('\'')) and (v.endswith('"') or v.endswith('\'')):
                param_type = 'string'
                v = v[1:-1]
            param = {'name': k, 'value': v, 'type': param_type}
            client_params.append(param)
        self.service_params['client_params'] = client_params
if __name__ == '__main__':
    # Manual smoke test: build client params and render them as an OCCI
    # attribute header for the 'initialise' state.
    sp = ServiceParameters()
    cp = {
        'test': '1',
        'test.test': '"astring"'
    }
    sp.add_client_params(cp)
    p = sp.service_parameters('initialise')
    # print(p) works on Python 2 and 3; the original 'print p' statement
    # is Python-2-only syntax.
    print(p)
class AsychExe(Thread):
    """
    Only purpose of this thread is to execute a list of tasks sequentially
    as a background "thread".
    """
    def __init__(self, tasks, registry=None):
        super(AsychExe, self).__init__()
        self.registry = registry
        self.tasks = tasks

    def run(self):
        super(AsychExe, self).run()
        LOG.debug('Starting AsychExe thread')
        for task in self.tasks:
            entity, extras = task.run()
            if not self.registry:
                continue
            # Persist the task's result so later requests see the update.
            LOG.debug('Updating entity in registry')
            self.registry.add_resource(key=entity.identifier,
                                       resource=entity, extras=extras)
class Task:
    """Base class for a single service-lifecycle task.

    Subclasses implement run(), which performs the work for the given
    lifecycle state and returns the (entity, extras) pair.
    """
    def __init__(self, entity, extras, state):
        # OCCI entity the task operates on
        self.entity = entity
        # request context (tenant, tokens, service params, ...)
        self.extras = extras
        # lifecycle state name, e.g. 'initialise' or 'activate'
        self.state = state
        # set by subclasses when run() begins
        self.start_time = ''
    def run(self):
        # Bug fix: the original raised NotImplemented(), but NotImplemented
        # is a non-callable singleton, so calling it produced a TypeError
        # rather than the intended "subclass must override" exception.
        raise NotImplementedError()
# instantiate container
class InitSO(Task):
    """Task that creates the container ('app') hosting a Service Orchestrator."""
    def __init__(self, entity, extras):
        Task.__init__(self, entity, extras, state='initialise')
        # Northbound API endpoint of the CloudController, normalised to
        # carry no trailing slash
        self.nburl = CONFIG.get('cloud_controller', 'nb_api', '')
        if self.nburl[-1] == '/':
            self.nburl = self.nburl[0:-1]
        LOG.info('CloudController Northbound API: ' + self.nburl)
        if len(entity.attributes) > 0:
            LOG.info('Client supplied parameters: ' + entity.attributes.__repr__())
            # XXX check that these parameters are valid according to the kind specification
            self.extras['srv_prms'].add_client_params(entity.attributes)
        else:
            LOG.warn('No client supplied parameters.')
    def run(self):
        """Create the SO container; returns the updated (entity, extras)."""
        #LOG.debug('INIT SO START')
        self.start_time = time.time()
        self.extras['occi.init.starttime'] = self.start_time
        if not self.entity.extras:
            self.entity.extras = {}
        ops_version = self.__detect_ops_version()
        self.entity.extras['ops_version'] = ops_version
        self.entity.attributes['mcn.service.state'] = 'initialise'
        # create an app for the new SO instance
        LOG.debug('Creating SO container...')
        self.__create_app()
        # adding tenant to entity.extras for future checks later when retrieving resource
        self.entity.extras['tenant_name'] = self.extras['tenant_name']
        return self.entity, self.extras
    def __detect_ops_version(self):
        """Return 'v3' when the CC query interface advertises occi.app.image
        and occi.app.env, otherwise 'v2'."""
        # make a call to the cloud controller and based on the app kind, heuristically select version
        version = 'v2'
        heads = {
            'Content-Type': 'text/occi',
            'Accept': 'text/occi'
        }
        url = self.nburl + '/-/'
        LOG.debug('Requesting CC Query Interface: ' + url)
        LOG.info('Sending headers: ' + heads.__repr__())
        r = http_retriable_request('GET', url, headers=heads, authenticate=True)
        if r.headers['category'].find('occi.app.image') > -1 and r.headers['category'].find('occi.app.env') > -1:
            LOG.info('Found occi.app.image and occi.app.env - this is OpenShift V3')
            version = 'v3'
        else:
            LOG.info('This is OpenShift V2')
        return version
    def __create_app(self):
        """POST to the CC /app/ endpoint to create the SO container, then
        update the entity's identifier and attributes from the response."""
        # will generate an appname 24 chars long - compatible with v2 and v3
        # e.g. soandycd009b39c28790f3
        app_name = 'so' + self.entity.kind.term[0:4] + \
                   ''.join(random.choice('0123456789abcdef') for _ in range(16))
        heads = {'Content-Type': 'text/occi'}
        url = self.nburl + '/app/'
        if self.entity.extras['ops_version'] == 'v2':
            heads['category'] = 'app; scheme="http://schemas.ogf.org/occi/platform#", ' \
                                'python-2.7; scheme="http://schemas.openshift.com/template/app#", ' \
                                'small; scheme="http://schemas.openshift.com/template/app#"'
            heads['X-OCCI-Attribute'] = str('occi.app.name=' + app_name)
            LOG.debug('Ensuring SM SSH Key...')
            self.__ensure_ssh_key()
        elif self.entity.extras['ops_version'] == 'v3':
            # for OpSv3 bundle location is the repo id of the container image
            bundle_loc = CONFIG.get('service_manager', 'bundle_location', '')
            if bundle_loc == '':
                LOG.error('No bundle_location parameter supplied in sm.cfg')
                raise Exception('No bundle_location parameter supplied in sm.cfg')
            if bundle_loc.startswith('/'):
                LOG.warn('Bundle location does not look like an image reference!')
            LOG.debug('Bundle to execute: ' + bundle_loc)
            design_uri = CONFIG.get('service_manager', 'design_uri', '')
            if design_uri == '':
                raise Exception('No design_uri parameter supplied in sm.cfg')
            LOG.debug('Design URI: ' + design_uri)
            heads['category'] = 'app; scheme="http://schemas.ogf.org/occi/platform#"'
            # TODO provide a means to provide additional docker env params
            attrs = 'occi.app.name="' + app_name + '", ' + \
                    'occi.app.image="' + bundle_loc + '", ' + \
                    'occi.app.env="DESIGN_URI=' + design_uri + '"'
            heads['X-OCCI-Attribute'] = str(attrs)
        else:
            LOG.error('Unknown OpenShift version. ops_version: ' + self.entity.extras['ops_version'])
            raise Exception('Unknown OpenShift version. ops_version: ' + self.entity.extras['ops_version'])
        LOG.debug('Requesting container to execute SO Bundle: ' + url)
        LOG.info('Sending headers: ' + heads.__repr__())
        r = http_retriable_request('POST', url, headers=heads, authenticate=True)
        # the Location header points at the newly created app resource
        loc = r.headers.get('Location', '')
        if loc == '':
            LOG.error("No OCCI Location attribute found in request")
            raise AttributeError("No OCCI Location attribute found in request")
        self.entity.attributes['occi.so.url'] = loc
        app_uri_path = urlparse(loc).path
        LOG.debug('SO container created: ' + app_uri_path)
        # rewrite the entity identifier to live under this kind's location
        LOG.debug('Updating OCCI entity.identifier from: ' + self.entity.identifier + ' to: ' +
                  app_uri_path.replace('/app/', self.entity.kind.location))
        self.entity.identifier = app_uri_path.replace('/app/', self.entity.kind.location)
        LOG.debug('Setting occi.core.id to: ' + app_uri_path.replace('/app/', ''))
        self.entity.attributes['occi.core.id'] = app_uri_path.replace('/app/', '')
        # its a bit wrong to put this here, but we do not have the required information before.
        # this keeps things consistent as the timing is done right
        infoDict = {
            'so_id': self.entity.attributes['occi.core.id'].split('/'),
            'sm_name': self.entity.kind.term,
            'so_phase': 'init',
            'phase_event': 'start',
            'response_time': 0,
            'tenant': self.extras['tenant_name']
        }
        tmpJSON = json.dumps(infoDict)
        LOG.debug(tmpJSON)
        # OpSv2 only: get git uri. this is where our bundle is pushed to
        # XXX this is fugly
        # TODO use the same name for the app URI
        if self.entity.extras['ops_version'] == 'v2':
            self.entity.extras['repo_uri'] = self.__git_uri(app_uri_path)
        elif self.entity.extras['ops_version'] == 'v3':
            self.entity.extras['loc'] = self.__git_uri(app_uri_path)
    def __git_uri(self, app_uri_path):
        """Fetch the app's repo/URL OCCI attribute from the CC.

        Prefers occi.app.repo (v2 git push target) and falls back to
        occi.app.url; raises AttributeError when neither is present.
        """
        url = self.nburl + app_uri_path
        headers = {'Accept': 'text/occi'}
        LOG.debug('Requesting container\'s URL ' + url)
        LOG.info('Sending headers: ' + headers.__repr__())
        r = http_retriable_request('GET', url, headers=headers, authenticate=True)
        attrs = r.headers.get('X-OCCI-Attribute', '')
        if attrs == '':
            raise AttributeError("No occi attributes found in request")
        repo_uri = ''
        for attr in attrs.split(', '):
            if attr.find('occi.app.repo') != -1:
                repo_uri = attr.split('=')[1][1:-1]  # scrubs trailing wrapped quotes
                break
            elif attr.find('occi.app.url') != -1:
                repo_uri = attr.split('=')[1][1:-1]  # scrubs trailing wrapped quotes
                break
        if repo_uri == '':
            raise AttributeError("No occi.app.repo or occi.app.url attribute found in request")
        LOG.debug('SO container URL: ' + repo_uri)
        return repo_uri
    def __ensure_ssh_key(self):
        """Register the SM's public SSH key with OpenShift if none exists."""
        url = self.nburl + '/public_key/'
        heads = {'Accept': 'text/occi'}
        resp = http_retriable_request('GET', url, headers=heads, authenticate=True)
        locs = resp.headers.get('x-occi-location', '')
        # Split on spaces, test if there is at least one key registered
        if len(locs.split()) < 1:
            LOG.debug('No SM SSH registered. Registering default SM SSH key.')
            occi_key_name, occi_key_content = self.__extract_public_key()
            create_key_headers = {'Content-Type': 'text/occi',
                                  'Category': 'public_key; scheme="http://schemas.ogf.org/occi/security/credentials#"',
                                  'X-OCCI-Attribute': 'occi.key.name="' + occi_key_name + '", occi.key.content="' +
                                                      occi_key_content + '"'
                                  }
            http_retriable_request('POST', url, headers=create_key_headers, authenticate=True)
        else:
            LOG.debug('Valid SM SSH is registered with OpenShift.')
    def __extract_public_key(self):
        """Read the configured SSH public key file; returns (name, content).

        Raises when the config option is missing or the key is DSA
        (an RSA key is required).
        """
        ssh_key_file = CONFIG.get('service_manager', 'ssh_key_location', '')
        if ssh_key_file == '':
            raise Exception('No ssh_key_location parameter supplied in sm.cfg')
        LOG.debug('Using SSH key file: ' + ssh_key_file)
        with open(ssh_key_file, 'r') as content_file:
            content = content_file.read()
        # typical format: '<type> <base64-content> [<name>]'
        content = content.split()
        if content[0] == 'ssh-dsa':
            raise Exception("The supplied key is not a RSA ssh key. Location: " + ssh_key_file)
        key_content = content[1]
        key_name = 'servicemanager'
        if len(content) == 3:
            key_name = content[2]
        return key_name, key_content
# instantiate SO
class ActivateSO(Task):
    def __init__(self, entity, extras):
        # Activation deploys the SO bundle into the container that was
        # created during the 'initialise' phase.
        Task.__init__(self, entity, extras, state='activate')
        if self.entity.extras['ops_version'] == 'v2':
            # OpenShift v2: the bundle is delivered via git push, so the
            # git binary must be available locally.
            self.repo_uri = self.entity.extras['repo_uri']
            # netloc looks like 'user@host' -- keep only the host part
            self.host = urlparse(self.repo_uri).netloc.split('@')[1]
            if os.system('which git') != 0:
                raise EnvironmentError('Git is not available.')
        elif self.entity.extras['ops_version'] == 'v3':
            # OpenShift v3: use the container location recorded by InitSO
            self.host = self.entity.extras['loc']
def __is_complete(self, url):
# XXX copy/paste code - merge the two places!
heads = {
'Content-type': 'text/occi',
'Accept': 'application/occi+json',
'X-Auth-Token': self.extras['token'],
'X-Tenant-Name': self.extras['tenant_name'],
}
LOG.info('Checking app state at: ' + url)
LOG.info('Sending headers: ' + heads.__repr__())
r = http_retriable_request('GET', url, headers=heads, authenticate=True)
attrs = json.loads(r.content)
if len(attrs['attributes']) > 0:
attr_hash = attrs['attributes']
app_state = ''
try:
app_state = attr_hash['occi.app.state']
except KeyError:
pass
LOG.info('Current service state: ' + str(app_state))
if app_state == 'active':
# check if it returns something valid instead of 503
try:
tmpUrl = 'http://' + attr_hash['occi.app.url']
except KeyError:
LOG.info(('App is not ready. app url is not yet set.'))
return False
r = http_retriable_request('GET', tmpUrl, headers=heads, authenticate=True)
if r.status_code == 200:
LOG.info('App is ready')
elapsed_time = time.time() - self.extras['occi.init.starttime']
del self.extras['occi.init.starttime']
infoDict = {
'so_id': self.entity.attributes['occi.core.id'],
'sm_name': self.entity.kind.term,
'so_phase': 'init',
'phase_event': 'done',
'response_time': elapsed_time,
'tenant': self.extras['tenant_name']
}
tmpJSON = json.dumps(infoDict)
LOG.debug(tmpJSON)
return True
else:
LOG.info('App is not ready. app url returned: ' + r.status_code)
else:
LOG.info('App is not ready. Current state state: ' + app_state)
return False
def run(self):
# this is wrong but required...
if self.entity.extras['ops_version'] == 'v3':
url = self.entity.attributes['occi.so.url']
while not self.__is_complete(url):
time.sleep(3)
LOG.debug('ACTIVATE SO START')
self.start_time = time.time()
infoDict = {
'so_id': self.entity.attributes['occi.core.id'],
'sm_name': self.entity.kind.term,
'so_phase': 'activate',
'phase_event': 'start',
'response_time': 0,
'tenant': self.extras['tenant_name']
}
tmpJSON = json.dumps(infoDict)
LOG.debug(tmpJSON)
if self.entity.extras['ops_version'] == 'v2':
# get the code of the bundle and push it to the git facilities
# offered by OpenShift
LOG.debug('Deploying SO Bundle to: ' + self.repo_uri)
self.__deploy_app()
LOG.debug('Activating the SO...')
self.__init_so()
self.entity.attributes['mcn.service.state'] = 'activate'
return self.entity, self.extras
def __deploy_app(self):
"""
Deploy the local SO bundle
assumption here
- a git repo is returned
- the bundle is not managed by git
"""
# create temp dir...and clone the remote repo provided by OpS
tmp_dir = tempfile.mkdtemp()
LOG.debug('Cloning git repository: ' + self.repo_uri + ' to: ' + tmp_dir)
cmd = ' '.join(['git', 'clone', self.repo_uri, tmp_dir])
os.system(cmd)
# Get the SO bundle
bundle_loc = CONFIG.get('service_manager', 'bundle_location', '')
if bundle_loc == '':
raise Exception('No bundle_location parameter supplied in sm.cfg')
LOG.debug('Bundle to add to repo: ' + bundle_loc)
dir_util.copy_tree(bundle_loc, tmp_dir)
self.__add_openshift_files(bundle_loc, tmp_dir)
# add & push to OpenShift
os.system(' '.join(['cd', tmp_dir, '&&', 'git', 'add', '-A']))
os.system(' '.join(['cd', tmp_dir, '&&', 'git', 'commit', '-m', '"deployment of SO for tenant ' +
self.extras['tenant_name'] + '"', '-a']))
LOG.debug('Pushing new code to remote repository...')
os.system(' '.join(['cd', tmp_dir, '&&', 'git', 'push']))
shutil.rmtree(tmp_dir)
def __add_openshift_files(self, bundle_loc, tmp_dir):
# put OpenShift stuff in place
# build and pre_start_python comes from 'support' directory in bundle
LOG.debug('Adding OpenShift support files from: ' + bundle_loc + '/support')
# 1. Write build
LOG.debug('Writing build to: ' + os.path.join(tmp_dir, '.openshift', 'action_hooks', 'build'))
shutil.copyfile(bundle_loc+'/support/build', os.path.join(tmp_dir, '.openshift', 'action_hooks', 'build'))
# 1. Write pre_start_python
LOG.debug('Writing pre_start_python to: ' +
os.path.join(tmp_dir, '.openshift', 'action_hooks', 'pre_start_python'))
pre_start_template = Template(filename=bundle_loc+'/support/pre_start_python')
design_uri = CONFIG.get('service_manager', 'design_uri', '')
content = pre_start_template.render(design_uri=design_uri)
LOG.debug('Writing pre_start_python content as: ' + content)
pre_start_file = open(os.path.join(tmp_dir, '.openshift', 'action_hooks', 'pre_start_python'), "w")
pre_start_file.write(content)
pre_start_file.close()
os.system(' '.join(['chmod', '+x', os.path.join(tmp_dir, '.openshift', 'action_hooks', '*')]))
# example request to the SO
# curl -v -X PUT http://localhost:8051/orchestrator/default \
# -H 'Content-Type: text/occi' \
# -H 'Category: orchestrator; scheme="http://schemas.mobile-cloud-networking.eu/occi/service#"' \
# -H 'X-Auth-Token: '$KID \
# -H 'X-Tenant-Name: '$TENANT
# -H 'X-OCCI-Attribute: occi.mcn.app.url="http://"'
def __init_so(self):
url = HTTP + self.host + '/orchestrator/default'
heads = {
'Category': 'orchestrator; scheme="http://schemas.mobile-cloud-networking.eu/occi/service#"',
'Content-Type': 'text/occi',
'X-Auth-Token': self.extras['token'],
'X-Tenant-Name': self.extras['tenant_name'],
'X-OCCI-Attribute': 'occi.mcn.app.url="' + HTTP + self.host + '/orchestrator/default"'
}
occi_attrs = self.extras['srv_prms'].service_parameters(self.state)
if len(occi_attrs) > 0:
LOG.info('Adding service-specific parameters to call... X-OCCI-Attribute: ' + occi_attrs)
heads['X-OCCI-Attribute'] = occi_attrs
LOG.debug('Initialising SO with: ' + url)
LOG.info('Sending headers: ' + heads.__repr__())
http_retriable_request('PUT', url, headers=heads)
elapsed_time = time.time() - self.start_time
infoDict = {
'so_id': self.entity.attributes['occi.core.id'],
'sm_name': self.entity.kind.term,
'so_phase': 'activate',
'phase_event': 'done',
'response_time': elapsed_time,
'tenant': self.extras['tenant_name']
}
tmpJSON = json.dumps(infoDict)
LOG.debug(tmpJSON)
#LOG.debug('ACTIVATE SO DONE, elapsed: %f' % elapsed_time)
class DeploySO(Task):
    """Task that triggers the 'deploy' action on the SO and waits for its stack."""
    def __init__(self, entity, extras):
        Task.__init__(self, entity, extras, state='deploy')
        if self.entity.extras['ops_version'] == 'v2':
            self.repo_uri = self.entity.extras['repo_uri']
            # netloc has the form 'user@host'; keep only the host part
            self.host = urlparse(self.repo_uri).netloc.split('@')[1]
        elif self.entity.extras['ops_version'] == 'v3':
            self.host = self.entity.extras['loc']
    # example request to the SO
    # curl -v -X POST http://localhost:8051/orchestrator/default?action=deploy \
    #   -H 'Content-Type: text/occi' \
    #   -H 'Category: deploy; scheme="http://schemas.mobile-cloud-networking.eu/occi/service#"' \
    #   -H 'X-Auth-Token: '$KID \
    #   -H 'X-Tenant-Name: '$TENANT
    def run(self):
        """Send the deploy action and block until the stack reports complete."""
        # Deployment is done without any control by the client...
        # otherwise we won't be able to hand back a working service!
        #LOG.debug('DEPLOY SO START')
        self.start_time = time.time()
        # monitoring record: phase start
        infoDict = {
            'so_id': self.entity.attributes['occi.core.id'],
            'sm_name': self.entity.kind.term,
            'so_phase': 'deploy',
            'phase_event': 'start',
            'response_time': 0,
            'tenant': self.extras['tenant_name']
        }
        tmpJSON = json.dumps(infoDict)
        LOG.debug(tmpJSON)
        #LOG.debug('Deploying the SO bundle...')
        url = HTTP + self.host + '/orchestrator/default'
        params = {'action': 'deploy'}
        heads = {
            'Category': 'deploy; scheme="http://schemas.mobile-cloud-networking.eu/occi/service#"',
            'Content-Type': 'text/occi',
            'X-Auth-Token': self.extras['token'],
            'X-Tenant-Name': self.extras['tenant_name']}
        # optional service-specific OCCI attributes for this phase
        occi_attrs = self.extras['srv_prms'].service_parameters(self.state)
        if len(occi_attrs) > 0:
            LOG.info('Adding service-specific parameters to call... X-OCCI-Attribute:' + occi_attrs)
            heads['X-OCCI-Attribute'] = occi_attrs
        LOG.debug('Deploying SO with: ' + url)
        LOG.info('Sending headers: ' + heads.__repr__())
        http_retriable_request('POST', url, headers=heads, params=params)
        # also sleep here to keep phases consistent during greenfield
        while not self.deploy_complete(url):
            time.sleep(7)
        self.entity.attributes['mcn.service.state'] = 'deploy'
        LOG.debug('SO Deployed ')
        elapsed_time = time.time() - self.start_time
        # monitoring record: phase done
        infoDict = {
            'so_id': self.entity.attributes['occi.core.id'],
            'sm_name': self.entity.kind.term,
            'so_phase': 'deploy',
            'phase_event': 'done',
            'response_time': elapsed_time,
            'tenant': self.extras['tenant_name']
        }
        tmpJSON = json.dumps(infoDict)
        LOG.debug(tmpJSON)
        #LOG.debug('DEPLOY SO DONE, elapsed: %f' % elapsed_time)
        return self.entity, self.extras
    def deploy_complete(self, url):
        """Return True when the SO reports its stack CREATE/UPDATE_COMPLETE."""
        # XXX fugly - code copied from Resolver
        heads = {
            'Content-type': 'text/occi',
            'Accept': 'application/occi+json',
            'X-Auth-Token': self.extras['token'],
            'X-Tenant-Name': self.extras['tenant_name'],
        }
        LOG.info('checking service state at: ' + url)
        LOG.info('sending headers: ' + heads.__repr__())
        r = http_retriable_request('GET', url, headers=heads)
        attrs = json.loads(r.content)
        if len(attrs['attributes']) > 0:
            attr_hash = attrs['attributes']
            stack_state = ''
            try:
                stack_state = attr_hash['occi.mcn.stack.state']
            except KeyError:
                pass
            LOG.info('Current service state: ' + str(stack_state))
            if stack_state == 'CREATE_COMPLETE' or stack_state == 'UPDATE_COMPLETE':
                LOG.info('Stack is ready')
                return True
            else:
                LOG.info('Stack is not ready. Current state state: ' + stack_state)
        return False
class ProvisionSO(Task):
    """Task that triggers the 'provision' action once the SO's stack is deployed."""
    def __init__(self, entity, extras):
        Task.__init__(self, entity, extras, state='provision')
        if self.entity.extras['ops_version'] == 'v2':
            self.repo_uri = self.entity.extras['repo_uri']
            # netloc has the form 'user@host'; keep only the host part
            self.host = urlparse(self.repo_uri).netloc.split('@')[1]
        elif self.entity.extras['ops_version'] == 'v3':
            self.host = self.entity.extras['loc']
    def run(self):
        """Block until deployment completes, then send the provision action."""
        # this can only run until the deployment has complete!
        # this will block until run() returns
        #LOG.debug('PROVISION SO START')
        self.start_time = time.time()
        # monitoring record: phase start
        infoDict = {
            'so_id': self.entity.attributes['occi.core.id'],
            'sm_name': self.entity.kind.term,
            'so_phase': 'provision',
            'phase_event': 'start',
            'response_time': 0,
            'tenant': self.extras['tenant_name']
        }
        tmpJSON = json.dumps(infoDict)
        LOG.debug(tmpJSON)
        url = HTTP + self.host + '/orchestrator/default'
        # with stuff like this, we need to have a callback mechanism... this will block otherwise
        while not self.deploy_complete(url):
            time.sleep(13)
        params = {'action': 'provision'}
        heads = {
            'Category': 'provision; scheme="http://schemas.mobile-cloud-networking.eu/occi/service#"',
            'Content-Type': 'text/occi',
            'X-Auth-Token': self.extras['token'],
            'X-Tenant-Name': self.extras['tenant_name']}
        # optional service-specific OCCI attributes for this phase
        occi_attrs = self.extras['srv_prms'].service_parameters(self.state)
        if len(occi_attrs) > 0:
            LOG.info('Adding service-specific parameters to call... X-OCCI-Attribute: ' + occi_attrs)
            heads['X-OCCI-Attribute'] = occi_attrs
        LOG.debug('Provisioning SO with: ' + url)
        LOG.info('Sending headers: ' + heads.__repr__())
        http_retriable_request('POST', url, headers=heads, params=params)
        elapsed_time = time.time() - self.start_time
        # monitoring record: phase done
        infoDict = {
            'so_id': self.entity.attributes['occi.core.id'],
            'sm_name': self.entity.kind.term,
            'so_phase': 'provision',
            'phase_event': 'done',
            'response_time': elapsed_time,
            'tenant': self.extras['tenant_name']
        }
        tmpJSON = json.dumps(infoDict)
        LOG.debug(tmpJSON)
        #LOG.debug('PROVISION SO DONE, elapsed: %f' % elapsed_time)
        self.entity.attributes['mcn.service.state'] = 'provision'
        return self.entity, self.extras
    def deploy_complete(self, url):
        """Return True when the stack is complete; raise on CREATE_FAILED."""
        # XXX fugly - code copied from Resolver
        heads = {
            'Content-type': 'text/occi',
            'Accept': 'application/occi+json',
            'X-Auth-Token': self.extras['token'],
            'X-Tenant-Name': self.extras['tenant_name'],
        }
        LOG.info('checking service state at: ' + url)
        LOG.info('sending headers: ' + heads.__repr__())
        r = http_retriable_request('GET', url, headers=heads)
        attrs = json.loads(r.content)
        if len(attrs['attributes']) > 0:
            attr_hash = attrs['attributes']
            stack_state = ''
            try:
                stack_state = attr_hash['occi.mcn.stack.state']
            except KeyError:
                pass
            LOG.info('Current service state: ' + str(stack_state))
            if stack_state == 'CREATE_COMPLETE' or stack_state == 'UPDATE_COMPLETE':
                LOG.info('Stack is ready')
                return True
            elif stack_state == 'CREATE_FAILED':
                raise RuntimeError('Heat stack creation failed.')
            else:
                LOG.info('Stack is not ready. Current state state: ' + stack_state)
        return False
class RetrieveSO(Task):
    """Task that reads the SO's OCCI attributes back into the SM entity.

    Also assembles the service instance graph (SIG): every endpoint listed in
    mcn.so.svcinsts becomes a Resource linked to this entity in the registry.
    """
    def __init__(self, entity, extras):
        Task.__init__(self, entity, extras, 'retrieve')
        if self.entity.extras['ops_version'] == 'v2':
            self.repo_uri = self.entity.extras['repo_uri']
            # netloc has the form 'user@host'; keep only the host part
            self.host = urlparse(self.repo_uri).netloc.split('@')[1]
        elif self.entity.extras['ops_version'] == 'v3':
            self.host = self.entity.extras['loc']
        self.registry = self.extras['registry']
    def run(self):
        """Fetch the SO's attributes (only when the service is in a live state)."""
        # example request to the SO
        # curl -v -X GET http://localhost:8051/orchestrator/default \
        #   -H 'X-Auth-Token: '$KID \
        #   -H 'X-Tenant-Name: '$TENANT
        self.start_time = time.time()
        # monitoring record: phase start
        infoDict = {
            'so_id': self.entity.attributes['occi.core.id'],
            'sm_name': self.entity.kind.term,
            'so_phase': 'retrieve',
            'phase_event': 'start',
            'response_time': 0,
            'tenant': self.extras['tenant_name']
        }
        tmpJSON = json.dumps(infoDict)
        LOG.debug(tmpJSON)
        if self.entity.attributes['mcn.service.state'] in ['activate', 'deploy', 'provision', 'update']:
            heads = {
                'Content-Type': 'text/occi',
                'Accept': 'text/occi',
                'X-Auth-Token': self.extras['token'],
                'X-Tenant-Name': self.extras['tenant_name']}
            LOG.info('Getting state of service orchestrator with: ' + self.host + '/orchestrator/default')
            LOG.info('Sending headers: ' + heads.__repr__())
            r = http_retriable_request('GET', HTTP + self.host + '/orchestrator/default', headers=heads)
            # the SO returns its attributes as a comma-separated OCCI header
            attrs = r.headers['x-occi-attribute'].split(', ')
            for attr in attrs:
                kv = attr.split('=')
                if kv[0] != 'occi.core.id':
                    if kv[1].startswith('"') and kv[1].endswith('"'):
                        kv[1] = kv[1][1:-1]  # scrub off quotes
                    self.entity.attributes[kv[0]] = kv[1]
                    LOG.debug('OCCI Attribute: ' + kv[0] + ' --> ' + kv[1])
            # Assemble the SIG
            svcinsts = ''
            try:
                svcinsts = self.entity.attributes['mcn.so.svcinsts']
                del self.entity.attributes['mcn.so.svcinsts']  # remove this, not be be used anywhere else
            except KeyError:
                LOG.warn('There was no service instance endpoints - ignore if not a composition.')
                pass
            if self.registry is None:
                LOG.error('No registry!')
            if len(svcinsts) > 0:
                svcinsts = svcinsts.split()  # all instance EPs
                for svc_loc in svcinsts:
                    # TODO get the service instance resource representation
                    # source resource is self.entity
                    # key is '/<kind>/<id>' taken from the endpoint path
                    compos = svc_loc.split('/')
                    key = '/' + compos[3] + '/' + compos[4]
                    target = Resource(key, Resource.kind, [])  # target resource
                    target.attributes['mcn.sm.endpoint'] = svc_loc
                    self.registry.add_resource(key, target, None)
                    # link the SM entity to the service instance resource
                    key = '/link/'+str(uuid.uuid4())
                    link = Link(key, Link.kind, [], self.entity, target)
                    self.registry.add_resource(key, link, None)
                    self.entity.links.append(link)
        else:
            LOG.debug('Cannot GET entity as it is not in the activated, deployed or provisioned, updated state')
        elapsed_time = time.time() - self.start_time
        # monitoring record: phase done
        infoDict = {
            'so_id': self.entity.attributes['occi.core.id'],
            'sm_name': self.entity.kind.term,
            'so_phase': 'retrieve',
            'phase_event': 'done',
            'response_time': elapsed_time,
            'tenant': self.extras['tenant_name']
        }
        tmpJSON = json.dumps(infoDict)
        LOG.debug(tmpJSON)
        return self.entity, self.extras
# can only be executed when provisioning is complete
class UpdateSO(Task):
    """Task that pushes updated attributes down to the SO instance.

    Can only be executed when provisioning is complete. Completion of the
    update is monitored asynchronously by the module-level deploy_complete
    function, started in a background thread.
    """

    def __init__(self, entity, extras, updated_entity):
        Task.__init__(self, entity, extras, state='update')
        if self.entity.extras['ops_version'] == 'v2':
            self.repo_uri = self.entity.extras['repo_uri']
            # netloc has the form 'user@host'; keep only the host part.
            self.host = urlparse(self.repo_uri).netloc.split('@')[1]
        elif self.entity.extras['ops_version'] == 'v3':
            self.host = self.entity.extras['loc']
        self.new = updated_entity

    def run(self):
        """Send the update to the SO; polling for completion runs in a thread."""
        # take parameters from EEU and send them down to the SO instance
        # Trigger update on SO + service instance:
        #
        # $ curl -v -X POST http://localhost:8051/orchestrator/default \
        #    -H 'Content-Type: text/occi' \
        #    -H 'X-Auth-Token: '$KID \
        #    -H 'X-Tenant-Name: '$TENANT \
        #    -H 'X-OCCI-Attribute: occi.epc.attr_1="foo"'
        self.start_time = time.time()
        # monitoring record: phase start
        infoDict = {
            'so_id': self.entity.attributes['occi.core.id'],
            'sm_name': self.entity.kind.term,
            'so_phase': 'update',
            'phase_event': 'start',
            'response_time': 0,
            'tenant': self.extras['tenant_name']
        }
        tmpJSON = json.dumps(infoDict)
        LOG.debug(tmpJSON)
        url = HTTP + self.host + '/orchestrator/default'
        heads = {
            'Content-Type': 'text/occi',
            'X-Auth-Token': self.extras['token'],
            'X-Tenant-Name': self.extras['tenant_name']}
        occi_attrs = self.extras['srv_prms'].service_parameters(self.state)
        # BUG FIX: the original unconditionally prepended ', ' for each updated
        # attribute, so when occi_attrs was empty the header started with a
        # malformed leading comma. Collect fragments and join them instead.
        attr_parts = [occi_attrs] if len(occi_attrs) > 0 else []
        if len(occi_attrs) > 0:
            LOG.info('Adding service-specific parameters to call... X-OCCI-Attribute:' + occi_attrs)
        if len(self.new.attributes) > 0:
            LOG.info('Adding updated parameters... X-OCCI-Attribute: ' + self.new.attributes.__repr__())
            for k, v in self.new.attributes.items():
                attr_parts.append(k + '=' + v)
                # mirror the update onto the SM's own entity
                self.entity.attributes[k] = v
        if attr_parts:
            heads['X-OCCI-Attribute'] = ', '.join(attr_parts)
        LOG.debug('Updating (Provisioning) SO with: ' + url)
        LOG.info('Sending headers: ' + heads.__repr__())
        http_retriable_request('POST', url, headers=heads)
        self.entity.attributes['mcn.service.state'] = 'update'
        # start monitoring thread here; it logs the 'done' event when complete
        thread = Thread(target=deploy_complete, args=(url, self.start_time, self.extras, self.entity))
        thread.start()
        return self.entity, self.extras
def deploy_complete(url, start_time, extras, entity):
    """Poll *url* until the SO's stack reports complete, then log timing info.

    Runs in a background thread started by UpdateSO.run; polls every three
    seconds until occi.mcn.stack.state is CREATE_COMPLETE or UPDATE_COMPLETE,
    then emits the 'update done' monitoring record.
    """
    done = False
    while not done:
        # XXX fugly - code copied from Resolver
        heads = {
            'Content-type': 'text/occi',
            'Accept': 'application/occi+json',
            'X-Auth-Token': extras['token'],
            'X-Tenant-Name': extras['tenant_name'],
        }
        LOG.info('checking service state at: ' + url)
        LOG.info('sending headers: ' + heads.__repr__())
        r = http_retriable_request('GET', url, headers=heads)
        attrs = json.loads(r.content)
        if len(attrs['attributes']) > 0:
            attr_hash = attrs['attributes']
            # missing state is treated the same as 'not ready yet'
            stack_state = attr_hash.get('occi.mcn.stack.state', '')
            LOG.info('Current service state: ' + str(stack_state))
            if stack_state in ('CREATE_COMPLETE', 'UPDATE_COMPLETE'):
                LOG.info('Stack is ready')
                elapsed_time = time.time() - start_time
                # monitoring record: phase done
                infoDict = {
                    'so_id': entity.attributes['occi.core.id'],
                    'sm_name': entity.kind.term,
                    'so_phase': 'update',
                    'phase_event': 'done',
                    'response_time': elapsed_time,
                    'tenant': extras['tenant_name']
                }
                tmpJSON = json.dumps(infoDict)
                LOG.debug(tmpJSON)
                done = True
            else:
                LOG.info('Stack is not ready. Current state state: ' + stack_state)
        time.sleep(3)
class DestroySO(Task):
    """Task that disposes the SO instance and the container it runs in."""
    def __init__(self, entity, extras):
        Task.__init__(self, entity, extras, state='destroy')
        if self.entity.extras['ops_version'] == 'v2':
            self.repo_uri = self.entity.extras['repo_uri']
            # netloc has the form 'user@host'; keep only the host part
            self.host = urlparse(self.repo_uri).netloc.split('@')[1]
        elif self.entity.extras['ops_version'] == 'v3':
            self.host = self.entity.extras['loc']
        # north-bound API of the CloudController, used to delete the container
        self.nburl = CONFIG.get('cloud_controller', 'nb_api', '')
    def run(self):
        """Delete the SO over OCCI, then delete its container via the CC."""
        # 1. dispose the active SO, essentially kills the STG/ITG
        # 2. dispose the resources used to run the SO
        # example request to the SO
        # curl -v -X DELETE http://localhost:8051/orchestrator/default \
        #   -H 'X-Auth-Token: '$KID \
        #   -H 'X-Tenant-Name: '$TENANT
        self.start_time = time.time()
        # monitoring record: phase start
        infoDict = {
            'so_id': self.entity.attributes['occi.core.id'],
            'sm_name': self.entity.kind.term,
            'so_phase': 'destroy',
            'phase_event': 'start',
            'response_time': 0,
            'tenant': self.extras['tenant_name']
        }
        tmpJSON = json.dumps(infoDict)
        LOG.debug(tmpJSON)
        url = HTTP + self.host + '/orchestrator/default'
        heads = {'X-Auth-Token': self.extras['token'],
                 'X-Tenant-Name': self.extras['tenant_name']}
        # optional service-specific OCCI attributes for this phase
        occi_attrs = self.extras['srv_prms'].service_parameters(self.state)
        if len(occi_attrs) > 0:
            LOG.info('Adding service-specific parameters to call... X-OCCI-Attribute:' + occi_attrs)
            heads['X-OCCI-Attribute'] = occi_attrs
        LOG.info('Disposing service orchestrator with: ' + url)
        LOG.info('Sending headers: ' + heads.__repr__())
        http_retriable_request('DELETE', url, headers=heads)
        # the CC's app resource path mirrors the entity identifier
        url = self.nburl + self.entity.identifier.replace('/' + self.entity.kind.term + '/', '/app/')
        heads = {'Content-Type': 'text/occi',
                 'X-Auth-Token': self.extras['token'],
                 'X-Tenant-Name': self.extras['tenant_name']}
        LOG.info('Disposing service orchestrator container via CC... ' + url)
        LOG.info('Sending headers: ' + heads.__repr__())
        http_retriable_request('DELETE', url, headers=heads, authenticate=True)
        elapsed_time = time.time() - self.start_time
        # monitoring record: phase done
        infoDict = {
            'so_id': self.entity.attributes['occi.core.id'],
            'sm_name': self.entity.kind.term,
            'so_phase': 'destroy',
            'phase_event': 'done',
            'response_time': elapsed_time,
            'tenant': self.extras['tenant_name']
        }
        tmpJSON = json.dumps(infoDict)
        LOG.debug(tmpJSON)
        return self.entity, self.extras
|
import xml.etree.ElementTree as et
from cdxml_elements import *
class CDXML_Editor(object):
    """Assembles a CDXML reaction drawing from per-compound .cdxml files.

    Compounds are read from ``cdxml_files_path`` ('<compound>.cdxml'); the
    finished drawing is written to ``output_path`` by generate_file(). Layout
    elements (BOX, COMPOUND, TEXT, TRANSITION) come from cdxml_elements.
    """
    def __init__(self, cdxml_files_path=None, output_path=None):
        # Root layout container for everything added via append().
        self.container = BOX()
        self.cdxml_path = cdxml_files_path
        self.output_path = output_path
        if self.output_path:
            self.tree = et.ElementTree()
            # color '3' / bgcolor '2' index into the colortable built below
            self.cdxml = et.Element('CDXML', attrib={'color':'3','bgcolor':'2'})
            self.tree._setroot(self.cdxml)
            self.add_colortable()
            self.page = et.SubElement(self.cdxml,'page', attrib={
                'HeightPages': '1',
                'WidthPages': '1',
                'DrawingSpace':'poster'})
    def add_colortable(self):
        """Create the fixed colortable; indices 2-6 are referenced elsewhere."""
        self.colortable = et.SubElement(self.cdxml,'colortable')
        bg_color = et.SubElement(self.colortable,'color',attrib={'r':'1','g':'0.980','b':'0.941'}) #2 - white
        fg_color = et.SubElement(self.colortable,'color',attrib={'r':'0.200','g':'0.200','b':'0.200'}) #3 - black
        promiscuous = et.SubElement(self.colortable,'color',attrib={'r':'0.500','g':'0.500','b':'0.500'}) #4 - gray
        intermediates = et.SubElement(self.colortable,'color',attrib={'r':'0.200','g':'0.200','b':'0.800'}) #5 - blue
        target = et.SubElement(self.colortable,'color',attrib={'r':'0.800','g':'0.200','b':'0.200'}) #6 - red
        # next add_color() call appends index 7, 8, ...
        self.color_index = 6
    def append(self, box, arrange=""):
        """Add *box* to the root container with the given arrangement hint."""
        self.container.append(box, arrange=arrange)
    def parse_cdxml(self, compound):
        """Parse '<cdxml_path><compound>.cdxml'.

        Returns the first element of the parsed file, or (fallback) the bare
        compound name extracted from the path when parsing fails.
        """
        path = self.cdxml_path + compound + '.cdxml'
        output = ''
        # NOTE(review): bare except hides the real failure (missing file vs
        # malformed XML) — consider narrowing to (OSError, et.ParseError).
        try:
            output = et.parse(path).find('.//*')
        except:
            # Extract cpd_name from path if no cdxml file exists
            output = path.split('/')[-1][:-6]
        return output
    def get_cpd_cdxml(self, compound, id_offset=0, color="5"):
        """Return (element, last_id) for *compound*.

        On success the element is a COMPOUND built from the parsed structure;
        otherwise a BOX containing just the compound name as TEXT, with the
        id offset passed through unchanged.
        """
        # NOTE(review): bare except also swallows errors raised inside
        # COMPOUND itself — confirm this fallback is intended for those.
        try:
            cpd,label = self.parse_cdxml(compound)
            output = COMPOUND(cpd, label, id_offset=id_offset, color=color)
            last_id = int(output.last_id)
            return output, last_id
        except:
            cpd_name = self.parse_cdxml(compound)
            output = BOX()
            output.append(TEXT(cpd_name, color=color))
            return output, id_offset
    def add_reactants(self, reactants, previous_reactions, last_id):
        """Lay out reactants (gray '+'-joined for the first reaction, otherwise
        chained onto the previous reactions' product boxes). Returns last_id."""
        if len(previous_reactions) == 0: # First reaction
            for r_index in range(len(reactants)):
                rc, last_id = self.get_cpd_cdxml(reactants[r_index], id_offset=last_id, color="4")
                self.append(rc, arrange="right")
                if r_index < len(reactants)-1:
                    self.append(TEXT('+', color="4"), arrange="right")
        else:
            for pr in previous_reactions:
                # reactants that are products of the previous reaction
                main_rs = list(set(pr.products).intersection(set(reactants)))
                box = BOX()
                for mr_index in range(len(main_rs)):
                    rc, last_id = self.get_cpd_cdxml(main_rs[mr_index], id_offset=last_id, color="5")
                    box.append(rc, arrange="right")
                    if mr_index < len(main_rs)-1:
                        box.append(TEXT('+',color="5"), arrange="right")
                pr.append(box, arrange="right")
            if len(previous_reactions) == 1:
                self.append(previous_reactions[0].container, arrange="right")
            else:
                # stack multiple previous reactions vertically, centered
                box = BOX()
                for pr in previous_reactions:
                    box.append(pr.container, arrange="below", align="right")
                box.y_reference = box.height/2
                self.append(box, arrange="right")
        return last_id
    def add_product(self, product, last_id):
        """Append the (red) target product to the drawing."""
        target, last_id = self.get_cpd_cdxml(product, id_offset=last_id, color="6")
        self.append(target, arrange="right")
    def add_transition(self, promiscuous_reactants, promiscuous_products, misc_products,
                       reaction_proteins="", reaction_solvents="", reaction_catalysts="", reaction_SPRESI_info=""):
        """Create the reaction-arrow TRANSITION element and append it."""
        self.transition = TRANSITION(reactants=promiscuous_reactants, products=promiscuous_products, misc_products=misc_products,
                                     reaction_proteins=reaction_proteins, reaction_solvents=reaction_solvents,
                                     reaction_catalysts=reaction_catalysts, reaction_SPRESI_info=reaction_SPRESI_info)
        self.container.append(self.transition, arrange="right")
    def set_products(self, cdxml_products):
        """Record the products of the current reaction."""
        self.products = cdxml_products
    def set_FBA(self, color_index):
        """Color the transition arrow with the given colortable index."""
        self.transition.set_color(str(color_index))
    def add_color(self, fba_value):
        """Append a red-scaled color for *fba_value* and bump the index."""
        if self.output_path:
            et.SubElement(self.colortable, 'color',attrib={'r':str(fba_value),'g':'0','b':'0'})
            self.color_index += 1
    def generate_file(self):
        """Move the laid-out elements onto the page and write the CDXML file."""
        # shift the drawing away from the page origin
        self.container.set_x(self.container.get_x() + 50)
        self.container.set_y(self.container.get_y() + 100)
        for element in self.container.root.findall('./'):
            self.page.append(element)
        self.page.set('BoundingBox', '0 0 %f %f' % (self.container.width+150, self.container.height+150))
        self.page.set('Width', str(self.container.width + 150))
        self.page.set('Height', str(self.container.height + 150))
        self.tree.write(self.output_path, encoding='UTF-8')
import pytest
import subprocess
import re
import math
# Regex capturing a number in scientific notation, e.g. "1.901026e-03".
# BUG FIX: the dot must be escaped — unescaped '.' matched any character.
number = r"([0-9]+\.[0-9]+e[+-][0-9]+)"
# absolute tolerance for comparing parsed norms against reference values
tol = 1e-5
# "basicnewton"
@pytest.mark.parametrize("solver", ["basic"])
@pytest.mark.parametrize("num_proc", [1, 2])
def test_simple(solver, num_proc):
    """Run the 'simple' problem under MPI and check the velocity norm."""
    cmd = ("cd ..; mpiexec -n {} python sauce.py solver={} "
           "problem=simple T=0.1 grid_spacing=0.1 "
           "testing=True")
    d = subprocess.check_output(cmd.format(num_proc, solver), shell=True)
    match = re.search("Velocity norm = " + number, str(d))
    err = match.groups()
    ref = 1.901026e-03
    # float() instead of eval(): no code execution on regex-captured text
    assert abs(float(err[0]) - ref) < tol
@pytest.mark.parametrize("solver", ["basic"])
@pytest.mark.parametrize("num_proc", [1, 2])
def test_taylorgreen(solver, num_proc):
    """Run the Taylor-Green problem under MPI and check all error norms."""
    cmd = ("cd ..; mpiexec -n {} python sauce.py solver={} "
           "problem=taylorgreen T=0.002 testing=True N=20")
    d = subprocess.check_output(cmd.format(num_proc, solver), shell=True)
    match = re.search("Final error norms: u = " + number +
                      " phi = " + number +
                      " c_p = " + number +
                      " c_m = " + number +
                      " V = " + number, str(d))
    err = match.groups()
    for e in err:
        # float() instead of eval(): no code execution on regex-captured text
        assert float(e) < 1e-1
if __name__ == "__main__":
    # Ad-hoc manual entry point: run a single case directly without pytest.
    #test_simple("basic", 1)
    test_taylorgreen("basic", 1)
|
import hashlib
from dagster.serdes import serialize_dagster_namedtuple
def create_snapshot_id(snapshot):
    """Return a stable hex id for *snapshot*: SHA-1 of its serialized form."""
    serialized = serialize_dagster_namedtuple(snapshot)
    # sha1 keeps hexdigest at 40 chars, not 64
    digest = hashlib.sha1(serialized.encode())
    return digest.hexdigest()
|
from . import database
from utils import *
from .common import *
@validate_lang_code
def snippets_in_articles(lang_code):
    """Return citation-hunt snippet URLs grouped by page id.

    Query string: one or more integer ``page_id`` parameters. Responds with a
    JSON object mapping each page id to a list of absolute citation_hunt
    URLs, or {"error": ...} on bad input.
    """
    cfg = flask.g._cfg
    try:
        page_ids = flask.request.args.getlist('page_id', type=int)
    # NOTE(review): with type=int, getlist silently drops values that fail
    # conversion rather than raising, so this handler looks unreachable —
    # confirm before relying on it for validation.
    except ValueError:
        return flask.jsonify(error = 'Invalid request')
    if not page_ids:
        return flask.jsonify(error = 'Invalid request')
    result = database.get_snippets_in_articles(
        lang_code, page_ids, cfg.api.max_returned_snippets)
    # page_id -> list of external snippet URLs
    return flask.jsonify({
        page_id: [
            flask.url_for('citation_hunt',
                id = snippet_id, lang_code = lang_code, _external = True)
            for snippet_id in snippet_ids
        ]
        for page_id, snippet_ids in result.items()})
|
from django.urls import path, re_path
from django.views.generic.base import RedirectView
from .views import index, item, logIn, signUp, logOut, inbox, myBids
# URL routes for the app; each imported module exposes a view of the same name.
urlpatterns = [
    path("", index.index, name="index"),
    path("item/<str:itemId>", item.item, name="item"),
    path("logIn", logIn.logIn, name="logIn"),
    path("signUp", signUp.signUp, name="signUp"),
    path("logOut", logOut.logOut, name="logOut"),
    path("inbox", inbox.inbox, name="inbox"),
    path("myBids", myBids.myBids, name="myBids"),
    # catch-all redirect to the index page, currently disabled
    # re_path(r'^.*$',
    #         RedirectView.as_view(url='/', permanent=False),
    #         name='index')
]
|
# ------------------------------------------------------------------------
# Globals
# Default epidemic-scenario parameters, grouped by scenario section.
# NOTE(review): the time-like values (latencyTime, infectiousPeriod, stay
# lengths) appear to be in days and peakMonth a 0-based month index —
# confirm against the consumer of this dict.
DEFAULTS = {
    "PopulationData" : {
        "seroprevalence": 0.0,
    },
    "EpidemiologicalData" : {
        "latencyTime" : 3.0,
        "infectiousPeriod" : 3.0,
        "lengthHospitalStay": 7.0,
        "lengthICUStay": 14.0,
        "seasonalForcing": 0.0,
        "peakMonth": 0,
        "overflowSeverity": 2.0,
        "r0": 3.0,
    },
    "ContainmentData": {
        "numberPoints": 15.0
    }
}
|
from app.services.messages.message_manager import MessageManager
# Public package API: re-export MessageManager only.
__all__ = [
    'MessageManager'
]
|
import datetime
import pathlib
import unittest
from src.container import create_container
from src.models import AdminSistema, AdminEstacio, Estacionamento
from src.models.horario_divergente import HorarioDivergente
from src.repo import HorarioDivergenteRepo
from tests.factories import set_session, EstacionamentoFactory
from tests.factories.factory import HorarioDivergenteFactory
from tests.utils import make_engine, make_general_db_setup, make_savepoint, get_adm_sistema, get_adm_estacio, \
general_db_teardown
class TestHorarioDivergenteRepo(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        """Create the DI container from test.ini and open one shared DB engine."""
        config_path = str(pathlib.Path(__file__).parents[2] / 'test.ini')
        cls.container = create_container(config_path)
        conn_string = str(cls.container.config.get('db')['conn_string'])
        cls.engine = make_engine(conn_string)
    @classmethod
    def tearDownClass(cls) -> None:
        """Dispose the shared DB engine opened in setUpClass."""
        cls.engine.dispose()
    def setUp(self) -> None:
        """Open a savepointed DB session and create the fixtures used by the tests."""
        self.maxDiff = 3000
        self.crypto = self.container.crypto()
        self.conn, self.outer_trans, self.session = make_general_db_setup(self.engine)
        set_session(self.session)  # Factories
        # Reference date and opening/closing times used by the 'set' tests.
        self.data, self.habre, self.hfecha = datetime.date.today(), datetime.time(13, 0, 0), datetime.time(20, 30, 0)
        self.estacios = EstacionamentoFactory.create_batch(10, cadastro_terminado=True)
        # estacios[1] already has a divergent schedule (used by edit/delete tests)
        self.horarios = [HorarioDivergenteFactory.create(data=self.data, estacionamento=self.estacios[1])]
        self.session.commit()
        make_savepoint(self.conn, self.session)
        self.adm_sis, self.adm_sis_sess = get_adm_sistema(self.crypto, self.session)
        # non-master estacio admin bound to estacios[0] (no existing horario)
        self.adm_estacio, self.adm_estacio_sess = get_adm_estacio(self.crypto, self.session)
        self.adm_estacio.estacionamento = self.estacios[0]
        self.adm_estacio.admin_mestre = False
        # second estacio admin bound to estacios[1], used to edit its horario
        self.adm_estacio_edit, self.adm_estacio_edit_sess = get_adm_estacio(self.crypto, self.session, n=6471)
        self.adm_estacio_edit.estacionamento = self.estacios[1]
        self.adm_estacio_edit.admin_mestre = False
        self.repo = HorarioDivergenteRepo()
    def tearDown(self) -> None:
        """Roll back to the outer transaction and close the session."""
        general_db_teardown(self.conn, self.outer_trans, self.session)
def test_setup(self):
admin_sis = self.session.query(AdminSistema).all()
admin_estacio = self.session.query(AdminEstacio).all()
estacios = self.session.query(Estacionamento).all()
horarios = self.session.query(HorarioDivergente).all()
self.assertEqual([self.adm_sis], admin_sis)
self.assertIn(self.adm_estacio, admin_estacio)
self.assertEqual(self.estacios, estacios)
self.assertEqual(self.horarios, horarios)
    def test_set_ok(self):
        """set() creates a new divergent schedule for the admin's estacionamento."""
        success, horad = self.repo.set(self.adm_estacio_sess, self.session, self.data, self.habre, self.hfecha)
        self.assertEqual(True, success, f'Success should be True. Error: {horad}')
        self.assertEqual(self.data, horad.data, 'Data should match')
        self.assertEqual(self.habre, horad.hora_abr, 'Hora abre should match')
        self.assertEqual(self.hfecha, horad.hora_fec, 'Hora fecha should match')
        self.assertEqual(self.estacios[0], horad.estacionamento, 'Estacionamento should match')
        self.assertEqual(self.estacios[0].id, horad.estacio_fk, 'Estacio fk should match')
    def test_edit_ok(self):
        """set() on an existing (data, estacio) pair edits it instead of inserting."""
        ori_id = self.horarios[0].id
        success, horad = self.repo.set(self.adm_estacio_edit_sess, self.session, self.data, self.habre, self.hfecha)
        self.assertEqual(True, success, f'Success should be True. Error: {horad}')
        self.assertEqual(self.data, horad.data, 'Data should match')
        self.assertEqual(self.habre, horad.hora_abr, 'Hora abre should match')
        self.assertEqual(self.hfecha, horad.hora_fec, 'Hora fecha should match')
        self.assertEqual(self.estacios[1], horad.estacionamento, 'Estacionamento should match')
        self.assertEqual(self.estacios[1].id, horad.estacio_fk, 'Estacio fk should match')
        self.assertEqual(ori_id, horad.id, 'Should not create a new instance')
def test_set_no_permission(self):
_sess = [self.adm_sis_sess, None]
for i in range(len(_sess)):
success, ret = self.repo.set(_sess[i], self.session, self.data, self.habre, self.hfecha)
self.assertEqual('sem_permissao', ret, f'Error should be "sem_permissao" on {i}')
self.assertEqual(False, success, f'Success should be False on {i}')
def test_set_adm_estacio_no_estacio(self):
    """An estacio admin with no estacionamento cannot set a horario."""
    _, sess = get_adm_estacio(self.crypto, self.session, n=9717)
    ok, error = self.repo.set(sess, self.session, self.data, self.habre, self.hfecha)
    self.assertEqual('sem_estacio', error, 'Error should be "sem_estacio"')
    self.assertEqual(False, ok, 'Success should be False')
def test_set_fecha_antes_abre(self):
    """Closing time at or before opening time must be rejected."""
    cases = (
        (datetime.time(10, 30, 0), datetime.time(10, 29, 0)),  # closes 1 min early
        (datetime.time(10, 30, 0), datetime.time(10, 30, 0)),  # closes exactly at open
        (datetime.time(10, 30, 0), datetime.time(9, 31, 0)),   # closes well before open
    )
    for i, (abre, fecha) in enumerate(cases):
        ok, error = self.repo.set(self.adm_estacio_sess, self.session, self.data, abre, fecha)
        self.assertEqual('fecha_antes_de_abrir', error,
                         f'Error should be "fecha_antes_de_abrir" on {i}')
        self.assertEqual(False, ok, f'Success should be False on {i}')
def test_set_data_passada(self):
    """Dates in the past must be rejected with 'data_passada'."""
    past_dates = [datetime.date(2021, 10, 30), datetime.date(2020, 12, 30),
                  datetime.date(2021, 11, 15)]
    for data in past_dates:
        ok, error = self.repo.set(self.adm_estacio_sess, self.session, data,
                                  self.habre, self.hfecha)
        self.assertEqual('data_passada', error, f'Error should be "data_passada" on {data}')
        self.assertEqual(False, ok, f'Success should be False on {data}')
def test_delete_ok(self):
    """delete() removes the existing horario row from the database."""
    horario_id = int(self.horarios[0].id)
    ok, error = self.repo.delete(self.adm_estacio_edit_sess, self.session, self.data)
    self.assertIsNone(error, 'Error should be None')
    self.assertEqual(True, ok, 'Success should be True')
    # The row must be gone when re-queried by primary key.
    self.assertIsNone(self.session.query(HorarioDivergente).get(horario_id),
                      'Should delete the instance from the db')
def test_delete_sem_permissao(self):
    """delete() is rejected both for a system-admin session and for no session."""
    for i, sess in enumerate([self.adm_sis_sess, None]):
        ok, ret = self.repo.delete(sess, self.session, self.data)
        self.assertEqual('sem_permissao', ret, f'Error should be "sem_permissao" on {i}')
        self.assertEqual(False, ok, f'Success should be False on {i}')
def test_delete_sem_estacio(self):
    """An estacio admin with no estacionamento cannot delete a horario."""
    _, sess = get_adm_estacio(self.crypto, self.session, n=9717)
    ok, error = self.repo.delete(sess, self.session, self.data)
    self.assertEqual('sem_estacio', error, 'Error should be "sem_estacio"')
    self.assertEqual(False, ok, 'Success should be False')
def test_delete_data_nao_encontrada(self):
    """Deleting a date with no divergent schedule reports 'data_nao_encontrada'."""
    missing_date = datetime.date(2021, 11, 10)
    ok, error = self.repo.delete(self.adm_estacio_edit_sess, self.session, missing_date)
    self.assertEqual('data_nao_encontrada', error, 'Error should be "data_nao_encontrada"')
    self.assertEqual(False, ok, 'Success should be False')
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
|
import stanza
from tool.model.ner_model import NERModel
class StanzaModel(NERModel):
    """NER model backed by the Stanza English pipeline (PERSON entities only)."""

    def __init__(self, save_personal_titles, fix_personal_titles):
        super().__init__(save_personal_titles, fix_personal_titles)
        # tokenize_no_ssplit=True treats each input as a single sentence,
        # which recognize_personal_title relies on (it asserts one sentence).
        self.model = stanza.Pipeline('en', processors='tokenize,ner', tokenize_no_ssplit=True,
                                     logging_level='ERROR', use_gpu=False)
        print('Stanza model loaded.')

    def get_doc_entities(self, text):
        """Return {'content': text, 'entities': [...]} for PERSON spans in *text*.

        Each entity is [start_char, end_char, "PERSON"], plus a fourth element
        with the recognized personal title (or None) when save_personal_titles
        is enabled.
        """
        doc = self.model(text)
        entities = []
        for index, ent in enumerate(doc.entities):
            if ent.type == "PERSON":
                ent_text = text[ent.start_char:ent.end_char]
                if self.fix_personal_titles and ent_text.startswith(
                        self.personal_titles):
                    # Strip a leading title ("Mr. John" -> "John") by skipping
                    # the first whitespace-delimited word plus one space.
                    ent.start_char += (1 + len(ent_text.split(' ')[0]))
                if self.save_personal_titles:
                    personal_title = self.recognize_personal_title(ent, doc)
                    entities.append(
                        [ent.start_char, ent.end_char, "PERSON", personal_title])
                else:
                    entities.append([ent.start_char, ent.end_char, "PERSON"])
        return {'content': text, 'entities': entities}

    def recognize_personal_title(self, ent, doc):
        """Return the personal title (or "the") of the token before *ent*, else None."""
        personal_title = None
        # Find the token whose start offset equals the entity start.
        # NOTE(review): if fix_personal_titles shifted start_char into the
        # middle of a token, this [0] lookup could raise IndexError — confirm.
        span_id = [x['id'] for x in doc.to_dict()[0] if x['start_char']
                   == ent.start_char][0]
        assert len(doc.sentences) == 1
        if span_id > 1:
            # Text of the token immediately preceding the entity.
            word_before_name = [x['text']
                                for x in doc.to_dict()[0] if x['id'] == span_id - 1][0]
            if word_before_name.replace(".", "") in self.personal_titles:
                personal_title = word_before_name.replace(".", "")
            if word_before_name.lower() == "the":
                personal_title = "the"
        return personal_title
|
TRAIN_IDS_PATH = "data/train_ids.npy"
# TRAIN_LABELS_PATH = "data/raw/solutions_training.csv"
TRAIN_LABELS_PATH = "data/raw/training_solutions_rev1.csv"

import numpy as np
import os
import csv

# Collect the id (first CSV column) of every data row in the solutions file.
with open(TRAIN_LABELS_PATH, 'r') as f:
    rows = csv.reader(f, delimiter=",")
    next(rows, None)  # skip the header line
    train_ids = np.array([int(row[0]) for row in rows])

print("Saving %s" % TRAIN_IDS_PATH)
np.save(TRAIN_IDS_PATH, train_ids)
from django import forms
from .utils import BootstrapFormMixin
class RegistrationForm(BootstrapFormMixin, forms.Form):
    """User sign-up form with password confirmation and a minimum length rule."""

    first_name = forms.CharField(label='First Name', max_length=100)
    last_name = forms.CharField(label='Last Name', max_length=100)
    email = forms.EmailField(label='Email', max_length=100)
    password = forms.CharField(label='Password', widget=forms.PasswordInput)
    confirm_password = forms.CharField(
        label='Confirm Password',
        widget=forms.PasswordInput
    )

    def clean_confirm_password(self):
        """Ensure the confirmation matches the password field."""
        confirm_password = self.cleaned_data['confirm_password']
        # password may be absent if clean_password already rejected it.
        password = self.cleaned_data.get('password')
        if password and password != confirm_password:
            raise forms.ValidationError('Confirm password must match password')
        return confirm_password

    def clean_password(self):
        """Enforce a minimum password length of 8 characters."""
        password = self.cleaned_data['password']
        if len(password) < 8:
            raise forms.ValidationError('Minimum password length of 8 characters is required')
        return password
class LoginForm(BootstrapFormMixin, forms.Form):
    """Simple email/password login form."""
    email = forms.EmailField(label='Email', max_length=100)
    password = forms.CharField(label='Password', widget=forms.PasswordInput)
|
# example3_css_properties - fold css properties into the cssCamelCase attributes. e.g.
# style:"margin-left:2em;" becomes cssMarginLeft:"2em"
# Make the sibling ``lib`` package importable when run from this directory.
import os, sys; sys.path.append(os.path.dirname(os.path.abspath(__file__))+os.sep+'..')
from lib.funcytag import funcyTag

# Tag factory bound to the <div> element.
div=funcyTag('div')
def build_example_html():
    """Build three nested example <div>s exercising css* attribute folding.

    Each cssCamelCase attribute is presumably folded into the tag's style
    (e.g. cssMarginLeft -> margin-left) by funcyTag; a None value
    (cssMarginLeft on the outer div) is presumably omitted — verify in
    lib.funcytag.  Returns the rendered markup as unicode (Python 2).
    """
    t = div( { 'id':'outer', 'cssFontSize':'20px', 'cssColor':'red', 'cssMarginLeft':None },
        'begin outer div',
        div( { 'id':'middle', 'cssFontSize':'16px', 'cssColor':'green', 'cssMarginLeft':'12px' },
            'begin middle div',
            div( { 'id':'inner', 'cssFontSize':'12px', 'cssColor':'blue', 'cssMarginLeft':'12px' },
                'center div'
            ),
            'end middle div'
        ),
        'end outer div'
    )
    return unicode(t)
if __name__ == "__main__":
print
print build_example_html()
print
|
from builtins import object
class Batch(object):
    """
    A no-op batch: every appended job is sent on the queue immediately,
    so a "batch" never holds more than a single job.
    """

    def __init__(self, queue, max_batch_len):
        self.queue = queue
        # NOTE: this is ignored, and "batches" always contain a single job.
        self.max_batch_len = max_batch_len

    def append(self, job):
        """Send *job* right away instead of accumulating it."""
        self.queue.send_message(job)

    def flush(self):
        """Nothing is ever buffered, so there is nothing to flush."""
        pass
class Queue(object):
    """
    A fake queue that never stores or transports messages: each message is
    handed straight to the server for immediate processing.

    Useful for testing and running locally.
    """

    def __init__(self, server):
        self.server = server

    def start_batch(self, max_batch_len=1):
        """Return a no-op Batch bound to this queue."""
        return Batch(self, max_batch_len)

    def send_message(self, msg):
        """Dispatch *msg* on the server right away."""
        self.server.dispatch_job(msg)

    def flush(self):
        """Nothing is buffered, so there is nothing to flush."""
        pass

    def receive_messages(self):
        # fake queue doesn't actually hold any messages, so this is really
        # an error.
        raise NotImplementedError("Fake queue doesn't hold any messages.")
def create(j, cfg):
    # Factory entry point; *cfg* is accepted for interface parity but unused.
    return Queue(j)
|
# -*- coding: utf-8 -*-
# @Time : 2020/11/4 16:04
# @Email : 986798607@qq.com
# @Software: PyCharm
# @License: BSD 3-Clause
from itertools import combinations_with_replacement
import torch.nn.functional as F
import numpy as np
import os
import torch
from numpy import random
from torch import nn
from torch.nn import Module
from torch.utils import tensorboard
from cams.cam3d import GradCAM3dpp, GradCAM3d
from cams.nnn import Indexes
class Moudle1(Module):
    """Small 3-D conv -> adaptive-avg-pool -> linear classifier (22 classes).

    Input: float tensor of shape (batch, 1, D, H, W); output: (batch, 22)
    logits.  The adaptive pool makes any spatial size acceptable.
    Extra positional *args are accepted and ignored.
    """

    def __init__(self, *args):  # initialization
        super(Moudle1, self).__init__()
        D_in, dens_out = 1, 22
        D1, D2 = 6, 1
        dense1, dense2 = 27, 64
        AvgPool3d_x, AvgPool3d_y, AvgPool3d_z = 10, 10, 10
        # Flattened feature size after the adaptive pool.
        # BUG FIX: the last factor was AvgPool3d_x (a typo); it only worked
        # because all three pool dims are 10.  Use AvgPool3d_z so the size
        # stays correct if the pool dims are ever made unequal.
        self.link = D2 * AvgPool3d_x * AvgPool3d_y * AvgPool3d_z
        model_conv = nn.Sequential(
            # Indexes(D_in, D2,(10,10,10)),
            nn.Conv3d(D_in, D2, 1, stride=1, padding=0),
            # nn.BatchNorm3d(D2),
            # nn.ReLU(True),
            # nn.MaxPool3d(3, stride=1, padding=1),
            # nn.Dropout3d()
        )
        model_sigmod = nn.Sigmoid()
        model_Linear = nn.Sequential(
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(self.link, dens_out),
            nn.ReLU(True),
            # nn.Dropout(),
            # nn.Linear(dens_out, dens_out),
            # nn.ReLU(True),
            # nn.Dropout(),
            # nn.Linear(dense2, dens_out),
        )
        self.model_conv = model_conv
        self.model_sigmod = model_sigmod
        self.avgpool = nn.AdaptiveAvgPool3d((AvgPool3d_x, AvgPool3d_y, AvgPool3d_z))
        self.model_Linear = model_Linear

    def forward(self, x, t=1):
        """Forward pass; t == 0 enables verbose shape prints (first step only)."""
        if t == 0:
            # Debug path: identical computation but with an extra sigmoid and
            # shape prints.  NOTE(review): the sigmoid is applied only here,
            # so step 0 differs from later steps — confirm this is intended.
            x = self.model_conv(x)
            print("conv out", x.shape)
            x = self.model_sigmod(x)
            x = self.avgpool(x)
            print("avgpool", x.shape)
            x = torch.flatten(x, start_dim=1, end_dim=-1)
            print("flatten", x.shape)
            x = self.model_Linear(x)
            print("linear", x.shape)
        else:
            x = self.model_conv(x)
            x = self.avgpool(x)
            x = torch.flatten(x, start_dim=1, end_dim=-1)
            x = self.model_Linear(x)
        return x
def run(train, test=None):
    """Train a Moudle1 on (x, y) tensors for up to 20000 steps on cuda:0.

    Logs the loss to the module-level ``writer`` every step, stops early when
    the loss drops below 0.001, evaluates on *test* (defaults to the training
    data), closes the writer, and returns the trained model.
    """
    if test is None:
        test = train
    train_x, train_y= train
    model = Moudle1()
    device = torch.device('cuda:0')
    # device = torch.device('cpu')
    model.to(device)
    learning_rate = 1e-4  # NOTE(review): unused — the optimizer hard-codes lr=0.01
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)  # generic optimizer (e.g. SGD, Adam)
    #
    loss_fn = torch.nn.CrossEntropyLoss(reduction='mean')  # measures how close predictions are to targets
    # loss_fn = torch.nn.MSELoss(reduction='mean')
    for t in range(20000):
        train_x = train_x.to(device)
        train_y = train_y.to(device)
        # t is also passed to forward: step 0 triggers the debug print path.
        y_pred = model(train_x, t)
        # y_pred = y_pred*we
        # prob = F.softmax(y_pred, dim=1)
        # prob = F.relu(y_pred)
        # _, idx = torch.max(prob, dim=1)
        loss = loss_fn(y_pred,train_y)
        if loss.item() < 0.001:
            break
        # if t % 10 == 9:
        print(t, loss.item())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # if t%50==0:
        writer.add_scalar('loss', loss.item(), global_step=t)
    test_x, test_y = test
    test_x = test_x.to(device)
    test_y = test_y.to(device)
    y_pred = model(test_x)
    loss2 = loss_fn(y_pred, test_y)
    print(loss2.item())
    writer.close()
    return model
# Fix both numpy and torch RNG seeds for reproducible synthetic data below.
random.seed(0)
torch.random.manual_seed(0)
def get():
    """Generate one synthetic batch: x of shape (120, 1, 10, 10, 10), y of (120, 1).

    For each of the 120 combinations (i, j, k) with 1 <= i <= j <= k <= 8,
    the voxel at (i, j, k) is set to 1.0 plus one random distractor voxel;
    the label is i + j + k - 3.  x is normalized to a max of 1.0.
    """
    x = random.random((120, 10, 10, 10)) + 0.00001
    labels = []
    for ai, index in enumerate(combinations_with_replacement(range(1, 9), 3)):
        i, j, k = index
        print(ai, index)
        x[ai, i, j, k] = 1.0
        # One extra random "hot" voxel acts as noise.
        l1, l2, l3 = random.randint(1, 9, 3)
        x[ai, l1, l2, l3] = 1.0
        labels.append(i + j + k - 3)
    x = torch.tensor(x).unsqueeze(dim=1).type(torch.float32)
    y = torch.tensor(labels).reshape((-1, 1)).type(torch.float32)
    return x / torch.max(x), y
def del_files(path_file):
    """Recursively delete every file under *path_file* (directories are kept)."""
    for name in os.listdir(path_file):
        entry = os.path.join(path_file, name)
        # If the entry is a directory, recurse and empty it too.
        if os.path.isdir(entry):
            del_files(entry)
        else:
            os.remove(entry)
# TensorBoard writer shared by run() and the Grad-CAM dump below.
writer = tensorboard.SummaryWriter(log_dir="/home/iap13/wcx/tb/exp1", flush_secs=10)
# Build 10 synthetic batches and stack them: x (1200,1,10,10,10), y (1200,1).
data = [get() for i in range(10)]
x, y = zip(*data)
x = torch.cat(x, dim=0)
y = torch.cat(y, dim=0)
# One-hot-style target matrix; NOTE(review): y_ is not used below — left over
# from experimentation with other loss functions.
y_ = torch.zeros((1200, 22))
y = y.type(torch.long).squeeze()
y_ = torch.index_fill(y_, 1, y, torch.tensor(1))
# model = run((x, y), None)
# torch.save(model.state_dict(), "model_dict")
# Load a previously trained model and run Grad-CAM++ on one sample on CPU.
model = Moudle1()
model.load_state_dict(torch.load("model_dict"))
device = torch.device('cpu')
model.to(device)
model.eval()
# Hook the last layer of the conv stack for CAM extraction.
target_layer = model.model_conv[-1]
# wrapped_model = GradCAM3d(model, target_layer)
wrapped_model = GradCAM3dpp(model, target_layer)
# wrapped_model = SmoothGradCAMpp(model, target_layer)
x = x.to(device)
y = y.to(device)
# for i in range(0, 1):
#     xi = x[i]
#     yi = y[i]
#
#     tensor_shown = xi.unsqueeze(0)
#
#     cams, idx = wrapped_model.forward(tensor_shown)
#     cams = cams.squeeze().cpu().numpy()
#     xi = xi.squeeze().cpu().numpy()
#     for t in range(10):
#         writer.add_images('countdown%d'%i,
#                           cams[t],
#                           global_step=t,
#                           dataformats='HW')
# writer.close()
# Dump the CAM slices of sample 2 as 10 grayscale images (one per z-slice).
i=2
xi = x[i]
yi = y[i]
tensor_shown = xi.unsqueeze(0)
cams, idx = wrapped_model.forward(tensor_shown)
cams = cams.squeeze().cpu().numpy()
xi = xi.squeeze().cpu().numpy()
for t in range(10):
    writer.add_images('countdown%d'%i,
                      cams[t],
                      global_step=t,
                      dataformats='HW')
writer.close()
# model = Moudle1()
# writer.add_graph(model.eval(),x)
# writer.close()
from __future__ import annotations
from librespot.common import Utils
from librespot.metadata import SpotifyId
from librespot.metadata.PlayableId import PlayableId
import re
class EpisodeId(SpotifyId.SpotifyId, PlayableId):
    """Spotify episode identifier, stored internally as a lowercase hex GID."""

    _PATTERN = re.compile(r"spotify:episode:(.{22})")
    _hexId: str

    def __init__(self, hex_id: str):
        self._hexId = hex_id.lower()

    @staticmethod
    def from_uri(uri: str) -> EpisodeId:
        """Parse a ``spotify:episode:<base62>`` URI into an EpisodeId.

        Raises:
            TypeError: if *uri* does not match the episode URI pattern.
        """
        matcher = EpisodeId._PATTERN.search(uri)
        if matcher is not None:
            episode_id = matcher.group(1)
            return EpisodeId(
                Utils.Utils.bytes_to_hex(
                    PlayableId.BASE62.decode(episode_id, 16)))
        # BUG FIX: the exception was constructed but never raised, so invalid
        # URIs silently returned None.
        raise TypeError("Not a Spotify episode ID: {}".format(uri))

    @staticmethod
    def from_base62(base62: str) -> EpisodeId:
        """Build an EpisodeId from a 22-character base62 episode id."""
        return EpisodeId(
            Utils.Utils.bytes_to_hex(PlayableId.BASE62.decode(base62, 16)))

    @staticmethod
    def from_hex(hex_str: str) -> EpisodeId:
        """Build an EpisodeId from a raw hex GID string."""
        return EpisodeId(hex_str)

    def to_mercury_uri(self) -> str:
        return "hm://metadata/4/episode/{}".format(self._hexId)

    def to_spotify_uri(self) -> str:
        # BUG FIX: the scheme was emitted as "Spotify:episode:", which does
        # not round-trip through _PATTERN; Spotify URIs use a lowercase scheme.
        return "spotify:episode:{}".format(
            PlayableId.BASE62.encode(Utils.Utils.hex_to_bytes(self._hexId)))

    def hex_id(self) -> str:
        return self._hexId

    def get_gid(self) -> bytes:
        return Utils.Utils.hex_to_bytes(self._hexId)
|
import psutil

# Continuously report CPU usage of the process with PID 8012.
p = psutil.Process(8012)
while True:
    # BUG FIX: cpu_percent() with no interval returns 0.0 on the first call
    # and made this loop busy-spin, pegging a core and printing garbage
    # values; blocking for one second per sample yields meaningful readings.
    print(p.cpu_percent(interval=1))
# -*- coding: utf-8 -*-
from rmaics import rmaics
from kernal import record_player
#%%
# Run a 4-agent match with rendering enabled and record it.
game = rmaics(agent_num=4, render=True)
game.reset()
# only when render = True
game.play()
#%%
game.save_record('./records/record1.npy')
#%%
print('play saved file')
# NOTE(review): the match above is saved as record1.npy but record0.npy is
# replayed here — confirm the mismatch is intentional.
player = record_player()
player.play('./records/record0.npy')
import os
import torch
from torchvision import datasets, transforms
class MNIST:
    """MNIST train/test DataLoaders with the standard mean/std normalization."""

    def __init__(self, args):
        super(MNIST, self).__init__()
        self.INPUT_SIZE = 784    # 28 * 28 flattened pixels
        self.NUM_CLASSES = 10
        use_cuda = not args.no_cuda and torch.cuda.is_available()
        loader_kwargs = {'num_workers': args.workers, 'pin_memory': True} if use_cuda else {}
        # The same normalization is applied to both splits.
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ])
        train_set = datasets.MNIST(root=args.data_dir, train=True, download=True,
                                   transform=transform)
        test_set = datasets.MNIST(root=args.data_dir, train=False,
                                  transform=transform)
        self.train_loader = torch.utils.data.DataLoader(
            train_set, batch_size=args.batch_size, shuffle=True, **loader_kwargs)
        # NOTE(review): the test loader also shuffles, matching the original
        # behavior — unusual, but harmless for plain evaluation.
        self.test_loader = torch.utils.data.DataLoader(
            test_set, batch_size=args.batch_size, shuffle=True, **loader_kwargs)
class CIFAR10:
    """CIFAR-10 train/test DataLoaders; the train split gets crop/flip augmentation."""

    def __init__(self, args):
        super(CIFAR10, self).__init__()
        # NOTE(review): 1024 = 32*32, i.e. a single channel — confirm callers
        # expect per-channel size rather than 3*32*32.
        self.INPUT_SIZE = 1024
        self.NUM_CLASSES = 10
        use_cuda = not args.no_cuda and torch.cuda.is_available()
        loader_kwargs = {'num_workers': args.workers, 'pin_memory': True} if use_cuda else {}
        normalize = transforms.Normalize(
            mean=[0.491, 0.482, 0.447],
            std=[0.247, 0.243, 0.262]
        )
        # Augmentation only on the training split.
        train_transform = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        test_transform = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
        data_root = os.path.join(args.data_dir, 'CIFAR10')
        train_set = datasets.CIFAR10(root=data_root, train=True, download=True,
                                     transform=train_transform)
        test_set = datasets.CIFAR10(root=data_root, train=False, download=True,
                                    transform=test_transform)
        self.train_loader = torch.utils.data.DataLoader(
            train_set, batch_size=args.batch_size, shuffle=True, **loader_kwargs)
        self.test_loader = torch.utils.data.DataLoader(
            test_set, batch_size=args.batch_size, shuffle=True, **loader_kwargs)
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
try:
from pyVmomi import vim, pbm
from pyVim.connect import SoapStubAdapter
except ImportError:
pass
from ansible.module_utils.vmware import PyVmomi
class SPBM(PyVmomi):
    """Helper for connecting to the VMware SPBM (storage policy) endpoint."""

    def __init__(self, module):
        super(SPBM, self).__init__(module)
        self.spbm_content = None
        self.spbm_si = None
        self.version = "pbm.version.version2"

    def get_spbm_connection(self):
        """
        Creates a Service instance for VMware SPBM
        """
        client_stub = self.si._GetStub()
        # Reuse the existing vCenter session cookie for the SPBM endpoint.
        try:
            session_cookie = client_stub.cookie.split('"')[1]
        except IndexError:
            self.module.fail_json(msg="Failed to get session cookie")
        ssl_context = client_stub.schemeArgs.get('context')
        additional_headers = {'vcSessionCookie': session_cookie}
        hostname = self.module.params['hostname']
        if not hostname:
            self.module.fail_json(msg="Please specify required parameter - hostname")
        # Build a SOAP stub against the /pbm/sdk path and fetch the content.
        stub = SoapStubAdapter(host=hostname, path="/pbm/sdk", version=self.version,
                               sslContext=ssl_context, requestContext=additional_headers)
        self.spbm_si = pbm.ServiceInstance("ServiceInstance", stub)
        self.spbm_content = self.spbm_si.PbmRetrieveServiceContent()
|
#!/usr/bin/env python
import argparse
import collections
import datetime
import glob
import logging
import math
import os
import random
import re
import sys
import weakref
import time
import pickle
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
# ==============================================================================
# -- add PythonAPI for release mode --------------------------------------------
# ==============================================================================
try:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/carla')
except IndexError:
pass
import gym
import gym_carla
import carla
import numpy as np
from stable_baselines.deepq.policies import CnnPolicy
from stable_baselines import SAC,GAIL,DQN
from stable_baselines.gail import generate_expert_traj,ExpertDataset
from stable_baselines.ddpg.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise
from stable_baselines.common.callbacks import CallbackList, CheckpointCallback, EvalCallback
from SQIL import SQIL_DQN
from tensorflow.keras.preprocessing.sequence import pad_sequences
#from stable_baselines.common import SubprocVecEnv
def main():
# parameters for the gym_carla environment
params = {
'number_of_vehicles': 150,
'number_of_walkers': 0,
'display_size': 256, # screen size of bird-eye render
'max_past_step': 5, # the number of past steps to draw
'dt': 0.1, # time interval between two frames
'discrete': True, # whether to use discrete control space
'discrete_acc': [-6.0,-3.0, 0.0, 3.0], # discrete value of accelerations
'discrete_steer': [-0.2, 0.0, 0.2], # discrete value of steering angles
'continuous_accel_range': [-3.0, 3.0], # continuous acceleration range
'continuous_steer_range': [-0.3, 0.3], # continuous steering angle range
'ego_vehicle_filter': 'vehicle.lincoln*', # filter for defining ego vehicle
'port': 2000, # connection port
'town': 'Town03', # which town to simulate
'task_mode': 'roundabout', # mode of the task, [random, roundabout (only for Town03)]
'max_time_episode': 800, # maximum timesteps per episode
'max_waypt': 12, # maximum number of waypoints
'obs_range': 32, # observation range (meter)
'lidar_bin': 0.5, # bin size of lidar sensor (meter)
'd_behind': 12, # distance behind the ego vehicle (meter)
'out_lane_thres': 3.0, # threshold for out of lane
'desired_speed': 5, # desired speed (m/s)
'max_ego_spawn_times': 200, # maximum times to spawn ego vehicle
'display_route': True, # whether to render the desired route
'pixor_size': 64, # size of the pixor labels
'pixor': False, # whether to output PIXOR observation
'obs_single':True, #return a single key of the obs_space dict for obs space
'obs_name':'birdeye',#basic options have 'camera' ; 'lidar' ; 'birdeye' ; 'state'
'use_control':True,
'add_state':False,
'random_seed':10
}
# Set gym-carla environment
#env = gym.make('carla-v0',params=params)
#env = env.unwrapped
'''
def dummy_expert(_obs):
"""
Random agent. It samples actions randomly
from the action space of the environment.
:param _obs: (np.ndarray) Current observation
:return: (np.ndarray) action taken by the expert
"""
c = env.ego.get_control()
#print("throttle:",c.throttle,"steer:",c.steer,"brake:",c.brake)
acc = c.throttle if c.throttle >0 else -c.brake
acc = (acc+1)/2
steer = np.clip(c.steer,-0.2,0.2)
action = np.array([acc,steer])
#print(action)
return action
#model = SAC.load("./dqn_logs_xunhuan_sac2/rl_model_30000_steps.zip",env)
#model = DQN.load("dqn_carla_new")
#generate_expert_traj(model, 'expert_carla_dqn_1',env,n_timesteps=0,n_episodes=5,limit_return=200)
print("expert saved!")
dataset = ExpertDataset(expert_path='expert_carla_dqn_1.npz')
#dataset.plot()
model = SQIL_DQN(CnnPolicy, env,gamma=0.995,buffer_size=dataset.observations.shape[0]+1)
model.tensorboard_log = './tensorboard_dqn_log/SQIL/'
# Note: in practice, you need to train for 1M steps to have a working policy
checkpoint_callback = CheckpointCallback(save_freq=10000, save_path='./dqn_logs_xunhuan_DQfD/')
#model.pretrain(dataset,n_epochs=10000)
#model.save("BC_gail_CARLA")
model.initializeExpertBuffer(dataset.observations,dataset.observations.shape[0]+1,dataset.actions,0.0,dataset.dones)
print("expert_buffer initiated!")
model.learn(total_timesteps=100000,callback=checkpoint_callback)
model.save("gail_CARLA")
model = SQIL_DQN.load("./dqn_logs_xunhuan_DQfD/rl_model_80000_steps.zip")
dataset = ExpertDataset(expert_path='expert_carla_dqn_1.npz')
model.initializeExpertBuffer(dataset.observations,dataset.observations.shape[0]+1,dataset.actions,dataset.dones)
model.buffer_size = dataset.observations.shape[0]+1
generate_expert_traj(model, 'expert_carla_DQFD_test',env,n_timesteps=0,n_episodes=100,limit_return=200,mlp_obs=True)
'''
path = 'C:/Users/24829/Desktop/gym-carla-master/'
dts1 = ExpertDataset(expert_path=path+'expert_carla_sacfd_test.npz')
speed_data1 = dts1.observations[:,2]
dones_data1 = dts1.dones
dts2 = ExpertDataset(expert_path=path+'expert_carla_new_human_continuous_key_mlp.npz')
speed_data2 = dts2.observations[:,2]
dones_data2 = dts2.dones
#print(speed_data1[:20])
#dts2 = ExpertDataset(expert_path='expert_carla_new_human_continuous_mlp_test.npz')
#speed_data2 = dts2.observations[:,0]
#dones_data2 = dts2.dones
dts3 = ExpertDataset(expert_path=path+'expert_carla_ppo_test.npz')
print(dts3.observations.shape)
speed_data3 = dts3.observations[:,2]
dones_data3 = dts3.dones
dts4 = ExpertDataset(expert_path=path+'expert_carla_TD3_test.npz')
dones_data4 = dts4.dones
speed_data4 = dts4.observations[:,2]
dones_data4 = dts4.dones
dts5 = ExpertDataset(expert_path=path+'expert_carla_new_human_test_discrete_te2.npz')
speed_data5 = dts5.observations[:350,]
dones_data5 = dts5.dones
dts6 = ExpertDataset(expert_path=path+'expert_carla_new_human_test_discrete_te2.npz')
speed_data6 = dts6.observations[:350,2]
dones_data6 = dts6.dones
#print(speed_data[:10])
def preprocess(speed_data, dones_data, num):
    """Split a speed trace into *num* episodes, each padded/truncated to 800 steps.

    Episode boundaries are the indices where the dones flag is 2 or True.
    Returns an array of shape (800, num) with one column per episode.
    """
    speed_data = np.reshape(speed_data, (-1, 1))
    ind = []
    for i in range(dones_data.shape[0]):
        # BUG FIX: the second test was `dones_data==True`, which compares the
        # WHOLE array and makes the `or` evaluate an ambiguous array truth
        # value; it must test the current element.
        if dones_data[i] == 2 or dones_data[i] == True:
            ind.append(i)
    for i in range(num):
        if i == num - 1:
            buf_arr = speed_data[ind[i]:]
        else:
            buf_arr = speed_data[ind[i]:ind[i + 1]]
        # Pad/cut each episode to a fixed 800-step column.
        buf_arr = pad_sequences([buf_arr], maxlen=800, padding='post', truncating='post', dtype="float32")
        buf_arr = buf_arr[0]
        if i == 0:
            x_array = buf_arr
        else:
            x_array = np.append(x_array, buf_arr, axis=1)
    return x_array
def preprocess_2(data, dones, num, n=False):
    """Cut *data* into per-episode chunks of exactly 200 steps.

    Episode starts are the indices where *dones* is 2 (or True); episodes
    shorter than 200 steps are dropped.  When *n* is set, a fixed list of
    episode indices is skipped as well.  Returns a list of (200, 1) arrays.
    """
    data = np.reshape(data, (-1, 1))
    starts = [i for i in range(dones.shape[0]) if dones[i] == 2 or dones[i] == True]
    skipped = [9, 11, 19, 25, 27] if n else []
    chunks = []
    for i in range(num):
        if i in skipped:
            continue
        # Last episode runs to the end of the trace.
        chunk = data[starts[i]:] if i == num - 1 else data[starts[i]:starts[i + 1]]
        if len(chunk) < 200:
            continue
        chunks.append(chunk[:200])
    return chunks
#out_x = preprocess_2(speed_data1,dones_data1,30)
#out_y = preprocess_2(speed_data7,dones_data7,30,realm=55.47)
out_speed = preprocess_2(speed_data1,dones_data1,30,n=True)
avg = np.mean(np.reshape(out_speed,(-1,200)),axis=0)
std = np.std(np.reshape(out_speed,(-1,200)),axis=0)
out_speed_2 = preprocess_2(speed_data2,dones_data2,5)
avg2 = np.mean(np.reshape(out_speed_2,(-1,200)),axis=0)
std2 = np.std(np.reshape(out_speed_2,(-1,200)),axis=0)
out_speed_3 = preprocess_2(speed_data3,dones_data3,30)
avg3 = np.mean(np.reshape(out_speed_3,(-1,200)),axis=0)
std3 = np.std(np.reshape(out_speed_3,(-1,200)),axis=0)
out_speed_4 = preprocess_2(speed_data4,dones_data4,30)
avg4 = np.mean(np.reshape(out_speed_4,(-1,200)),axis=0)
std4 = np.std(np.reshape(out_speed_4,(-1,200)),axis=0)
avg = avg[20:]
avg2 = avg2[20:]
std = std[20:]
std2 = std2[20:]
avg3 = avg3[20:]
avg4 = avg4[20:]
std3 = std3[20:]
std4 = std4[20:]
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="darkgrid")
plt.figure()
#plt.grid(True)
plt.plot(np.linspace(0,800,len(avg)),avg,linewidth=2.5,alpha=0.7)
plt.fill_between(np.linspace(0,800,len(avg)),avg+std/2,avg-std/2,alpha=0.2)
plt.plot(np.linspace(0,800,len(avg2)),avg2,linewidth=2.5,alpha=0.7)
plt.fill_between(np.linspace(0,800,len(avg2)),(avg2+std2/2),(avg2-std2/2),alpha=0.2)
#plt.plot(np.linspace(0,800,len(avg3)),avg3,linewidth=2.5,alpha=0.7)
#plt.fill_between(np.linspace(0,800,len(avg3)),(avg3+std3/2),(avg3-std3/2),alpha=0.2)
#plt.plot(np.linspace(0,800,len(avg4)),avg4,linewidth=2.5,alpha=0.7)
#plt.fill_between(np.linspace(0,800,len(avg4)),(avg4+std4/2),(avg3-std4/2),alpha=0.2)
plt.plot([0,800],[7,7],linewidth=3)
plt.legend(['Proposed','Human','Set speed'],frameon=False,loc=4,bbox_to_anchor=(0.95,0.15))
plt.axis([0,800,0,10])
plt.xlabel('Steps')
plt.ylabel('Speed m/s')
plt.savefig('C:/Users/24829/Desktop/paper-pic/speed.pdf')
#for i in range(len(out_speed)):
# plt.plot(range(200),out_speed[i],linewidth=1,alpha=0.7)
#plt.scatter(-52.47,6.48)
#plt.axis("equal")
#plt.savefig("speed.png",transparent=True)
plt.show()
'''
x_array1 = preprocess(speed_data1,dones_data1,3)
x_array2 = preprocess(speed_data2,dones_data2,10)
x_array3 = preprocess(speed_data3,dones_data3,3)
x_array4 = preprocess(speed_data4,dones_data4,10)
x_array5 = preprocess(speed_data5,dones_data5,1)
x_array6 = preprocess(speed_data6,dones_data6,1)
x1 = np.abs(np.mean(x_array1-0.1*x_array3,axis=1))
std1 = np.std(x_array1,axis=1)
x2 = np.abs(np.mean(x_array2-0.1*x_array4,axis=1))
std2 = np.std(x_array2,axis=1)
x3 = np.abs(np.mean(x_array3,axis=1))
std3 = np.std(x_array3,axis=1)
x4 = np.abs(np.mean(x_array4,axis=1))
std4 = np.std(x_array4,axis=1)
x5 = np.abs(np.mean(x_array5-0.1*x_array6,axis=1))
std5 = np.std(x_array5,axis=1)
#print(x.shape)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="darkgrid")
plt.figure()
#plt.fill_between(range(len(x1)),np.squeeze(x1-std1),np.squeeze(x1+std1), alpha=0.2)
plt.plot(x5)
plt.plot(x1)
#plt.fill_between(range(len(x2)),np.squeeze(x2-std2),np.squeeze(x2+std2), alpha=0.2)
plt.plot(x2[:350])
plt.legend(["PID","PID+PP",'Filtered'],frameon=False)
plt.title("Average lateral errors")
plt.xlabel("Steps/n")
plt.ylabel("Errors/m")
'''
'''
plt.plot(x3)
plt.fill_between(range(len(x3)),np.squeeze(x3-std3),np.squeeze(x3+std3), alpha=0.2)
plt.plot(x4)
plt.fill_between(range(len(x4)),np.squeeze(x4-std4),np.squeeze(x4+std4), alpha=0.2)
plt.plot(x5)
plt.fill_between(range(len(x5)),np.squeeze(x5-std5),np.squeeze(x5+std5), alpha=0.2)\
plt.legend(["DQN","PPO","SAC","TD3","A3C"])
plt.plot(np.mean(x1[:400]))
'''
#plt.grid(True)
#plt.plot(x_array1[:,3])
#plt.plot(x_array2[:,2])
plt.legend(["H","s"])
plt.show()
#dts.plot()
def calc_one_method(p, num, n=False, name=''):
    """Load one expert .npz trace and return per-step (mean, std) speed curves.

    Episodes are truncated to 200 steps; the first 20 warm-up steps are
    dropped from the returned curves.  *name* is only used by the
    commented-out debug print.
    """
    path = 'C:/Users/24829/Desktop/gym-carla-master/'
    dts = ExpertDataset(expert_path=path+p)
    #if dts.observations.shape[1]==7:
    #    print('='*5,dts.observations.shape,name,'='*5)
    speed_data = dts.observations[:, 2]  # column 2 is presumably speed — TODO confirm
    dones_data = dts.dones
    def preprocess_2(data, dones, num, n=False):
        # Split into episodes at dones==2/True markers; keep only episodes of
        # at least 200 steps, truncated to exactly 200.
        data = np.reshape(data, (-1, 1))
        j = 0
        ind = []
        for i in range(dones.shape[0]):
            if dones[i]==2 or dones[i]==True:
                ind.append(i)
        out_list = []
        if n:
            nl = [9, 11, 19, 25, 27]  # episode indices to skip
        for i in range(num):
            if n:
                if i in nl:
                    continue
            if i==num-1:
                buf_arr = data[ind[i]:]
            else:
                buf_arr = data[ind[i]:ind[i+1]]
            print(len(buf_arr))
            if len(buf_arr)<200:
                continue
            out_list.append(buf_arr[:200])
        return out_list
    out_speed = preprocess_2(speed_data, dones_data, num, n=n)
    # Mean/std over episodes at each of the 200 steps; drop the first 20.
    avg = np.mean(np.reshape(out_speed, (-1, 200)), axis=0)
    std = np.std(np.reshape(out_speed, (-1, 200)), axis=0)
    return avg[20:], std[20:]
def plot_figure():
    """Plot mean speed (with std band) per method plus a rule-based baseline.

    Saves the figure to a hardcoded path and shows it.
    """
    import matplotlib.pyplot as plt
    import seaborn as sns
    sns.set(style="darkgrid")
    path_list=[
        'expert_carla_sacfd_test.npz',
        'expert_carla_new_human_continuous_key_mlp.npz',
        'expert_carla_ppo_test.npz',
        'expert_carla_TD3_test.npz',
        'expert_carla_sac_test.npz',
        'expert_carla_sqil_test.npz',
        #'expert_carla_gail_mlp.npz',
        #'expert_carla_bc_mlp.npz',
        #'expert_carla_dqnrule_test_2.npz'
    ]
    plt.figure()
    #plt.grid(True)
    for i, p in enumerate(path_list):
        # Dataset 0 (proposed) skips bad episodes; dataset 1 (human) has
        # only 5 episodes; everything else has 30.
        if i==0:
            n=True
        else:
            n=False
        if i==1:
            num=5
        else:
            num=30
        avg , std = calc_one_method(p, num, n)
        if i>1:
            # NOTE(review): RL baselines are rescaled by 7/5 — presumably a
            # desired-speed normalization; confirm.
            plt.plot(np.linspace(0,20,len(avg)),avg*7/5,linewidth=2,alpha=0.7)
            plt.fill_between(np.linspace(0,20,len(avg)),avg*7/5+std/2,avg*7/5-std/2,alpha=0.2)
        else:
            plt.plot(np.linspace(0,20,len(avg)),avg,linewidth=2,alpha=0.7)
            plt.fill_between(np.linspace(0,20,len(avg)),avg+std/2,avg-std/2,alpha=0.2)
    # Add the rule-based (DQfD label) baseline stored in a pickle.
    with open('res_rule.pkl','rb') as reader:
        params_dict=pickle.load(reader)
    l = []
    for s in params_dict['speed']:
        if len(s)==200:
            l.append(s[22:])
    plt.plot(np.linspace(0,20,200-22),np.mean(np.array(l),axis=0),linewidth=2,alpha=0.7)
    plt.fill_between(np.linspace(0,20,200-22),np.mean(np.array(l),axis=0)+np.std(np.array(l),axis=0),np.mean(np.array(l),axis=0)-np.std(np.array(l),axis=0),alpha=0.2)
    #plt.plot([0,20],[7,7],linewidth=3)
    plt.xlabel('Time /s')
    plt.ylabel('Speed m/s')
    # plt.legend(['Proposed','Human','PPO','TD3','SAC','GAIL','SQIL',"BC",'DQfD',"Rule-based",'Set speed'],frameon=False,\
    #            bbox_to_anchor=(0.68,0.99),loc=9,ncol=3,fontsize='x-small')
    plt.legend(['Proposed','Human','PPO','TD3','SAC','SQIL','DQfD'],frameon=False,\
               bbox_to_anchor=(0.68,0.99),loc=9,ncol=3,fontsize='x-small')
    plt.axis([0,20,0,10])
    plt.title('Agent vehicle Speed')
    plt.savefig('C:/Users/24829/Desktop/paper-pic/speed_5.pdf')
    plt.show()
def calc_traj(p, num, n=False):
    """Return per-episode (x, y) position traces from one expert .npz file.

    Columns 5 and 6 of the observations are presumably the ego x/y position —
    TODO confirm against the environment's observation layout.
    """
    path = 'C:/Users/24829/Desktop/gym-carla-master/'
    dts = ExpertDataset(expert_path=path+p)
    #if dts.observations.shape[1]==7:
    #    print('='*5,dts.observations.shape,name,'='*5)
    x_data = dts.observations[:, 5]
    y_data = dts.observations[:, 6]
    dones_data = dts.dones
    def preprocess(speed_data, dones_data, num, n):
        # Split the trace at every non-zero done flag; optionally drop a
        # hardcoded set of episode indices when n is True.
        speed_data = np.reshape(speed_data, (-1, 1))
        j = 0
        ind = []
        x_array = []
        if n:
            nl = [9, 11, 19, 25, 27]  # NOTE(review): unused — the loop below hardcodes a different skip list
        for i in range(dones_data.shape[0]):
            if dones_data[i]!=0:
                ind.append(i)
        for i in range(num):
            if n:
                if i in [1,2,6,9,17,18,19,20,21,25,27,29]:
                    continue
            if i==num-1:
                buf_arr = speed_data[ind[i]:]
            else:
                buf_arr = speed_data[ind[i]:ind[i+1]]
            #buf_arr = pad_sequences([buf_arr],maxlen=800,padding='post',truncating='post',dtype="float32")
            x_array.append(buf_arr)
        return x_array
    x, y = preprocess(x_data, dones_data, num, n), preprocess(y_data, dones_data, num, n)
    return x, y
def draw_traj():
    """Plot the (x, y) trajectories of the proposed agent and the human driver.

    Loads two expert datasets, splits each into per-episode trajectories via
    calc_traj(), scatters every episode's end point, highlights selected
    paths, saves the figure to trajj.png, and shows it interactively.
    """
    path_list=[
        'expert_carla_sacfd_test.npz',
        'expert_carla_new_human_continuous_key_mlp.npz'
    ]
    l = ['Proposed','Human']
    i = 0
    all_traj_list = []
    for p in path_list:
        X,Y = [],[]
        # The human dataset (index 1) contributes 5 episodes; the agent 30.
        if i==1:
            num=5
            n=True
        else:
            num=30
            n=True
        x,y = calc_traj(p,num,n)
        for x_traj,y_traj in zip(x,y):
            X.append(x_traj)
            Y.append(y_traj)
        all_traj_list.append((X,Y))
        i+=1
    import matplotlib.pyplot as plt
    import seaborn as sns
    #sns.set(style="darkgrid")
    plt.figure()
    #plt.grid(True)
    j = 0
    mk,cl = ['o','^'],['r','b']
    line_width=[3.5,1.5]
    for X,Y in all_traj_list:
        k = 0
        for x,y in zip(X,Y):
            #plt.plot(-y,-x,linewidth=2,alpha=0.5)
            # Mark each episode's final position; both axes are negated,
            # presumably to match the simulator's screen orientation —
            # TODO confirm against the CARLA coordinate frame.
            plt.scatter(-y[-1],-x[-1],c='orange',marker='o',linewidths=8,alpha=0.5)
            print(len(x),j,k,x[-1],y[-1])
            # NOTE(review): the -5 threshold selecting which paths to draw
            # in red is undocumented — confirm its meaning.
            if x[-1]>-5:
                plt.plot(-y,-x,linewidth=5.5,alpha=1,color='red')
            k+=1
        # Draw the first episode of each dataset as a representative path.
        plt.plot(-Y[0],-X[0],linewidth=3.5,alpha=0.5,color='b')
        j+=1
    #plt.axis([-52,-50,6.45,6.55])
    plt.savefig("trajj.png",transparent=True)
    #plt.savefig('C:/Users/24829/Desktop/paper-pic/traj_point2.pdf')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.show()
def recg_files():
    """List and load every expert-dataset (*.npz) file in the project directory.

    The original body referenced an undefined name ``p`` (NameError on every
    call); iterate over the discovered files instead.
    """
    path = 'C:/Users/24829/Desktop/gym-carla-master/'
    path_list = glob.glob(path + '*.npz')
    print(path_list)
    for p in path_list:
        # glob returns full paths already, so do not prepend `path` again.
        dts = ExpertDataset(expert_path=p)
def test_list():
    """Print the indices of the non-zero entries of a small example array."""
    sample = np.array([1, 0, 0, 2, 3, 4, 0, 0, 0, 0])
    nonzero_idx = np.where(sample != 0)[0]
    print(nonzero_idx)
if __name__ == '__main__':
    # Script entry point: only the speed-figure plot is enabled; the other
    # routines can be re-enabled by uncommenting them.
    #draw_traj()
    #test_list()
    plot_figure()
    #main()
import numpy as np
import pyransac3d
def get_plane_equation(plane_model, height_map, RESOLUTION, verbose=False):
    """Fit a plane to a regularly-gridded height map and return its equation.

    Args:
        plane_model: RANSAC-style plane estimator exposing
            ``fit(points, threshold) -> (eq, inliers)`` where ``eq`` is
            ``[a, b, c, d]`` (e.g. ``pyransac3d.Plane``).
        height_map: 2-D array of z values sampled on a regular grid.
        RESOLUTION: grid spacing, same length units as the height values.
        verbose: if True, print the fitted plane equation.

    Returns:
        numpy.ndarray ``[a, b, c, d]`` for the plane a*x + b*y + c*z + d = 0,
        sign-normalized so that d >= 0.
    """
    # Build the (x, y, z) point cloud corresponding to the height-map grid.
    xgrid, ygrid = np.meshgrid(
        np.linspace(0, height_map.shape[1] * RESOLUTION, height_map.shape[1]),
        np.linspace(0, height_map.shape[0] * RESOLUTION, height_map.shape[0]))
    xyz = np.zeros((np.size(xgrid), 3))
    xyz[:, 0] = np.reshape(xgrid, -1)
    xyz[:, 1] = np.reshape(ygrid, -1)
    xyz[:, 2] = np.reshape(height_map, -1)
    # 0.01: RANSAC inlier distance threshold (units of the height values).
    best_eq, _ = plane_model.fit(xyz, 0.01)
    best_eq = np.array(best_eq)
    # Canonicalize the sign so equivalent planes compare equal (d >= 0).
    if best_eq[3] < 0:
        best_eq *= -1
    a, b, c, d = best_eq
    if verbose:
        print(f'Equation of plane: {a:.2f} x + {b:.2f} y + {c:.2f} z + {d:.2f} = 0')
    return np.array([a, b, c, d])
#!/usr/bin/env python3
import json
import os
from io import BytesIO, StringIO

import boto3
class QuickDirtyS3Storage(object):
    """Persist a single JSON app-state document in an S3 bucket.

    The bucket name is read from the KALEIDOSCOPE_S3_BUCKET environment
    variable at construction time.
    """

    def __init__(self):
        self.bucketname = os.environ.get('KALEIDOSCOPE_S3_BUCKET')
        self.filename = 'kaleidoscope-appstate.json'
        resource = boto3.resource('s3')
        self.bucket = resource.Bucket(self.bucketname)

    def load(self):
        """Download and deserialize the state object from S3."""
        # boto3's download_fileobj requires a binary file object; the
        # original `StringIO.StringIO()` also raised AttributeError since
        # StringIO is imported directly from io.
        buf = BytesIO()
        self.bucket.download_fileobj(self.filename, buf)
        # Use getvalue(): str(buf) would stringify the buffer object's
        # repr, not its contents.
        return json.loads(buf.getvalue().decode('utf-8'))

    def save(self, obj):
        """Serialize `obj` as JSON and upload it to S3."""
        buf = BytesIO(json.dumps(obj).encode('utf-8'))
        self.bucket.upload_fileobj(buf, self.filename)
class SimpleLocalStorage(object):
    """Persist a single JSON state document on the local filesystem."""

    def __init__(self):
        self.filename = '/tmp/state.json'

    def load(self):
        """Read and deserialize the state object from disk."""
        with open(self.filename, 'r') as fh:
            return json.load(fh)

    def save(self, obj):
        """Serialize `obj` as JSON and write it to disk.

        The original called ``json.dump(fh)``, omitting the object being
        saved and raising a TypeError; both arguments are required.
        """
        with open(self.filename, 'w') as fh:
            return json.dump(obj, fh)
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# This script will set subversion property recursively
#
# Usage: SvnSetProp PROPNAME PROPVAL [PATH]
import fnmatch
import getopt
import os
import subprocess
import sys
class SvnSetProp:
def __init__(self):
# ----------------------------
# arguments
# ----------------------------
self.path = '.'
self.propname = ''
self.propval = ''
self.includes = []
self.excludes = []
self.setdir = True
self.setfile = True
self.recu = False
self.test = False
def showUsage(self, err = None, exitCode = 2):
if err:
print "Error:", err
print
print "SvnSetProp.py [Options] PROPNAME PROPVAL [PATH]"
print
print "Options (takes an parameter):"
print " -i, --include Include files (wildcard)"
print " -e, --exclude Exclude files (wildcard)"
print
print "Flags (no parameter):"
print " -d, --dir Set property to directory only"
print " -f, --file Set property to file only"
print " -r, --recu Recursively rename files in subdirectories"
print " -t, --test Test only"
print
if exitCode > 0:
sys.exit(exitCode)
def getArgs(self, args):
try:
opts, args = getopt.getopt(args, "h?i:e:dfrt",
[ "help", "include=", "exclude=", "dir", "file", "recu", "test" ]
)
for opt, arg in opts:
if opt in ("-h", "-?", "--help"):
self.showUsage()
elif opt in ("--include", "-i"):
self.includes.append(arg)
elif opt in ("--exclude", "-e"):
self.excludes.append(arg)
elif opt in ("--dir", "-d"):
self.setdir = True
self.setfile = False
elif opt in ("--file", "-f"):
self.setdir = False
self.setfile = True
elif opt in ("--recu", "-r"):
self.recu = True
elif opt in ("--test", "-t"):
self.test = True
else:
self.showUsage("Invalid argument: " + opt)
except getopt.GetoptError as e:
self.showUsage(str(e))
if len(args) < 1:
self.showUsage("Missing PROPNAME argument!")
self.propname = args[0]
if len(args) < 2:
self.showUsage("Missing PROPVAL argument!")
self.propval = args[1]
if len(args) > 2:
self.path = args[2]
def run(self):
print 'SvnSetProp %s=%s [ %s ]' % (self.propname, self.propval, self.path)
if self.includes:
print ' includes: %s' % (self.includes)
if self.excludes:
print ' excludes: %s' % (self.excludes)
self.process(self.path)
print 'OK!'
def myexec(self, kargs, show):
if show: print ' '.join(kargs)
return subprocess.Popen(kargs, stdout=subprocess.PIPE).communicate()[0]
def svnsetprop(self, path):
if not self.setdir and os.path.isdir(path):
return
if not self.setfile and os.path.isfile(path):
return
fname = os.path.basename(path)
if self.excludes:
for p in self.excludes:
if fnmatch.fnmatch(fname, p):
return
if self.includes:
inc = False
for p in self.includes:
if not fnmatch.fnmatch(fname, p):
inc = True
break
if not inc:
return
self.myexec(['svn', 'ps', self.propname, self.propval, path], True)
def process(self, path):
if not os.path.exists(path):
return
self.svnsetprop(path)
if self.recu and os.path.isdir(path):
for subdir in os.listdir(path):
if subdir == '.svn': continue
npath = os.path.join(path, subdir)
self.process(npath)
# start
if __name__ == "__main__":
    # Script entry point: parse arguments, apply the property, then exit 0.
    scp = SvnSetProp()
    scp.getArgs(sys.argv[1:])
    scp.run()
    sys.exit(0)
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Signing Model Objects
This module contains classes that encapsulate data about the signing process.
"""
import os.path
class CodeSignedProduct(object):
    """A build product to be signed with `codesign(1)`."""

    def __init__(self,
                 path,
                 identifier,
                 options=None,
                 requirements=None,
                 identifier_requirement=True,
                 sign_with_identifier=False,
                 entitlements=None,
                 verify_options=None):
        """Describes one product to codesign.

        Args:
            path: Product path, relative to a work directory containing the
                build products.
            identifier: Unique code signing identifier. Passed explicitly
                via `--identifier` only when |sign_with_identifier| is True.
            options: `codesign --options` flags, from |CodeSignOptions|.
            requirements: Additional `--requirements` text for `codesign`,
                space-joined with
                |config.CodeSignConfig.codesign_requirements_basic|. See
                |CodeSignedProduct.requirements_string()|.
            identifier_requirement: If True, a designated identifier
                requirement derived from |identifier| is inserted into the
                requirements string; if False, none is generated.
            sign_with_identifier: If True, pass |identifier| explicitly to
                `codesign`; otherwise `codesign` infers it.
            entitlements: Name of the entitlements file to sign with,
                resident in |Paths.packaging_dir|.
            verify_options: `codesign --verify` flags, from |VerifyOptions|.

        Raises:
            ValueError: if |options| or |verify_options| contain values not
                declared on their respective enums.
        """
        if not CodeSignOptions.valid(options):
            raise ValueError('Invalid CodeSignOptions: {}'.format(options))
        if not VerifyOptions.valid(verify_options):
            raise ValueError('Invalid VerifyOptions: {}'.format(verify_options))
        self.path = path
        self.identifier = identifier
        self.options = options
        self.requirements = requirements
        self.identifier_requirement = identifier_requirement
        self.sign_with_identifier = sign_with_identifier
        self.entitlements = entitlements
        self.verify_options = verify_options

    def requirements_string(self, config):
        """Builds the full designated-requirements string for the product.

        Args:
            config: A |config.CodeSignConfig| object.

        Returns:
            The string to pass to `codesign --requirements`; empty for
            ad-hoc signing.
        """
        # An ad-hoc identity ('-') appends a hash to the identifier, which
        # would violate the identifier requirement and most others, so no
        # requirements are enforced in that case.
        if config.identity == '-':
            return ''
        parts = []
        if self.identifier_requirement:
            parts.append('designated => identifier "{identifier}"'.format(
                identifier=self.identifier))
        if self.requirements:
            parts.append(self.requirements)
        if config.codesign_requirements_basic:
            parts.append(config.codesign_requirements_basic)
        return ' '.join(parts)

    def __repr__(self):
        return ('CodeSignedProduct(identifier={0.identifier}, '
                'options={0.options}, path={0.path})'.format(self))
def make_enum(class_name, options):
    """Creates a simple enum-like class from a name/value mapping.

    Args:
        class_name: Name of the new type to create.
        options: Dict of enum options. Keys become class attributes whose
            values are one-element tuples, so options can be combined with
            the `+` operator.

    Returns:
        The new enum class, carrying a `valid()` classmethod.
    """
    @classmethod
    def valid(cls, opts_to_check):
        """Returns True if every entry of `opts_to_check` is a known value.

        A None argument is treated as valid (no options requested).
        """
        if opts_to_check is None:
            return True
        known = options.values()
        return all(opt in known for opt in opts_to_check)

    members = {'valid': valid}
    for key, val in options.items():
        assert type(key) is str
        assert type(val) is str
        # Wrap each value in a tuple so members concatenate with `+`.
        members[key] = (val,)
    return type(class_name, (object,), members)
"""Enum for the options that can be specified when validating the results of
code signing.
These options are passed to `codesign --verify` after the
|CodeSignedProduct| has been signed.
"""
VerifyOptions = make_enum(
'signing.model.VerifyOptions', {
'DEEP': '--deep',
'STRICT': '--strict',
'NO_STRICT': '--no-strict',
'IGNORE_RESOURCES': '--ignore-resources',
})
CodeSignOptions = make_enum(
'signing.model.CodeSignOptions', {
'RESTRICT': 'restrict',
'LIBRARY_VALIDATION': 'library',
'HARDENED_RUNTIME': 'runtime',
'KILL': 'kill',
})
# Specify the components of HARDENED_RUNTIME that are also available on
# older macOS versions.
CodeSignOptions.FULL_HARDENED_RUNTIME_OPTIONS = (
CodeSignOptions.HARDENED_RUNTIME + CodeSignOptions.RESTRICT +
CodeSignOptions.LIBRARY_VALIDATION + CodeSignOptions.KILL)
class Distribution(object):
    """A Distribution represents a final, signed, and potentially channel-
    customized Chrome product.
    Channel customization refers to modifying parts of the app bundle structure
    to have different file names, internal identifiers, and assets.
    """
    def __init__(self,
                 channel=None,
                 branding_code=None,
                 app_name_fragment=None,
                 packaging_name_fragment=None,
                 product_dirname=None,
                 creator_code=None,
                 channel_customize=False,
                 package_as_dmg=True,
                 package_as_pkg=False):
        """Creates a new Distribution object. All arguments are optional.
        Args:
            channel: The release channel for the product.
            branding_code: A branding code helps track how users acquired the
                product from various marketing channels.
            app_name_fragment: If present, this string fragment is appended to
                the |config.CodeSignConfig.app_product|. This renames the binary
                and outer app bundle.
            packaging_name_fragment: If present, this is appended to the
                |config.CodeSignConfig.packaging_basename| to help differentiate
                different |branding_code|s.
            product_dirname: If present, this string value is set in the app's
                Info.plist with the key "CrProductDirName". This key influences
                the browser's default user-data-dir location.
            creator_code: If present, this will set a new macOS creator code
                in the Info.plist "CFBundleSignature" key and in the PkgInfo
                file. If this is not specified, the original values from the
                build products will be kept.
            channel_customize: If True, then the product will be modified in
                several ways:
                - The |channel| will be appended to the
                  |config.CodeSignConfig.base_bundle_id|.
                - The product will be renamed with |app_name_fragment|.
                - Different assets will be used for icons in the app.
            package_as_dmg: If True, then a .dmg file will be created containing
                the product.
            package_as_pkg: If True, then a .pkg file will be created containing
                the product.
        """
        self.channel = channel
        self.branding_code = branding_code
        self.app_name_fragment = app_name_fragment
        self.packaging_name_fragment = packaging_name_fragment
        self.product_dirname = product_dirname
        self.creator_code = creator_code
        self.channel_customize = channel_customize
        self.package_as_dmg = package_as_dmg
        self.package_as_pkg = package_as_pkg
    def to_config(self, base_config):
        """Produces a derived |config.CodeSignConfig| for the Distribution.
        Args:
            base_config: The base CodeSignConfig to derive.
        Returns:
            A new CodeSignConfig instance that uses information in the
            Distribution to alter various properties of the |base_config|.
        """
        # Capture self under another name so the nested class's properties
        # (whose `self` is the config instance) can still reach it.
        this = self
        class DistributionCodeSignConfig(base_config.__class__):
            @property
            def base_config(self):
                return base_config
            @property
            def distribution(self):
                return this
            @property
            def app_product(self):
                if this.channel_customize:
                    return '{} {}'.format(base_config.app_product,
                                          this.app_name_fragment)
                return base_config.app_product
            @property
            def base_bundle_id(self):
                base_bundle_id = base_config.base_bundle_id
                if this.channel_customize:
                    return base_bundle_id + '.' + this.channel
                return base_bundle_id
            @property
            def provisioning_profile_basename(self):
                profile = base_config.provisioning_profile_basename
                if profile and this.channel_customize:
                    return '{}_{}'.format(profile, this.app_name_fragment)
                return profile
            @property
            def packaging_basename(self):
                if this.packaging_name_fragment:
                    return '{}-{}-{}'.format(
                        self.app_product.replace(' ', ''), self.version,
                        this.packaging_name_fragment)
                return super(DistributionCodeSignConfig,
                             self).packaging_basename
        # Re-create the config with the same credential arguments as the base.
        return DistributionCodeSignConfig(
            base_config.identity, base_config.installer_identity,
            base_config.notary_user, base_config.notary_password,
            base_config.notary_asc_provider)
class Paths(object):
    """Holds the three directory contexts used by signing operations.

    The input directory is never modified, the output directory receives
    the final signed products, and the work directory is set by internal
    operations for intermediate state.
    """

    def __init__(self, input, output, work):
        self._input = input
        self._output = output
        self._work = work

    @property
    def input(self):
        return self._input

    @property
    def output(self):
        return self._output

    @property
    def work(self):
        return self._work

    def packaging_dir(self, config):
        """Returns the input-relative directory holding the packaging
        scripts and assets used in signing.

        Args:
            config: The |config.CodeSignConfig| object.

        Returns:
            Path to the packaging directory.
        """
        return os.path.join(self.input, '{} Packaging'.format(config.product))

    def replace_work(self, new_work):
        """Returns a copy of this Paths whose work directory is |new_work|;
        input and output are unchanged."""
        return Paths(self.input, self.output, new_work)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return ((self._input, self._output, self._work) ==
                (other._input, other._output, other._work))

    def __repr__(self):
        return ('Paths(input={0.input}, output={0.output}, '
                'work={0.work})'.format(self))
|
from rest_framework import serializers
from patientdb.models import Patient, Signals
class PatientSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the WFDB-style header fields of a Patient record."""
    class Meta:
        model = Patient
        # Field names mirror the wfdb record attributes stored on the model.
        fields = ("record_name", "n_sig", "fs", "counter_freq", "base_counter", "sig_len", "base_time", "base_date",
                  "comments", "sig_name", "p_signal", "d_signal", "e_p_signal", "file_name", "fmt", "samps_per_frame",
                  "skew", "byte_offset", "adc_gain", "baseline", "units", "adc_res", "adc_zero", "init_value",
                  "checksum", "block_size")
class SignalsSerializer(serializers.ModelSerializer):
    """DRF serializer for individual signal samples linked to a record."""
    class Meta:
        model = Signals
        fields = ('signal_record_name', 'time', 'mlii', 'v5')
|
## Copyright 2002-2003 Andrew Loewenstern, All Rights Reserved
# see LICENSE.txt for license information
import unittest
import ktable, khashmir
import khash, node, knode
import actions
import test_airhook
import test_krpc
# Build one suite from all khashmir test modules and run it with the text
# runner; this executes at import time (the file is a test-driver script).
tests = unittest.defaultTestLoader.loadTestsFromNames(['khash', 'node', 'knode', 'actions', 'ktable', 'test_airhook', 'test_krpc'])
result = unittest.TextTestRunner().run(tests)
|
from django import forms
from .models import Analisis
class AnalisisForm(forms.ModelForm):
    """ModelForm for creating and editing Analisis records.

    Every declared field is forced to be required in __init__.
    """
    class Meta:
        # Fixed: the original declared `models = Analisis`; ModelForm's Meta
        # expects the singular `model` attribute, so the form was never
        # actually bound to the Analisis model.
        model = Analisis
        fields = [
            'tipo_analisis',
            'fecha_de_solicitud',
            'fecha_resultados',
            'resultados',
            'analisis_del_diagnostico'
        ]
    def __init__(self, *args, **kwargs):
        super(AnalisisForm, self).__init__(*args, **kwargs)
        # Mark every field required regardless of the model's blank/null
        # settings.
        for field in self.fields:
            self.fields[field].required = True
# -*- coding: utf-8 -*-
# Copyright 2017 Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import os
import shutil
import numpy as np
import pytest
from scipy.io import wavfile
from wavenet_vocoder.bin.calc_stats import calc_stats
from wavenet_vocoder.bin.feature_extract import melcepstrum_extract
from wavenet_vocoder.bin.feature_extract import melspectrogram_extract
from wavenet_vocoder.bin.feature_extract import world_feature_extract
from wavenet_vocoder.bin.noise_shaping import convert_mcep_to_mlsa_coef
from wavenet_vocoder.bin.noise_shaping import noise_shaping
from wavenet_vocoder.utils import check_hdf5
from wavenet_vocoder.utils import find_files
from wavenet_vocoder.utils import read_hdf5
from wavenet_vocoder.utils import write_hdf5
def make_dummy_wav(name, maxlen=32000, fs=16000):
    """Write a random mono 16-bit PCM wav file for testing.

    Args:
        name: output file path.
        maxlen: maximum length in samples; the actual length is drawn
            uniformly from [maxlen // 2, maxlen).
        fs: sampling rate in Hz.
    """
    length = np.random.randint(maxlen // 2, maxlen)
    x = np.random.randn(length)
    # Normalize so the peak sample has magnitude exactly 1.0.
    x = x / np.abs(x).max()
    # Fixed: scale by int16 max (32767), not max + 1 (32768). The peak
    # sample would otherwise overflow int16 and wrap around to -32768.
    x = np.int16(x * np.iinfo(np.int16).max)
    wavfile.write(name, fs, x)
def make_args(**overrides):
    """Builds an argparse.Namespace of preprocessing defaults.

    Any keyword argument overrides the matching default, mirroring the
    command-line interface of the feature-extraction scripts.
    """
    params = {
        "hdf5dir": "tmp/hdf5",
        "wavdir": "tmp/wav_filtered",
        "outdir": "tmp/wav_nwf",
        "stats": "tmp/stats.h5",
        "feature_type": "world",
        "fs": 16000,
        "shiftms": 5,
        "minf0": 40,
        "maxf0": 400,
        "mspc_dim": 80,
        "mcep_dim": 24,
        "mcep_alpha": 0.41,
        "fftl": 1024,
        "highpass_cutoff": 70,
        "mcep_dim_start": 2,
        "mcep_dim_end": 25,
        "fmin": None,
        "fmax": None,
        "mag": 0.5,
        "save_wav": True,
        "inv": False,
    }
    params.update(overrides)
    return argparse.Namespace(**params)
@pytest.mark.parametrize("feature_type", [
("melspc"), ("world"), ("mcep"),
])
def test_preprocessing(feature_type):
# make arguments
args = make_args(feature_type=feature_type)
# prepare dummy wav files
wavdir = "tmp/wav"
if not os.path.exists(wavdir):
os.makedirs(wavdir)
for i in range(5):
make_dummy_wav(wavdir + "/%d.wav" % i, 8000, args.fs)
# feature extract
wav_list = find_files(wavdir, "*.wav")
if not os.path.exists(args.wavdir):
os.makedirs(args.wavdir)
if args.feature_type == "world":
world_feature_extract(wav_list, args)
elif args.feature_type == "melspc":
melspectrogram_extract(wav_list, args)
else:
melcepstrum_extract(wav_list, args)
# calc_stats
file_list = find_files(args.hdf5dir, "*.h5")
calc_stats(file_list, args)
# noise shaping
if feature_type != "melspc":
wav_list = find_files(args.wavdir, "*.wav")
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
if not check_hdf5(args.stats, "/mlsa/coef"):
avg_mcep = read_hdf5(args.stats, args.feature_type + "/mean")
if args.feature_type == "world":
avg_mcep = avg_mcep[args.mcep_dim_start:args.mcep_dim_end]
mlsa_coef = convert_mcep_to_mlsa_coef(avg_mcep, args.mag, args.mcep_alpha)
write_hdf5(args.stats, "/mlsa/coef", mlsa_coef)
write_hdf5(args.stats, "/mlsa/alpha", args.mcep_alpha)
noise_shaping(wav_list, args)
# remove
shutil.rmtree("tmp")
|
#!/usr/bin/python
"""
ZetCode wxPython tutorial
This program creates a Burning widget.
author: Jan Bodnar
website: zetcode.com
last edited: May 2018
"""
import wx
class Burning(wx.Panel):
    """Custom-drawn CD-burning gauge: a filled bar with tick marks that
    turns red past the 700 MB capacity mark."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent, size=(-1, 30), style=wx.SUNKEN_BORDER)
        self.parent = parent
        self.font = wx.Font(9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL,
            wx.FONTWEIGHT_NORMAL, False, 'Courier 10 Pitch')
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_SIZE, self.OnSize)
    def OnPaint(self, e):
        """Repaint the gauge from the current slider value."""
        # Tick labels: 75, 150, ..., 675.
        num = range(75, 700, 75)
        dc = wx.PaintDC(self)
        dc.SetFont(self.font)
        w, h = self.GetSize()
        # Current value lives on the top-level frame (the panel's grandparent).
        self.cw = self.parent.GetParent().cw
        step = int(round(w / 10.0))
        j = 0
        # Scale the 0..750 value range to the widget's pixel width.
        till = (w / 750.0) * self.cw
        full = (w / 750.0) * 700
        if self.cw >= 700:
            # Past capacity: yellow up to 700, red overflow beyond it.
            dc.SetPen(wx.Pen('#FFFFB8'))
            dc.SetBrush(wx.Brush('#FFFFB8'))
            dc.DrawRectangle(0, 0, full, 30)
            dc.SetPen(wx.Pen('#ffafaf'))
            dc.SetBrush(wx.Brush('#ffafaf'))
            dc.DrawRectangle(full, 0, till-full, 30)
        else:
            dc.SetPen(wx.Pen('#FFFFB8'))
            dc.SetBrush(wx.Brush('#FFFFB8'))
            dc.DrawRectangle(0, 0, till, 30)
        # Tick marks and labels every tenth of the width.
        dc.SetPen(wx.Pen('#5C5142'))
        for i in range(step, 10*step, step):
            dc.DrawLine(i, 0, i, 6)
            width, height = dc.GetTextExtent(str(num[j]))
            dc.DrawText(str(num[j]), i-width/2, 8)
            j = j + 1
    def OnSize(self, e):
        # Force a full repaint so the bar rescales with the widget.
        self.Refresh()
class Example(wx.Frame):
    """Demo frame: a slider that drives the Burning gauge at the bottom."""
    def __init__(self, *args, **kwargs):
        super(Example, self).__init__(*args, **kwargs)
        self.InitUI()
    def InitUI(self):
        """Builds the layout: centered slider above the gauge strip."""
        # Current value read by Burning.OnPaint via GetParent().
        self.cw = 75
        panel = wx.Panel(self)
        CenterPanel = wx.Panel(panel)
        self.sld = wx.Slider(CenterPanel, value=75, maxValue=750, size=(200, -1),
            style=wx.SL_LABELS)
        vbox = wx.BoxSizer(wx.VERTICAL)
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        hbox2 = wx.BoxSizer(wx.HORIZONTAL)
        hbox3 = wx.BoxSizer(wx.HORIZONTAL)
        self.wid = Burning(panel)
        hbox.Add(self.wid, 1, wx.EXPAND)
        hbox2.Add(CenterPanel, 1, wx.EXPAND)
        hbox3.Add(self.sld, 0, wx.LEFT|wx.TOP, 35)
        CenterPanel.SetSizer(hbox3)
        vbox.Add(hbox2, 1, wx.EXPAND)
        vbox.Add(hbox, 0, wx.EXPAND)
        self.Bind(wx.EVT_SCROLL, self.OnScroll)
        panel.SetSizer(vbox)
        self.sld.SetFocus()
        self.SetTitle("Burning widget")
        self.Centre()
    def OnScroll(self, e):
        # Mirror the slider position and repaint the gauge.
        self.cw = self.sld.GetValue()
        self.wid.Refresh()
def main():
    """Create the wx application, show the example frame, and run its loop."""
    app = wx.App()
    frame = Example(None)
    frame.Show()
    app.MainLoop()
if __name__ == '__main__':
    # Script entry point.
    main()
|
#-------------------------------------------------------------------------------
# Lia
#-------------------------------------------------------------------------------
def runLia(inputFile):
    """Read the Lia problem input and return the two integers on its first line.

    Args:
        inputFile: path to a file whose first line holds two
            whitespace-separated integers, k and n.

    Returns:
        Tuple (k, n).
    """
    # Use a context manager so the handle is always closed (the original
    # leaked it and returned the undefined name `fastaNames`, raising
    # NameError on every call).
    with open(inputFile, 'r') as fi:
        inputData = fi.readline().split()  # reads in files
    k, n = int(inputData[0]), int(inputData[1])
    return k, n
#-------------------------------------------------------------------------------
# Fin
#-------------------------------------------------------------------------------
|
# Dataset size bounds (number of samples).
# NOTE(review): semantics inferred from names only — confirm against usage.
max_dataset = 350    # upper bound on samples considered
min_dataset = 0      # lower bound on samples considered
limit_dataset = 250  # presumably the cap actually applied when loading
# Read two integers from the user and print their sum.
# (Prompts and output are in Portuguese; they are user-facing strings and
# are left untouched.)
n1 = int(input('Digite um valor: '))
n2 = int(input('Digite outro numero: '))
s = n1+n2
# print ('A soma entre {}'.format (n1),'e {}' .format(n2), 'vale', s)
print('A soma entre {} e {} vale {}' .format(n1, n2, s))
# Advent of Code 2020 day 8 part 2: find the single nop<->jmp swap that
# makes the hand-held console program terminate, and print the accumulator
# value at termination.
indexPassed = []
globalAcc = 0
with open('marcomole00/8/input.txt') as file:
    lines = file.read().split('\n')
# Try flipping each instruction in turn.
# Fixed: the original used lines.index(instruction), which returns the
# FIRST occurrence of a duplicated instruction, so the wrong line could be
# flipped and then "restored", silently corrupting the program. enumerate
# gives the true position.
for extIndex, instruction in enumerate(lines):
    indexIn = 0
    indexPassed = []
    infiniteLoop = False
    accumulator = 0
    field, value = instruction.split(' ')
    if field == 'nop':
        lines[extIndex] = instruction.replace('nop', 'jmp')
    elif field == 'jmp':
        lines[extIndex] = instruction.replace('jmp', 'nop')
    # Execute the patched program until it revisits an instruction
    # (infinite loop) or runs off the end (normal termination).
    while True:
        indexPassed.append(indexIn)
        fieldTemp, valueTemp = lines[indexIn].split(' ')
        print(lines[extIndex], ' ', fieldTemp)
        if fieldTemp == 'jmp':
            indexIn += int(valueTemp)
        else:
            indexIn += 1
        if fieldTemp == 'acc':
            accumulator += int(valueTemp)
        if indexIn in indexPassed:
            infiniteLoop = True
            break
        if indexIn >= len(lines):
            print(accumulator)
            break
    if infiniteLoop == True:
        # Restore the original instruction before trying the next swap.
        lines[extIndex] = instruction
    else:
        # Terminated normally: this was the right swap.
        break
print(accumulator)
import unittest
import pandas as pd
import numpy as np
from pgmpy.estimators import HillClimbSearch, K2Score
from pgmpy.models import BayesianModel
class TestHillClimbEstimator(unittest.TestCase):
    """Tests for pgmpy's HillClimbSearch structure estimator, on synthetic
    data (B deterministically copied into C) and on the Titanic dataset."""
    def setUp(self):
        # Random A/B columns; C == B so the B-C edge is strongly supported.
        self.rand_data = pd.DataFrame(
            np.random.randint(0, 5, size=(5000, 2)), columns=list("AB")
        )
        self.rand_data["C"] = self.rand_data["B"]
        self.est_rand = HillClimbSearch(
            self.rand_data, scoring_method=K2Score(self.rand_data)
        )
        self.model1 = BayesianModel()
        self.model1.add_nodes_from(["A", "B", "C"])
        self.model2 = self.model1.copy()
        self.model2.add_edge("A", "B")
        # link to dataset: "https://www.kaggle.com/c/titanic/download/train.csv"
        self.titanic_data = pd.read_csv(
            "pgmpy/tests/test_estimators/testdata/titanic_train.csv"
        )
        self.titanic_data1 = self.titanic_data[
            ["Survived", "Sex", "Pclass", "Age", "Embarked"]
        ]
        self.titanic_data2 = self.titanic_data[["Survived", "Sex", "Pclass"]]
        self.est_titanic1 = HillClimbSearch(self.titanic_data1)
        self.est_titanic2 = HillClimbSearch(self.titanic_data2)
    def test_legal_operations(self):
        """All add/remove/flip operations from model2 should be proposed."""
        model2_legal_ops = list(self.est_rand._legal_operations(self.model2))
        # Reference scores; only the operation set is compared below because
        # the scores depend on the random data.
        model2_legal_ops_ref = [
            (("+", ("C", "A")), -28.15602208305154),
            (("+", ("A", "C")), -28.155467430966382),
            (("+", ("C", "B")), 7636.947544933631),
            (("+", ("B", "C")), 7937.805375579936),
            (("-", ("A", "B")), 28.155467430966382),
            (("flip", ("A", "B")), -0.0005546520851567038),
        ]
        self.assertSetEqual(
            set([op for op, score in model2_legal_ops]),
            set([op for op, score in model2_legal_ops_ref]),
        )
    def test_legal_operations_blacklist_whitelist(self):
        """black_list/white_list should restrict the proposed edges."""
        model2_legal_ops_bl = list(
            self.est_rand._legal_operations(
                self.model2, black_list=[("A", "B"), ("A", "C"), ("C", "A"), ("C", "B")]
            )
        )
        model2_legal_ops_bl_ref = [("+", ("B", "C")), ("-", ("A", "B"))]
        self.assertSetEqual(
            set([op for op, score in model2_legal_ops_bl]), set(model2_legal_ops_bl_ref)
        )
        model2_legal_ops_wl = list(
            self.est_rand._legal_operations(
                self.model2, white_list=[("A", "B"), ("A", "C"), ("C", "A"), ("A", "B")]
            )
        )
        model2_legal_ops_wl_ref = [
            ("+", ("A", "C")),
            ("+", ("C", "A")),
            ("-", ("A", "B")),
            ("flip", ("A", "B")),
        ]
        self.assertSetEqual(
            set([op for op, score in model2_legal_ops_wl]), set(model2_legal_ops_wl_ref)
        )
    def test_legal_operations_titanic(self):
        """tabu_list and max_indegree should prune the operation counts."""
        est = self.est_titanic1
        start_model = BayesianModel(
            [("Survived", "Sex"), ("Pclass", "Age"), ("Pclass", "Embarked")]
        )
        legal_ops = est._legal_operations(start_model)
        self.assertEqual(len(list(legal_ops)), 20)
        tabu_list = [
            ("-", ("Survived", "Sex")),
            ("-", ("Survived", "Pclass")),
            ("flip", ("Age", "Pclass")),
        ]
        legal_ops_tabu = est._legal_operations(start_model, tabu_list=tabu_list)
        self.assertEqual(len(list(legal_ops_tabu)), 18)
        legal_ops_indegree = est._legal_operations(start_model, max_indegree=1)
        self.assertEqual(len(list(legal_ops_indegree)), 11)
        legal_ops_both = est._legal_operations(
            start_model, tabu_list=tabu_list, max_indegree=1
        )
        # Exact (operation, score) pairs expected with both restrictions.
        legal_ops_both_ref = [
            (("+", ("Embarked", "Survived")), 10.050632580087608),
            (("+", ("Survived", "Pclass")), 41.88868046549101),
            (("+", ("Age", "Survived")), -23.635716036430836),
            (("+", ("Pclass", "Survived")), 41.81314459373226),
            (("+", ("Sex", "Pclass")), 4.772261678792802),
            (("-", ("Pclass", "Age")), 11.546515590731815),
            (("-", ("Pclass", "Embarked")), -32.171482832532774),
            (("flip", ("Pclass", "Embarked")), 3.3563814191281836),
            (("flip", ("Survived", "Sex")), 0.039737027979640516),
        ]
        self.assertSetEqual(set(legal_ops_both), set(legal_ops_both_ref))
    def test_estimate_rand(self):
        """Search should recover the deterministic B-C dependency (either
        orientation) from any starting model."""
        est1 = self.est_rand.estimate()
        self.assertSetEqual(set(est1.nodes()), set(["A", "B", "C"]))
        self.assertTrue(
            list(est1.edges()) == [("B", "C")] or list(est1.edges()) == [("C", "B")]
        )
        est2 = self.est_rand.estimate(start=BayesianModel([("A", "B"), ("A", "C")]))
        self.assertTrue(
            list(est2.edges()) == [("B", "C")] or list(est2.edges()) == [("C", "B")]
        )
    def test_estimate_titanic(self):
        """Expected structure on the reduced Titanic columns."""
        self.assertSetEqual(
            set(self.est_titanic2.estimate().edges()),
            set([("Survived", "Pclass"), ("Sex", "Pclass"), ("Sex", "Survived")]),
        )
    def tearDown(self):
        del self.rand_data
        del self.est_rand
        del self.model1
        del self.titanic_data
        del self.titanic_data1
        del self.titanic_data2
        del self.est_titanic1
        del self.est_titanic2
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
"""
Module used for working with the Deployment Map (yml) file.
"""
import os
import yaml
from errors import InvalidDeploymentMapError
from logger import configure_logger
LOGGER = configure_logger(__name__)
class DeploymentMap:
    """Loads, merges, and validates deployment map (yml) files, and pushes
    derived parameters into the parameter store."""
    def __init__(
            self,
            parameter_store,
            pipeline_name_prefix,
            map_path=None
    ):
        # NOTE(review): when map_path is supplied, it is used both as the
        # single-file path AND as the directory path below — confirm that
        # callers never pass a map_path expecting directory merging too.
        self.map_path = map_path or 'deployment_map.yml'
        self.map_dir_path = map_path or 'deployment_maps'
        self.parameter_store = parameter_store
        self.map_contents = self._get_deployment_map()
        self.map_contents = self._get_deployment_apps_from_dir()
        self.pipeline_name_prefix = pipeline_name_prefix
        self.account_ou_names = {}
        self._validate_deployment_map()
    def update_deployment_parameters(self, pipeline):
        """Stores the pipeline's account->OU mapping and optional
        notification endpoint in the parameter store."""
        for account in pipeline.template_dictionary['targets']:
            # 'approval' entries are gate steps, not accounts, so skip them.
            self.account_ou_names.update(
                {item['name']: item['path'] for item in account if item['name'] != 'approval'}
            )
        self.parameter_store.put_parameter(
            "/deployment/{0}/account_ous".format(
                pipeline.name
            ),
            str(self.account_ou_names)
        )
        if pipeline.notification_endpoint:
            self.parameter_store.put_parameter(
                "/notification_endpoint/{0}".format(
                    pipeline.name
                ),
                str(pipeline.notification_endpoint)
            )
    def _get_deployment_map(self, file_path=None):
        """Loads one deployment map yml; returns None if the file is absent."""
        if file_path is None:
            file_path = self.map_path
        try:
            LOGGER.info('Loading deployment_map file %s', file_path)
            with open(file_path, 'r') as stream:
                return yaml.load(stream, Loader=yaml.FullLoader)
        except FileNotFoundError:
            LOGGER.debug('Nothing found at %s', file_path)
            return None
    def _get_deployment_apps_from_dir(self):
        """Merges the 'pipelines' lists of every *.yml under the maps
        directory into self.map_contents and returns the result."""
        if os.path.isdir(self.map_dir_path):
            for file in os.listdir(self.map_dir_path):
                if file.endswith(".yml") and file != 'example-deployment_map.yml':
                    deployment_map = self._get_deployment_map('{}/{}'.format(self.map_dir_path, file))
                    # NOTE(review): if no root deployment_map.yml exists,
                    # self.map_contents is None here and the membership
                    # test below raises TypeError — confirm intended.
                    if 'pipelines' not in self.map_contents:
                        self.map_contents['pipelines'] = []
                    if 'pipelines' in deployment_map:
                        self.map_contents['pipelines'].extend(deployment_map['pipelines'])
        return self.map_contents
    def _validate_deployment_map(self):
        """
        Validates the deployment map contains valid configuration
        """
        try:
            for pipeline in self.map_contents["pipelines"]:
                for target in pipeline.get("targets", []):
                    if isinstance(target, dict):
                        # Prescriptive information on the error should be raised
                        assert target["path"]
        except KeyError:
            raise InvalidDeploymentMapError(
                "Deployment Map target or regions specification is invalid"
            )
        except TypeError:
            # map_contents is None when no map files were found at all.
            LOGGER.error(
                "No Deployment Map files found, create a deployment_map.yml file in the root of the repository to create pipelines. "
                "You can create additional deployment maps if required in a folder named deployment_maps with any name (ending in .yml)"
            )
            raise Exception from None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.