content stringlengths 5 1.05M |
|---|
# Copyright 1996-2019 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic client for the FollowJointTrajectory action."""
from action_msgs.msg import GoalStatus
from control_msgs.action import FollowJointTrajectory
from trajectory_msgs.msg import JointTrajectoryPoint
from builtin_interfaces.msg import Duration
import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
class followJointTrajectoryClient(Node):
    """Generic ROS 2 action client for the FollowJointTrajectory action.

    The same trajectory can be replayed several times via the ``iteration``
    argument of :meth:`send_goal`; once the final result has been received,
    the client shuts down rclpy.
    """

    def __init__(self, name, actionName):
        """Create node *name* with an action client on topic *actionName*."""
        super().__init__(name)
        self.client = ActionClient(self, FollowJointTrajectory, actionName)
        self.remainingIteration = 0    # replays still pending after the current goal
        self.currentTrajectory = None  # trajectory dict currently being replayed

    def goal_response_callback(self, future):
        """Handle the server's accept/reject decision for the pending goal."""
        goal_handle = future.result()
        if not goal_handle.accepted:
            self.get_logger().info('Goal rejected by action server.')
            return
        self.get_logger().info('Goal accepted by action server.')
        # Chain the result future so get_result_callback fires when done.
        self._get_result_future = goal_handle.get_result_async()
        self._get_result_future.add_done_callback(self.get_result_callback)

    def feedback_callback(self, feedback):
        """Log that feedback arrived (feedback content is ignored)."""
        self.get_logger().info('Received feedback from action server.')

    def get_result_callback(self, future):
        """Log the goal outcome, then either replay the trajectory or shut down."""
        status = future.result().status
        if status == GoalStatus.STATUS_SUCCEEDED:
            self.get_logger().info('Goal succeeded.')
        else:
            self.get_logger().info('Goal failed with status: {0}'.format(status))
        if self.remainingIteration > 0:
            # BUG FIX: pass the remaining count unchanged. send_goal itself
            # subtracts one, so the previous `self.remainingIteration - 1`
            # decremented twice per cycle and skipped one replay each time
            # (e.g. iteration=3 only ever sent 2 goals).
            self.send_goal(self.currentTrajectory, self.remainingIteration)
        else:
            # Shutdown after receiving a result
            rclpy.shutdown()

    def send_goal(self, trajectory, iteration=1):
        """Build a FollowJointTrajectory goal from *trajectory* and send it.

        Parameters
        ----------
        trajectory : dict
            Must contain 'joint_names' and 'points'; each point carries
            'positions', 'velocities', 'accelerations' and a
            'time_from_start' dict with 'sec'/'nanosec' keys.
        iteration : int
            Total number of times the trajectory should be executed.
        """
        self.get_logger().info('Waiting for action server...')
        self.client.wait_for_server()
        self.currentTrajectory = trajectory
        self.remainingIteration = iteration - 1
        goal_msg = FollowJointTrajectory.Goal()
        goal_msg.trajectory.joint_names = trajectory['joint_names']
        for point in trajectory['points']:
            trajectoryPoint = JointTrajectoryPoint(
                positions=point['positions'],
                velocities=point['velocities'],
                accelerations=point['accelerations'],
                time_from_start=Duration(
                    sec=point['time_from_start']['sec'],
                    nanosec=point['time_from_start']['nanosec']
                ))
            goal_msg.trajectory.points.append(trajectoryPoint)
        self.get_logger().info('Sending goal request...')
        self._send_goal_future = self.client.send_goal_async(
            goal_msg,
            feedback_callback=self.feedback_callback)
        self._send_goal_future.add_done_callback(self.goal_response_callback)
|
#### TESTED
from typing import Tuple
import pandas as pd
from pandas import DataFrame
from dbnd import parameter, task
from dbnd_examples.data import data_repo
from targets.target_config import FileFormat
class TestDocDataSerializationDeSerialization:
    """Runnable documentation snippets for dbnd parameter (de)serialization.

    NOTE: the ``#### DOC START`` / ``#### DOC END`` markers delimit code that
    is extracted verbatim into the docs — the code between them must stay
    exactly as published.
    """

    def test_prepare_data_save_result_with_no_header(self):
        """Save the task result as CSV without a header row via ``save_options``."""
        #### DOC START
        @task(result=parameter.csv.save_options(FileFormat.csv, header=False))
        def prepare_data(data: DataFrame) -> DataFrame:
            data["new_column"] = 5
            return data
        #### DOC END
        prepare_data.task(data=data_repo.wines).dbnd_run()

    def test_prepare_data_tab_delimited(self):
        """Load the CSV input with a tab separator via ``load_options``."""
        #### DOC START
        @task(data=parameter[DataFrame].csv.load_options(FileFormat.csv, sep="\t"))
        def prepare_data(data: DataFrame) -> DataFrame:
            data["new_column"] = 5
            return data
        #### DOC END
        prepare_data.task(data=data_repo.wines).dbnd_run()

    def test_prepare_data(self):
        """Default serialization: no explicit load/save options."""
        #### DOC START
        @task
        def prepare_data(data: DataFrame) -> DataFrame:
            data["new_column"] = 5
            return data
        #### DOC END
        prepare_data.task(data=data_repo.wines).dbnd_run()

    def test_tuple_two_outputs(self):
        """Name two task outputs with a comma-separated ``result`` string."""
        #### DOC START
        @task(result="training_set,real_data")
        def prepare_data(data: DataFrame) -> Tuple[DataFrame, DataFrame]:
            data["new_column"] = 5
            return data, data
        #### DOC END
        prepare_data.task(data=data_repo.wines).dbnd_run()
|
"""Implements the astroplan-based sky target visibility web tool using Flask.
"""
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as pl
import flask
import json
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord
from astropy.utils.data import get_file_contents
import astroplan
from astroplan.plots import plot_altitude
try:
from io import BytesIO # Python 3
except ImportError:
from cStringIO import StringIO as BytesIO # Legacy Python
app = flask.Flask('astroplanapp')
def observing_sites():
    """Return a dict mapping astropy site keys to human-readable site names."""
    jsonurl = 'http://data.astropy.org/coordinates/sites.json'
    raw = get_file_contents(jsonurl, show_progress=False, cache=True)
    # Keep only the display name of each site entry.
    return {key: entry['name'] for key, entry in json.loads(raw).items()}
def _parse_single_target(target):
    """Convert one coordinate (or object-name) string into a FixedTarget."""
    try:
        coordinates = SkyCoord(target)
    except ValueError:  # The coordinate string is ambiguous; make assumptions
        if target[0].isalpha():
            # Starts with a letter: treat it as an object name to be resolved.
            coordinates = SkyCoord.from_name(target)
        elif ":" in target:
            # Sexagesimal notation, e.g. "12:34:56 -01:02:03".
            coordinates = SkyCoord(target, unit="hour,deg")
        else:
            # Bare numbers: assume decimal degrees.
            coordinates = SkyCoord(target, unit="deg")
    return astroplan.FixedTarget(coordinates)
def _parse_targets(targets):
    """Parse the newline-separated 'targets' GET argument.

    Returns
    -------
    list of `astroplan.FixedTarget`
        Empty when *targets* is None.
    """
    if targets is None:
        return []
    parsed = []
    for line in targets.splitlines():
        parsed.append(_parse_single_target(line))
    return parsed
@app.route('/')
def root():
    """Serve the landing page, with the observatory-site selector populated."""
    sites = observing_sites()
    return flask.render_template('index.html', sites=sites)
@app.route('/plot-airmass')
def app_plot_airmass():
    """Render the airmass page, forwarding the query args to the template."""
    date = flask.request.args.get('date', default=None, type=str)
    location = flask.request.args.get('location', default=None, type=str)
    targets = flask.request.args.get('targets', default=None, type=str)
    # BUG FIX: 'targets' defaults to None when the query arg is absent, and
    # calling .replace on None raised AttributeError. Only re-encode when set.
    if targets is not None:
        # Percent-encode line breaks so the multi-line target list survives
        # being embedded in a URL by the template.
        targets = targets.replace("\n", "%0A").replace("\r", "%0D")
    return flask.render_template('airmass.html', date=date, location=location, targets=targets)
@app.route('/airmass.png')
def airmass_png():
    """Serve the altitude plot for the requested targets as a PNG image.

    Query args: ``date`` (ISO date, optional — defaults to the coming
    midnight), ``location`` (astropy site name), ``targets``
    (newline-separated coordinate strings).
    """
    # Parse the arguments
    date = flask.request.args.get('date', default=None, type=str)
    location = flask.request.args.get('location', default=None, type=str)
    targets = flask.request.args.get('targets', default=None, type=str)
    observer = astroplan.Observer.at_site(location)
    if date is None:
        midnight = observer.midnight(Time.now())
    else:
        # +10*u.minute circumvents astroplan issue #155
        midnight = observer.midnight(Time(date)) + 10 * u.minute
    targets = _parse_targets(targets)
    # Create the airmass plot, one altitude curve per target
    fig = pl.figure()
    ax = fig.add_subplot(111)
    for target in targets:
        plot_altitude(target, observer, midnight, ax=ax)
    pl.tight_layout()
    # Stream the image to the browser using BytesIO
    img = BytesIO()
    try:
        fig.savefig(img, transparent=True, format='png')
    finally:
        # BUG FIX: pyplot keeps a global reference to every figure it creates,
        # so without closing here each request leaked a figure's memory.
        pl.close(fig)
    img.seek(0)
    response = flask.send_file(img, mimetype="image/png")
    return response
|
import os
from collections import namedtuple
try:
    from shutil import which
except ImportError:
    # Python 2 fallback: shutil.which does not exist there, so probe the two
    # conventional binary directories (not the full PATH) for the executable.
    def which(name):
        """Return the path of *name* under /usr/local/bin or /usr/bin, else None."""
        for directory in ("/usr/local/bin/", "/usr/bin/"):
            candidate = directory + name
            if os.path.exists(candidate):
                return candidate
class ARN(namedtuple("ARN", "partition service region account resource")):
    """An Amazon Resource Name, rendered as ``arn:partition:service:region:account:resource``."""

    def __str__(self):
        parts = ("arn",) + tuple(self)
        return ":".join(parts)


# Every field defaults to empty except the partition, which defaults to "aws".
ARN.__new__.__defaults__ = ("aws", "", "", "", "")
def from_bytes(data, big_endian=False):
    """Used on Python 2 to handle int.from_bytes.

    Interprets *data* (bytes/bytearray, or a Python 2 str) as an unsigned
    integer; little-endian unless *big_endian* is True.
    """
    if isinstance(data, str):
        # Python 2 str -> iterate as integer byte values.
        data = bytearray(data)
    ordered = reversed(data) if big_endian else data
    value = 0
    for shift, octet in enumerate(ordered):
        value |= octet << (shift * 8)
    return value
|
"""
autotask dbip mmdb下载,由server控制
"""
# from ..autopluginbase import AutoPluginBase
#
#
# class Dbip(AutoPluginBase):
#
# def __init__(self):
# AutoPluginBase.__init__(self)
#
# def start(self):
# pass
import gzip
import shutil
from pathlib import Path
import requests
from commonbaby.mslog import MsLogger, MsLogManager
_logger: MsLogger = MsLogManager.get_logger("DBIP")
filename = Path('./dbip.mmdb.gz')
url = 'https://download.db-ip.com/free/dbip-city-lite-2019-08.mmdb.gz'
count = 0
with requests.get(url, stream=True) as r:
r.raise_for_status()
with filename.open('wb') as f:
for chunk in r.iter_content(chunk_size=1024 * 1024):
count += 1
_logger.info(f'{count} times Downloaded 1Mb, and waiting...')
if chunk: # filter out keep-alive new chunks
f.write(chunk)
with gzip.open('./dbip.mmdb.gz', 'rb') as f_in:
with open('dbip.mmdb', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 21 19:56:54 2021
@author: Paschalis Giakoumoglou 10054
@author: Christos Kyratsous 10105
"""
import matplotlib.pyplot as plt
import numpy as np
def euler(F, x0, t0, tmax, dt):
    """Integrate x' = F(t, x) with the explicit (forward) Euler scheme.

    Parameters
    ----------
    F : callable(t, x) -> ndarray, the right-hand side of the ODE system.
    x0 : ndarray, initial state.
    t0, tmax : float, integration interval.
    dt : float, fixed step size.

    Returns
    -------
    (t, x) : the time grid and the state at every grid point,
             with x of shape ``(len(t), len(x0))``.
    """
    t = np.arange(t0, tmax + dt, dt)
    steps = len(t)
    x = np.zeros((steps, len(x0)), np.float64)
    x[0, :] = x0
    for k in range(steps - 1):
        # One explicit Euler step: x_{k+1} = x_k + dt * F(t_k, x_k).
        x[k + 1, :] = x[k, :] + dt * F(t[k], x[k, :])
    return t, x
def F(t, x):
    """Lotka-Volterra right-hand side; x[0] is prey (rabbits), x[1] predators (foxes)."""
    rabbits = x[0]
    foxes = x[1]
    interaction = 0.4 * rabbits * foxes
    return np.array([1.1 * rabbits - interaction, interaction - 0.1 * foxes])
x0 = np.array([20, 1])  # initial populations: 20 rabbits, 1 fox
dt = [10**-2, 10**-3]   # step sizes to compare
# Iterate the step sizes directly instead of `for i in range(len(dt))`.
for step in dt:
    # Integrate 200 time units of the predator-prey system at this step size.
    t, x = euler(F, x0, 0, 200, step)
    #t, x = euler(F, x0, 0, 2000, 10**-5)
    # Time series of both populations.
    plt.plot(t, x)
    plt.title("Time Evolution of Foxes and Rabbits Population (dt ="+str(step)+")")
    plt.legend("RF")
    plt.xlabel("Time")
    plt.ylabel("No. of Rabbits and No. of Foxes")
    plt.grid(axis="x")
    plt.show()
    # Phase portrait: rabbits vs foxes.
    plt.plot(x[:, 0], x[:, 1])
    plt.title("Evolution of Foxes and Rabbits Population (dt ="+str(step)+")")
    plt.xlabel("Rabbits")
    plt.ylabel("Foxes")
    plt.grid()
    plt.show()
|
import re
from simpleeval import simple_eval, NameNotDefined
class Expression:
    """Base class wrapping a raw expression string.

    Subclasses give the string meaning by overriding :meth:`eval`.
    """

    def __init__(self, expr):
        self.expr = expr

    def eval(self, context):
        """Evaluate the expression against *context*; subclasses must override."""
        raise NotImplementedError
class Bind(Expression):
    """Expression evaluated with simpleeval against a name table."""

    def eval(self, context):
        """Return the value of ``self.expr``, resolving names from *context*."""
        return simple_eval(self.expr, names=context)
class Condition(Bind):
    """A Bind whose result is interpreted as a boolean condition."""
    pass
class ForLoop(Expression):
    """Parses and evaluates a ``"<var> in <iterable>"`` loop expression."""

    # BUG FIX: the previous pattern r'^\s*(.+)\s*in\s*(.+)\s*$' backtracked to
    # the right-most "in" substring anywhere in the text, so e.g. "x in infile"
    # matched the "in" inside "infile" and produced loop variable "x in",
    # raising a spurious SyntaxError. The non-greedy variable group plus
    # mandatory whitespace around the `in` keyword fixes that (whitespace
    # around `in` is now required).
    REGEX = re.compile(r'^\s*(.+?)\s+in\s+(.+?)\s*$')

    def __init__(self, expr):
        """Split *expr* into loop variable and iterable expression.

        Raises
        ------
        SyntaxError
            If *expr* does not match ``<identifier> in <iterable>``.
        """
        super().__init__(expr)
        m = self.REGEX.match(expr)
        if m:
            self.loop_variable = m.groups()[0].strip()
            if not self.loop_variable.isidentifier():
                raise SyntaxError(f'Not valid identifier name: {self.loop_variable}')
            self.iterable = m.groups()[1].strip()
        else:
            raise SyntaxError(f'For loop syntax error: {expr}')

    def eval(self, context):
        """Yield ``(counter, variable_name, item)`` for each item of the iterable.

        Raises
        ------
        ValueError
            If the iterable expression references names missing from *context*.
        """
        try:
            iterable = simple_eval(f'{self.iterable}', names=context)
            for counter, item in enumerate(iterable, start=1):
                yield counter, self.loop_variable, item
        except NameNotDefined as e:
            raise ValueError(f'"{self.iterable}" is not defined in context') from e
|
# -*- coding: utf-8 -*-
from . static import Base_RM_Field
class RM_Field_FEFILT0_IPVERSION_IPVERSION(Base_RM_Field):
    # Auto-generated register-map field accessor for FEFILT0.IPVERSION.IPVERSION:
    # read-only, bit offset 0, width 32 bits.
    def __init__(self, register):
        # Temporarily unfreeze the instance so the base class can set attributes.
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_IPVERSION_IPVERSION, self).__init__(register,
            'IPVERSION', 'FEFILT0.IPVERSION.IPVERSION', 'read-only',
            u"",
            0, 32)
        # Re-freeze to prevent accidental attribute creation after construction.
        self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_EN_EN(Base_RM_Field):
    # Auto-generated register-map field accessor for FEFILT0.EN.EN:
    # read-write, bit offset 0, width 1 bit.
    def __init__(self, register):
        # Temporarily unfreeze the instance so the base class can set attributes.
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_EN_EN, self).__init__(register,
            'EN', 'FEFILT0.EN.EN', 'read-write',
            u"",
            0, 1)
        # Re-freeze to prevent accidental attribute creation after construction.
        self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_STATUS_FEFILTLOCKSTATUS(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_STATUS_FEFILTLOCKSTATUS, self).__init__(register,
'FEFILTLOCKSTATUS', 'FEFILT0.STATUS.FEFILTLOCKSTATUS', 'read-only',
u"",
0, 1)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_LOCK_FEFILTLOCKKEY(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_LOCK_FEFILTLOCKKEY, self).__init__(register,
'FEFILTLOCKKEY', 'FEFILT0.LOCK.FEFILTLOCKKEY', 'write-only',
u"",
0, 16)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CFG_DEC1(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CFG_DEC1, self).__init__(register,
'DEC1', 'FEFILT0.CFG.DEC1', 'read-write',
u"",
0, 14)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CFG_CHFGAINREDUCTION(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CFG_CHFGAINREDUCTION, self).__init__(register,
'CHFGAINREDUCTION', 'FEFILT0.CFG.CHFGAINREDUCTION', 'read-write',
u"",
26, 1)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CFG_CHFCOEFFSWEN(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CFG_CHFCOEFFSWEN, self).__init__(register,
'CHFCOEFFSWEN', 'FEFILT0.CFG.CHFCOEFFSWEN', 'read-write',
u"",
27, 1)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CFG_CHFCOEFFSWSEL(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CFG_CHFCOEFFSWSEL, self).__init__(register,
'CHFCOEFFSWSEL', 'FEFILT0.CFG.CHFCOEFFSWSEL', 'read-write',
u"",
28, 2)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CFG_CHFCOEFFFWSWEN(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CFG_CHFCOEFFFWSWEN, self).__init__(register,
'CHFCOEFFFWSWEN', 'FEFILT0.CFG.CHFCOEFFFWSWEN', 'read-write',
u"",
30, 1)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CFG_CHFCOEFFFWSWSEL(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CFG_CHFCOEFFFWSWSEL, self).__init__(register,
'CHFCOEFFFWSWSEL', 'FEFILT0.CFG.CHFCOEFFFWSWSEL', 'read-write',
u"",
31, 1)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_SRC2_SRC2RATIO(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_SRC2_SRC2RATIO, self).__init__(register,
'SRC2RATIO', 'FEFILT0.SRC2.SRC2RATIO', 'read-write',
u"",
0, 15)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_SRC2_UPGAPS(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_SRC2_UPGAPS, self).__init__(register,
'UPGAPS', 'FEFILT0.SRC2.UPGAPS', 'read-write',
u"",
22, 3)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_SRC2_SRC2ENABLE(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_SRC2_SRC2ENABLE, self).__init__(register,
'SRC2ENABLE', 'FEFILT0.SRC2.SRC2ENABLE', 'read-write',
u"",
27, 1)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_GAINCTRL_DEC0GAIN(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_GAINCTRL_DEC0GAIN, self).__init__(register,
'DEC0GAIN', 'FEFILT0.GAINCTRL.DEC0GAIN', 'read-write',
u"",
8, 1)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_GAINCTRL_DEC1GAIN(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_GAINCTRL_DEC1GAIN, self).__init__(register,
'DEC1GAIN', 'FEFILT0.GAINCTRL.DEC1GAIN', 'read-write',
u"",
9, 2)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE00_SET0CSDCOEFF0(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE00_SET0CSDCOEFF0, self).__init__(register,
'SET0CSDCOEFF0', 'FEFILT0.CHFCSDCOE00.SET0CSDCOEFF0', 'read-write',
u"",
0, 6)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE00_SET0CSDCOEFF1(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE00_SET0CSDCOEFF1, self).__init__(register,
'SET0CSDCOEFF1', 'FEFILT0.CHFCSDCOE00.SET0CSDCOEFF1', 'read-write',
u"",
8, 8)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE00_SET0CSDCOEFF2(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE00_SET0CSDCOEFF2, self).__init__(register,
'SET0CSDCOEFF2', 'FEFILT0.CHFCSDCOE00.SET0CSDCOEFF2', 'read-write',
u"",
16, 8)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE00_SET0CSDCOEFF3(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE00_SET0CSDCOEFF3, self).__init__(register,
'SET0CSDCOEFF3', 'FEFILT0.CHFCSDCOE00.SET0CSDCOEFF3', 'read-write',
u"",
24, 8)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE01_SET0CSDCOEFF4(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE01_SET0CSDCOEFF4, self).__init__(register,
'SET0CSDCOEFF4', 'FEFILT0.CHFCSDCOE01.SET0CSDCOEFF4', 'read-write',
u"",
0, 8)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE01_SET0CSDCOEFF5(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE01_SET0CSDCOEFF5, self).__init__(register,
'SET0CSDCOEFF5', 'FEFILT0.CHFCSDCOE01.SET0CSDCOEFF5', 'read-write',
u"",
8, 9)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE01_SET0CSDCOEFF6(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE01_SET0CSDCOEFF6, self).__init__(register,
'SET0CSDCOEFF6', 'FEFILT0.CHFCSDCOE01.SET0CSDCOEFF6', 'read-write',
u"",
17, 9)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE02_SET0CSDCOEFF7(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE02_SET0CSDCOEFF7, self).__init__(register,
'SET0CSDCOEFF7', 'FEFILT0.CHFCSDCOE02.SET0CSDCOEFF7', 'read-write',
u"",
0, 9)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE02_SET0CSDCOEFF8(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE02_SET0CSDCOEFF8, self).__init__(register,
'SET0CSDCOEFF8', 'FEFILT0.CHFCSDCOE02.SET0CSDCOEFF8', 'read-write',
u"",
9, 10)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE02_SET0CSDCOEFF9(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE02_SET0CSDCOEFF9, self).__init__(register,
'SET0CSDCOEFF9', 'FEFILT0.CHFCSDCOE02.SET0CSDCOEFF9', 'read-write',
u"",
19, 10)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE03_SET0CSDCOEFF10(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE03_SET0CSDCOEFF10, self).__init__(register,
'SET0CSDCOEFF10', 'FEFILT0.CHFCSDCOE03.SET0CSDCOEFF10', 'read-write',
u"",
0, 11)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE03_SET0CSDCOEFF11(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE03_SET0CSDCOEFF11, self).__init__(register,
'SET0CSDCOEFF11', 'FEFILT0.CHFCSDCOE03.SET0CSDCOEFF11', 'read-write',
u"",
11, 12)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE10_SET1CSDCOEFF0(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE10_SET1CSDCOEFF0, self).__init__(register,
'SET1CSDCOEFF0', 'FEFILT0.CHFCSDCOE10.SET1CSDCOEFF0', 'read-write',
u"",
0, 6)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE10_SET1CSDCOEFF1(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE10_SET1CSDCOEFF1, self).__init__(register,
'SET1CSDCOEFF1', 'FEFILT0.CHFCSDCOE10.SET1CSDCOEFF1', 'read-write',
u"",
8, 8)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE10_SET1CSDCOEFF2(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE10_SET1CSDCOEFF2, self).__init__(register,
'SET1CSDCOEFF2', 'FEFILT0.CHFCSDCOE10.SET1CSDCOEFF2', 'read-write',
u"",
16, 8)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE10_SET1CSDCOEFF3(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE10_SET1CSDCOEFF3, self).__init__(register,
'SET1CSDCOEFF3', 'FEFILT0.CHFCSDCOE10.SET1CSDCOEFF3', 'read-write',
u"",
24, 8)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE11_SET1CSDCOEFF4(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE11_SET1CSDCOEFF4, self).__init__(register,
'SET1CSDCOEFF4', 'FEFILT0.CHFCSDCOE11.SET1CSDCOEFF4', 'read-write',
u"",
0, 8)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE11_SET1CSDCOEFF5(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE11_SET1CSDCOEFF5, self).__init__(register,
'SET1CSDCOEFF5', 'FEFILT0.CHFCSDCOE11.SET1CSDCOEFF5', 'read-write',
u"",
8, 9)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE11_SET1CSDCOEFF6(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE11_SET1CSDCOEFF6, self).__init__(register,
'SET1CSDCOEFF6', 'FEFILT0.CHFCSDCOE11.SET1CSDCOEFF6', 'read-write',
u"",
17, 9)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE12_SET1CSDCOEFF7(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE12_SET1CSDCOEFF7, self).__init__(register,
'SET1CSDCOEFF7', 'FEFILT0.CHFCSDCOE12.SET1CSDCOEFF7', 'read-write',
u"",
0, 9)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE12_SET1CSDCOEFF8(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE12_SET1CSDCOEFF8, self).__init__(register,
'SET1CSDCOEFF8', 'FEFILT0.CHFCSDCOE12.SET1CSDCOEFF8', 'read-write',
u"",
9, 10)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE12_SET1CSDCOEFF9(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE12_SET1CSDCOEFF9, self).__init__(register,
'SET1CSDCOEFF9', 'FEFILT0.CHFCSDCOE12.SET1CSDCOEFF9', 'read-write',
u"",
19, 10)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE13_SET1CSDCOEFF10(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE13_SET1CSDCOEFF10, self).__init__(register,
'SET1CSDCOEFF10', 'FEFILT0.CHFCSDCOE13.SET1CSDCOEFF10', 'read-write',
u"",
0, 11)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE13_SET1CSDCOEFF11(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE13_SET1CSDCOEFF11, self).__init__(register,
'SET1CSDCOEFF11', 'FEFILT0.CHFCSDCOE13.SET1CSDCOEFF11', 'read-write',
u"",
11, 12)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE00S_SET0CSDCOEFF0S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE00S_SET0CSDCOEFF0S, self).__init__(register,
'SET0CSDCOEFF0S', 'FEFILT0.CHFCSDCOE00S.SET0CSDCOEFF0S', 'read-write',
u"",
0, 3)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE00S_SET0CSDCOEFF1S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE00S_SET0CSDCOEFF1S, self).__init__(register,
'SET0CSDCOEFF1S', 'FEFILT0.CHFCSDCOE00S.SET0CSDCOEFF1S', 'read-write',
u"",
3, 4)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE00S_SET0CSDCOEFF2S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE00S_SET0CSDCOEFF2S, self).__init__(register,
'SET0CSDCOEFF2S', 'FEFILT0.CHFCSDCOE00S.SET0CSDCOEFF2S', 'read-write',
u"",
7, 4)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE00S_SET0CSDCOEFF3S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE00S_SET0CSDCOEFF3S, self).__init__(register,
'SET0CSDCOEFF3S', 'FEFILT0.CHFCSDCOE00S.SET0CSDCOEFF3S', 'read-write',
u"",
11, 4)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE00S_SET0CSDCOEFF4S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE00S_SET0CSDCOEFF4S, self).__init__(register,
'SET0CSDCOEFF4S', 'FEFILT0.CHFCSDCOE00S.SET0CSDCOEFF4S', 'read-write',
u"",
15, 4)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE00S_SET0CSDCOEFF5S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE00S_SET0CSDCOEFF5S, self).__init__(register,
'SET0CSDCOEFF5S', 'FEFILT0.CHFCSDCOE00S.SET0CSDCOEFF5S', 'read-write',
u"",
19, 5)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE00S_SET0CSDCOEFF6S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE00S_SET0CSDCOEFF6S, self).__init__(register,
'SET0CSDCOEFF6S', 'FEFILT0.CHFCSDCOE00S.SET0CSDCOEFF6S', 'read-write',
u"",
24, 5)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE01S_SET0CSDCOEFF7S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE01S_SET0CSDCOEFF7S, self).__init__(register,
'SET0CSDCOEFF7S', 'FEFILT0.CHFCSDCOE01S.SET0CSDCOEFF7S', 'read-write',
u"",
0, 5)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE01S_SET0CSDCOEFF8S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE01S_SET0CSDCOEFF8S, self).__init__(register,
'SET0CSDCOEFF8S', 'FEFILT0.CHFCSDCOE01S.SET0CSDCOEFF8S', 'read-write',
u"",
5, 5)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE01S_SET0CSDCOEFF9S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE01S_SET0CSDCOEFF9S, self).__init__(register,
'SET0CSDCOEFF9S', 'FEFILT0.CHFCSDCOE01S.SET0CSDCOEFF9S', 'read-write',
u"",
10, 5)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE01S_SET0CSDCOEFF10S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE01S_SET0CSDCOEFF10S, self).__init__(register,
'SET0CSDCOEFF10S', 'FEFILT0.CHFCSDCOE01S.SET0CSDCOEFF10S', 'read-write',
u"",
15, 6)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE01S_SET0CSDCOEFF11S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE01S_SET0CSDCOEFF11S, self).__init__(register,
'SET0CSDCOEFF11S', 'FEFILT0.CHFCSDCOE01S.SET0CSDCOEFF11S', 'read-write',
u"",
21, 6)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE10S_SET1CSDCOEFF0S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE10S_SET1CSDCOEFF0S, self).__init__(register,
'SET1CSDCOEFF0S', 'FEFILT0.CHFCSDCOE10S.SET1CSDCOEFF0S', 'read-write',
u"",
0, 3)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE10S_SET1CSDCOEFF1S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE10S_SET1CSDCOEFF1S, self).__init__(register,
'SET1CSDCOEFF1S', 'FEFILT0.CHFCSDCOE10S.SET1CSDCOEFF1S', 'read-write',
u"",
3, 4)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE10S_SET1CSDCOEFF2S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE10S_SET1CSDCOEFF2S, self).__init__(register,
'SET1CSDCOEFF2S', 'FEFILT0.CHFCSDCOE10S.SET1CSDCOEFF2S', 'read-write',
u"",
7, 4)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE10S_SET1CSDCOEFF3S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE10S_SET1CSDCOEFF3S, self).__init__(register,
'SET1CSDCOEFF3S', 'FEFILT0.CHFCSDCOE10S.SET1CSDCOEFF3S', 'read-write',
u"",
11, 4)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE10S_SET1CSDCOEFF4S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE10S_SET1CSDCOEFF4S, self).__init__(register,
'SET1CSDCOEFF4S', 'FEFILT0.CHFCSDCOE10S.SET1CSDCOEFF4S', 'read-write',
u"",
15, 4)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE10S_SET1CSDCOEFF5S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE10S_SET1CSDCOEFF5S, self).__init__(register,
'SET1CSDCOEFF5S', 'FEFILT0.CHFCSDCOE10S.SET1CSDCOEFF5S', 'read-write',
u"",
19, 5)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE10S_SET1CSDCOEFF6S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE10S_SET1CSDCOEFF6S, self).__init__(register,
'SET1CSDCOEFF6S', 'FEFILT0.CHFCSDCOE10S.SET1CSDCOEFF6S', 'read-write',
u"",
24, 5)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE11S_SET1CSDCOEFF7S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE11S_SET1CSDCOEFF7S, self).__init__(register,
'SET1CSDCOEFF7S', 'FEFILT0.CHFCSDCOE11S.SET1CSDCOEFF7S', 'read-write',
u"",
0, 5)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE11S_SET1CSDCOEFF8S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE11S_SET1CSDCOEFF8S, self).__init__(register,
'SET1CSDCOEFF8S', 'FEFILT0.CHFCSDCOE11S.SET1CSDCOEFF8S', 'read-write',
u"",
5, 5)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE11S_SET1CSDCOEFF9S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE11S_SET1CSDCOEFF9S, self).__init__(register,
'SET1CSDCOEFF9S', 'FEFILT0.CHFCSDCOE11S.SET1CSDCOEFF9S', 'read-write',
u"",
10, 5)
self.__dict__['zz_frozen'] = True
class RM_Field_FEFILT0_CHFCSDCOE11S_SET1CSDCOEFF10S(Base_RM_Field):
def __init__(self, register):
self.__dict__['zz_frozen'] = False
super(RM_Field_FEFILT0_CHFCSDCOE11S_SET1CSDCOEFF10S, self).__init__(register,
'SET1CSDCOEFF10S', 'FEFILT0.CHFCSDCOE11S.SET1CSDCOEFF10S', 'read-write',
u"",
15, 6)
self.__dict__['zz_frozen'] = True
# ---------------------------------------------------------------------------
# Auto-generated register-field accessor classes for the FEFILT0 peripheral.
# Each class sets 'zz_frozen' through __dict__ (presumably to bypass a
# frozen-attribute guard in Base_RM_Field -- confirm against the base class),
# then registers the field with its name, dotted path, access mode, empty
# description, and two integers that are presumably the field's LSB offset
# and bit width within the register -- confirm against Base_RM_Field.
# Do not hand-edit: regenerate from the device description instead.
# ---------------------------------------------------------------------------

# --- FEFILT0.CHFCSDCOE11S fields ---
class RM_Field_FEFILT0_CHFCSDCOE11S_SET1CSDCOEFF11S(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_CHFCSDCOE11S_SET1CSDCOEFF11S, self).__init__(register,
            'SET1CSDCOEFF11S', 'FEFILT0.CHFCSDCOE11S.SET1CSDCOEFF11S', 'read-write',
            u"",
            21, 6)
        self.__dict__['zz_frozen'] = True


# --- FEFILT0.DIGMIXCTRL fields ---
class RM_Field_FEFILT0_DIGMIXCTRL_DIGMIXFREQ(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_DIGMIXCTRL_DIGMIXFREQ, self).__init__(register,
            'DIGMIXFREQ', 'FEFILT0.DIGMIXCTRL.DIGMIXFREQ', 'read-write',
            u"",
            0, 20)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_DIGMIXCTRL_DIGIQSWAPEN(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_DIGMIXCTRL_DIGIQSWAPEN, self).__init__(register,
            'DIGIQSWAPEN', 'FEFILT0.DIGMIXCTRL.DIGIQSWAPEN', 'read-write',
            u"",
            20, 1)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_DIGMIXCTRL_MIXERCONJ(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_DIGMIXCTRL_MIXERCONJ, self).__init__(register,
            'MIXERCONJ', 'FEFILT0.DIGMIXCTRL.MIXERCONJ', 'read-write',
            u"",
            21, 1)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_DIGMIXCTRL_DIGMIXFBENABLE(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_DIGMIXCTRL_DIGMIXFBENABLE, self).__init__(register,
            'DIGMIXFBENABLE', 'FEFILT0.DIGMIXCTRL.DIGMIXFBENABLE', 'read-write',
            u"",
            22, 1)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_DIGMIXCTRL_ZIFMODEENABLE(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_DIGMIXCTRL_ZIFMODEENABLE, self).__init__(register,
            'ZIFMODEENABLE', 'FEFILT0.DIGMIXCTRL.ZIFMODEENABLE', 'read-write',
            u"",
            23, 1)
        self.__dict__['zz_frozen'] = True


# --- FEFILT0.DCCOMP fields ---
class RM_Field_FEFILT0_DCCOMP_DCESTIEN(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_DCCOMP_DCESTIEN, self).__init__(register,
            'DCESTIEN', 'FEFILT0.DCCOMP.DCESTIEN', 'read-write',
            u"",
            0, 1)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_DCCOMP_DCCOMPEN(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_DCCOMP_DCCOMPEN, self).__init__(register,
            'DCCOMPEN', 'FEFILT0.DCCOMP.DCCOMPEN', 'read-write',
            u"",
            1, 1)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_DCCOMP_DCRSTEN(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_DCCOMP_DCRSTEN, self).__init__(register,
            'DCRSTEN', 'FEFILT0.DCCOMP.DCRSTEN', 'read-write',
            u"",
            2, 1)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_DCCOMP_DCCOMPFREEZE(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_DCCOMP_DCCOMPFREEZE, self).__init__(register,
            'DCCOMPFREEZE', 'FEFILT0.DCCOMP.DCCOMPFREEZE', 'read-write',
            u"",
            3, 1)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_DCCOMP_DCCOMPGEAR(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_DCCOMP_DCCOMPGEAR, self).__init__(register,
            'DCCOMPGEAR', 'FEFILT0.DCCOMP.DCCOMPGEAR', 'read-write',
            u"",
            4, 3)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_DCCOMP_DCLIMIT(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_DCCOMP_DCLIMIT, self).__init__(register,
            'DCLIMIT', 'FEFILT0.DCCOMP.DCLIMIT', 'read-write',
            u"",
            8, 2)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_DCCOMP_DCGAINGEAREN(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_DCCOMP_DCGAINGEAREN, self).__init__(register,
            'DCGAINGEAREN', 'FEFILT0.DCCOMP.DCGAINGEAREN', 'read-write',
            u"",
            10, 1)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_DCCOMP_DCGAINGEAR(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_DCCOMP_DCGAINGEAR, self).__init__(register,
            'DCGAINGEAR', 'FEFILT0.DCCOMP.DCGAINGEAR', 'read-write',
            u"",
            11, 4)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_DCCOMP_DCGAINGEARSMPS(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_DCCOMP_DCGAINGEARSMPS, self).__init__(register,
            'DCGAINGEARSMPS', 'FEFILT0.DCCOMP.DCGAINGEARSMPS', 'read-write',
            u"",
            15, 8)
        self.__dict__['zz_frozen'] = True


# --- FEFILT0.DCCOMPFILTINIT fields ---
class RM_Field_FEFILT0_DCCOMPFILTINIT_DCCOMPINITVALI(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_DCCOMPFILTINIT_DCCOMPINITVALI, self).__init__(register,
            'DCCOMPINITVALI', 'FEFILT0.DCCOMPFILTINIT.DCCOMPINITVALI', 'read-write',
            u"",
            0, 15)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_DCCOMPFILTINIT_DCCOMPINITVALQ(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_DCCOMPFILTINIT_DCCOMPINITVALQ, self).__init__(register,
            'DCCOMPINITVALQ', 'FEFILT0.DCCOMPFILTINIT.DCCOMPINITVALQ', 'read-write',
            u"",
            15, 15)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_DCCOMPFILTINIT_DCCOMPINIT(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_DCCOMPFILTINIT_DCCOMPINIT, self).__init__(register,
            'DCCOMPINIT', 'FEFILT0.DCCOMPFILTINIT.DCCOMPINIT', 'read-write',
            u"",
            30, 1)
        self.__dict__['zz_frozen'] = True


# --- FEFILT0.DCESTI fields (read-only estimates) ---
class RM_Field_FEFILT0_DCESTI_DCCOMPESTIVALI(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_DCESTI_DCCOMPESTIVALI, self).__init__(register,
            'DCCOMPESTIVALI', 'FEFILT0.DCESTI.DCCOMPESTIVALI', 'read-only',
            u"",
            0, 15)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_DCESTI_DCCOMPESTIVALQ(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_DCESTI_DCCOMPESTIVALQ, self).__init__(register,
            'DCCOMPESTIVALQ', 'FEFILT0.DCESTI.DCCOMPESTIVALQ', 'read-only',
            u"",
            15, 15)
        self.__dict__['zz_frozen'] = True


# --- FEFILT0.IRCAL fields ---
class RM_Field_FEFILT0_IRCAL_IRCALEN(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_IRCAL_IRCALEN, self).__init__(register,
            'IRCALEN', 'FEFILT0.IRCAL.IRCALEN', 'read-write',
            u"",
            0, 1)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_IRCAL_MURSHF(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_IRCAL_MURSHF, self).__init__(register,
            'MURSHF', 'FEFILT0.IRCAL.MURSHF', 'read-write',
            u"",
            1, 5)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_IRCAL_MUISHF(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_IRCAL_MUISHF, self).__init__(register,
            'MUISHF', 'FEFILT0.IRCAL.MUISHF', 'read-write',
            u"",
            7, 6)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_IRCAL_IRCORREN(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_IRCAL_IRCORREN, self).__init__(register,
            'IRCORREN', 'FEFILT0.IRCAL.IRCORREN', 'read-write',
            u"",
            13, 1)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_IRCAL_IRCALCOEFRSTCMD(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_IRCAL_IRCALCOEFRSTCMD, self).__init__(register,
            'IRCALCOEFRSTCMD', 'FEFILT0.IRCAL.IRCALCOEFRSTCMD', 'write-only',
            u"",
            14, 1)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_IRCAL_IRCALIFADCDBG(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_IRCAL_IRCALIFADCDBG, self).__init__(register,
            'IRCALIFADCDBG', 'FEFILT0.IRCAL.IRCALIFADCDBG', 'read-write',
            u"",
            15, 1)
        self.__dict__['zz_frozen'] = True


# --- FEFILT0.IRCALCOEF fields (read-only coefficients) ---
class RM_Field_FEFILT0_IRCALCOEF_CRV(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_IRCALCOEF_CRV, self).__init__(register,
            'CRV', 'FEFILT0.IRCALCOEF.CRV', 'read-only',
            u"",
            0, 15)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_IRCALCOEF_CIV(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_IRCALCOEF_CIV, self).__init__(register,
            'CIV', 'FEFILT0.IRCALCOEF.CIV', 'read-only',
            u"",
            16, 15)
        self.__dict__['zz_frozen'] = True


# --- FEFILT0.IRCALCOEFWR0 fields (coefficient write ports) ---
class RM_Field_FEFILT0_IRCALCOEFWR0_CRVWD(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_IRCALCOEFWR0_CRVWD, self).__init__(register,
            'CRVWD', 'FEFILT0.IRCALCOEFWR0.CRVWD', 'read-write',
            u"",
            0, 15)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_IRCALCOEFWR0_CRVWEN(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_IRCALCOEFWR0_CRVWEN, self).__init__(register,
            'CRVWEN', 'FEFILT0.IRCALCOEFWR0.CRVWEN', 'read-write',
            u"",
            15, 1)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_IRCALCOEFWR0_CIVWD(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_IRCALCOEFWR0_CIVWD, self).__init__(register,
            'CIVWD', 'FEFILT0.IRCALCOEFWR0.CIVWD', 'read-write',
            u"",
            16, 15)
        self.__dict__['zz_frozen'] = True


class RM_Field_FEFILT0_IRCALCOEFWR0_CIVWEN(Base_RM_Field):
    def __init__(self, register):
        self.__dict__['zz_frozen'] = False
        super(RM_Field_FEFILT0_IRCALCOEFWR0_CIVWEN, self).__init__(register,
            'CIVWEN', 'FEFILT0.IRCALCOEFWR0.CIVWEN', 'read-write',
            u"",
            31, 1)
        self.__dict__['zz_frozen'] = True
|
"""
base.py
The base agent, which should be inherited by the other agents.
"""
from warnings import warn
class Agent:
    """Base agent carrying the parameters shared by all agents.

    Subclasses implement ``__call__``; the train/save/load methods are
    no-op defaults that emit a warning so accidental use is visible.
    """

    # Fixed attribute set: avoids a per-instance __dict__ and catches typos.
    # (Was a set literal; a tuple is the conventional, order-stable form.)
    __slots__ = ('training', 'last_score', 'tag')

    def __init__(self,
                 training: bool = False,
                 tag: str = 'base'):
        """
        Initialise the base agent with the common parameters shared by all agents.

        :param training: Boolean indicating if the agent is training or not
        :param tag: Unique tag identifying the agent class
        """
        self.training: bool = training  # Indicates if agent is training or not
        self.last_score = None  # Previous score of the game, parsed from messenger
        self.tag: str = tag  # Identifying tag denoting the type of agent

    def __str__(self):
        # Fixed: was an f-string with no placeholders (lint F541).
        return "Agent()"

    def __call__(self, games):
        """
        Call the agent to determine the next best action given a certain game state.

        :param games: List of games, each in a certain state
        :return: Action, which is either 0 (straight), 1 (left), or 2 (right)
        """
        raise NotImplementedError

    def reset(self, n_envs, sample_game):
        """Reset the agent to prepare for new evaluation.

        :param n_envs: Number of parallel environments being evaluated
        :param sample_game: Unused here; kept for subclass compatibility
        """
        self.last_score = [0] * n_envs

    def train(self, duration, max_duration):
        """Train the agent; the base agent has nothing to train."""
        warn("Nothing is trained")
        return None

    def save_model(self, model_name: str = None, epoch: int = None):
        """Save the current model. May be redundant for non-NN models."""
        warn("No model saved")

    def load_model(self, model_name: str = None, epoch: int = None):
        """Load the current model. May be redundant for non-NN models."""
        warn("No model loaded")
|
from mitmproxy import contentviews
from mitmproxy.test import tflow
from mitmproxy.test import tutils
from mitmproxy.test import taddons
from mitmproxy.http import Headers
from ..mitmproxy import tservers
class TestScripts(tservers.MasterTest):
    """Smoke tests verifying that the bundled example addons behave as documented."""

    def test_add_header(self, tdata):
        # The minimal example adds a fixed header to every request.
        with taddons.context() as tctx:
            a = tctx.script(tdata.path("../examples/addons/scripting-minimal-example.py"))
            f = tflow.tflow()
            a.request(f)
            assert f.request.headers["myheader"] == "value"

    def test_custom_contentviews(self, tdata):
        # Loading the addon registers a "swapcase" contentview globally.
        with taddons.context() as tctx:
            tctx.script(tdata.path("../examples/addons/contentview.py"))
            swapcase = contentviews.get("swapcase")
            _, fmt = swapcase(b"<html>Test!</html>")
            assert any(b'tEST!' in val[0][1] for val in fmt)

    def test_modify_form(self, tdata):
        with taddons.context() as tctx:
            sc = tctx.script(tdata.path("../examples/addons/http-modify-form.py"))
            form_header = Headers(content_type="application/x-www-form-urlencoded")
            f = tflow.tflow(req=tutils.treq(headers=form_header))
            sc.request(f)
            assert f.request.urlencoded_form["mitmproxy"] == "rocks"
            # Without a form content-type the addon replaces the whole form body.
            f.request.headers["content-type"] = ""
            sc.request(f)
            assert list(f.request.urlencoded_form.items()) == [("foo", "bar")]

    def test_modify_querystring(self, tdata):
        with taddons.context() as tctx:
            sc = tctx.script(tdata.path("../examples/addons/http-modify-query-string.py"))
            f = tflow.tflow(req=tutils.treq(path="/search?q=term"))
            sc.request(f)
            assert f.request.query["mitmproxy"] == "rocks"
            # Must also work when the path carries no query string at all.
            f.request.path = "/"
            sc.request(f)
            assert f.request.query["mitmproxy"] == "rocks"

    def test_redirect_requests(self, tdata):
        with taddons.context() as tctx:
            sc = tctx.script(tdata.path("../examples/addons/http-redirect-requests.py"))
            f = tflow.tflow(req=tutils.treq(host="example.org"))
            sc.request(f)
            assert f.request.host == "mitmproxy.org"

    def test_send_reply_from_proxy(self, tdata):
        # The addon short-circuits the request with a canned response.
        with taddons.context() as tctx:
            sc = tctx.script(tdata.path("../examples/addons/http-reply-from-proxy.py"))
            f = tflow.tflow(req=tutils.treq(host="example.com", port=80))
            sc.request(f)
            assert f.response.content == b"Hello World"
|
"""
*Product Formation* A, B: Type _proves_ A x B: Type
An element here amounts to a construction taking an element of A
to an element of B. In the case of mere propositions, an element
of the function type is a proof of the implication, a mapping
of a warrant for A to a warrant of B. (Corfield 39)
"""
|
import json
import requests
from src.sdk.auth.auth_sdk import login
from src.utils.header_utils import build_headers
def import_topics(site, topics):
    """POST each topic to the site's admin import endpoint.

    :param site: dict with at least a "host" URL and login credentials
    :param topics: iterable of topic dicts to import
    """
    headers = build_headers(login(site))
    for topic in topics:
        response = requests.post(site["host"] + "import/admin/topic",
                                 data=json.dumps(topic), headers=headers)
        if response.status_code == 200:
            print("import successfully")
        else:
            # Surface failures instead of silently dropping them.
            print("import failed with status", response.status_code)
def import_pipelines(site, pipelines):
    """POST each pipeline to the site's admin import endpoint.

    :param site: dict with at least a "host" URL and login credentials
    :param pipelines: iterable of pipeline dicts to import
    """
    headers = build_headers(login(site))
    for pipeline in pipelines:
        response = requests.post(site["host"] + "import/admin/pipeline",
                                 data=json.dumps(pipeline), headers=headers)
        if response.status_code == 200:
            print("import successfully")
        else:
            # Surface failures instead of silently dropping them.
            print("import failed with status", response.status_code)
def import_users(site, user):
    """POST a single user to the site, authenticated as the super-admin.

    :param site: dict with a "host" URL; credentials are overridden below
    :param user: user dict to import (one user, despite the plural name)
    """
    # SECURITY(review): hard-coded super-admin credentials; move these to
    # configuration/secret storage instead of source control.
    admin_site = site.copy()
    admin_site["username"] = "imma-super"
    admin_site["password"] = "abc1234"
    # Fixed: a {"Content-Type": ...} headers dict was built and immediately
    # overwritten by this call -- the dead assignment has been removed.
    headers = build_headers(login(admin_site))
    response = requests.post(site["host"] + "user", data=json.dumps(user),
                             headers=headers)
    if response.status_code == 200:
        print("import successfully")
    else:
        # Surface failures instead of silently dropping them.
        print("import failed with status", response.status_code)
def get_topic_definition_list(site):
    """Fetch every topic definition from the site and return the parsed JSON."""
    headers = build_headers(login(site))
    url = site["host"] + "topic/all"
    response = requests.get(url, headers=headers)
    return response.json()
|
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from users.models import User
class SignUpForm(UserCreationForm):
    """Registration form for the custom User model, without Django's help texts."""

    class Meta:
        model = User
        fields = ["email", "password1", "password2"]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Django's default help texts are noisy; suppress them on every field.
        for name in ("email", "password1", "password2"):
            self.fields[name].help_text = None
class LoginForm(AuthenticationForm):
    # Single generic message so callers can't tell which credential was wrong.
    error_messages = {
        "invalid_login": "Email or password is incorrect.",
    }
|
from imageai.Prediction import ImagePrediction
import os
# Resolve model weights and input image relative to the working directory.
execution_path = os.getcwd()

prediction = ImagePrediction()
# Use the lightweight SqueezeNet backbone with pre-trained Keras weights.
prediction.setModelTypeAsSqueezeNet()
prediction.setModelPath(os.path.join(
    execution_path, "squeezenet_weights_tf_dim_ordering_tf_kernels.h5"))
prediction.loadModel()

# Classify the sample image and keep the top-5 predictions.
predictions, probabilities = prediction.predictImage(
    os.path.join(execution_path, "images/baby.jpg"), result_count=5)
for eachPrediction, eachProbability in zip(predictions, probabilities):
    print(eachPrediction, " : ", eachProbability)
|
__all__ = ["LatentDataset", "LatentDataModule"]
from pathlib import Path
from functools import partial
from typing import Tuple, Dict
import pandas as pd
import torch
from dotenv import find_dotenv
from torch.utils.data import Dataset
from src.data.base import DataModule
from src.data.snp import SNPDataset
from src.visualization.projection import PROJECTION_CSV_FILENAME
def find_projection_filepath(name: str, version: str) -> Path:
    """Locate the projection CSV saved for a given model name and version."""
    models_root = Path(find_dotenv()).parent / "models"
    return models_root / name / version / PROJECTION_CSV_FILENAME
class LatentDataset(Dataset):
    """Dataset of latent (projected) samples loaded from a projection CSV.

    Each CSV row is one sample; the row index encodes the class label.
    """

    def __init__(self, model_name: str, version: str) -> None:
        super().__init__()
        csv_path = find_projection_filepath(model_name, version)
        # First CSV column holds the encoded class label of each sample.
        self.samples = pd.read_csv(csv_path, index_col=0)

    def __len__(self) -> int:
        return self.samples.shape[0]

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, ...]:
        row = self.samples.iloc[idx]
        features = torch.Tensor(row.values)
        label = torch.LongTensor([row.name])
        return features, label

    @property
    def num_features(self) -> int:
        # Latent dimensionality = number of columns in the projection CSV.
        return self.samples.shape[1]

    @property
    def num_classes(self) -> int:
        # Labels are taken to be contiguous integers 0..max.
        return self.samples.index.max() + 1

    @property
    def idx_to_class(self) -> Dict[int, str]:
        # The SNP dataset owns the label encoding; delegate to it.
        return SNPDataset().idx_to_class
class LatentDataModule(DataModule):
    """Data module built on :class:`LatentDataset`.

    Provides consistent data loaders for training, validation, and
    inference. Each sample consists of a low-dimensional (latent)
    representation of the SNP variants in an individual's allele and an
    encoded label corresponding to the individual's ancestry.

    Attributes
    ----------
    num_features : int
        Number of input features
    num_classes : int
        Number of target classes
    idx_to_class : dict
        Mapping of index to target class

    Parameters
    ----------
    model_name : str
        Name of the model used to create the latent representation
    version : str
        Experiment version of the model
    train_size : float
        Fraction of the data used for training
    val_size : float
        Fraction of the data used for validation
    batch_size : int
        Number of samples per batch
    num_processes : int
        Number of worker processes for the loaders
    """

    def __init__(
        self,
        model_name: str,
        version: str,
        train_size: float = 0.8,
        val_size: float = 0.15,
        batch_size=32,
        num_processes=0,
    ):
        # Bind the dataset constructor so the base class can build splits lazily.
        dataset_factory = partial(LatentDataset, model_name=model_name, version=version)
        super().__init__(
            dataset_factory,
            train_size,
            val_size,
            batch_size,
            num_processes,
        )
|
from flask import Flask, request
import os
import time
app = Flask(__name__)
# NOTE(review): debug mode enabled unconditionally -- must not ship to production.
app.debug = True


@app.route("/")
def delay():
    """Sleep for ?delay=<seconds> (when given and numeric), then respond."""
    raw = request.args.get("delay")
    if raw:
        try:
            time.sleep(float(raw))
        except ValueError:
            # Fixed: a non-numeric (or negative) delay value used to raise and
            # produce a 500 response; such values are now ignored.
            pass
    return "delay app"


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8888)
|
# coding=utf-8
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
class Station(models.Model):
    """A station managed by zero or more users."""

    # Display name of the station.
    name = models.CharField(max_length=30)
    # Users allowed to manage this station; reverse accessor is user.stations.
    managers = models.ManyToManyField(User, related_name='stations', blank=True)

    def __unicode__(self):
        # Python 2 string representation. NOTE(review): Python 3 / modern
        # Django uses __str__ instead -- confirm this app still targets
        # Python 2 (the __future__ import above suggests it does).
        return self.name
|
from django.core.management.base import BaseCommand
from pprint import pprint
from faker import Faker
class Command(BaseCommand):
    """Management command printing assorted fake data (smoke test for Faker)."""

    def handle(self, *args, **options):
        faker = Faker()
        print(faker.name())
        print(faker.text())
        print('========')
        print(faker.word())
        # color() returns a structure, so pretty-print it.
        pprint(faker.color())
        print('END')
|
# Unit tests for P10_21.py
# IMPORTS
from nose.tools import ok_, eq_
from P10_21 import CheckingAccount
class TestCheckingsAccount():
    """Unit tests for CheckingAccount.

    NOTE(review): every balance check below uses ok_(expected, actual).
    nose's ok_(val, msg) asserts only that its FIRST argument is truthy and
    treats the second as the failure message, so all of these balance
    assertions pass vacuously regardless of the actual balance.  They should
    be eq_(expected, actual) -- but before converting them, re-derive the
    expected values against CheckingAccount's fee rules (e.g. test_withdraw_fee
    expects 300 after deposit(500) and three withdraw(100) calls, which looks
    inconsistent with plain arithmetic).
    """

    def setup(self):
        # nose runs setup() before each test method: fresh account per test.
        self.c = CheckingAccount()

    def test_is_free(self):
        ok_(True, self.c.is_free())

    def test_deposit(self):
        self.c.deposit(500)
        ok_(500, self.c.getBalance())

    def test_withdraw(self):
        self.c.deposit(500)
        self.c.withdraw(200)
        ok_(300, self.c.getBalance())

    def test_withdraw_fee(self):
        # Presumably the first three withdrawals are free and the fourth
        # incurs a fee -- confirm against P10_21.CheckingAccount.
        self.c.deposit(500)
        self.c.withdraw(100)
        self.c.withdraw(100)
        self.c.withdraw(100)
        ok_(300, self.c.getBalance())
        self.c.withdraw(100)
        eq_(False, self.c.is_free())
        ok_(199, self.c.getBalance())

    def test_deposit_fee(self):
        # Presumably the fourth transaction incurs a $1 fee (399 = 400 - 1).
        self.c.deposit(100)
        self.c.deposit(100)
        self.c.deposit(100)
        ok_(300, self.c.getBalance())
        self.c.deposit(100)
        eq_(False, self.c.is_free())
        ok_(399, self.c.getBalance())
|
""" adaptive time stepping for dg shallow water equation model """
from __future__ import division
from __future__ import absolute_import
from mpi4py import MPI
from flooddrake import *
from flooddrake.min_dx import MinDx
import numpy as np
class AdaptiveTimestepping(object):
    """CFL-based adaptive time stepping for the DG shallow water model.

    Per cell, a PyOP2 C kernel computes (min edge length / max wave speed);
    the minimum of that ratio over all cells and MPI ranks, scaled by the
    CFL constant 1/(2p+1), gives the stable timestep.
    """

    def __init__(self, V, max_timestep):
        # V: (mixed) function space of the state vector w.
        self.V = V
        self.mesh = self.V.mesh()
        self.N = FacetNormal(self.mesh)
        # Hard upper bound on the returned timestep regardless of CFL.
        self.max_timestep = max_timestep
        # Polynomial degree p enters the CFL constant 1/(2p+1).
        self.p = self.V.ufl_element().degree()
        # Piecewise-constant field holding each cell's dx / max-wave-speed.
        self.c_w_s = Function(FunctionSpace(self.mesh, 'DG', 0))
        # find min cell edge lengths
        self.min_lengths = MinDx(self.V.mesh())
        g = parameters["flooddrake"]["gravity"]
        # C kernels: per-DOF wave speed |u/h| + sqrt(g*h) (2D adds the v
        # component).  Cells that are dry at every DOF (h <= 0) get a huge
        # sentinel ratio so they never limit the timestep.
        self.max_wave_speed_kernel_2d = """ const double g=%(gravity)s; float max=-10000000, wave_speed=0; int a=0;
            for(int i=0;i<vert_u_cell.dofs;i++){
            if (vert_cell[i][0]<=0){
            wave_speed=-10000000;
            a=a+1;
            }
            if (vert_cell[i][0]>0){
            wave_speed=sqrt(pow((vert_u_cell[i][0]/vert_cell[i][0]),2)+pow((vert_v_cell[i][0]/vert_cell[i][0]),2))+sqrt(g*vert_cell[i][0]);
            }
            max=fmax(wave_speed,max);
            }
            if (a==vert_u_cell.dofs){
            cell_wave_speed[0][0]=10000000;
            }
            if (a<vert_u_cell.dofs){
            cell_wave_speed[0][0]=cell_lengths[0][0]/max;
            }
            """
        self.max_wave_speed_kernel_1d = """ const double g=%(gravity)s; float max=-10000000, wave_speed=0; int a=0;
            for(int i=0;i<vert_u_cell.dofs;i++){
            if (vert_cell[i][0]<=0){
            wave_speed=-10000000;
            a=a+1;
            }
            if (vert_cell[i][0]>0){
            wave_speed=fabs(vert_u_cell[i][0]/vert_cell[i][0])+sqrt(g*vert_cell[i][0]);
            }
            max=fmax(wave_speed,max);
            }
            if (a==vert_u_cell.dofs){
            cell_wave_speed[0][0]=10000000;
            }
            if (a<vert_u_cell.dofs){
            cell_wave_speed[0][0]=cell_lengths[0][0]/max;
            }
            """
        # replace parameter strings (bake the gravity constant into the C source)
        self.max_wave_speed_kernel_1d = self.max_wave_speed_kernel_1d % {"gravity": g}
        self.max_wave_speed_kernel_2d = self.max_wave_speed_kernel_2d % {"gravity": g}
        super(AdaptiveTimestepping, self).__init__()

    def FindTimestep(self, w):
        """ Finds the CFL criterion satisfying timestep of the DG flooddrake model

            :param w: state vector (h, mu[, mv] depending on dimension)
        """
        if self.V.mesh().geometric_dimension() == 2:
            # split functions
            h, mu, mv = split(w)
        if self.V.mesh().geometric_dimension() == 1:
            # split functions
            h, mu = split(w)
        # find minimum cfl timestep; clear previous cell values first
        self.c_w_s.assign(0)
        if self.V.mesh().geometric_dimension() == 2:
            par_loop(self.max_wave_speed_kernel_2d, dx, {"cell_wave_speed": (self.c_w_s, RW),
                                                         "vert_cell": (h, READ),
                                                         "vert_u_cell": (mu, READ),
                                                         "vert_v_cell": (mv, READ),
                                                         "cell_lengths": (self.min_lengths, READ)})
        if self.V.mesh().geometric_dimension() == 1:
            par_loop(self.max_wave_speed_kernel_1d, dx, {"cell_wave_speed": (self.c_w_s, RW),
                                                         "vert_cell": (h, READ),
                                                         "vert_u_cell": (mu, READ),
                                                         "cell_lengths": (self.min_lengths, READ)})
        # Global minimum of the per-cell ratio across all MPI ranks.
        cfl_timestep = self.c_w_s.comm.allreduce(self.c_w_s.dat.data_ro.min(),
                                                 MPI.MIN)
        # CFL constant for DG of degree p, with an extra 0.5 safety factor.
        delta_t = (1.0 / ((2.0 * self.p) + 1)) * cfl_timestep
        return np.min([delta_t * 0.5, self.max_timestep])
|
"""
Copyright (c) 2021, Electric Power Research Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of DER-VET nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import requests
import pprint
import Period as period
import EnergyTier as et
import xlsxwriter
import os
import pandas as pd
class API:
    """Client for the OpenEI utility-rates API: browse tariffs, expand one
    into period/tier objects, and export its schedules to an Excel workbook."""

    def __init__(self):
        self.URL = "https://api.openei.org/utility_rates"
        # SECURITY(review): API key hard-coded in source; move it to
        # configuration or an environment variable.
        self.PARAMS = {'version': 5, 'api_key': 'AHNGCVlcEKTgH6fgr1bXiSnneVb00masZcjSgp3I', 'format': 'json',
                       'getpage': '5b78ba715457a3bf45af0aea', 'limit': 20}  # 'address': '302 College Ave, Palo Alto'
        # NOTE: a network request is issued at construction time.
        self.r = requests.get(url=self.URL, params=self.PARAMS)
        self.data = self.r.json()
        self.tariff = None  # full detail record, populated by print_index
        self.energyratestructure = []
        self.energyweekdayschedule = []  # 12 months x 24 hours of period ids
        self.energyweekendschedule = []
        self.energy_period_list = []  # Period objects built by print_index
        # Scratch values for the tier currently being parsed (see reset()).
        self.max = None
        self.rate = None
        self.unit = None
        self.adj = None
        self.sell = None
        self.weekday_date_list = []
        self.weekend_date_list = []
        self.date_list = []
def print_all(self):
"""
Prints necessary identifying information of all tariffs that show from result page on OpenEI
"""
count = 1
for item in self.data["items"]:
print("---------------------------------------------------", count)
print("Utility.......", item["utility"])
print("Name..........", item["name"])
if "enddate" in item:
print("End Date......", item["enddate"])
if "startdate" in item:
print("Start Date....", item["startdate"])
print("EIA ID........", item["eiaid"])
print("URL...........", item["uri"])
if "description" in item:
print("Description...", item["description"])
print(" ")
count += 1
def reset(self):
"""
Resets tariff's tier values to None; necessary for print_index
"""
self.max = None
self.rate = None
self.unit = None
self.adj = None
self.sell = None
    def print_index(self, index):
        """
        Establishes all periods and tiers of the tariff using period and tier objects

        Args:
            index (Int): user input for which tariff they choose (1-based)
        """
        i = index
        # Fetch the full detail record for the chosen tariff by its label.
        label = self.data["items"][i - 1]["label"]
        # SECURITY(review): same hard-coded API key as in __init__.
        params = {'version': 5, 'api_key': 'AHNGCVlcEKTgH6fgr1bXiSnneVb00masZcjSgp3I', 'format': 'json', 'getpage': label, 'detail': 'full'}
        r = requests.get(url=self.URL, params=params)
        self.tariff = r.json()
        if "energyratestructure" in self.tariff["items"][0]:
            # print(self.tariff["items"][0]["energyratestructure"])
            self.energyratestructure = self.tariff["items"][0]["energyratestructure"]
            pcount = 1  # period count
            tcount = 1  # tier count
            for p in self.energyratestructure:
                self.energy_period_list.append(period.Period(pcount))
                # NOTE(review): this loop variable shadows the outer `i`
                # (harmless -- `i` is not used again after `label`).
                for i in p:
                    # Each tier dict may carry any subset of these keys;
                    # missing ones stay None via reset() below.
                    if "max" in i:
                        self.max = i["max"]
                    if "rate" in i:
                        self.rate = i["rate"]
                    if "unit" in i:
                        self.unit = i["unit"]
                    if "adjustment" in i:
                        self.adj = i["adjustment"]
                    if "sell" in i:
                        self.sell = i["sell"]
                    self.energy_period_list[pcount - 1].add(et.Tier(tcount, self.max, self.rate, self.unit, self.adj, self.sell))
                    tcount += 1
                    self.reset()
                tcount = 1
                pcount += 1
    def print_energy_structure(self):
        """
        Prints energy structure, month and hour schedule of when every period is active, to terminal
        """
        pprint.pprint(self.tariff)
        if not self.energy_period_list:  # if list is empty it is not printed
            pass
        else:
            print(" ")
            print("Tiered Energy Usage Charge Structure")
            # NOTE: `period` shadows the imported module alias of the same
            # name, but only locally within this method.
            for period in self.energy_period_list:
                print(" ")
                period.tostring()
                for tier in period.tier_list:
                    tier.tostring()
            print(" ")
        self.energyweekdayschedule = self.tariff["items"][0]["energyweekdayschedule"]
        self.energyweekendschedule = self.tariff["items"][0]["energyweekendschedule"]
        # Shift period ids from 0-based to 1-based IN PLACE -- this
        # permanently mutates the cached schedules; calendar() relies on the
        # 1-based ids.  (Despite the names, each `year` row is a month and
        # each `month` entry is the hour's period id.)
        for year in self.energyweekdayschedule:
            count = 0
            for month in year:
                year[count] = month + 1
                count += 1
            print(year)
        print('=----------------------------------------------------------------------=')
        for year in self.energyweekendschedule:
            count = 0
            for month in year:
                year[count] = month + 1
                count += 1
            print(year)
    def calendar(self):
        """
        Makes an excel file with three spreadsheets: weekday schedule, weekend schedule, and the rates of each period

        NOTE(review): Worksheet.conditional_format returns a status code, not
        a Format object, yet its return value (cond_yellow etc.) is passed to
        write() below as the cell format.  The cell colouring presumably comes
        from the conditional formats themselves -- confirm against the
        XlsxWriter documentation.
        """
        # create three workbook with three worksheets
        workbook = xlsxwriter.Workbook('calendar.xlsx')
        wksht_weekday = workbook.add_worksheet(name="Weekday")
        wksht_weekend = workbook.add_worksheet(name="Weekend")
        wksht_rates = workbook.add_worksheet(name="Rates")
        # 24 hours as two 12-hour halves (AM then PM).
        hours = [12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
                 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
        months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
        # conditional formatting that changes the format of a cell based on number value
        # yellow
        yellow = workbook.add_format()
        yellow.set_align('center')
        yellow.set_bg_color('yellow')
        yellow.set_bold()
        yellow.set_font_color('black')
        yellow.set_border(1)
        yellow.set_border_color('white')
        cond_yellow = wksht_weekday.conditional_format(1, 1, 12, 24, {'type': 'cell', 'criteria': '=', 'value': 1, 'format': yellow})
        cond_yellow = wksht_weekend.conditional_format(1, 1, 12, 24, {'type': 'cell', 'criteria': '=', 'value': 1, 'format': yellow})
        # blue
        blue = workbook.add_format()
        blue.set_align('center')
        blue.set_bg_color('blue')
        blue.set_bold()
        blue.set_font_color('white')
        blue.set_border(1)
        blue.set_border_color('white')
        cond_blue = wksht_weekday.conditional_format(1, 1, 12, 24, {'type': 'cell', 'criteria': '=', 'value': 2, 'format': blue})
        cond_blue = wksht_weekend.conditional_format(1, 1, 12, 24, {'type': 'cell', 'criteria': '=', 'value': 2, 'format': blue})
        # green
        green = workbook.add_format()
        green.set_align('center')
        green.set_bg_color('green')
        green.set_bold()
        green.set_font_color('white')
        green.set_border(1)
        green.set_border_color('white')
        cond_green = wksht_weekday.conditional_format(1, 1, 12, 24, {'type': 'cell', 'criteria': '=', 'value': 3, 'format': green})
        cond_green = wksht_weekend.conditional_format(1, 1, 12, 24, {'type': 'cell', 'criteria': '=', 'value': 3, 'format': green})
        # red
        red = workbook.add_format()
        red.set_align('center')
        red.set_bg_color('red')
        red.set_bold()
        red.set_font_color('black')
        red.set_border(1)
        red.set_border_color('white')
        cond_red = wksht_weekday.conditional_format(1, 1, 12, 24, {'type': 'cell', 'criteria': '=', 'value': 4, 'format': red})
        cond_red = wksht_weekend.conditional_format(1, 1, 12, 24, {'type': 'cell', 'criteria': '=', 'value': 4, 'format': red})
        # purple
        purple = workbook.add_format()
        purple.set_align('center')
        purple.set_bg_color('purple')
        purple.set_bold()
        purple.set_font_color('white')
        purple.set_border(1)
        purple.set_border_color('white')
        cond_purple = wksht_weekday.conditional_format(1, 1, 12, 24, {'type': 'cell', 'criteria': '=', 'value': 5, 'format': purple})
        cond_purple = wksht_weekend.conditional_format(1, 1, 12, 24, {'type': 'cell', 'criteria': '=', 'value': 5, 'format': purple})
        # lime
        lime = workbook.add_format()
        lime.set_align('center')
        lime.set_bg_color('lime')
        lime.set_bold()
        lime.set_font_color('black')
        lime.set_border(1)
        lime.set_border_color('white')
        cond_lime = wksht_weekday.conditional_format(1, 1, 12, 24, {'type': 'cell', 'criteria': '=', 'value': 6, 'format': lime})
        cond_lime = wksht_weekend.conditional_format(1, 1, 12, 24, {'type': 'cell', 'criteria': '=', 'value': 6, 'format': lime})
        # else (period ids above 6 get centering only, no colour)
        center = workbook.add_format()
        center.set_align('center')
        cond_else = wksht_weekday.conditional_format(1, 1, 12, 24, {'type': 'cell', 'criteria': '>', 'value': 6, 'format': center})
        cond_else = wksht_weekend.conditional_format(1, 1, 12, 24, {'type': 'cell', 'criteria': '>', 'value': 6, 'format': center})
        # -------------------- weekday --------------------
        # write hours in header
        for i in range(len(hours)):
            wksht_weekday.write(0, i+1, hours[i])
        wksht_weekday.set_column(1, 24, 3.14, center)
        # write months in first column
        for i in range(len(months)):
            wksht_weekday.write(i+1, 0, months[i])
        wksht_weekday.set_column(0, 0, 4, center)
        # write all periods conditional formatting in weekday schedule
        # (x = hour column offset, y = month row offset)
        x = 0
        y = 0
        for month in self.energyweekdayschedule:
            for hour in month:
                if hour == 1:
                    wksht_weekday.write(1 + y, 1 + x, hour, cond_yellow)
                elif hour == 2:
                    wksht_weekday.write(1 + y, 1 + x, hour, cond_blue)
                elif hour == 3:
                    wksht_weekday.write(1 + y, 1 + x, hour, cond_green)
                elif hour == 4:
                    wksht_weekday.write(1 + y, 1 + x, hour, cond_red)
                elif hour == 5:
                    wksht_weekday.write(1 + y, 1 + x, hour, cond_purple)
                elif hour == 6:
                    wksht_weekday.write(1 + y, 1 + x, hour, cond_lime)
                else:
                    wksht_weekday.write(1 + y, 1 + x, hour, cond_else)
                x += 1
            x = 0
            y += 1
        # -------------------- weekend --------------------
        # write hours in header
        for i in range(len(hours)):
            wksht_weekend.write(0, i+1, hours[i])
        wksht_weekend.set_column(1, 24, 3.14, center)
        # write months in first column
        for i in range(len(months)):
            wksht_weekend.write(i+1, 0, months[i])
        wksht_weekend.set_column(0, 0, 4, center)
        # write all periods with conditional formatting in weekend schedule
        x = 0
        y = 0
        for month in self.energyweekendschedule:
            for hour in month:
                if hour == 1:
                    wksht_weekend.write(1 + y, 1 + x, hour, cond_yellow)
                elif hour == 2:
                    wksht_weekend.write(1 + y, 1 + x, hour, cond_blue)
                elif hour == 3:
                    wksht_weekend.write(1 + y, 1 + x, hour, cond_green)
                elif hour == 4:
                    wksht_weekend.write(1 + y, 1 + x, hour, cond_red)
                elif hour == 5:
                    wksht_weekend.write(1 + y, 1 + x, hour, cond_purple)
                elif hour == 6:
                    wksht_weekend.write(1 + y, 1 + x, hour, cond_lime)
                else:
                    wksht_weekend.write(1 + y, 1 + x, hour, cond_else)
                x += 1
            x = 0
            y += 1
        # -------------------- rates --------------------
        # write period and tiers in header
        header = ['Period', 'Tier 1', 'Tier 2', 'Tier 3', 'Tier 4', 'Tier 5', 'Tier 6', 'Tier 7', 'Tier 8']
        for i in range(len(header)):
            wksht_rates.write(0, i, header[i])
        wksht_rates.set_column(0, 0, 6.14, center)
        wksht_rates.set_column(1, 8, 8.3, center)
        # write period number and subsequent tier rates
        period_number = 1
        count = 0
        for period in self.energy_period_list:
            wksht_rates.write(period_number, 0, period_number)
            for tier in period.tier_list:
                wksht_rates.write(period_number, 1 + count, tier.get_rate())
                count += 1
            count = 0
            period_number += 1
        workbook.close()
def read_calendar(self):
"""
After user confirms their excel workbook is complete, each sheet is turned into a data frame
"""
print(" ")
file = "calendar.xlsx"
print("DF_WEEKDAY")
df_weekday = pd.read_excel(file, sheet_name="Weekday")
print(df_weekday)
print(" ")
print("DF_WEEKEND")
df_weekend = pd.read_excel(file, sheet_name="Weekend")
print(df_weekend)
print(" ")
print("DF_RATES")
df_rates = pd.read_excel(file, sheet_name="Rates")
print(df_rates)
print(" ")
def run(self):
    """
    Drive the full workflow: choose a tariff, build the calendar workbook,
    let the user edit it in Excel, then read the edited workbook back.
    """
    self.print_all()
    choice = int(input("Which tariff would you like to use?..."))
    self.print_index(choice)
    self.print_energy_structure()
    self.calendar()
    file = "calendar.xlsx"
    # os.startfile is Windows-only; opens the workbook in Excel.
    os.startfile(file)
    # Block until the user confirms the spreadsheet edits are finished.
    while input("Type 'ready' when you are done editing the excel file...") != "ready":
        pass
    self.read_calendar()
def main():
    """Entry point: create the API helper and run the interactive workflow."""
    api = API()
    api.run()
if __name__ == "__main__": main()
from django.shortcuts import render, redirect, HttpResponse
from .forms import UserForm, LoginForm
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate
# Create your views here.
def signup(request):
    """Render the signup form; on a valid POST create the user and log them in."""
    if request.method != "POST":
        form = UserForm()
        return render(request, 'signup.html', {'form': form})
    form = UserForm(request.POST)
    if form.is_valid():
        new_user = User.objects.create_user(**form.cleaned_data)
        login(request, new_user)
        return redirect('home')
    # Invalid submission: re-render with the bound form so errors are shown.
    return render(request, 'signup.html', {'form': form})
def signin(request):
    """Render the login form; on POST authenticate the credentials and log in.

    On bad credentials a plain-text failure message is returned; on GET the
    empty login form is rendered.
    """
    if request.method == "POST":
        # .get() avoids a KeyError (HTTP 500) when a field is missing from
        # the submitted form data; the original indexed request.POST directly.
        username = request.POST.get('username', '')
        password = request.POST.get('password', '')
        user = authenticate(username=username, password=password)
        # The stray debug print(username) was removed: it leaked login
        # attempts to stdout. The bound-but-unused LoginForm was dropped too.
        if user is not None:
            login(request, user)
            return redirect('home')
        else:
            return HttpResponse('로그인 실패. 다시 시도 해보세요.')
    else:
        form = LoginForm()
    return render(request, 'signin.html', {'form': form})
from chatbotapp.cnudata.cafeteria.studenthall1_info import *
# from chatbotapp.cnudata.studenthall2_info import make_answer_food_menu
from chatbotapp.cnudata.cafeteria.food_court_time import *
from chatbotapp.cnudata.cafeteria.dorm_info import *
from chatbotapp.cnudata.cafeteria.new_studenthall2_info import *
def get_entire_cafeteria_answer():
    """Build the cafeteria overview answer with one quick-reply per cafeteria."""
    answer = insert_text("\n충남대학교 학식 정보\n")
    # Label and action payload are identical for each cafeteria button.
    for label in ("기숙사식당", "제1학생회관", "제2학생회관", "제3학생회관"):
        answer = insert_replies(answer, make_reply(label, label))
    return answer
# Thin delegation wrappers: each forwards to the matching builder that is
# star-imported from studenthall1_info and returns its ready-made answer.
def get_studenthall1_answer():
    answer = category()
    return answer
def get_ramen_answer():
    answer = ramen()
    return answer
def get_gansik_answer():
    answer = gansik()
    return answer
def get_america_answer():
    answer = america()
    return answer
def get_snack_answer():
    answer = snack()
    return answer
def get_korea_answer():
    answer = korea()
    return answer
def get_japan_answer():
    answer = japan()
    return answer
def get_china_answer():
    answer = china()
    return answer
# def get_studenthall2345_answer(name):
# response_text = f"\n😋 충남대학교 {name} 메뉴 😋 \n"
# response_text += make_answer_food_menu(name)
# answer = insert_text(response_text)
# reply = make_reply("다른 식당 메뉴보기", "학식")
# answer = insert_replies(answer, reply)
#
# return answer
# Thin delegation wrappers over the star-imported cafeteria info modules:
# menu lookup for student halls 2/3, plus opening-hours answers per corner.
def get_studenthall23_answer(name):
    answer = get_studenthall23_answer_info(name)
    return answer
def get_entire_time():
    answer = entire_time()
    return answer
def get_ramen_time():
    answer = ramen_time()
    return answer
def get_gansik_time():
    answer = gansik_time()
    return answer
def get_america_time():
    answer = america_time()
    return answer
def get_snack_time():
    answer = snack_time()
    return answer
def get_korea_time():
    answer = korea_time()
    return answer
def get_japan_time():
    answer = japan_time()
    return answer
def get_china_time():
    answer = china_time()
    return answer
def get_entire_dorm():
    answer = dorm_time()
    return answer
# def monday_dorm():
# answer = monday()
# return answer
#
# def tuesday_dorm():
# answer = tuesday()
# return answer
#
# def wednesday_dorm():
# answer = wednesday()
# return answer
#
# def thursday_dorm():
# answer = thursday()
# return answer
#
# def friday_dorm():
# answer = friday()
# return answer
#
# def saturday_dorm():
# answer = saturday()
# return answer
#
# def sunday_dorm():
# answer = sunday()
# return answer
#
# def today_dorm():
# pass
def day_of_week_dorm(the_day_of_week_number):
    """Return the dorm-cafeteria answer for the given weekday number.

    ``the_day_of_week_number`` is matched against the ``Weekday`` enum
    values. Raises ValueError for a number that maps to no weekday — the
    original chain of ``if`` statements left ``answer`` unbound in that
    case and crashed with UnboundLocalError at the final ``return``.
    """
    for day in Weekday:
        if day.value == the_day_of_week_number:
            # Weekday member names ("MONDAY", ...) are exactly the labels
            # the original code passed to day_of_week().
            return day_of_week(day.name)
    raise ValueError(
        "unknown day-of-week number: {}".format(the_day_of_week_number)
    )
# def get_monday_breakfast_menu():
# text = monday_dorm_menu("breakfast")
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "월요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
#
# def get_monday_lunch_menu():
# text = monday_dorm_menu("lunch")
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "월요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
#
# def get_monday_dinner_menu():
# text = monday_dorm_menu("dinner")
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "월요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
def get_entire_menu(when, the_day_of_week_number):
    """Build one answer containing breakfast, lunch and dinner for the day.

    ``when`` is kept for interface compatibility; all three meals are
    always included regardless of its value.
    """
    meals = ("breakfast", "lunch", "dinner")
    text = "\n".join(
        dorm_menu(meal, the_day_of_week_number) for meal in meals
    )
    answer = insert_text(text)
    answer = insert_replies(answer, make_reply("다른식당보기", "학식"))
    answer = insert_replies(answer, make_reply("다른요일보기", "기숙사식당"))
    return answer
# print(get_entire_menu("breakfast",1))
#
# def get_monday_menu(when):
# text = monday_dorm_menu(when)
# answer = insert_text(text)
# reply = make_reply("다른시간보기", "월요일기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
#
#
# return answer
#
# def get_tuesday_menu(when):
# text = tuesday_dorm_menu(when)
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "화요일기숙사식당")
# answer = insert_replies(answer, reply)
# return answer
# # def get_tuesday_breakfast_menu():
# # text = tuesday_dorm_menu("breakfast")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "화요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_tuesday_lunch_menu():
# # text = tuesday_dorm_menu("lunch")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "화요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_tuesday_dinner_menu():
# # text = tuesday_dorm_menu("dinner")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "화요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
#
# def get_wednesday_menu(when):
# text = wednesday_dorm_menu(when)
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "수요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
#
# # def get_wednesday_breakfast_menu():
# # text = wednesday_dorm_menu("breakfast")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "수요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_wednesday_lunch_menu():
# # text = wednesday_dorm_menu("lunch")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "수요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_wednesday_dinner_menu():
# # text = wednesday_dorm_menu("dinner")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "수요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
#
# def get_thursday_menu(when):
# text = thursday_dorm_menu(when)
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "목요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
#
# # def get_thursday_breakfast_menu():
# # text = thursday_dorm_menu("breakfast")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "목요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_thursday_lunch_menu():
# # text = thursday_dorm_menu("lunch")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "목요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_thursday_dinner_menu():
# # text = thursday_dorm_menu("dinner")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "목요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
#
# def get_friday_menu(when):
# text = friday_dorm_menu(when)
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "금요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
# # def get_friday_breakfast_menu():
# # text = friday_dorm_menu("breakfast")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "금요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_friday_lunch_menu():
# # text = friday_dorm_menu("lunch")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "금요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_friday_dinner_menu():
# # text = friday_dorm_menu("dinner")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "금요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
#
#
# def get_saturday_menu(when):
# text = saturday_dorm_menu(when)
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "토요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
#
# # def get_saturday_breakfast_menu():
# # text = saturday_dorm_menu("breakfast")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "토요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_saturday_lunch_menu():
# # text = saturday_dorm_menu("lunch")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "토요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_saturday_dinner_menu():
# # text = saturday_dorm_menu("dinner")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "토요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_sunday_breakfast_menu():
# # text = sunday_dorm_menu("breakfast")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "일요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
#
# def get_sunday_menu(when):
# text = sunday_dorm_menu(when)
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "일요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
# def get_sunday_lunch_menu():
# text = sunday_dorm_menu("lunch")
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "일요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
#
# def get_sunday_dinner_menu():
# text = sunday_dorm_menu("dinner")
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "일요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
#
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Demonstrates Python sets: mutable, unordered collections of unique items.
# Because sets are unordered, indexing has no meaning and there is no way
# of determining which item pop() would remove.
# A set can be built from any iterable, e.g. the characters of a string.
myword = set('Hello World')
mybasket = {'orange', 'apple', 'pear', 'orange', 'banana', 'apple'}
mynumber = {500,99,900,2,34,5,77,8}
# printed in arbitrary order; duplicate characters appear only once
print(myword)
# the duplicate literals ('orange', 'apple') collapse to single elements
print(mybasket)
# adding an element that is already present is a no-op,
# so 77 still appears only once
mynumber.add(77)
print(mynumber)
# casting a list to a set removes its duplicates
mylist = [5,2,2,2,3,1,1,1,3]
print(set(mylist))
"""
8-8 code
"""
# def squsum(*param):
# sum = 0
# for i in param:
# sum += i*i
# print(sum)
#
#
# squsum(1, 2, 3)
def city_temp(**param):
    """Print each city/temperature pair supplied as keyword arguments."""
    for city, temp in param.items():
        print(city, ':', temp)


# Demonstrate dictionary unpacking into keyword arguments.
a = {'bj': '32c', 'xm': '23c', 'sh': '31c'}
city_temp(**a)
# Copyright (c) 2018 Trail of Bits, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import io
import collections
import struct
from binaryninja import *
from binja_var_recovery.util import *
LOPS = LowLevelILOperation
MOPS = MediumLevelILOperation
VariableSet = set()
class Value(object):
    """A concrete address value tracked relative to a base address.

    Arithmetic operators return new ``Value`` instances; the base address
    is propagated from whichever operand has cross-references.
    """
    def __init__(self, bv, addr, base = None):
        self.bv = bv
        self.address = addr
        self._has_xrefs = has_xrefs(bv, addr)
        # NOTE(review): a base of 0 is treated as "no base" and replaced by
        # addr — confirm that a genuine zero base is never needed.
        self._base = base if base else addr

    @property
    def base_address(self):
        return self._base

    @property
    def size(self):
        # Absolute distance between the current address and its base.
        if self.address > self._base:
            return self.address - self._base
        else:
            return self._base - self.address

    def __str__(self):
        return "<{:x} - {:x}>".format(self.address, self._base)

    def __repr__(self):
        return "<{:x} - {:x}>".format(self.address, self._base)

    def __sub__(self, other):
        # Keep our own base when this value has xrefs; otherwise inherit
        # the other operand's base.
        if self._has_xrefs:
            return Value(self.bv, self.address - other.address, self._base)
        else:
            return Value(self.bv, self.address - other.address, other._base)

    def __add__(self, other):
        if self._has_xrefs:
            return Value(self.bv, self.address + other.address, self._base)
        else:
            return Value(self.bv, self.address + other.address, other._base)

    def __lshift__(self, other):
        if isinstance(other, Value):
            return Value(self.bv, self.address << other.address, self._base)
        else:
            return self

    def __rshift__(self, other):
        if isinstance(other, Value):
            return Value(self.bv, self.address >> other.address, self._base)
        else:
            return self

    def __hash__(self):
        return hash(self.__repr__())

    def __eq__(self, other):
        # Bug fix: the original tested isinstance(self, Value) — always
        # true — so comparing against a non-Value hit other.__repr__()
        # instead of returning False. The type check must apply to other.
        if isinstance(other, Value):
            return (self.__repr__() == other.__repr__())
        else:
            return False

    def __ne__(self, other):
        # Bug fix: the original called self.__eq__(self), which is always
        # True, so __ne__ unconditionally returned False.
        return not self.__eq__(other)
class SymbolicValue(Value):
    """A symbolic (named) value plus a numeric offset, e.g. "<rdi#1 8>"."""
    def __init__(self, bv, sym_value, offset=0):
        super(SymbolicValue, self).__init__(bv, 0)
        self.sym_value = sym_value
        self.offset = offset

    @property
    def base_address(self):
        # Symbolic values have no concrete base address.
        return 0

    @property
    def size(self):
        return 0

    def __str__(self):
        return "<{}, {}>".format(self.sym_value, self.offset)

    def __repr__(self):
        return "<{} {}>".format(self.sym_value, self.offset)

    def __add__(self, other):
        # NOTE(review): SymbolicValue is itself a Value, so the symbolic
        # concatenation branch below is only reached for non-Value
        # operands that happen to expose sym_value — confirm intent.
        if isinstance(other, Value):
            return SymbolicValue(self.bv, self.sym_value, self.offset + other.address)
        else:
            return SymbolicValue(self.bv, self.sym_value + " + " + other.sym_value, other.offset)

    def __sub__(self, other):
        if isinstance(other, Value):
            return SymbolicValue(self.bv, self.sym_value, self.offset - other.address)
        else:
            return SymbolicValue(self.bv, self.sym_value + " - " + other.sym_value, other.offset)

    def __lshift__(self, other):
        if isinstance(other, Value):
            return SymbolicValue(self.bv, self.sym_value + " << " + "{}".format(hex(other.address)), self.offset)
        else:
            return self

    def __rshift__(self, other):
        if isinstance(other, Value):
            return SymbolicValue(self.bv, self.sym_value + " >> " + "{}".format(hex(other.address)), self.offset)
        else:
            return self

    def __hash__(self):
        return hash(self.__repr__())

    def __eq__(self, other):
        # Bug fix: the original tested isinstance(self, SymbolicValue) —
        # always true; the type check must apply to other.
        if isinstance(other, SymbolicValue):
            return (self.__repr__() == other.__repr__())
        else:
            return False

    def __ne__(self, other):
        # Bug fix: the original called self.__eq__(self) (always True),
        # making __ne__ unconditionally False.
        return not self.__eq__(other)
def is_subset(s1, s2):
    """Return True when every element of ``s2`` is contained in ``s1``."""
    return all(item in s1 for item in s2)
def is_long(bv, addr):
    """Return True when ``addr`` is an integer.

    The original referenced the Python 2-only ``long`` builtin, which
    raises NameError on Python 3; fall back to ``int`` there.
    ``bv`` is unused but kept for interface compatibility with the other
    ``is_*`` helpers in this module.
    """
    try:
        return isinstance(addr, long)  # noqa: F821 - Python 2 builtin
    except NameError:
        return isinstance(addr, int)
def is_values(bv, obj):
    # NOTE(review): ``Values`` is not defined anywhere in this module;
    # unless the star-import from binja_var_recovery.util provides it,
    # every call raises NameError (``Value`` was probably intended) —
    # confirm against binja_var_recovery.util.
    return isinstance(obj, Values)
def is_sym_values(bv, obj):
    # True when obj is a SymbolicValue (symbolic expression + offset).
    return isinstance(obj, SymbolicValue)
def is_mlil(bv, insn):
    # True when insn is a medium-level IL instruction.
    return isinstance(insn, MediumLevelILInstruction)
def is_llil(bv, insn):
    # True when insn is a low-level IL instruction.
    return isinstance(insn, LowLevelILInstruction)
def is_reg_ssa(bv, reg):
    # True when reg is an SSA-form register.
    return isinstance(reg, SSARegister)
def is_call(bv, insn):
    # True when insn is any LLIL call variant; False for non-LLIL input.
    if is_llil(bv, insn):
        return insn.operation == LOPS.LLIL_CALL or \
            insn.operation == LOPS.LLIL_CALL_STACK_ADJUST or \
            insn.operation == LOPS.LLIL_CALL_SSA
    return False
def is_load(bv, insn):
    # True when insn is an LLIL memory load (SSA or non-SSA form).
    return insn.operation == LOPS.LLIL_LOAD_SSA or \
        insn.operation == LOPS.LLIL_LOAD
def is_store(bv, insn):
    # True when insn is an LLIL memory store (SSA or non-SSA form).
    return insn.operation == LOPS.LLIL_STORE_SSA or \
        insn.operation == LOPS.LLIL_STORE
def is_constant(bv, insn):
    """ Check if the operand is constant"""
    if is_mlil(bv, insn):
        return insn.operation == MOPS.MLIL_CONST or \
            insn.operation == MOPS.MLIL_CONST_PTR
    elif is_llil(bv, insn):
        return insn.operation == LOPS.LLIL_CONST or \
            insn.operation == LOPS.LLIL_CONST_PTR
    # NOTE(review): implicitly returns None (falsy) for operands that are
    # neither MLIL nor LLIL — callers rely on truthiness only.
def ssa_reg_name(ssa_reg):
    """Format an SSA register as "<reg>#<version>"."""
    return "%s#%s" % (ssa_reg.reg, ssa_reg.version)
def global_reg_name(func, ssa_reg):
    # Qualify an SSA register name with its owning function,
    # e.g. "main_rax#1", so names are unique across functions.
    return "{}_{}".format(func.name, ssa_reg_name(ssa_reg))
def get_constant(bv, insn):
    """ Get the constant value of the operand """
    return insn.constant
def is_register(bv, insn):
    # True when the IL operand is a (possibly SSA) register read.
    if is_mlil(bv, insn):
        return insn.operation == MOPS.MLIL_REG or \
            insn.operation == MOPS.MLIL_REG_SSA
    elif is_llil(bv, insn):
        return insn.operation == LOPS.LLIL_REG or \
            insn.operation == LOPS.LLIL_REG_SSA
    # NOTE(review): implicitly returns None for non-IL operands.
def is_address(bv, insn):
    # A register assignment whose source is a constant is treated as
    # loading an address into the register.
    if insn.operation == LowLevelILOperation.LLIL_SET_REG or \
        insn.operation == LowLevelILOperation.LLIL_SET_REG_SSA:
        return is_constant(bv, insn.src)
    return False
def call_target(bv, insn):
    # Return the constant call destination, or None for indirect calls
    # and non-LLIL input.
    if is_llil(bv, insn) and is_constant(bv, insn.dest):
        return insn.dest.constant
    else:
        return None
def call_params(bv, insn):
    # Return the source list of the call's first LLIL_CALL_PARAM operand,
    # or None when there is none (or insn is not LLIL).
    if is_llil(bv, insn):
        for op in insn.operands:
            if op.operation == LOPS.LLIL_CALL_PARAM:
                return op.src
    return None
def get_call_params(bv, insn):
    # Generator over the individual call parameters.
    # NOTE(review): raises TypeError when call_params() returns None
    # (no LLIL_CALL_PARAM operand) — confirm callers guard against this.
    for pparam in call_params(bv, insn):
        yield pparam
def get_call_output(bv, insn):
    # Yield the SSA registers written by the call (LLIL_CALL_OUTPUT_SSA).
    if is_llil(bv, insn):
        for op in insn.operands:
            if op.operation == LOPS.LLIL_CALL_OUTPUT_SSA:
                for reg in op.dest:
                    yield reg
def is_stack_op(bv, expr):
    # True when any operand of expr references the stack or frame pointer
    # (rsp/rbp), either as a bare SSARegister or via an LLIL register read.
    for opnd in expr.operands:
        if isinstance(opnd, SSARegister):
            if repr(opnd.reg) in ["rsp", "rbp"]:
                return True
        if is_llil(bv, opnd) and is_register(bv, opnd):
            reg_name = repr(opnd.src.reg)
            if reg_name in ["rsp", "rbp"]:
                return True
    return False
def is_arith_op(bv, insn):
    # NOTE(review): this only DEBUG-logs the postfix operand list and
    # always returns None — looks unfinished for a predicate named
    # is_arith_op; confirm intended behavior before using as a boolean.
    postfix = insn.postfix_operands
    DEBUG("insn {} ->postfix {}".format(insn, postfix))
def get_exec_sections(bv):
    """Yield every section of ``bv`` whose start offset is executable."""
    for name in bv.sections:
        section = bv.sections[name]
        if bv.is_offset_executable(section.start):
            yield section
def dw(bv, addr, end):
    # Read a little-endian u32 from the binary view at addr; returns None
    # when fewer than 4 bytes remain before end.
    if end - addr < 4:
        return None
    return struct.unpack('<L', bv.read(addr, 4))[0]
def qw(bv, addr, end):
    # Read a little-endian u64; returns None when fewer than 8 bytes remain.
    if end - addr < 8:
        return None
    return struct.unpack('<Q', bv.read(addr, 8))[0]
def dw_data(data, offset, length):
    """Read a little-endian u32 at ``offset`` in ``data``; None if short."""
    if length - offset < 4:
        return None
    (value,) = struct.unpack_from('<L', data, offset)
    return value
def qw_data(data, offset, length):
    """Read a little-endian u64 at ``offset`` in ``data``; None if short."""
    if length - offset < 8:
        return None
    (value,) = struct.unpack_from('<Q', data, offset)
    return value
def search_riprel_data(addr, start, data):
    """Scan a code blob for instructions whose rip-relative (or absolute)
    operand resolves to ``addr``, yielding each candidate instruction address.

    Handles 5/6/7-byte rip-relative forms and the 10-byte ``mov r64, imm64``
    absolute form; ``x64`` remembers the last REX.W prefix position so the
    6-byte check can skip the byte after a 7-byte match.

    NOTE(review): single-byte comparisons like ``opcode == '\\xe8'`` assume
    ``data`` is a Python 2 str — on Python 3 ``bytes`` indexing yields ints
    and these tests are always False; confirm the target interpreter.
    """
    datalen = len(data)
    x64 = 0
    offset = 0
    while offset < datalen:
        cur_addr = start + offset
        opcode = data[offset]
        # 5 byte instruction
        operand_idx = offset + 1
        opend = start + operand_idx + 4
        # rip-relative displacement is computed from the end of the operand.
        reladdr = (addr - opend) & 0xffffffff
        if (reladdr == dw_data(data, operand_idx, datalen)
            and (opcode == '\xe8' or opcode == '\xe9')
            and reladdr != 0):
            yield cur_addr
        # 6 byte instruction
        operand_idx = offset + 2
        opend = start + operand_idx + 4
        reladdr = (addr - opend) & 0xffffffff
        if (reladdr == dw_data(data, operand_idx, datalen)
            and offset != x64
            and data[offset+1] != '\xe8'
            and data[offset+1] != '\xe9'
            and reladdr != 0):
            yield cur_addr
        # 7 byte instruction
        operand_idx = offset + 3
        opend = start + operand_idx + 4
        reladdr = (addr - opend) & 0xffffffff
        if (reladdr == dw_data(data, operand_idx, datalen)
            and data[offset] == '\x48'
            and reladdr != 0):
            # 64 bit register
            x64 = offset + 1
            yield cur_addr
        # 10 byte instruction
        operand_idx = offset + 2
        opend = start + operand_idx + 8
        reladdr = (addr - opend) & 0xffffffff
        if (addr == qw_data(data, operand_idx, datalen)
            and data[offset] == '\x48'
            and ord(data[offset+1])&0xF8 == 0xb8):
            # 64 bit register
            x64 = offset + 1
            yield cur_addr
        offset += 1
def xrefs(bv, addr):
    """Yield code addresses in executable sections that appear to reference
    ``addr`` via a rip-relative or absolute operand."""
    for section in get_exec_sections(bv):
        blob = bv.read(section.start, section.end - section.start)
        for hit in search_riprel_data(addr, section.start, blob):
            yield hit
def find_xrefs(bv, addr):
    """Collect (and DEBUG-log) every discovered reference to ``addr``."""
    DEBUG("[-] searching for reference to {:08X}".format(addr))
    refs = []
    for ref in xrefs(bv, addr):
        refs.append(ref)
        DEBUG("xrefs {:x}".format(ref))
    if not refs:
        DEBUG("could not find references to {:08X}".format(addr))
    return refs
def has_xrefs(bv, addr):
    """Return True when ``addr`` has at least one code or data cross-reference."""
    # Both lookups are performed unconditionally, matching the original.
    code_refs = bv.get_code_refs(addr)
    data_refs = bv.get_data_refs(addr)
    return bool(len(code_refs) or len(data_refs))
def get_memory_version(bv, insn):
    """ Get the version of the ssa memory for the `MediumLevelILInstruction`. For
        the `LowLevelILInstruction` it finds out if the insn is memory operation
    """
    if is_mlil(bv, insn):
        if insn.ssa_memory_version > 0:
            return insn.ssa_memory_version
        elif "mem#0" in str(insn):
            # Version 0 is not exposed as a positive number; detect it
            # textually in the instruction's string form.
            return 0
    elif is_llil(bv, insn):
        if "mem#" in str(insn):
            # LLIL: only report that the instruction touches SSA memory.
            return 1
    # NOTE(review): returns None for instructions with no memory access;
    # has_memory_xrefs below relies on that.
def get_address(bv, insn):
    # Return the value of the first token of insn that Binary Ninja marks
    # as a possible address; implicitly returns None when there is none.
    for token in insn.tokens:
        if token.type == InstructionTextTokenType.PossibleAddressToken:
            return token.value
def has_memory_xrefs(bv, addr):
    """Heuristically decide whether ``addr`` behaves like a standalone
    memory variable: every code reference must be a non-call memory access
    aimed exactly at ``addr``, and the functions referencing it must not be
    a superset-compatible set of those touching the previous data variable.
    Accepted addresses are memoized in the module-level ``VariableSet``.
    """
    if addr in VariableSet:
        return True
    if not is_data_variable_section(bv, addr):
        return False
    # if there is any reference to data section return false
    # Not handling such cases
    for ref in bv.get_data_refs(addr):
        return False
    dv_refs = list()
    dv_func_set = set()
    # NOTE(review): dv is never used after this assignment — confirm
    # before removing.
    dv = bv.get_data_var_at(addr)
    prev_dv = bv.get_previous_data_var_before(addr)
    for ref in bv.get_code_refs(addr):
        dv_func_set.add(ref.function.start)
        llil = ref.function.get_low_level_il_at(ref.address)
        if llil:
            DEBUG("VariableAnalysis: {:x} - {:x} {}".format(addr, ref.address, llil.ssa_form))
            dv_refs.append(llil.ssa_form)
    for ins in dv_refs:
        # Reject references that are not memory operations, are calls, or
        # target a different address than addr.
        if get_memory_version(bv, ins) is None:
            return False
        if is_call(bv, ins):
            return False
        if get_address(bv, ins) != addr:
            return False
    prev_dv_refs = list()
    prev_dv_func_set = set()
    if prev_dv != None:
        for ref in bv.get_code_refs(prev_dv):
            prev_dv_func_set.add(ref.function.start)
            llil = ref.function.get_low_level_il_at(ref.address)
            prev_dv_refs.append(llil)
    for ins in prev_dv_refs:
        # The previous variable must not be address-taken or passed to calls.
        if ins and (is_call(bv, ins) or is_address(bv, ins)):
            return False
    if is_subset(prev_dv_func_set, dv_func_set):
        return False
    VariableSet.add(addr)
    return True
def has_address_xrefs(bv, insn, addr):
    """Heuristically decide whether ``addr`` is the start of a referenced
    symbol: any data reference, or a code reference that loads exactly
    ``addr`` as a constant, qualifies. Accepted addresses are memoized in
    the module-level ``VariableSet``. ``insn`` is currently unused —
    presumably kept for interface symmetry; confirm before removing.
    """
    if addr in VariableSet:
        return True
    if not is_data_variable_section(bv, addr):
        return False
    # if there is any reference to data section return True
    # Assuming this will be the start address of the synbol
    for ref in bv.get_data_refs(addr):
        VariableSet.add(addr)
        return True
    dv_refs = list()
    dv_func_set = set()
    # NOTE(review): dv is never used after this assignment — confirm
    # before removing.
    dv = bv.get_data_var_at(addr)
    prev_dv = bv.get_previous_data_var_before(addr)
    for ref in bv.get_code_refs(addr):
        dv_func_set.add(ref.function.start)
        llil = ref.function.get_low_level_il_at(ref.address)
        if llil:
            DEBUG("AddressAnalysis: {:x} - {:x} {}".format(addr, ref.address, llil.ssa_form))
            dv_refs.append(llil.ssa_form)
    prev_dv_refs = list()
    prev_dv_func_set = set()
    if prev_dv != None:
        for ref in bv.get_code_refs(prev_dv):
            prev_dv_func_set.add(ref.function.start)
            llil = ref.function.get_low_level_il_at(ref.address)
            prev_dv_refs.append(llil)
    if is_subset(prev_dv_func_set, dv_func_set):
        return False
    for ins in dv_refs:
        # A constant load of exactly addr marks it as an address-taken symbol.
        if is_address(bv, ins) and \
            get_address(bv, ins) == addr:
            VariableSet.add(addr)
            return True
    return False
#!/usr/bin/env python3
import aiohttp
import logging
import sys
# Log everything (DEBUG and up) to stderr with short timestamps.
logging.basicConfig(
    format="%(asctime)s %(levelname)s:%(name)s: %(message)s",
    level=logging.DEBUG,
    datefmt="%H:%M:%S",
    stream=sys.stderr,
)
logger = logging.getLogger("http_fetcher")
# chardet's charset prober is extremely chatty at DEBUG level; silence it.
logging.getLogger("chardet.charsetprober").disabled = True
async def fetch_html(hub, url: str, session: aiohttp.ClientSession, **kwargs) -> str:
    """GET ``url`` with the given session and return the response body as text.

    ``hub`` is accepted but unused here — presumably the caller's shared
    state object; confirm before removing. ``kwargs`` are forwarded to
    ``session.request``. raise_for_status() propagates an exception for
    HTTP error status codes before the body is read.
    """
    resp = await session.request(method="GET", url=url, **kwargs)
    resp.raise_for_status()
    logger.debug(f"Got response [{resp.status}] for URL: {url}")
    return await resp.text()
|
from .. import config
config.setup_examples()
import infermedica_api
if __name__ == "__main__":
    api: infermedica_api.APIv3Connector = infermedica_api.get_api()
    # NOTE(review): age appears to be required context for parse requests —
    # confirm against the infermedica API v3 documentation.
    age = 38
    print("Parse simple text:")
    response = api.parse("i feel stomach pain but no coughing today", age=age)
    print(response, end="\n\n")
    print("Parse simple text and include tokens information:")
    # Same text, but also request per-token parse information.
    response = api.parse(
        "i feel stomach pain but no coughing today", age=age, include_tokens=True
    )
    print(response, end="\n\n")
|
# Single source of truth for the package version; only __version__ is public.
__all__ = ['__version__']
__version__ = '0.1.10'
|
import os
import pymongo
from mongodb.exceptions import MongoURLNotFoundError
__all__ = ("MONGO_CLIENT", "new_mongo_session",)
# Fail fast at import time when the deployment is missing its MongoDB URL.
_url = os.environ.get("MONGO_URL")
if _url is None:
    raise MongoURLNotFoundError()
# Module-level singleton client shared by the whole process.
MONGO_CLIENT = pymongo.MongoClient(_url)
def new_mongo_session():
    """
    Create a new mongo session and return it.
    This should be used along with the ``with`` statement.
    >>> with new_mongo_session() as session:
    >>>     pass
    :return: mongo client session
    """
    return MONGO_CLIENT.start_session()
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for unit tests."""
from typing import Any, Sequence
def assert_sequence_equals(expected: Sequence[Any], actual: Sequence[Any]):
    """Assert that two sequences have equal length and pairwise-equal elements."""
    # Typo fix: the failure message previously read "legth".
    assert len(actual) == len(
        expected
    ), f"Sequences have different sizes, expected length {len(expected)} but got length {len(actual)}"
    for index, pair in enumerate(zip(actual, expected)):
        actual_item, expected_item = pair
        assert (
            actual_item == expected_item
        ), f"Pair {index} is not equal. Expected {expected_item} but got {actual_item}"
def assert_sequence_partial_equals(
    expected: Sequence[Any], actual: Sequence[Any], attributes_to_test: Sequence[str]
):
    """Assert that the elements in a sequence match in all attributes defined by
    ``attributes_to_test``.

    The elements in the sequences can be dicts or objects with attributes
    (e.g. namedtuples); dict elements are read by key, everything else via
    ``getattr``.

    Args:
        expected: sequence of expected elements.
        actual: sequence produced by the code under test.
        attributes_to_test: names of the keys/attributes to compare.

    Raises:
        AssertionError: if the lengths differ or any tested attribute differs.
    """

    def _value(item: Any, attr: str) -> Any:
        # one place for the dict-vs-object lookup (was duplicated inline)
        return item.get(attr) if isinstance(item, dict) else getattr(item, attr)

    # fix: "legth" -> "length" in the failure message
    assert len(actual) == len(
        expected
    ), f"Sequences have different sizes, expected length {len(expected)} but got length {len(actual)}"
    for index, (actual_item, expected_item) in enumerate(zip(actual, expected)):
        for attr in attributes_to_test:
            actual_value = _value(actual_item, attr)
            expected_value = _value(expected_item, attr)
            assert (
                expected_value == actual_value
            ), f"Attribute '{attr}' of pair {index} is not equal ({expected_value}!={actual_value}). Expected {expected_item} but got {actual_item}"
|
from ..factory import Type
class maskPointChin(Type):
    """Marker type for the chin mask point.

    Carries no behavior of its own — it exists so chin points can be
    registered and dispatched through the ``Type`` factory machinery.
    """

    pass
|
"""
script to merge or split model parallel checkpoints
This script:
- assumes a checkpoint directory with pipeline parallel checkpoints (i.e. a global_step directory with files named like 'layer_00-model_00-model_states.pt')
- assumes that the checkpoint names haven't been changed, since it makes the script much cleaner. If you've changed the name from the default `layer_nn-model_nn-model_states.pt` pattern - *this script will not work*
- assumes the config files are saved to a subdirectory in the global_step directory
- merges the model parallel files of a single layer (i.e. joins 'layer_00-model_00-model_states.pt' and 'layer_00-model_01-model_states.pt')
- potentially splits the merged checkpoint to a target model parallel size
- does not change pipeline parallel settings; You might want to adjust when reloading a checkpoint.
Examples
```console
# print help
python tools/merge.py --help
# merge the global_step10 checkpoint in the checkpoints directory to checkpoints_merged with output model parallel 1 and pipe parallel 2
python tools/merge.py -d checkpoints -o checkpoints_merged -s 10 -mp 1 -pp 2
# merge the global_step10 checkpoint in the checkpoints directory to checkpoints_merged with output model parallel 4 and pipe parallel 1
python tools/merge.py -d checkpoints -o checkpoints_merged -s 10 -mp 4 -pp 1
```
"""
import re
import os
import yaml
import json
import shutil
import argparse
from typing import List
from pathlib import Path
from collections import defaultdict
from tqdm import tqdm
import torch
def parse_args(args=None):
    """Build and parse the command line for the merge/split tool.

    Args:
        args: optional list of argument strings to parse instead of
            ``sys.argv[1:]`` (useful for testing). ``None`` — the default —
            keeps the original behavior of reading ``sys.argv``.

    Returns:
        argparse.Namespace with ``checkpoint_dir``, ``global_step``,
        ``model_parallel``, ``pipe_parallel``, ``output_dir`` and
        ``parameter_mean``.
    """
    parser = argparse.ArgumentParser(
        "Merge or split model parallel groups in a pretrained model"
    )
    parser.add_argument(
        "-d",
        "--checkpoint_dir",
        default="checkpoints/",
        type=str,
        help="parent directory in which checkpoints are stored; this is the 'save' parameter of neox args",
    )
    parser.add_argument(
        "-s",
        "--global_step",
        type=int,
        default=None,
        help='which global step to edit (each checkpoint dir should contain multiple global steps.) \
            defaults to the global step contained in "checkpoint_dir/latest"',
    )
    parser.add_argument(
        "-mp",
        "--model_parallel",
        type=int,
        default=1,
        help="number of model parallel partitions in the output",
    )
    parser.add_argument(
        "-pp",
        "--pipe_parallel",
        type=int,
        default=1,
        help="number of pipe parallel partitions in the output",
    )
    parser.add_argument(
        "-o",
        "--output_dir",
        default="checkpoints_merged",
        type=str,
        help="Where to save the merged model",
    )
    parser.add_argument(
        "-m",
        "--parameter_mean",
        action="store_true",
        help="Take the mean of the duplicated parameters instead of the 0th index",
    )
    return parser.parse_args(args)
def replace(d, key, value):
    """Set ``key`` in ``d``, treating '-' and '_' spellings as equivalent.

    If the dashed spelling is already a key it is updated in place;
    otherwise the underscore spelling is written (created if absent).
    """
    if key in d:
        d[key] = value
    else:
        d[key.replace("-", "_")] = value
def get(d, key, default=None):
    """Get ``key`` from ``d``, treating '-' and '_' spellings as equivalent.

    Checks the dashed spelling first, then the underscore spelling, then
    falls back to ``default``.

    fix: the previous ``d.get(key) or d.get(k_alt, default)`` skipped any
    stored falsy value (0, False, ""); membership checks return it as-is.
    """
    if key in d:
        return d[key]
    return d.get(key.replace("-", "_"), default)
def get_output_config(
    checkpoint_dir,
    output_dir,
    model_parallel_size,
    pipe_parallel_size,
    remove_zero_optimizer=True,
):
    """
    read config files from source directory and change values to match the desired output.

    Args:
        checkpoint_dir: the original checkpoint directory
        output_dir: destination directory, written into the 'load'/'save' keys
        model_parallel_size: the output model parallel size
        pipe_parallel_size: the output pipe parallel_size
        remove_zero_optimizer: remove zero optimizer settings from the output dict.
            Not doing so may result in errors due to size mismatches in optimizer states.

    Returns:
        (result, orig_mp): the merged & patched config dict, and the original
        model parallel size read from the source configs.
    """
    # load all config files (later files win on duplicate keys)
    result = dict()
    for config in (checkpoint_dir / "configs").glob("*.yml"):
        with open(config) as f:
            data = yaml.full_load(f)
        result.update(data)
    # update model parallel size dependent on args
    orig_mp = get(result, "model-parallel-size")
    assert orig_mp is not None
    replace(result, "model-parallel-size", model_parallel_size)
    replace(result, "pipe-parallel-size", pipe_parallel_size)
    # replace load / save directories:
    replace(result, "load", str(output_dir))
    replace(result, "save", str(output_dir))
    # we need to make sure the resulting vocab size is correct
    # do this by modifying the 'make_vocab_size_divisible_by' argument to be
    # orig * (orig_mp / mp_out)
    orig = get(result, "make_vocab_size_divisible_by", 128)
    replace(
        result,
        "make_vocab_size_divisible_by",
        int(orig * (orig_mp / model_parallel_size)),
    )
    replace(
        result, "no_load_rng", True
    )  # need to tell megatron not to try to load past mp rng states
    # remove zero optimizer
    # Loading a zero optimizer in inference results in an error due to attempted weight load
    if remove_zero_optimizer:
        if "zero_optimization" in result:
            if "stage" in result["zero_optimization"]:
                result["zero_optimization"]["stage"] = 0
        if "zero-optimization" in result:
            if "stage" in result["zero-optimization"]:
                result["zero-optimization"]["stage"] = 0
    return result, orig_mp
def get_weight_paths_by_layer(weights_dir: Path) -> List[tuple]:
    """Group checkpoint files in ``weights_dir`` by layer index.

    Returns a list of ``(layer, paths)`` tuples sorted by layer name, where
    ``layer`` is the zero-padded index parsed from ``layer_NN-...`` file
    names, or ``"model_states"`` for files carrying no layer index
    (the ``mp_rank_*_model_states.pt`` files).

    fixes: the regex is now a raw string (``"\\d"`` in a plain string is a
    DeprecationWarning), and the return annotation reflects the actual
    list-of-tuples return value (it was ``List[Path]``).
    """
    # load list of source weight files
    paths = list(weights_dir.glob("*.pt"))
    # group checkpoint paths by layer index
    paths_by_layer = defaultdict(list)
    for p in paths:
        match = re.search(r"layer_(\d+)", p.stem)
        layer = "model_states" if match is None else match.group(1)
        paths_by_layer[layer].append(p)
    # sort by layer name so downstream progress output is ordered
    return sorted(paths_by_layer.items(), key=lambda item: item[0])
def load_grouped_weights(weight_paths: List[Path], first_only=False):
    """Load checkpoint files and group their values by module name.

    Paths are sorted by file name — i.e. by model parallel index — which
    guarantees the right order of the partitions in the merged matrix.
    When ``first_only`` is set, only the first checkpoint is loaded (used
    for state dicts that are identical across model parallel ranks).

    Returns:
        defaultdict mapping module name -> list of values, one per file.
    """
    # Important! Sorting by name == sorting by model parallel index.
    ordered = sorted(weight_paths, key=lambda path: path.name)
    if first_only:
        ordered = ordered[:1]
    grouped = defaultdict(list)
    for path in ordered:
        state_dict = torch.load(path, map_location="cpu")
        for module_name, value in state_dict.items():
            grouped[module_name].append(value)
    return grouped
@torch.no_grad()
def merge_partitions(partitions, partition_dim, stride=1, mp=1):
    """Concatenate model-parallel partitions along ``partition_dim`` and
    re-split the result into ``mp`` output partitions.

    Returns the merged tensor with a leading axis of size 1 when
    ``mp == 1``, otherwise a tuple of ``mp`` equally sized tensors.
    Only ``stride == 1`` (plain concatenation) is implemented.
    """
    if stride != 1:
        # we don't use stride > 1 anywhere rn
        raise NotImplementedError
    num_partitions = len(partitions)
    # Every partition must agree on the size of the merged dimension.
    per_partition_size = partitions[0].shape[partition_dim]
    for partition in partitions[1:]:
        assert per_partition_size == partition.size(
            partition_dim
        ), "all partitions should be of equal size"
    merged_size = list(partitions[0].shape)
    merged_size[partition_dim] *= num_partitions
    merged = torch.zeros(*merged_size).to(partitions[0].dtype)
    assert (per_partition_size * num_partitions) == merged.size(
        partition_dim
    ), "ERROR: sizes do not match."
    # stride 1 means a simple concatenation along the partition dimension
    torch.cat(partitions, dim=partition_dim, out=merged)
    if mp == 1:
        return merged.unsqueeze(0)
    assert (
        merged.shape[partition_dim] % mp == 0
    ), "cannot convert to mp size " + str(mp)
    splits = torch.split(merged, merged.shape[partition_dim] // mp, dim=partition_dim)
    assert len(splits) == mp, "got different number of splits than mp"
    for split in splits:
        assert len(split.shape) == len(
            merged.shape
        ), "split has different dimensions than merged"
        assert list(splits[0].shape) == list(
            split.shape
        ), "all splits should have equal size"
    return splits
# weights of these layers will not be copied as they won't be valid after merging
# (optimizer state and RNG state are tied to the original model-parallel layout)
IGNORED_LAYERS = [
    "optimizer",
    "random_rng_state",
    "np_rng_state",
    "torch_rng_state",
    "cuda_rng_state",
    "rng_tracker_states",
    "optimizer_state_dict",
    "param_shapes",
]
def merge_checkpoints(
    checkpoint_dir,
    model_parallel_size,
    pipe_parallel_size,
    output_dir,
    global_step=None,
    parameter_mean=False,
):
    """Merge a model-parallel checkpoint and re-split it to the requested
    output model parallel size.

    Args:
        checkpoint_dir: parent directory holding global_step* subdirectories.
        model_parallel_size: model parallel size of the OUTPUT checkpoint.
        pipe_parallel_size: pipe parallel size written into the output config;
            also the number of mp_rank_* state files written.
        output_dir: destination directory for the converted checkpoint.
        global_step: global step to convert; defaults to the step recorded in
            ``checkpoint_dir/latest``.
        parameter_mean: average duplicated (non-parallel) parameters across
            ranks instead of taking rank 0's copy.
    """
    checkpoint_dir = Path(checkpoint_dir)
    assert (
        checkpoint_dir.is_dir()
    ), f"checkpoint dir does not exist: {str(checkpoint_dir)}"
    output_dir = Path(output_dir)
    if global_step is None:
        # fall back to the step recorded in the 'latest' marker file
        if os.path.isfile(checkpoint_dir / "latest"):
            with open(checkpoint_dir / "latest") as f:
                global_step = int(f.read().strip().replace("global_step", ""))
        else:
            raise ValueError("No global step provided")
    weights_dir = checkpoint_dir / f"global_step{global_step}"
    output_weights_dir = output_dir / f"global_step{global_step}"
    output_configs_dir = output_dir / "configs"
    print(f"* Merging from {weights_dir}", flush=True)
    print(f"* Merging to {output_weights_dir}", flush=True)
    # load modified configs and original mp size
    config, orig_mp = get_output_config(
        checkpoint_dir, output_dir, model_parallel_size, pipe_parallel_size
    )
    output_layer_parallelism = (
        config.get("output_layer_parallelism")
        or config.get("output-layer-parallelism")
        or "row"
    )
    # this maps layer names to the dimension that should be merged
    # the only layers that need to be merged are:
    # - vocab parallel embedding
    # - row parallel linear weights
    # - column parallel linear weights
    # - column parallel linear biases
    # everything else is non parallel, and we just take the 0th mp index in this case
    PARTITION_DIM_MAP = {
        "word_embeddings.weight": 0,  # vocab
        "attention.query_key_value.weight": 0,  # column
        "attention.query_key_value.bias": 0,  # column
        "mlp.dense_h_to_4h.bias": 0,
        "mlp.dense_h_to_4h.weight": 0,
        "attention.dense.weight": 1,  # row
        "mlp.dense_4h_to_h.weight": 1,
        # variable:
        "final_linear.weight": 0 if output_layer_parallelism == "column" else 1,
    }
    # prepare output directories (ask before clobbering an existing output)
    if output_weights_dir.is_dir():
        resp = input(
            f"* Output weights dir ({output_weights_dir}) already exists. Do you want to overwrite it? (yes/no) "
        )
        if resp.lower() in ["yes", "y"]:
            shutil.rmtree(output_weights_dir)
        else:
            exit()
    for p in [output_weights_dir, output_configs_dir]:
        p.mkdir(exist_ok=True, parents=True)
    # save modified config
    # NOTE(review): the config is serialized with json.dump into config.yml;
    # JSON is a subset of YAML so yaml loaders should still parse it —
    # confirm downstream tooling accepts this.
    with open(output_configs_dir / "config.yml", "w") as f:
        json.dump(config, f, indent=4)
    # load weight paths grouped by layer
    # so that we can merge layer by layer
    weight_paths_by_layer = get_weight_paths_by_layer(weights_dir)
    # iterate over layers and produce a merged checkpoint
    pbar = tqdm(weight_paths_by_layer)
    for (layer, weight_paths) in pbar:
        # load weights grouped by module name for the current layer
        first_only = (
            layer == "model_states"
        )  # for this layer, we only ever keep the first (all are copies of each other), so we can skip loading it.
        grouped_weights = load_grouped_weights(weight_paths, first_only=first_only)
        # merge and save
        out_sd = {}
        split_modules = defaultdict(list)
        for module_name, partitions in grouped_weights.items():
            if layer == "model_states":
                if module_name in IGNORED_LAYERS:
                    # don't copy over optimizer / rng states as they won't be valid
                    continue
                # so, *I think* that the following two only need to be rewritten because deepspeed
                # is dumb, and breaks if you don't do this. I'm 80% certain they have no
                # downstream effects, since we're dumping zero states anyway
                elif module_name == "mp_world_size":
                    # overwrite mp in sd
                    out_sd[module_name] = model_parallel_size
                elif module_name == "dp_world_size":
                    # rescale dp world size to keep the total world size consistent
                    out_sd[module_name] = int(
                        partitions[0] * (model_parallel_size / orig_mp)
                    )
                elif module_name == "args":
                    # change mp size in sd args
                    p = partitions[0]
                    replace(p, "model-parallel-size", model_parallel_size)
                else:
                    out_sd[module_name] = partitions[0]
            else:
                partition_dim = PARTITION_DIM_MAP.get(module_name, None)
                if partition_dim is None:
                    print(module_name)
                    # just take the 0th partition for non model-parallel weights
                    if parameter_mean:
                        out_sd[module_name] = torch.mean(torch.stack(partitions), dim=0)
                    else:
                        out_sd[module_name] = partitions[0]
                else:
                    splits = merge_partitions(
                        partitions, partition_dim=partition_dim, mp=model_parallel_size
                    )
                    for mp_rank in range(model_parallel_size):
                        split_modules[module_name].append(splits[mp_rank])
        # save outputs:
        if layer == "model_states":
            pbar.set_description(
                f"* Saving state dicts for layer {layer}, module `{module_name}`"
            )
            # the (identical) state dict is written once per pipe parallel rank
            for pp in range(pipe_parallel_size):
                out_path = output_weights_dir / f"mp_rank_{pp:02}_model_states.pt"
                torch.save(out_sd, out_path)
            pbar.set_description(
                f"* Saved state dicts for layer {layer}, module `{module_name}`"
            )
        else:
            for mp_rank in range(model_parallel_size):
                out_path = (
                    output_weights_dir
                    / f"layer_{layer}-model_{mp_rank:02}-model_states.pt"
                )
                # overwrite the parallel modules with this rank's split
                for module_name, splits in split_modules.items():
                    out_sd[module_name] = splits[mp_rank]
                torch.save(out_sd, out_path)
                pbar.set_description(
                    f"* Saved state dicts for layer {layer}, module `{module_name}`, and mp rank {mp_rank}"
                )
    # write 'latest' file
    with open(output_dir / "latest", "w") as f:
        f.write(f"global_step{global_step}")
    print("* DONE!")
if __name__ == "__main__":
    # get arguments
    args = parse_args()
    merge_checkpoints(
        args.checkpoint_dir,
        args.model_parallel,
        args.pipe_parallel,
        args.output_dir,
        args.global_step,
        # fix: the parser defines --parameter_mean (args.parameter_mean);
        # the previous 'args.layernorm_mean' raised AttributeError.
        args.parameter_mean,
    )
"""Illustrates a method to intercept changes on objects, turning
an UPDATE statement on a single row into an INSERT statement, so that a new
row is inserted with the new data, keeping the old row intact.
This example adds a numerical version_id to the Versioned class as well
as the ability to see which row is the most "current" vesion.
"""
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import event
from sqlalchemy import ForeignKeyConstraint
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import attributes
from sqlalchemy.orm import backref
from sqlalchemy.orm import column_property
from sqlalchemy.orm import make_transient
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
class Versioned(object):
    """Mixin giving a mapped class a composite (id, version_id) primary key
    plus the ability to turn an UPDATE into an INSERT of a new version
    (see the ``before_flush`` listener below)."""

    # we have a composite primary key consisting of "id"
    # and "version_id"
    id = Column(Integer, primary_key=True)
    version_id = Column(Integer, primary_key=True, default=1)

    # optional - add a persisted is_current_version column
    is_current_version = Column(Boolean, default=True)

    # optional - add a calculated is_current_version column
    @classmethod
    def __declare_last__(cls):
        # correlated subquery: a row is "current" iff its version_id equals
        # the max version_id among rows sharing its id
        alias = cls.__table__.alias()
        cls.calc_is_current_version = column_property(
            select([func.max(alias.c.version_id) == cls.version_id]).where(
                alias.c.id == cls.id
            )
        )

    def new_version(self, session):
        # optional - set previous version to have is_current_version=False
        old_id = self.id
        session.query(self.__class__).filter_by(id=old_id).update(
            values=dict(is_current_version=False), synchronize_session=False
        )
        # make us transient (removes persistent
        # identity).
        make_transient(self)
        # increment version_id, which means we have a new PK.
        self.version_id += 1
@event.listens_for(Session, "before_flush")
def before_flush(session, flush_context, instances):
    """Turn pending UPDATEs on Versioned instances into INSERTs of a new
    version row, leaving the old row intact."""
    for instance in session.dirty:
        # only act on modified, already-persisted Versioned objects
        is_candidate = (
            isinstance(instance, Versioned)
            and session.is_modified(instance, passive=True)
            and attributes.instance_state(instance).has_identity
        )
        if not is_candidate:
            continue
        # make it transient, then re-add so it is INSERTed as a new row
        instance.new_version(session)
        session.add(instance)
Base = declarative_base()
engine = create_engine("sqlite://", echo=True)
Session = sessionmaker(engine)


# example 1, simple versioning
class Example(Versioned, Base):
    __tablename__ = "example"
    data = Column(String)


Base.metadata.create_all(engine)

session = Session()
# first version of the row (version_id defaults to 1)
e1 = Example(id=1, data="e1")
session.add(e1)
session.commit()

# mutating + committing triggers before_flush, which INSERTs version 2
e1.data = "e2"
session.commit()

# both versions exist; only the newest is flagged current
assert session.query(
    Example.id,
    Example.version_id,
    Example.is_current_version,
    Example.calc_is_current_version,
    Example.data,
).order_by(Example.id, Example.version_id).all() == (
    [(1, 1, False, False, "e1"), (1, 2, True, True, "e2")]
)
# example 2, versioning with a parent
class Parent(Base):
    """Parent row pointing at one specific version of a Child through the
    composite (child_id, child_version_id) foreign key."""

    __tablename__ = "parent"
    id = Column(Integer, primary_key=True)
    child_id = Column(Integer)
    child_version_id = Column(Integer)
    child = relationship("Child", backref=backref("parent", uselist=False))

    __table_args__ = (
        ForeignKeyConstraint(
            ["child_id", "child_version_id"], ["child.id", "child.version_id"]
        ),
    )
class Child(Versioned, Base):
    """Versioned child: creating a new version also repoints the parent's
    composite foreign key at the freshly inserted row."""

    __tablename__ = "child"
    data = Column(String)

    def new_version(self, session):
        # expire parent's reference to us
        session.expire(self.parent, ["child"])
        # create new version
        Versioned.new_version(self, session)
        # re-add ourselves to the parent. this causes the
        # parent foreign key to be updated also
        self.parent.child = self
Base.metadata.create_all(engine)

session = Session()
# initial child version (version_id 1), attached to a new parent
p1 = Parent(child=Child(id=1, data="c1"))
session.add(p1)
session.commit()

# editing the child INSERTs version 2 and repoints the parent at it
p1.child.data = "c2"
session.commit()
assert p1.child_id == 1
assert p1.child.version_id == 2

# both child versions remain; only the newest is current
assert session.query(
    Child.id,
    Child.version_id,
    Child.is_current_version,
    Child.calc_is_current_version,
    Child.data,
).order_by(Child.id, Child.version_id).all() == (
    [(1, 1, False, False, "c1"), (1, 2, True, True, "c2")]
)
|
class Solution:
    """
    @param nums: A list of integers
    @return: A integer indicate the sum of max subarray
    @ time: O(n) Space: O(1)
    """

    def maxSubArray(self, nums):
        """Return the maximum sum over all contiguous subarrays of ``nums``.

        Kadane's algorithm: track the best sum of a subarray ending at the
        current index, restarting whenever the running prefix would only
        drag the sum down.

        fix: the previous DP wrote partial sums back into ``nums``,
        mutating the caller's list; this version leaves the input intact
        while keeping O(n) time / O(1) extra space.
        """
        if not nums:
            return 0
        # seed both running values with the first element
        cur_sum = max_sum = nums[0]
        for value in nums[1:]:
            # extend the running subarray, or restart at the current element
            cur_sum = max(value, cur_sum + value)
            # keep the best sum seen so far
            max_sum = max(max_sum, cur_sum)
        return max_sum
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .geo_boundary_base import GeoBoundaryBase
class GeoDistance(GeoBoundaryBase):
    """Defines a geographical boundary that matches location within a certain
    euclidean distance from a point.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :param center: The geo-location for the center point to match around.
    :type center: ~microsoft.bing.commerce.search.models.GeoPoint
    :param radius: The acceptable euclidean distance to the center.
    :type radius: float
    """

    _validation = {
        '_type': {'required': True},
    }

    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'center': {'key': 'center', 'type': 'GeoPoint'},
        'radius': {'key': 'radius', 'type': 'float'},
    }

    def __init__(self, **kwargs):
        super(GeoDistance, self).__init__(**kwargs)
        self.center = kwargs.get('center', None)
        self.radius = kwargs.get('radius', None)
        self._type = 'GeoDistance'
|
from .disclaimer import Disclaimer
from .glossary import Glossary
|
# Python Lists — creation, append/insert/remove, and sorting with sorted().
# @IdiotInside_

print("Creating List:")
colors = ['red', 'blue', 'green']
for color in colors:
    print(color)  # red, blue, green
print(len(colors))  # 3

print("Append to the List")
colors.append("orange")
print(colors[3])  # orange

print("Insert to the List")
colors.insert(3, "yellow")
print(colors[3])  # yellow
print(colors[4])  # orange

print("Remove from the List")
print(colors[1])  # blue
colors.remove("blue")  # deletes blue and shifts later elements left
print(colors[1])  # green

print("Sorting Ascending order using sorted")
nums = [98, 22, 45, 30]
numsAsc = sorted(nums)
for value in numsAsc[:3]:
    print(value)  # 22, 30, 45

print("Sorting Descending order using sorted")
numsDesc = sorted(nums, reverse=True)
for value in numsDesc[:3]:
    print(value)  # 98, 45, 30
|
# -*- coding: utf-8 -*-
def in_range(num, minimum, maximum):
    """Return True when ``num`` lies in [minimum, maximum], bounds inclusive."""
    return not (num < minimum or num > maximum)
def decode_to_string(data):
    """
    Encode every string in the list/set so printing avoids the legacy
    Python 2 ``u''`` prefix.

    Args:
        data (list(str) or set(str))
    """
    encoded = [item.encode('UTF8') for item in data]
    return str(encoded)
# -*- coding: utf-8 -*-
"""
Building Skills in Object-Oriented Design V4
Card Definition for unit testing.
Note that Unicode ``\u1f0a1`` (🂡) to ``\u1f0de`` (🃞) has the images
of the cards themselves. There's an interesting wrinkle:
Unicode has 14 ranks; it includes a "knight" rank.
Since we use 13 ranks, we'll skip the knight.
"""
from typing import Any, cast
import sys
class Card:
    """A playing card with a rank, a suit, and an ``order`` used for
    comparisons.

    Comparison and equality are based solely on ``order`` (suit is
    ignored), so two cards of equal rank in different suits compare equal.
    """

    Clubs = u"\N{BLACK CLUB SUIT}"
    Diamonds = u"\N{WHITE DIAMOND SUIT}"
    Hearts = u"\N{WHITE HEART SUIT}"
    Spades = u"\N{BLACK SPADE SUIT}"
    Jack = 11
    Queen = 12
    King = 13
    Ace = 1

    def __init__(self, rank: int, suit: str) -> None:
        assert suit in (Card.Clubs, Card.Diamonds, Card.Hearts, Card.Spades)
        assert 1 <= rank < 14
        self.rank = rank
        self.suit = suit
        self.order = rank

    @property
    def hardValue(self) -> int:
        return self.rank

    @property
    def softValue(self) -> int:
        return self.rank

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(rank={self.rank!r}, suit={self.suit!r})"

    def __str__(self) -> str:
        return f"{self.rank:2d}{self.suit}"

    @property
    def image(self) -> str:
        """Unicode playing-card glyph (U+1F0A1..U+1F0DE)."""
        # base code point of the Unicode playing-card block for this suit
        s = {
            Card.Spades: 0x1F0A0,
            Card.Hearts: 0x1F0B0,
            Card.Diamonds: 0x1F0C0,
            Card.Clubs: 0x1F0D0,
        }[self.suit]
        # Unicode has 14 ranks including a "knight" at base+12;
        # skip it for Queen and King
        r = self.rank if self.rank < 12 else self.rank + 1
        return chr(s + r)

    def __le__(self, other: Any) -> bool:
        return self.order <= cast(Card, other).order

    def __lt__(self, other: Any) -> bool:
        return self.order < cast(Card, other).order

    def __ge__(self, other: Any) -> bool:
        return self.order >= cast(Card, other).order

    def __gt__(self, other: Any) -> bool:
        return self.order > cast(Card, other).order

    def __eq__(self, other: Any) -> bool:
        return self.order == cast(Card, other).order

    def __ne__(self, other: Any) -> bool:
        return self.order != cast(Card, other).order

    def __hash__(self) -> int:
        # fix: __hash__ must agree with __eq__, which compares only
        # ``order``. The previous hash mixed in ``suit`` (and reduced
        # modulo sys.hash_info.width), so equal cards could hash
        # differently — breaking the dict/set contract.
        return hash(self.order)
class AceCard(Card):
    """An Ace: rank 1, ordered above the King; hard value 1, soft value 11."""

    def __init__(self, rank: int, suit: str) -> None:
        assert rank == 1
        super().__init__(rank, suit)
        self.order = 14  # above King

    def __str__(self) -> str:
        return " A" + self.suit

    @property
    def hardValue(self) -> int:
        return 1

    @property
    def softValue(self) -> int:
        return 11
class FaceCard(Card):
    """A Jack, Queen, or King: displayed by letter, worth 10 hard and soft."""

    def __init__(self, rank: int, suit: str) -> None:
        assert rank in (11, 12, 13)
        self.rank_char = "JQK"[rank - 11]
        super().__init__(rank, suit)

    def __str__(self) -> str:
        return " " + self.rank_char + self.suit

    @property
    def hardValue(self) -> int:
        return 10

    @property
    def softValue(self) -> int:
        return 10
def card_factory(rank: int, suit: str) -> Card:
    """Create the appropriate Card subclass for ``rank``:
    AceCard for 1, FaceCard for 11-13, plain Card otherwise."""
    if rank == 1:
        return AceCard(rank, suit)
    if rank >= 11:
        return FaceCard(rank, suit)
    return Card(rank, suit)
|
# Importing the libraries
import os
import pickle

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Importing the data set: first three columns are features, fourth is profit.
dataset = pd.read_csv('50_Startup.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 3].values

# Fit a depth-limited decision tree regressor on the full data set.
from sklearn.tree import DecisionTreeRegressor
reg = DecisionTreeRegressor(max_depth=5)
reg.fit(X, y)
y_pred = reg.predict(X)
# fix: reg.score is a method; the original printed the bound-method object
# instead of the R^2 score.
print(reg.score(X, y))

# Loading the model to compare results
# fix: the original used os.path without importing os (NameError), and left
# the pickle file handle open.
model_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'treemodel.pkl')
with open(model_path, 'rb') as model_file:
    model = pickle.load(model_file)
x_test = np.array([[16000, 135000, 450000]])
print(x_test)
print(model.predict(x_test))

# To generate a best fit model: use the histogram bin edges of each feature
# (49 bins -> 50 edges) as a grid of sample points, then predict on the grid.
X_range = np.zeros((50, 3))
y_range = np.zeros((50,))
for i in range(3):
    Xi = X[:, i]
    vals = plt.hist(Xi, 49)
    plt.xlabel("Feature")
    plt.ylabel("Frequency")
    X_range[:, i] = np.transpose(vals[1])
y_range = model.predict(X_range)

# Plot the results — one figure per feature.
plt.figure()
plt.scatter(X[:, 0], y, s=20, edgecolor="black", c="darkorange", label="train data")
plt.scatter(x_test[:, 0], model.predict(x_test), s=30, color="yellowgreen", label="test data", linewidth=2)
plt.plot(X_range[:, 0], y_range, color="cornflowerblue",
         label="Regression_model", linewidth=2)
plt.xlabel("R&D Cost")
plt.ylabel("Profit")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()

plt.figure()
plt.scatter(X[:, 1], y, s=20, edgecolor="black", c="darkorange", label="train data")
plt.scatter(x_test[:, 1], model.predict(x_test), s=30, color="yellowgreen", label="test data", linewidth=2)
plt.plot(X_range[:, 1], y_range, color="cornflowerblue",
         label="Regression_model", linewidth=2)
plt.xlabel("Admin Cost")
plt.ylabel("Profit")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()

plt.figure()
plt.scatter(X[:, 2], y, s=20, edgecolor="black", c="darkorange", label="train data")
plt.scatter(x_test[:, 2], model.predict(x_test), s=30, color="yellowgreen", label="test data", linewidth=2)
plt.plot(X_range[:, 2], y_range, color="cornflowerblue",
         label="Regression_model", linewidth=2)
plt.xlabel("Marketing Cost")
plt.ylabel("Profit")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()

# Save the model (reuse the path computed above).
with open(model_path, 'wb') as model_file:
    pickle.dump(reg, model_file)
|
import sys
from collections import OrderedDict
from knack import CLI, ArgumentsContext, CLICommandsLoader
from knack.commands import CommandGroup
class MyCommandsLoader(CLICommandsLoader):
    """Registers the 'repository update' and 'source fetch' commands and
    their shared ``kind`` string argument."""

    def load_command_table(self, args):
        with CommandGroup(self, "repository", "repository#{}") as group:
            group.command("update", "update_repository")
        with CommandGroup(self, "source", "source#{}") as group:
            group.command("fetch", "fetch_source")
        return OrderedDict(self.command_table)

    def load_arguments(self, command):
        for scope in ("repository update", "source fetch"):
            with ArgumentsContext(self, scope) as ctx:
                ctx.argument("kind", type=str)
        super(MyCommandsLoader, self).load_arguments(command)
# Build the CLI and dispatch to the requested command; the process exit
# code is whatever the invoked command returns.
emote = CLI(cli_name="emote", commands_loader_cls=MyCommandsLoader)
exit_code = emote.invoke(sys.argv[1:])
sys.exit(exit_code)
|
import math

import cv2
import numpy as np

import sc2
from sc2.constants import OVERLORD, QUEEN, CREEPTUMOR, CREEPTUMORBURROWED, CREEPTUMORQUEEN,\
    BUILD_CREEPTUMOR_QUEEN, BUILD_CREEPTUMOR_TUMOR,\
    CANCEL_CREEPTUMOR, BEHAVIOR_GENERATECREEPON, HATCHERY, LAIR, ZERGBUILD_CREEPTUMOR
from sc2.data import race_townhalls, ActionResult
from sc2.player import BotAI
from sc2.position import Point2, Point3
from sc2.unit import Unit
async def expend_creep(self):
    """Top-level creep-spreading step: cache the current tumors, then let
    queens and tumors plant new tumors and overlords generate creep.

    NOTE(review): the name presumably means 'expand_creep'; kept as-is
    since callers use this spelling.
    """
    self.tumors = self.units(CREEPTUMOR)  # Note: The other tumors cannot cast BUILD_CREEPTUMOR_TUMOR
    # await self.compute_global_creepmap()
    await self.queen_tumors()
    await self.tumor_tumors()
    await self.overlord_creep()
async def queen_tumors(self):
    """Have one random idle queen with enough energy plant a creep tumor
    right next to her position."""
    queens = self.units(QUEEN).idle
    if queens.exists:
        # 25 energy is the tumor cast cost
        tum_queens = queens.filter(lambda q: q.energy > 25)  # TODO: Find a less hardcoded way to do that
        if tum_queens.exists:
            q = tum_queens.random
            loc = q.position.random_on_distance(1)
            await self.do(q(BUILD_CREEPTUMOR_QUEEN, loc))
async def tumor_tumors(self):
    """Spread creep by having each cached tumor plant a new tumor, and
    cancel badly damaged unfinished tumors before they are lost."""
    # Expanding tumors
    tumors = self.tumors
    if tumors.exists:
        for t in tumors:
            # fix: the previous code called tumor_creepmap / findCreepPlantLocation
            # with signatures they do not have and without awaiting the
            # coroutines, which raised TypeError at runtime and passed a
            # coroutine object as the build location.
            locations = await self.findCreepPlantLocation(
                self.positionsWithoutCreep, t
            )
            # findCreepPlantLocation documents a list of valid Point2
            # positions (or None once the map is saturated) — use the first.
            # TODO confirm the first candidate is the preferred one.
            if locations:
                await self.do(t(BUILD_CREEPTUMOR_TUMOR, locations[0]))
    # Canceling tumors if necessary: an unfinished tumor that has taken
    # disproportionate damage is cancelled (refunded) rather than lost.
    unfinished_tumors = self.tumors.not_ready
    for ut in unfinished_tumors:
        if ut.health_percentage > 1.1 * ut.build_progress and ut.health_percentage < 0.55:
            await self.do(ut(CANCEL_CREEPTUMOR))
async def overlord_creep(self):  # TODO: trigger this on construction of the first LAIR or on a new OVERLORD
    """Once a Lair is ready, order idle overlords to generate creep."""
    overlords = self.units(OVERLORD).idle
    if self.units(LAIR).ready.exists:
        for ov in overlords:
            await self.do(ov(BEHAVIOR_GENERATECREEPON))
    # TODO: if an overlord is not already generating creep, cast it; avoid calling this function constantly
async def compute_global_creepmap(self, force=False):  # FIXME
    """Rasterize the whole map's creep into a BGR image (white = creep),
    cached on ``self.global_creepmap``.

    Recomputed at most every 10 in-game seconds unless ``force`` is set.
    NOTE(review): assumes ``self._last_creep_map_check`` was initialized
    elsewhere before the first call — confirm, otherwise this raises
    AttributeError.
    NOTE(review): the array is allocated as (width, heigth, 3) but indexed
    [i, j] with i < heigth and j < width — only consistent on square maps;
    the intended shape is likely (heigth, width, 3).
    """
    if force or self.time > self._last_creep_map_check + 10:
        width = self.state.creep.width
        heigth = self.state.creep.height
        game_data = np.zeros((width, heigth, 3), np.uint8)
        for i in range(heigth):
            for j in range(width):
                if self.state.creep.is_set((j, i)):
                    game_data[i, j] = (255, 255, 255)
        self.global_creepmap = game_data
        # flip vertically so the debug window matches the in-game orientation
        flipped = cv2.flip(game_data, 0)
        resized = cv2.resize(flipped, dsize=None, fx=2, fy=2)
        if self.show_minimaps:
            cv2.imshow('Global Creep Map', resized)
        self._last_creep_map_check = self.time
async def tumor_creepmap(self, tumor: Unit):  # TODO
    """Build a small (15x15) BGR image of the creep around ``tumor``.

    NOTE(review): marked TODO/broken by the author; issues to confirm:
    - ``pos.x`` / ``pos.y`` look like floats, so ``range(...)`` would
      raise TypeError;
    - the author's own note (translated from French below) says the
      indices are wrong — the creep grid is tested at (j, i) while the
      circle is drawn at (i, j), and the window coordinates are not
      shifted to be tumor-relative.
    """
    width = self.state.creep.width
    heigth = self.state.creep.height
    side = int(15)  # TODO: Hardcode it
    local_creepmap = np.zeros((side, side, 3), np.uint8)  # 0: creep, 1: unbuildable, 2: buildable
    pos = tumor.position
    # The indices are not the right ones (translated author note)
    for i in range(max(pos.x - side//2, 0), min(pos.x + side//2 + 1, heigth)):
        for j in range(max(pos.y - side//2, 0), min(pos.y + side//2 + 1, width)):
            if self.state.creep.is_set((j, i)):
                cv2.circle(local_creepmap, (i, j), 2, (255, 0, 0), -1)
    cv2.imshow('Local creep map', local_creepmap)
    return local_creepmap
# Taken from https://github.com/BurnySc2/burny-bots-python-sc2/blob/master/CreepyBot/CreepyBot.py
def getPositionsAroundUnit(self, unit, minRange=0, maxRange=500, stepSize=1, locationAmount=32):
    """Return candidate Point2 positions on concentric circles around ``unit``.

    ``locationAmount`` points are placed on each circle, with one circle at
    every integer distance in [minRange, maxRange].
    e.g. locationAmount=4 would only consider 4 points: north, west, east, south

    NOTE(review): ``stepSize`` is accepted but never used; ``math``,
    ``Point2`` and ``Point3`` must be imported at module level.
    """
    assert isinstance(unit, (Unit, Point2, Point3))
    if isinstance(unit, Unit):
        loc = unit.position.to2
    else:
        loc = unit
    positions = [Point2(( \
        loc.x + distance * math.cos(math.pi * 2 * alpha / locationAmount), \
        loc.y + distance * math.sin(math.pi * 2 * alpha / locationAmount))) \
        for alpha in range(locationAmount)  # alpha is the angle here, locationAmount is the variable on how accurate the attempts look like a circle (= how many points on a circle)
        for distance in range(minRange, maxRange+1)]  # distance depending on minrange and maxrange
    return positions
# Taken from https://github.com/BurnySc2/burny-bots-python-sc2/blob/master/CreepyBot/CreepyBot.py
    async def findCreepPlantLocation(self, targetPositions, castingUnit, minRange=None, maxRange=None, stepSize=1, onlyAttemptPositionsAroundUnit=False, locationAmount=32, dontPlaceTumorsOnExpansions=True):
        """Figure out which positions are valid for a queen or tumor to plant a new creep tumor.

        Arguments:
            targetPositions {set of Point2} -- positions creep should spread towards
                NOTE(review): not referenced in the body -- confirm whether this is dead
            castingUnit {Unit} -- the casting unit (queen or tumor)

        Keyword Arguments:
            minRange {int} -- minimum range from the casting unit's location (default: {None}, treated as 0)
            maxRange {int} -- maximum range from the casting unit's location (default: {None}, treated as 500)
            stepSize {int} -- radial step between attempted distances (default: {1})
            onlyAttemptPositionsAroundUnit {bool} -- if True, only attempt positions around the unit (ideal
                for tumors); if False, attempt many positions closest to hatcheries (ideal for queens)
                (default: {False})  NOTE(review): currently unused in the body -- confirm intent
            locationAmount {int} -- a factor for the amount of positions that will be attempted (default: {32})
            dontPlaceTumorsOnExpansions {bool} -- if True it will sort out locations that would block
                expanding there (default: {True})

        Returns:
            list of Point2 -- valid positions sorted closest-to-creepless-area first, or None
        """
        assert isinstance(castingUnit, Unit)
        positions = []
        # ability object is needed for the placement query below
        ability = self._game_data.abilities[ZERGBUILD_CREEPTUMOR.value]
        if minRange is None: minRange = 0
        if maxRange is None: maxRange = 500
        # get positions around the casting unit
        positions = self.getPositionsAroundUnit(castingUnit, minRange=minRange, maxRange=maxRange, stepSize=stepSize, locationAmount=locationAmount)
        # stop when map is full with creep
        if len(self.positionsWithoutCreep) == 0:
            return None
        # filter positions that would block expansions
        if dontPlaceTumorsOnExpansions and hasattr(self, "exactExpansionLocations"):
            positions = [x for x in positions if self.getHighestDistance(x.closest(self.exactExpansionLocations), x) > 3]
        # TODO: need to check if this doesnt have to be 6 actually
        # this number cant also be too big or else creep tumors wont be placed near mineral fields where they can actually be placed
        # check if any of the positions are valid (single batched query to the game client)
        validPlacements = await self._client.query_building_placement(ability, positions)
        # filter valid results; the reply list is index-aligned with `positions`
        validPlacements = [p for index, p in enumerate(positions) if validPlacements[index] == ActionResult.Success]
        allTumors = self.units(CREEPTUMOR) | self.units(CREEPTUMORBURROWED) | self.units(CREEPTUMORQUEEN)
        # usedTumors = allTumors.filter(lambda x:x.tag in self.usedCreepTumors)
        unusedTumors = allTumors.filter(lambda x:x.tag not in self.usedCreepTumors)
        # never count the casting tumor itself as a nearby "unused" tumor
        if castingUnit is not None and castingUnit in allTumors:
            unusedTumors = unusedTumors.filter(lambda x:x.tag != castingUnit.tag)
        # filter placements that are close to other unused tumors
        if len(unusedTumors) > 0:
            validPlacements = [x for x in validPlacements if x.distance_to(unusedTumors.closest_to(x)) >= 10]
        # prefer placements that push creep towards creepless territory
        validPlacements.sort(key=lambda x: x.distance_to(x.closest(self.positionsWithoutCreep)), reverse=False)
        if len(validPlacements) > 0:
            return validPlacements
        return None
import os
import pandas as pd
from pandas import DataFrame
from data_loader import load_train_genes_for_cell_line
data_small_folder = "data_small/"
def created_gct_file_train_plus_val() -> dict[int, DataFrame]:
    """
    Create gct frames for cell line 1 and cell line 2 (train and val data
    merged) for visualization; IGV only accepts gct-format expression data
    (https://software.broadinstitute.org/software/igv/ExpressionData).

    :return: dictionary mapping cell-line number -> gct-formatted DataFrame
             with columns ["name", "Description", "sample 1"]
    :rtype: dict[int, DataFrame]
    """
    gct_frame_dict = {}
    for cell_line in [1, 2]:
        # notice: here train_genes is the combination of train and val data
        # in the original dataset
        merged_train_info = load_train_genes_for_cell_line(cell_line=cell_line)
        feature_names = ["gene_name", "chr", "gene_start", "gene_end", "gex"]
        merged_train_list = merged_train_info.loc[:, feature_names].values.tolist()
        gct_list = []
        # 'chrom' avoids shadowing the builtin chr()
        for gene_name, chrom, gene_start, gene_end, gex in merged_train_list:
            # fix: the locus string previously interpolated gene_name where the
            # start coordinate belongs; IGV expects "na |@chr:start-end|"
            # (gene_start was unpacked but never used).
            gct_list.append([gene_name, f" na |@{chrom}:{gene_start}-{gene_end}|", gex])
        gct_frame_dict[cell_line] = pd.DataFrame(
            gct_list, columns=["name", "Description", "sample 1"])
    return gct_frame_dict
if __name__ == '__main__':
    # Write one tab-separated .gct file per cell line into data_small/.
    gct_frames = created_gct_file_train_plus_val()
    for cell_line, frame in gct_frames.items():
        output_path = os.path.join(data_small_folder, f"X{cell_line}_trainAndVal.gct")
        frame.to_csv(output_path, header=True, index=False, sep="\t")
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from courses import views
# URL namespace for reverse() lookups, e.g. 'courses:course-list'.
app_name = 'courses'
urlpatterns = [
    # Public catalogue and per-course detail (slug-addressed).
    path('', views.CourseListView.as_view(), name='course-list'),
    path('<str:slug>', views.CourseDetailView.as_view(), name='course-detail'),
    # Purchase flow endpoints (unnamed; addressed by path only).
    path('user/buy', views.BuyCourseAPI.as_view()),
    path('user/buy/success', views.BuyCourseSuccessAPI.as_view()),
    # Ownership check and favorites for the current user.
    path('user/check', views.CheckIsBought.as_view()),
    path('user/favorite_course', views.FavoriteCourseAPI.as_view()),
]
|
#! python3
# -*- coding: utf-8 -*-
"""Internal module to interact with gui
"""
__version__ = "0.4.6"
class Gui: # pylint: disable=too-few-public-methods
    """Cross-platform helpers for showing warnings/notifications to the user.
    """
    @staticmethod
    def warning(message):
        """Show a blocking warning *message* to the user.

        macOS gets a native notification, other platforms a pyautogui alert,
        and PyPy (no pyautogui support) falls back to printing the message
        and waiting for Enter.
        <br>`param message` string with message to user
        <br>`return` None
        """
        import sys
        from .print9 import Print
        from .os9 import OS
        try:
            try:
                not_dot_py = sys.argv[0][-3:] != ".py" # todo check logic
            except IndexError:
                not_dot_py = True
            # if (not_dot_py or (sys.argv[0] != "")) and (not OS.running_in_repl()): # I forgot why sys.argv[0] must not be empty, so ... 06/13/2018
            # Print.debug("sys.argv", sys.argv)
            # Print.debug("Something wrong with sys.argv. Tkinter doesn't like it.")
        except IndexError:
            Print.debug("sys.argv", sys.argv)
            raise RuntimeError("Something wrong with sys.argv. Tkinter doesn't like it!")
        if OS.macos:
            from .macos9 import macOS
            macOS.notification(message)
        elif OS.python_implementation != "pypy":
            # fix: this branch was a separate `if (not OS.macos) and ...`
            # statement whose `else` also ran on macOS, printing the PyPy
            # fallback and blocking on input() right after the macOS
            # notification had already been shown.
            import pyautogui
            pyautogui.alert(message)
        else:
            Print.debug("PyPy doesn't support pyautogui, so warning is here:", message)
            input("Press Enter to continue")
    @classmethod
    def notification(cls, message, title="python3", subtitle=None, sound=None):
        """Show a non-blocking desktop notification (Windows or macOS only).
        <br>`param message` notification body text
        <br>`param title` notification title (default "python3")
        <br>`param subtitle` optional subtitle (used on macOS)
        <br>`param sound` optional sound name (used on macOS)
        <br>`return` None; raises NotImplementedError on other platforms
        """
        from .os9 import OS
        if OS.macos:
            from .macos9 import macOS
            # NOTE(review): `cls` is passed as the first positional argument
            # here, while Gui.warning() calls macOS.notification(message) --
            # confirm macOS.notification's expected signature.
            macOS.notification(cls, message, title=title, subtitle=subtitle, sound=sound)
        elif OS.windows:
            # https://gist.github.com/wontoncc/1808234
            class WindowsBalloonTip:
                """Transient Win32 tray icon that shows a balloon tooltip."""
                def __init__(self, title, msg):
                    import os
                    import sys
                    import time
                    import win32api
                    import win32gui
                    import win32con
                    message_map = {
                        win32con.WM_DESTROY: self.OnDestroy,
                    }
                    # Register the Window class.
                    wc = win32gui.WNDCLASS()
                    hinst = wc.hInstance = win32api.GetModuleHandle(None)
                    # Randomized class name avoids "class already registered"
                    # errors when several notifications are shown in one run.
                    from .random9 import Random
                    wc.lpszClassName = "PythonTaskbar" + Random.string(100)
                    wc.lpfnWndProc = message_map # could also specify a wndproc.
                    classAtom = win32gui.RegisterClass(wc)
                    # Create the Window.
                    style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
                    self.hwnd = win32gui.CreateWindow(classAtom, "Taskbar", style,
                                                      0, 0, win32con.CW_USEDEFAULT, win32con.CW_USEDEFAULT,
                                                      0, 0, hinst, None)
                    win32gui.UpdateWindow(self.hwnd)
                    iconPathName = os.path.abspath(os.path.join(sys.path[0], "balloontip.ico"))
                    icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
                    try:
                        hicon = win32gui.LoadImage(hinst, iconPathName,
                                                   win32con.IMAGE_ICON, 0, 0, icon_flags)
                    except:
                        # fall back to the stock application icon when the
                        # .ico file is missing or unreadable
                        hicon = win32gui.LoadIcon(0, win32con.IDI_APPLICATION)
                    flags = win32gui.NIF_ICON | win32gui.NIF_MESSAGE | win32gui.NIF_TIP
                    nid = (self.hwnd, 0, flags, win32con.WM_USER + 20, hicon, "tooltip")
                    win32gui.Shell_NotifyIcon(win32gui.NIM_ADD, nid)
                    win32gui.Shell_NotifyIcon(win32gui.NIM_MODIFY,
                                              (self.hwnd, 0, win32gui.NIF_INFO, win32con.WM_USER + 20,
                                               hicon, "Balloon tooltip", title, 200, msg))
                    # keep the icon alive long enough for the balloon to show;
                    # this runs on a daemon thread so it does not block callers
                    time.sleep(60)
                    win32gui.DestroyWindow(self.hwnd)
                def OnDestroy(self, hwnd, msg, wparam, lparam):
                    import win32api
                    import win32gui
                    nid = (self.hwnd, 0)
                    win32gui.Shell_NotifyIcon(win32gui.NIM_DELETE, nid)
                    win32api.PostQuitMessage(0) # Terminate the app.
            def notification(title, message):
                # Win32 class registration occasionally fails transiently;
                # retry a few times before giving up.
                import pywintypes
                cnt = 0
                while cnt < 10:
                    cnt += 1
                    try:
                        WindowsBalloonTip(title=title, msg=message)
                        return
                    except pywintypes.error as err:
                        print(err)
                        pass
            from .threading9 import MyThread
            t = MyThread(notification, args=(title, message), daemon=True)
            t.start()
        else:
            raise NotImplementedError("OS not in ['Windows', 'macOS'] not supported yet")
import os
from kubernetes.config import load_incluster_config, load_kube_config, incluster_config as kubernetes_config
from kubernetes import client
from kubernetes.client.rest import ApiException
from django.conf import settings
# override service token mounts when running with telepresence
from comic.eyra.models import Job
# When running with telepresence, the in-cluster service token/cert live
# under TELEPRESENCE_ROOT; prefix the default paths accordingly (the env var
# is empty/absent outside telepresence, leaving the paths unchanged).
if settings.K8S_USE_CLUSTER_CONFIG:
    kubernetes_config.SERVICE_TOKEN_FILENAME = \
        os.environ.get('TELEPRESENCE_ROOT', '') + kubernetes_config.SERVICE_TOKEN_FILENAME
    kubernetes_config.SERVICE_CERT_FILENAME = \
        os.environ.get('TELEPRESENCE_ROOT', '') + kubernetes_config.SERVICE_CERT_FILENAME
# the capacity of the PVC volume used for in/output.
IO_PVC_CAPACITY = '50Gi'
# https://github.com/s3tools/s3cmd
# used for up- & downloading data from s3
# s3cmd_prefix = f"""
# s3cmd --access_key={settings.AWS_ACCESS_KEY_ID}\
#     --secret_key={settings.AWS_SECRET_ACCESS_KEY}\
#     --host={settings.AWS_S3_HOST}\
#     --host-bucket="%(bucket).{settings.AWS_S3_HOST}" """
# Minimal s3cmd invocation; credentials are expected to come from the
# environment / instance role rather than explicit flags.
s3cmd_prefix = f"s3cmd --region={settings.AWS_S3_REGION_NAME}"
# Use on Eyra Job, executes job on K8S cluster
# Use on Eyra Job, executes job on K8S cluster
class K8sJob(object):
    """Execute an Eyra ``Job`` as a Kubernetes batch job.

    The job's pod shares one PVC-backed ``/data`` volume across three
    containers: an init container downloads inputs from S3, the algorithm
    image runs as a second init container, and the single main container
    uploads ``/data/output`` back to S3.  Intended to be used as a context
    manager: ``__exit__`` deletes the pod, the job and the PVC.
    """

    def __init__(self, job: Job, namespace: str=os.environ.get('K8S_NAMESPACE')):
        # NOTE(review): the default namespace is read from the environment
        # once, at import time (default-argument evaluation) -- confirm this
        # is intended.
        self.job = job
        self.namespace = namespace
        self.io_pvc = None

    def load_kubeconfig(self):
        """Load in-cluster service-account config when deployed, else local kubeconfig."""
        if settings.K8S_USE_CLUSTER_CONFIG:
            load_incluster_config()
        else:
            load_kube_config()

    def io_pvc_name(self):
        """Name of the PVC holding this job's input/output data."""
        return f'pvc-job-{self.job.pk}'

    def job_name(self):
        """Name of the K8S Job object for this Eyra job."""
        return f'job-{self.job.pk}'

    # create persistent volume claim for IO (data input/output)
    def create_io_pvc(self):
        self.io_pvc = client.CoreV1Api().create_namespaced_persistent_volume_claim(
            # fix: honor the namespace passed to __init__ instead of always
            # re-reading K8S_NAMESPACE from the environment (the parameter
            # was silently ignored here)
            self.namespace,
            client.V1PersistentVolumeClaim(
                metadata=client.V1ObjectMeta(name=self.io_pvc_name()),
                spec=client.V1PersistentVolumeClaimSpec(
                    access_modes=['ReadWriteOnce'],
                    resources=client.V1ResourceRequirements(requests={'storage': IO_PVC_CAPACITY})
                )
            )
        )
        return self.io_pvc

    # the sh script that runs in the init container (downloads input data from S3)
    def input_script(self):
        # one `s3cmd get` line per job input, index-keyed by data file pk
        s3cmd = "\n".join([
            f"{s3cmd_prefix} get s3://{settings.AWS_STORAGE_BUCKET_NAME}/data_files/{data_file_pk} /data/input/{input_name}"
            for input_name, data_file_pk in self.job.input_name_data_file_pk_map().items()
        ])
        return f"""
            set -e
            echo "Preparing data volume..."
            mkdir /data/input
            pip install s3cmd --quiet
            {s3cmd}
            echo "done"
        """

    # the sh script that runs in the output container (uploads output data to S3)
    def output_script(self):
        s3cmd = f"{s3cmd_prefix} put /data/output s3://{settings.AWS_STORAGE_BUCKET_NAME}/data_files/{self.job.output.pk}"
        return f"""
            set -e
            echo "Uploading output data..."
            pip install s3cmd --quiet
            {s3cmd}
            echo "Done"
        """

    # todo: fix for new db without implementation
    # run this job on K8S. Does not wait for completion
    def run(self):
        self.load_kubeconfig()
        self.create_io_pvc()
        # init container 1: pull the job's inputs from S3 into /data/input
        input_container = client.V1Container(
            name="input",
            image='python:2-alpine',
            volume_mounts=[client.V1VolumeMount(mount_path='/data', name='io')],
            resources=client.V1ResourceRequirements(requests={
                # "cpu": 0.5
            }),
            command=["sh", "-c", self.input_script()],
        )
        # Define the main algorithm running container (init container 2)
        submission = self.job.submission
        main_container = client.V1Container(
            name="main",
            image=self.job.image,
            command=['sh'] if submission.command else None,
            args=['-c', submission.command] if submission.command else None,
            # resources=client.V1ResourceRequirements(
            #     requests = {
            #         "nvidia.com/gpu": "1"
            #     },
            #     limits = {
            #         "nvidia.com/gpu": "1"
            #     }
            # ),
            volume_mounts=[client.V1VolumeMount(mount_path='/data', name='io')],
        )
        # main container: upload /data/output to S3 after the algorithm ran
        output_container = client.V1Container(
            name="output",
            image='python:2-alpine',
            volume_mounts=[client.V1VolumeMount(mount_path='/data', name='io')],
            resources=client.V1ResourceRequirements(requests={
                # "cpu": 0.5
            }),
            command=["sh", "-c", self.output_script()],
        )
        # Define the pod running the job. K8S has no "exit containers", so
        # download + algorithm run as init containers and the upload is the
        # pod's only main container.
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(name=self.job_name()),
            spec=client.V1PodSpec(
                automount_service_account_token=False,
                restart_policy="Never",
                init_containers=[input_container, main_container],
                containers=[output_container],
                volumes=[client.V1Volume(
                    name='io',
                    persistent_volume_claim={'claimName': self.io_pvc_name()}
                )],
                # allow scheduling onto GPU-tainted nodes
                tolerations=[client.V1Toleration(
                    key='nvidia.com/gpu',
                    operator='Exists',
                    effect='NoSchedule'
                )],
            )
        )
        job = client.V1Job(
            metadata=client.V1ObjectMeta(name=self.job_name()),
            spec=client.V1JobSpec(template=template, backoff_limit=0),
        )
        client.BatchV1Api().create_namespaced_job(self.namespace, job)

    def __enter__(self):
        return self

    # cleanup when `with K8SJob` block goes out of scope
    def __exit__(self, exc_type, exc_val, exc_tb):
        for pod in self.get_pod_names():
            client.CoreV1Api().delete_namespaced_pod(
                name=pod,
                namespace=self.namespace,
                body={}
            )
        client.BatchV1Api().delete_namespaced_job(
            name=self.job_name(),
            namespace=self.namespace,
            body={}
        )
        client.CoreV1Api().delete_namespaced_persistent_volume_claim(
            name=self.io_pvc_name(),
            namespace=self.namespace,
            body={}
        )

    # has the job failed?
    @property
    def failed(self):
        return self.status().failed

    # has the job succeeded?
    @property
    def succeeded(self):
        return self.status().succeeded

    def status(self):
        """Get the K8S status object of the job."""
        r = client.BatchV1Api().read_namespaced_job_status(
            name=self.job_name(),
            namespace=self.namespace,
        )
        return r.status

    # get pod names for this Job (should be a single pod, unless we enable retries (e.g. using backoffLimit)
    def get_pod_names(self):
        podlist = client.CoreV1Api().list_namespaced_pod(
            namespace=self.namespace,
            label_selector=f"job-name={self.job_name()}"
        )
        return [pod.metadata.name for pod in podlist.items]

    def get_logs(self, container=None, previous=False):
        """Return logs as ``{pod_name: {container_name: log}}``.

        Containers whose logs cannot be fetched (e.g. not started yet) are
        skipped: this is a best-effort collector.
        """
        if container is None:
            containers = ["input", "main", "output"]
        else:
            containers = [container]
        logs = {}
        for podname in self.get_pod_names():
            for container in containers:
                try:
                    r = client.CoreV1Api().read_namespaced_pod_log(
                        name=podname,
                        namespace=self.namespace,
                        container=container,
                        follow=False,
                        pretty=True,
                        previous=previous,
                        timestamps=True
                    )
                except ApiException:
                    # no logs available for this container (yet); skip it
                    continue
                if podname not in logs:
                    logs[podname] = {}
                logs[podname][container] = r
        return logs

    def print_logs(self):
        print(self.get_text_logs())

    # flatten logs dict into text structure
    def get_text_logs(self):
        text_log = ""
        # fix: inner loop variable previously shadowed the outer `logs` dict
        for podname, container_logs in self.get_logs().items():
            text_log += "\n"
            text_log += f"Pod: {podname}\n"
            for container, log in container_logs.items():
                text_log += "\n"
                text_log += f"Container: {container}\n"
                text_log += log
        return text_log
|
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from torchnet.meter import ConfusionMeter
import argparse
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from datasets import Dataset, get_dataloader, get_transform
from utils import AverageMeter, Recorder, accuracy, metrics
from models import model_mappings
from config import get_config
def train(config, model, criterion, optimizer, train_loader, method):
    """Run one training epoch and return (avg_loss, avg_accuracy)."""
    loss_meter = AverageMeter('Loss', ':.4e')
    acc_meter = AverageMeter('Accuracy', ':6.4f')
    model.train()
    # current learning rate is read off the optimizer state
    print('learning rate =', optimizer.param_groups[0]['lr'])
    for step, (inputs, labels, _) in enumerate(tqdm(train_loader)):
        inputs = inputs.cuda()
        labels = labels.cuda().long()
        if method == 'pixelnet':
            # pixelnet trains on a random subsample of pixels per batch
            model.set_train_flag(True)
            rand_ind = model.generate_rand_ind(labels.cpu(), n_class=config['n_class'], n_samples=2048)
            model.set_rand_ind(rand_ind)
            labels = labels.view(labels.size(0), -1)[:, rand_ind]
        # forward pass
        outputs = model(inputs)
        predictions = outputs.cpu().argmax(1)
        loss = criterion(outputs, labels)
        # bookkeeping: accuracy and loss weighted by batch size
        batch_size = inputs.size(0)
        acc_meter.update(accuracy(predictions, labels), batch_size)
        loss_meter.update(loss.item(), batch_size)
        # backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print('--- training result ---')
    print('loss: %.5f, accuracy: %.5f' % (loss_meter.avg, acc_meter.avg))
    return loss_meter.avg, acc_meter.avg
def evaluate(config, model, criterion, validation_loader, method, test_flag=False, save_dir=None):
    """Evaluate the model on a loader; return (avg_loss, accuracy, iou).

    When `test_flag` is True, prediction images are saved to `save_dir`
    and per-class metrics are printed verbosely.
    """
    losses = AverageMeter('Loss', ':.5f')
    conf_meter = ConfusionMeter(config['n_class'])
    with torch.no_grad():
        model.eval()
        for t, (inputs, labels, names) in enumerate(tqdm(validation_loader)):
            inputs, labels = inputs.cuda(), labels.cuda().long()
            if method == 'pixelnet':
                # evaluation uses all pixels, not the training subsample
                model.set_train_flag(False)
            # compute output
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            # save predictions if needed
            predictions = outputs.cpu().argmax(1)
            if test_flag:
                for i in range(predictions.shape[0]):
                    # names[i][:-4] strips the original 4-char file extension
                    plt.imsave('%s/%s.png' % (save_dir, names[i][:-4]), predictions[i].squeeze(), cmap='gray')
            # measure accuracy, record loss
            losses.update(loss.item(), inputs.size(0))
            # confusion matrix expects (N*H*W, n_class) scores vs flat labels
            conf_meter.add(outputs.permute(0, 2, 3, 1).contiguous().view(-1, config['n_class']), labels.view(-1))
    if test_flag:
        print('--- evaluation result ---')
    else:
        print('--- validation result ---')
    conf_mat = conf_meter.value()
    acc, iou = metrics(conf_mat, verbose=test_flag)
    print('loss: %.5f, accuracy: %.5f, IU: %.5f' % (losses.avg, acc, iou))
    return losses.avg, acc, iou
def main(args):
    """Entry point: train a segmentation model or evaluate a saved one.

    `args` comes from the argparse parser below: dataset name, mode
    ('train' or 'evaluate'), config version, and optional --save /
    --test-folder / --seed flags.
    """
    if args.seed:
        np.random.seed(int(args.seed))
        torch.backends.cudnn.deterministic = True
        # fix: torch was previously seeded with a hard-coded 0 here,
        # ignoring the user-provided --seed
        torch.manual_seed(int(args.seed))
    config = get_config(args.dataset, args.version)
    method = config['model']
    criterion = nn.CrossEntropyLoss().cuda()
    try:
        model = model_mappings[method](config['n_class']).cuda()
    except KeyError:
        print('%s model does not exist' % method)
        sys.exit(1)
    model_dir = './saved/%s_%s.pth' % (config['name'], method)
    if args.mode == 'train':
        log_dir = './log/%s_%s.log' % (config['name'], method)
        train_loader, validation_loader = get_dataloader(config)
        if config['optimizer'] == 'Adam':
            optimizer = optim.Adam(model.parameters(), lr=config['lr'], weight_decay=5e-4)
        elif config['optimizer'] == 'SGD':
            optimizer = optim.SGD(model.parameters(), lr=config['lr'], momentum=0.9, weight_decay=5e-4)
        else:
            print('cannot found %s optimizer' % config['optimizer'])
            sys.exit(1)
        # NOTE(review): the scheduler is stepped on the *training* loss below
        # rather than the validation loss -- confirm that is intended.
        scheduler = ReduceLROnPlateau(optimizer, patience=3)
        recorder = Recorder(('loss_train', 'acc_train', 'loss_val', 'acc_val'))
        iou_val_max = 0
        for epoch in range(1, config['epoch'] + 1):
            print('Epoch %s:' % epoch)
            loss_train, acc_train = train(config, model, criterion, optimizer, train_loader, method=method)
            loss_val, acc_val, iou_val = evaluate(config, model, criterion, validation_loader, method=method)
            scheduler.step(loss_train)
            # update loss and accuracy per epoch
            recorder.update((loss_train, acc_train, loss_val, acc_val))
            if args.save: torch.save(recorder.record, log_dir)
            # save model with higher iou
            if iou_val > iou_val_max:
                print('validation iou improved from %.5f to %.5f.' % (iou_val_max, iou_val))
                iou_val_max = iou_val
                if args.save:
                    print('Model saved.')
                    torch.save({
                        'epoch': epoch,
                        'version': args.version,
                        'config': config,
                        'model_state_dict': model.state_dict(),
                    }, model_dir)
    elif args.mode == 'evaluate':
        test_dir = '%s/%s' % (config['root'], args.test_folder)
        test_set = Dataset(test_dir, config['size'], *get_transform(config, is_train=False))
        test_loader = DataLoader(test_set, batch_size=1, shuffle=False, num_workers=0, drop_last=False)
        model.load_state_dict(torch.load(model_dir)['model_state_dict'])
        # save prediction results, make directory if not exists
        save_dir = '%s/predictions/%s_%s' % (test_dir, args.version, method)
        if not os.path.isdir(save_dir):
            os.makedirs(save_dir)
        evaluate(config, model, criterion, test_loader, method=method, test_flag=True, save_dir=save_dir)
    else:
        print('%s mode does not exist' % args.mode)
if __name__ == '__main__':
    # CLI: positional dataset folder, mode and config version, plus optional
    # --save / --test-folder / --seed flags; everything is handed to main().
    parser = argparse.ArgumentParser(description='Micrograph Segmentation')
    parser.add_argument('dataset', help='name of the dataset folder')
    parser.add_argument('mode', choices=['train', 'evaluate'],
                        help='mode choices: train, evaluate')
    parser.add_argument('version', help='version defined in config.py (v1, v2, ...)')
    parser.add_argument('--save', action='store_true', help='save the trained model')
    parser.add_argument('--test-folder', default='test', help='name of the folder running test')
    parser.add_argument('--seed', default=None, help='random seed to reproduce same results')
    args = parser.parse_args()
    main(args)
|
import unittest
from os.path import join
from mock import patch
from enot.packages.config.rebar import RebarConfig
from enot.packages.dep import Dep
from test.abs_test_class import TestClass
class RebarConfigTests(TestClass):
    """Tests for parsing dependency specs out of a rebar.config file."""

    def __init__(self, method_name):
        super().__init__('rebar_config_tests', method_name)

    def test_config_no_raw(self):
        """Git deps with an explicit version string parse into Dep objects."""
        with open(join(self.test_dir, 'rebar.config'), 'w') as config:
            config.write('''{deps, [
            {dep1, ".*", {git, "git://github.com/comtihon/dep1"}},
            {dep2, ".*", {git, "git://github.com/comtihon/dep2", {branch, "master"}}},
            {dep3, ".*", {git, "git://github.com/comtihon/dep3", ""}},
            {dep4, ".*", {git, "git://github.com/comtihon/dep4", {tag, "1.0.0"}}},
            {dep5, ".*", {git, "git://github.com/comtihon/dep5", "commit_hash"}},
            {dep6, ".*", {git, "git://github.com/comtihon/dep6", {ref, "commit_hash"}}}
            ]}.''')
        conf = RebarConfig(self.test_dir)
        # no branch -> master; "" -> HEAD; tag/ref/hash forms map accordingly
        self.assertEqual(Dep('git://github.com/comtihon/dep1', 'master'), conf.deps['dep1'])
        self.assertEqual(Dep('git://github.com/comtihon/dep2', 'master'), conf.deps['dep2'])
        self.assertEqual(Dep('git://github.com/comtihon/dep3', 'HEAD'), conf.deps['dep3'])
        self.assertEqual(Dep('git://github.com/comtihon/dep4', 'master', '1.0.0'), conf.deps['dep4'])
        self.assertEqual(Dep('git://github.com/comtihon/dep5', 'commit_hash'), conf.deps['dep5'])
        self.assertEqual(Dep('git://github.com/comtihon/dep6', 'commit_hash'), conf.deps['dep6'])

    def test_config_no_vsn_no_raw(self):
        """Same git dep forms, but without the version-regex element."""
        with open(join(self.test_dir, 'rebar.config'), 'w') as config:
            config.write('''{deps, [
            {dep1, {git, "git://github.com/comtihon/dep1"}},
            {dep2, {git, "git://github.com/comtihon/dep2", {branch, "master"}}},
            {dep3, {git, "git://github.com/comtihon/dep3", ""}},
            {dep4, {git, "git://github.com/comtihon/dep4", {tag, "1.0.0"}}},
            {dep5, {git, "git://github.com/comtihon/dep5", "commit_hash"}},
            {dep6, {git, "git://github.com/comtihon/dep6", {ref, "commit_hash"}}}
            ]}.''')
        conf = RebarConfig(self.test_dir)
        self.assertEqual(Dep('git://github.com/comtihon/dep1', 'master'), conf.deps['dep1'])
        self.assertEqual(Dep('git://github.com/comtihon/dep2', 'master'), conf.deps['dep2'])
        self.assertEqual(Dep('git://github.com/comtihon/dep3', 'HEAD'), conf.deps['dep3'])
        self.assertEqual(Dep('git://github.com/comtihon/dep4', 'master', '1.0.0'), conf.deps['dep4'])
        self.assertEqual(Dep('git://github.com/comtihon/dep5', 'commit_hash'), conf.deps['dep5'])
        self.assertEqual(Dep('git://github.com/comtihon/dep6', 'commit_hash'), conf.deps['dep6'])

    @patch('enot.packages.config.config.request_hex_info')
    def test_hex_dep(self, mock_hex):
        """A hex package dep resolves to its GitHub URL via the hex.pm metadata."""
        # fake hex.pm API response for package 'hex_dep'
        mock_hex.return_value = {'url': 'https://hex.pm/api/packages/hex_dep',
                                 'updated_at': '2017-05-29T02:51:09.352157Z',
                                 'releases': [
                                     {'version': '1.0.0',
                                      'url': 'https://hex.pm/api/packages/hex_dep/releases/1.0.0',
                                      'updated_at': '2017-05-29T02:51:09.373260Z',
                                      'inserted_at': '2017-05-29T02:51:09.373254Z'}],
                                 'owners': [{'username': 'comtihon',
                                             'url': 'https://hex.pm/api/users/comtihon',
                                             'email': 'comtihon@test.com'}],
                                 'name': 'hex_dep',
                                 'meta': {'maintainers': ['Test'],
                                          'links': {'GitHub': 'https://github.com/comtihon/hex_dep'},
                                          'licenses': ['Apache 2'],
                                          'description': 'Just test'},
                                 'inserted_at': '2015-03-01T22:27:54.000000Z',
                                 'downloads': {'week': 0, 'day': 0, 'all': 0}}
        with open(join(self.test_dir, 'rebar.config'), 'w') as config:
            config.write('''{deps, [
            {hex_dep, "1.0.0"}
            ]}.''')
        conf = RebarConfig(self.test_dir)
        self.assertEqual(Dep('https://github.com/comtihon/hex_dep', None, tag='1.0.0'), conf.deps['hex_dep'])
if __name__ == '__main__':
    # allow running this test module directly
    unittest.main()
|
import os
import sys
import glob
import joblib
import numpy as np
from tqdm import tqdm
from joblib import delayed
from scipy.misc import imsave
from pyvirchow.misc.parallel import ParallelExecutor
# Directory (first CLI argument) containing the '*.img.joblib.pickle' files.
folder = sys.argv[1]
# Parallel runner with a progress bar, 8 worker processes.
aprun = ParallelExecutor(n_jobs=8)
img_files = glob.glob(os.path.join(folder, '*.img.joblib.pickle'))
def convert_to_npy(filepath):
    """Convert one joblib-pickled image to a .jpg alongside it (skip if present)."""
    image = joblib.load(filepath)
    target = filepath.replace('.pickle', '.jpg')
    if not os.path.isfile(target):
        imsave(target, image)
# Convert every image in parallel; aprun shows progress across `total` tasks.
total = len(img_files)
aprun(total=total)(delayed(convert_to_npy)(f) for f in img_files)
#with tqdm(total=total) as pbar:
#    for _ in Parallel(n_jobs=8)(delayed(convert_to_npy)(f) for f in img_files):
#        pbar.update()
|
from .StructureBuilder import StructureBuilder, ResidueBuilder
from .structure import inverse_trig_transform, trig_transform
|
#!/usr/bin/python
"""Plot sine and cosine on a stacked 2x1 subplot grid."""
import numpy as np
import matplotlib.pyplot as plt

# sample points along four full periods
x = np.arange(0, 4 * np.pi, 0.1)
sine_values = np.sin(x)
cosine_values = np.cos(x)

# top panel: sine
plt.subplot(2, 1, 1)
plt.plot(x, sine_values)
plt.title('Sine')

# bottom panel: cosine
plt.subplot(2, 1, 2)
plt.plot(x, cosine_values)
plt.title('Cosine')

plt.show()
|
from heuristics import CompositeHeuristic, PathLengthHeuristic, RegionHeuristic
from policies import ActionSearchPolicy
# Search policy: expands candidate action sequences and scores them with a
# weighted combination of path-length lookahead (weight 15) and region
# coverage (weight 1).
pol = ActionSearchPolicy(
    CompositeHeuristic(
        [
            PathLengthHeuristic(15),
            RegionHeuristic(),
        ],
        weights=[15, 1],
    ),
    depth_limit=6,  # max actions per candidate sequence
    expanded_node_limit=100,  # cap on search-tree expansion
    occupancy_map_depth=3,
)
|
#!/usr/bin/python
import sys
import re
"""
Read the ToolsOfTheTrade Markdown file (readme.md) and alpha-sort the
contents - currently markdown "Header 3" and table rows.
This is a terrible 5-minute hack for this particular readme.md
But it does work.
"""

# read the whole file up front; the handle is closed by the with-block
with open("./readme.md") as file_object:
    lines = file_object.readlines()

# line numbers of every markdown header ('#', '##', '###', ...)
header_line_numbers = [i for i, line in enumerate(lines)
                       if re.match("#", line)]

# sort the contents of the tables within each header 3
for j, header_ln in enumerate(header_line_numbers):
    if not re.match("### ", lines[header_ln]):
        continue
    t_fst_row = header_ln + 4  # first row is 4 after header
    if j + 1 < len(header_line_numbers):
        t_lst_row = header_line_numbers[j + 1] - 1  # stop short of next header
    else:
        # fix: the original indexed header_line_numbers[j + 1] unconditionally,
        # raising IndexError whenever the file's last header is a header 3
        t_lst_row = len(lines)
    lines[t_fst_row:t_lst_row] = sorted(lines[t_fst_row:t_lst_row])

sys.stdout.write("".join(lines))
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from oslo_serialization import jsonutils
from unittest import mock
from cyborg.common import exception
from cyborg.tests.unit.api.controllers.v2 import base as v2_test
from cyborg.tests.unit import fake_deployable
from cyborg.tests.unit import fake_device
class TestFPGAProgramController(v2_test.APITestV2):
    """Tests for the FPGA programming API (PATCH /deployables/<uuid>/program)."""

    def setUp(self):
        super(TestFPGAProgramController, self).setUp()
        self.headers = self.gen_headers(self.context)
        self.deployable_uuids = ['0acbf8d6-e02a-4394-aae3-57557d209498']
        # one existing image, one well-formed-but-unknown UUID, one malformed
        self.existent_image_uuid = "9a17439a-85d0-4c53-a3d3-0f68a2eac896"
        self.nonexistent_image_uuid = "1234abcd-1234-1234-1234-abcde1234567"
        self.invalid_image_uuid = "abcd1234"
        dep_uuid = self.deployable_uuids[0]
        self.dep = fake_deployable.fake_deployable_obj(self.context,
                                                       uuid=dep_uuid)
        self.dev = fake_device.get_fake_devices_objs()[0]
        bdf = {"domain": "0000", "bus": "00", "device": "01", "function": "1"}
        self.cpid = {
            "id": 0,
            "uuid": "e4a66b0d-b377-40d6-9cdc-6bf7e720e596",
            "device_id": "1",
            "cpid_type": "PCI",
            "cpid_info": jsonutils.dumps(bdf).encode('utf-8')
        }

    def _setup_mocks(self, mock_program, mock_get_dep, mock_get_cpid_list,
                     mock_get_by_device_id, program_result):
        """Shared admin-header and mock wiring for all program tests."""
        self.headers['X-Roles'] = 'admin'
        self.headers['Content-Type'] = 'application/json'
        mock_get_dep.return_value = self.dep
        mock_get_by_device_id.return_value = self.dev
        mock_get_cpid_list.return_value = [self.cpid]
        mock_program.return_value = program_result

    def _program(self, body):
        """Issue the program PATCH for the first deployable with *body*."""
        dep_uuid = self.deployable_uuids[0]
        return self.patch_json('/deployables/%s/program' % dep_uuid,
                               [{'path': '/bitstream_id', 'value': body,
                                 'op': 'replace'}], headers=self.headers)

    @mock.patch('cyborg.objects.Device.get_by_device_id')
    @mock.patch('cyborg.objects.Deployable.get_cpid_list')
    @mock.patch('cyborg.objects.Deployable.get')
    @mock.patch('cyborg.agent.rpcapi.AgentAPI.fpga_program')
    def test_program_success(self, mock_program, mock_get_dep,
                             mock_get_cpid_list, mock_get_by_device_id):
        self._setup_mocks(mock_program, mock_get_dep, mock_get_cpid_list,
                          mock_get_by_device_id, program_result=True)
        body = [{"image_uuid": self.existent_image_uuid}]
        response = self._program(body)
        self.assertEqual(HTTPStatus.OK, response.status_code)
        data = response.json_body
        self.assertEqual(self.deployable_uuids[0], data['uuid'])

    @mock.patch('cyborg.objects.Device.get_by_device_id')
    @mock.patch('cyborg.objects.Deployable.get_cpid_list')
    @mock.patch('cyborg.objects.Deployable.get')
    @mock.patch('cyborg.agent.rpcapi.AgentAPI.fpga_program')
    def test_program_failed(self, mock_program, mock_get_dep,
                            mock_get_cpid_list, mock_get_by_device_id):
        self._setup_mocks(mock_program, mock_get_dep, mock_get_cpid_list,
                          mock_get_by_device_id, program_result=False)
        body = [{"image_uuid": self.existent_image_uuid}]
        # fix: the original caught the exception into a local and asserted
        # afterwards; when nothing was raised, the assertion crashed with a
        # confusing NameError instead of a clean test failure.
        try:
            self._program(body)
        except Exception as e:
            exc = e
        else:
            self.fail('programming failure did not raise')
        self.assertIn(exception.FPGAProgramError(
            ret=mock_program.return_value).args[0],
            exc.args[0]
        )

    @mock.patch('cyborg.objects.Device.get_by_device_id')
    @mock.patch('cyborg.objects.Deployable.get_cpid_list')
    @mock.patch('cyborg.objects.Deployable.get')
    @mock.patch('cyborg.agent.rpcapi.AgentAPI.fpga_program')
    def test_program_invalid_uuid(self, mock_program, mock_get_dep,
                                  mock_get_cpid_list, mock_get_by_device_id):
        self._setup_mocks(mock_program, mock_get_dep, mock_get_cpid_list,
                          mock_get_by_device_id, program_result=False)
        body = [{"image_uuid": self.invalid_image_uuid}]
        try:
            self._program(body)
        except Exception as e:
            exc = e
        else:
            self.fail('invalid image UUID did not raise')
        self.assertIn(exception.InvalidUUID(self.invalid_image_uuid).args[0],
                      exc.args[0])

    @mock.patch('cyborg.objects.Device.get_by_device_id')
    @mock.patch('cyborg.objects.Deployable.get_cpid_list')
    @mock.patch('cyborg.objects.Deployable.get')
    @mock.patch('cyborg.agent.rpcapi.AgentAPI.fpga_program')
    def test_program_wrong_image_uuid(self, mock_program,
                                      mock_get_dep,
                                      mock_get_cpid_list,
                                      mock_get_by_device_id):
        self._setup_mocks(mock_program, mock_get_dep, mock_get_cpid_list,
                          mock_get_by_device_id, program_result=False)
        body = [{"image_uuid": self.nonexistent_image_uuid}]
        try:
            self._program(body)
        except Exception as e:
            exc = e
        else:
            self.fail('nonexistent image UUID did not raise')
        self.assertIn(exception.FPGAProgramError(
            ret=mock_program.return_value).args[0],
            exc.args[0]
        )
|
from __future__ import unicode_literals
from django.urls import reverse
from cradmin_legacy.demo.webdemo.views.sharable_link.mixins import QuerysetForRoleMixin
from cradmin_legacy.viewhelpers.detail import DetailView
class ShowView(QuerysetForRoleMixin, DetailView):
    """Show the sharable-link token available to the current role,
    together with the absolute accept-invite URL built from it."""
    template_name = 'webdemo/sharable_link/show.django.html'
    context_object_name = 'generictoken'

    def get_object(self, queryset=None):
        # There is at most one token per role; show the first (or None).
        return self.get_queryset().first()

    def get_context_data(self, **kwargs):
        context = super(ShowView, self).get_context_data(**kwargs)
        token_object = context['object']
        if token_object:
            relative_url = reverse('webdemo-inviteadmins-public-accept', kwargs={
                'token': token_object.token
            })
            context['url'] = self.request.build_absolute_uri(relative_url)
        return context
|
import torch as th
import numpy as np
import sys
class Device:
    """Process-wide CPU/GPU selector with helpers that place tensors on
    the chosen device.

    All state is class-level, so the choice made by ``set_device`` applies
    everywhere in the process.
    """
    gpu = False   # True once a GPU has been selected via set_device()
    dummy = None  # small tensor allocated eagerly to claim the chosen GPU

    @staticmethod
    def set_device(gpuid: int):
        """Select GPU ``gpuid``, or the CPU when ``gpuid`` is -1."""
        if gpuid != -1:
            Device.gpu = True
            th.cuda.set_device(gpuid)
            sys.stderr.write(f"Device set to GPU {gpuid}\n")
            Device.dummy = Device.float_tensor(1)  # occupy that GPU
        else:
            # Fixed: reset the flag (a GPU -> CPU switch previously left
            # Device.gpu == True), terminate the message with a newline,
            # and drop the placeholder-free f-string prefix.
            Device.gpu = False
            sys.stderr.write("Device set to CPU\n")

    @staticmethod
    def set_seed(seed: int):
        """Seed numpy and torch (and CUDA, when a GPU is active)."""
        np.random.seed(seed)
        th.manual_seed(seed)
        if Device.gpu:
            th.cuda.manual_seed(seed)

    @staticmethod
    def float_tensor(*args):
        """Create a float32 tensor on the active device."""
        if Device.gpu:
            return th.cuda.FloatTensor(*args)
        else:
            return th.FloatTensor(*args)

    @staticmethod
    def long_tensor(*args):
        """Create an int64 tensor on the active device."""
        if Device.gpu:
            return th.cuda.LongTensor(*args)
        else:
            return th.LongTensor(*args)

    @staticmethod
    def move(x):
        """Return ``x`` moved to the active device (no-op on CPU)."""
        if Device.gpu:
            return x.cuda()
        else:
            return x

    @staticmethod
    def from_numpy(x):
        """Wrap a numpy array as a tensor on the active device."""
        if Device.gpu:
            return th.from_numpy(x).cuda()
        else:
            return th.from_numpy(x)

    @staticmethod
    def to_numpy(x):
        """Detach ``x`` from the graph and return it as a numpy array."""
        if Device.gpu:
            return x.detach().cpu().numpy()
        else:
            return x.detach().numpy()

    @staticmethod
    def move_model(m):
        """Move model ``m`` to the GPU in place when one is active.

        Improved: the model is now also returned so calls can be chained;
        previously the result was discarded and ``None`` returned.
        """
        if Device.gpu:
            m.cuda()
        return m
|
# A new bid must be at least the current bid times this factor.
MINIMUM_BID_FACTOR = 1.15
# Factors should be calculated using regression on a real-estate data set.
# NOTE: each lambda raises KeyError for values outside the listed options.
FACTORS = {
    'BASE': 500,  # presumably the base price every estimate starts from — confirm
    'ROOMS': lambda x: {1: 1, 2: 1.15, 3: 1.25, 4: 1.50}[x],  # 1-4 rooms only
    'SALOONS': lambda x: {1: 1, 2: 1.25}[x],  # living rooms ("salon")
    'BUILDING_TYPES': lambda x: {'Daire': 1, 'Residans': 1.5, 'Mustakil Ev': 2, 'Villa': 2.5}[x],  # apartment / residence / detached house / villa
    'POST_TYPES': lambda x: {'Kiralik': 1, 'Satilik': 2000}[x],  # for rent / for sale
    'SIZE': lambda x: x/80,  # size normalised to an 80 m^2 baseline — TODO confirm unit
    'AGE': lambda x: 1-x/60,  # NOTE(review): goes negative for ages > 60 — confirm intended
    'ADDRESS': lambda x, y: float(y[y.areas == x].indices)  # presumably y is a DataFrame-like lookup of areas — verify against caller
}
from datetime import datetime, timezone
from http import HTTPStatus
from test_api.conftest import USER_URI, login, make_db_record
from dynamo.util import format_time
def test_get_user(client, tables, monkeypatch):
    """The user endpoint reports remaining quota and distinct job names;
    FAILED jobs and jobs without a name are excluded from job_names."""
    monkeypatch.setenv('MONTHLY_JOB_QUOTA_PER_USER', '25')
    now = format_time(datetime.now(timezone.utc))
    user = 'user_with_jobs'
    job_specs = [
        ('job1', 'PENDING', 'job1'),
        ('job2', 'RUNNING', 'job1'),
        ('job3', 'FAILED', 'job2'),
        ('job4', 'SUCCEEDED', None),
    ]
    for job_id, status, job_name in job_specs:
        record = make_db_record(job_id, user_id=user, request_time=now,
                                status_code=status, name=job_name)
        tables.jobs_table.put_item(Item=record)
    login(client, 'user_with_jobs')
    response = client.get(USER_URI)
    assert response.status_code == HTTPStatus.OK
    assert response.json == {
        'user_id': 'user_with_jobs',
        'quota': {
            'max_jobs_per_month': 25,
            'remaining': 21,
        },
        'job_names': [
            'job1',
            'job2',
        ],
    }
def test_user_at_quota(client, tables, monkeypatch):
    """Remaining quota counts down to zero and never goes negative."""
    monkeypatch.setenv('MONTHLY_JOB_QUOTA_PER_USER', '25')
    now = format_time(datetime.now(timezone.utc))
    # 24 of the 25 monthly jobs already used.
    for index in range(24):
        tables.jobs_table.put_item(
            Item=make_db_record(f'job{index}', request_time=now))
    login(client)
    response = client.get(USER_URI)
    assert response.status_code == HTTPStatus.OK
    assert response.json['quota']['remaining'] == 1
    # One more job exhausts the quota; a further one must not push the
    # remaining count below zero.
    for extra_job in ('anotherJob', 'yetAnotherJob'):
        tables.jobs_table.put_item(
            Item=make_db_record(extra_job, request_time=now))
        response = client.get(USER_URI)
        assert response.status_code == HTTPStatus.OK
        assert response.json['quota']['remaining'] == 0
def test_get_user_custom_quota(client, tables):
    """A row in the users table overrides the default monthly quota."""
    username = 'user_with_custom_quota'
    login(client, username)
    tables.users_table.put_item(
        Item={'user_id': username, 'max_jobs_per_month': 50})
    response = client.get(USER_URI)
    assert response.status_code == HTTPStatus.OK
    expected = {
        'user_id': username,
        'quota': {
            'max_jobs_per_month': 50,
            'remaining': 50,
        },
        'job_names': [],
    }
    assert response.json == expected
|
from django.urls import path
from .views import *
app_name = "warriors_app"
# Route table for the warriors app.  View classes come from the wildcard
# import of .views above; route purposes below are inferred from the view
# names — verify against the views module.
urlpatterns = [
    path('warriors/', WarriorAPIView.as_view()),  # warrior collection
    path('profession/create/', ProfessionCreateView.as_view()),
    path('skill/create/', SkillCreateView.as_view()),
    path('skill/', SkillView.as_view()),
    path('warriors/skills/', WarriorSkillAPIView.as_view()),
    path('warriors/professions/', WarriorProfessionAPIView.as_view()),
    path('warriors/<int:pk>/', WarriorID.as_view()),  # single warrior by pk
    path('warriors/delete/<int:pk>/', WarriorDelete.as_view()),
    path('warriors/create/', WarriorCreateAPIView.as_view()),
    path('warriors/update/<int:pk>/', WarriorUpdate.as_view()),
]
from utils.bbox import BBoxes
from utils.bbox import stack_bboxes
import torch
import unittest
class TestBBoxes(unittest.TestCase):
    """Unit tests for BBoxes construction, arithmetic, format changes
    and stacking."""

    def setUp(self):
        # A single box spanning (0, 0) to (20, 20) in corner format.
        self.tensor = torch.tensor([[0,0,20,20]])
        self.fmt = "xyxy"

    def test_init(self):
        self.assertIsNotNone(BBoxes(self.tensor, self.fmt))

    def test_addition(self):
        box = BBoxes(self.tensor, self.fmt)
        summed = box + box
        self.assertEqual(summed, BBoxes(torch.tensor([[0, 0, 40, 40]]), "xyxy"))

    def test_change_fmt(self):
        box = BBoxes(self.tensor, self.fmt)
        box.change_format("xyhw")
        centre_form = BBoxes(torch.tensor([[10, 10, 20, 20]]), "xyhw")
        self.assertEqual(box, centre_form)
        # Converting to the format the box is already in must be a no-op.
        box.change_format("xyhw")
        self.assertEqual(box, centre_form)
        # Round-trip back to corner format restores the original box.
        box.change_format("xyxy")
        self.assertEqual(box, BBoxes(self.tensor, self.fmt))

    def test_stack(self):
        first = BBoxes(self.tensor, self.fmt)
        second = BBoxes(self.tensor, self.fmt)
        self.assertIsNotNone(stack_bboxes((first, second)))

    def test_shape(self):
        self.assertEqual(BBoxes(self.tensor, self.fmt).shape[0], 1)
|
import os
import time
class WorkSplitter(object):
    """Print banner-style section headings centred to the terminal width."""

    def __init__(self):
        # `stty size` prints "<rows> <columns>".  Fall back to a fixed
        # width when there is no controlling terminal (e.g. piped output).
        # Fixed: the popen handle is now closed, and the bare `except:`
        # no longer swallows KeyboardInterrupt/SystemExit.
        try:
            with os.popen('stty size', 'r') as pipe:
                _, columns = pipe.read().split()
            self.columns = int(columns)
        except Exception:
            self.columns = 50

    def _padding(self, name):
        """Return (left, right) space widths that centre `name`."""
        name_length = len(name)
        left_length = int((self.columns - name_length) / 2)
        right_length = int(self.columns - name_length - left_length)
        return left_length, right_length

    def section(self, name):
        """Print `name` boxed between two full-width '=' rules."""
        left_length, right_length = self._padding(name)
        output = '='*self.columns+'\n' \
            + "|"+' '*(left_length-1)+name+' '*(right_length-1)+'|\n'\
            + '='*self.columns+'\n'
        print(output)

    def subsection(self, name):
        """Print `name` centred in a row of '#' characters."""
        left_length, right_length = self._padding(name)
        output = '#' * (left_length-1) + ' ' + name + ' ' + '#' * (right_length-1) + '\n'
        print(output)

    def subsubsection(self, name):
        """Print `name` centred in a row of '-' characters."""
        left_length, right_length = self._padding(name)
        output = '-' * (left_length-1) + ' ' + name + ' ' + '-' * (right_length-1) + '\n'
        print(output)
def inhour(elapsed):
    """Format an elapsed duration in seconds as an HH:MM:SS string."""
    broken_down = time.gmtime(elapsed)
    return time.strftime('%H:%M:%S', broken_down)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 13 10:20:34 2020
@author: miketaylor
"""
|
from . import test_foo, test_bar
|
#!/usr/bin/env python3
# coding=UTF-8
#
# CWA Data Conversion Tool
#
# Copyright (c) 2011 Technische Universität München,
# Distributed Multimodal Information Processing Group
# http://vmi.lmt.ei.tum.de
# All rights reserved.
#
# Stefan Diewald <stefan.diewald [at] tum.de>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the University or Institute nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The program is used to convert Axivity AX3 accelerometer data files,
# CWA-DATA.CWA to comma separated variable text file format.
#
# The original program was modified by Jason Leake, November 2019, as follows:
#
# - Convert to Python3
# - Open dialog box to get the input filename if not specified on command line
# - Output to an ascii file instead of an Sqlite database
# - Output annotations to another file, <original_file>_metadata.txt
# - Add some more annotation abbreviations
# - Don't print a * for every record converted, just a summary occasionally
# - Some re-arrangement of the code
# - Add cwa function to make trivially callable from other Python modules
#
from datetime import datetime
from math import floor
from os import path
from struct import pack, unpack
from tkinter import filedialog
import argparse
import io
import sys
import time
import tkinter as tk
def byte(value):
    """Reinterpret `value` as a signed 8-bit integer (-128..127)."""
    return ((value + 128) % 256) - 128
def ushort(value):
    """Reinterpret `value` as an unsigned 16-bit integer (0..65535)."""
    return value % 65536
def short(value):
    """Reinterpret `value` as a signed 16-bit integer (-32768..32767)."""
    return ((value + 32768) % 65536) - 32768
# Local "URL-decode as UTF-8 string" function
def urldecode(input):
    """URL-decode `input` and return it as a UTF-8 string.

    Percent-encoded byte pairs (%XX) are decoded, '+' is treated as a
    space (application/x-www-form-urlencoded), and all other characters
    pass through unchanged.  Malformed escapes are decoded best-effort
    rather than rejected.
    """
    decoded = bytearray()
    pending = 0  # hex digits still expected for the current %XX escape
    value = 0
    for char in input:
        if char == '%':
            # Start of a percent-encoded hex pair
            pending = 2
            value = 0
        elif pending > 0:
            # Accumulate the hex digits of the escape
            value *= 16
            if 'a' <= char <= 'f':
                value += ord(char) - ord('a') + 10
            elif 'A' <= char <= 'F':
                value += ord(char) - ord('A') + 10
            elif '0' <= char <= '9':
                value += ord(char) - ord('0')
            pending -= 1
            if pending == 0:
                decoded.append(value)
        elif char == '+':
            decoded.append(ord(' '))
        else:
            decoded.append(ord(char))
    return decoded.decode('utf-8')
class Parameters:
    """ Holds parameters derived from command line options etc
    """
    def __init__(self, args=None):
        """Initialise from an argparse namespace, or with defaults when
        `args` is None (e.g. when the file was chosen via the GUI)."""
        if args is None:
            self.verbose = False
            self.limit = None
            self.version = False
            self.linux = False
            self.standardGravity = False
            self.writeHeader = True
        else:
            self.verbose = args.verbose
            self.limit = args.limit
            self.version = args.version
            self.linux = args.linux
            self.standardGravity = args.sg
            self.writeHeader = not args.noheader

    def set(self, verbose, limit, version, linux, sg, noheader):
        """ Set parameters to non-default values """
        self.verbose = verbose
        self.limit = limit
        self.version = version
        self.linux = linux
        self.standardGravity = sg
        self.writeHeader = not noheader
class CWA_Sample:
    """Plain record for one accelerometer sample; x, y, z and t are
    assigned dynamically while parsing an 'AX' data packet."""
    pass
class CWA:
    """Reader/converter for an Axivity AX3 ``.CWA`` accelerometer file.

    Calling an instance with a :class:`Parameters` object writes a CSV file
    of ``datetime, x, y, z`` rows plus a ``*_metadata.txt`` annotation file,
    and returns the number of CSV data lines generated.
    """
    # Placeholder in case we want to understand data produced by
    # devices other than AX3
    isAnAx3 = True
    STANDARD_GRAVITY = 9.80665 # m/s^2, i.e. 58966 furlongs/fortnight^2
    def __init__(self, filename):
        """ Parameters: filename - input filename
        """
        self._filename = filename
        # Output files sit next to the input, with the extension replaced.
        self.outputFilename = path.splitext(self._filename)[0] + ".csv"
        self.metadataFilename = path.splitext(self._filename)[0] + "_metadata.txt"
    def __call__(self, parameters):
        """Convert the file to CSV; return the number of data lines written."""
        if parameters.linux:
            lineEnd = "\n"
        else:
            lineEnd = "\r\n"
        if parameters.version:
            # My versions contain my github username to avoid
            # clashes, in case OpenMovement start producing their
            # own version numbering scheme
            print("cwa.py, version Isopleth 1.03")
        linesGenerated = 0
        if len(self._filename) == 0:
            print("No filename specified", file=sys.stderr)
            return linesGenerated
        print(f"Converting {self._filename}, output is "
              f"{self.outputFilename} and {self.metadataFilename}")
        if not path.exists(self._filename):
            print("File does not exist", file=sys.stderr)
            return linesGenerated
        # State for the timestamp interpolator across consecutive packets.
        lastSequenceId = None
        lastTimestampOffset = None
        lastTimestamp = None
        with open(self.outputFilename, 'w') as out:
            if parameters.writeHeader:
                out.write("datetime, x, y, z{}".format(lineEnd))
            # The handle is kept on self so parse_header()/read_timestamp()
            # can continue reading from the same position.
            with open(self._filename, 'rb') as self.fh:
                header = self.fh.read(2).decode("ISO-8859-1")
                while len(header) == 2:
                    if parameters.verbose:
                        print("Section header is %s" % (header))
                    if header == 'MD':
                        self.parse_header(metadataFilename=self.metadataFilename)
                    elif header == 'UB':
                        # NOTE(review): the block size is read but the block
                        # payload is not skipped — confirm 'UB' sections
                        # never carry data in these files.
                        blockSize = unpack('H', self.fh.read(2))[0]
                    elif header == 'SI':
                        pass
                    elif header == 'AX':
                        # A 512-byte data packet: fixed header fields,
                        # 480 bytes of packed samples, 16-bit checksum.
                        packetLength = unpack('H', self.fh.read(2))[0]
                        deviceId = unpack('H', self.fh.read(2))[0]
                        sessionId = unpack('I', self.fh.read(4))[0]
                        sequenceId = unpack('I', self.fh.read(4))[0]
                        sampleTime = self.read_timestamp(self.fh.read(4))
                        light = unpack('H', self.fh.read(2))[0]
                        temperature = unpack('H', self.fh.read(2))[0]
                        events = self.fh.read(1)
                        battery = unpack('B', self.fh.read(1))[0]
                        sampleRate = unpack('B', self.fh.read(1))[0]
                        numAxesBPS = unpack('B', self.fh.read(1))[0]
                        timestampOffset = unpack('h', self.fh.read(2))[0]
                        sampleCount = unpack('H', self.fh.read(2))[0]
                        if parameters.verbose:
                            print(f"Sample count is {sampleCount}")
                        sampleData = io.BytesIO(self.fh.read(480))
                        checksum = unpack('H', self.fh.read(2))[0]
                        if packetLength != 508:
                            print("Packet length is not 508!", file=sys.stderr)
                            continue
                        if sampleTime == None:
                            print("Sample time is undefined!", file=sys.stderr)
                            continue
                        if sampleRate == 0:
                            chksum = 0
                        else:
                            # rewind for checksum calculation
                            self.fh.seek(-packetLength - 4, 1)
                            # calculate checksum
                            chksum = 0
                            for x in range(packetLength // 2 + 2):
                                chksum += unpack('H', self.fh.read(2))[0]
                            chksum %= 2 ** 16
                        # A valid packet sums to zero modulo 2**16.
                        if chksum != 0:
                            continue
                        if sessionId != self.sessionId:
                            print(f"Bad session ID {sessionId} - should be {self.sessionId}", file=sys.stderr)
                            continue
                        if ((numAxesBPS >> 4) & 15) != 3:
                            print('[ERROR: Axes!=3 not supported yet -- this will not work properly]', file=sys.stderr)
                        if (light & 0xfc00) != 0:
                            print('[ERROR: Scale not supported yet -- this will not work properly]', file=sys.stderr)
                        # Bottom nibble selects the sample packing:
                        # 2 -> three 16-bit values, 0 -> packed 32-bit word.
                        # NOTE(review): bps stays unset for any other nibble
                        # value, and "if bps == 6" below would then raise
                        # NameError — confirm such packets cannot occur.
                        if (numAxesBPS & 15) == 2:
                            bps = 6
                        elif (numAxesBPS & 15) == 0:
                            bps = 4
                        freq = float(3200) / (1 << (15 - sampleRate & 15))
                        if freq <= 0:
                            freq = 1
                        # range = 16 >> (rateCode >> 6)
                        timeFractional = 0
                        # if top-bit set, we have a fractional date
                        if deviceId & 0x8000:
                            # Need to undo backwards-compatible shim
                            # by calculating how many whole samples
                            # the fractional part of timestamp
                            # accounts for.
                            timeFractional = (deviceId & 0x7fff) * 2 # use original deviceId field bottom 15-bits as 16-bit fractional time
                            timestampOffset += (timeFractional * int(freq)) // 65536 # undo the backwards-compatible shift (as we have a true fractional)
                            timeFractional = float(timeFractional) / 65536
                        # Add fractional time to timestamp
                        timestamp = float(time.mktime(sampleTime)) + timeFractional
                        # --- Time interpolation ---
                        # Reset interpolator if there's a sequence break or there was no previous timestamp
                        if lastSequenceId == None or (lastSequenceId + 1) & 0xffff != sequenceId or lastTimestampOffset == None or lastTimestamp == None:
                            # Bootstrapping condition is a sample one second ago (assuming the ideal frequency)
                            lastTimestampOffset = timestampOffset - freq
                            lastTimestamp = timestamp - 1
                            lastSequenceId = sequenceId - 1
                        localFreq = float(timestampOffset - lastTimestampOffset) / (timestamp - lastTimestamp)
                        time0 = timestamp + -timestampOffset / localFreq
                        # Update for next loop
                        lastSequenceId = sequenceId
                        lastTimestampOffset = timestampOffset - sampleCount
                        lastTimestamp = timestamp
                        for x in range(sampleCount):
                            sample = CWA_Sample()
                            if bps == 6:
                                sample.x = unpack('h', sampleData.read(2))[0]
                                sample.y = unpack('h', sampleData.read(2))[0]
                                sample.z = unpack('h', sampleData.read(2))[0]
                            elif bps == 4:
                                # Unpack 3x10-bit values plus 2-bit exponent.
                                temp = unpack('I', sampleData.read(4))[0]
                                temp2 = (6 - byte(temp >> 30))
                                sample.x = short(short((ushort(65472) & ushort(temp << 6))) >> temp2)
                                sample.y = short(short((ushort(65472) & ushort(temp >> 4))) >> temp2)
                                sample.z = short(short((ushort(65472) & ushort(temp >> 14))) >> temp2)
                            sample.t = time0 + (x / localFreq)
                            tStr = "{:.5f}".format(sample.t)
                            timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(int(floor(sample.t))))
                            fractionTimestamp = tStr.split(".",1)[1]
                            # Convert 10 us units to ms units
                            fractionTimestamp = float(fractionTimestamp) / 100
                            # Integer number of ms
                            fractionTimestamp = int(fractionTimestamp)
                            fractionTimestamp = str(fractionTimestamp)
                            # Pad to three digits. It is after decimal point so add
                            # digits to left. e.g. if it is 10 ms, then this would
                            # be represented as 010 after the decimal point, i.e.
                            # 0.010
                            while len(fractionTimestamp) < 3:
                                fractionTimestamp = "0" + fractionTimestamp
                            # Units for AX3 are 1/256 g
                            # Standard gravity is 9.80665
                            outvals = [sample.x, sample.y, sample.z]
                            if self.isAnAx3:
                                if parameters.standardGravity:
                                    conversionFactor = self.STANDARD_GRAVITY / 256
                                else:
                                    conversionFactor = 1/256;
                                for index in range(len(outvals)):
                                    outvals[index] = outvals[index] * conversionFactor
                            out.write("{}.{},{:.06f},{:.06f},{:.06f}{}".format(
                                timestamp, fractionTimestamp, outvals[0],
                                outvals[1], outvals[2], lineEnd))
                            linesGenerated += 1
                            if parameters.limit is not None and linesGenerated >= parameters.limit:
                                return linesGenerated
                            if linesGenerated % 1000000 == 0 and linesGenerated != 0:
                                print(f"{linesGenerated} lines of output generated")
                    else:
                        print(f"Unrecognized section header, {header}!", file=sys.stderr)
                        break
                    header = self.fh.read(2).decode("ISO-8859-1")
        return linesGenerated
    # Parse the "MD" format file header
    def parse_header(self, metadataFilename = None):
        """Parse the metadata block, populate device/session attributes on
        self, and optionally write them to `metadataFilename`."""
        blockSize = unpack('H', self.fh.read(2))[0]
        performClear = unpack('B', self.fh.read(1))[0]
        deviceId = unpack('H', self.fh.read(2))[0]
        sessionId = unpack('I', self.fh.read(4))[0]
        deviceIdUpper = unpack('H', self.fh.read(2))[0]
        if deviceIdUpper != 0xffff:
            deviceId |= deviceIdUpper << 16
        loggingStartTime = self.fh.read(4)
        loggingEndTime = self.fh.read(4)
        loggingCapacity = unpack('I', self.fh.read(4))[0]
        allowStandby = unpack('B', self.fh.read(1))[0]
        debuggingInfo = unpack('B', self.fh.read(1))[0]
        batteryMinimumToLog = unpack('H', self.fh.read(2))[0]
        batteryWarning = unpack('H', self.fh.read(2))[0]
        enableSerial = unpack('B', self.fh.read(1))[0]
        lastClearTime = self.fh.read(4)
        samplingRate = unpack('B', self.fh.read(1))[0]
        lastChangeTime = self.fh.read(4)
        firmwareVersion = unpack('B', self.fh.read(1))[0]
        reserved = self.fh.read(22)
        # Annotations are stored as '&'-separated key=value pairs, padded
        # with 0xff / spaces; '?' was historically written in place of '&'.
        annotationBlock = self.fh.read(448 + 512)
        if len(annotationBlock) < 448 + 512:
            annotationBlock = ""
        annotation = ""
        for x in annotationBlock:
            if x != 255 and x != ' ':
                if x == '?':
                    x = '&'
                annotation += str(chr(x))
        annotation = annotation.strip()
        annotationElements = annotation.split('&')
        # Map the abbreviated annotation keys to readable names.
        annotationNames = {
            # at device set-up time
            "_c": "studyCentre",
            "_s": "studyCode",
            "_i": "investigator",
            "_x": "exerciseCode",
            "_v": "volunteerNum",
            "_p": "bodyLocation",
            "_so": "setupOperator",
            "_n": "notes",
            "_se": "sex",
            "_h": "height",
            "_w": "weight",
            "_ha": "handedness",
            "_sc": "subject code",
            # at retrieval time
            "_b": "startTime",
            "_e": "endTime",
            "_ro": "recoveryOperator",
            "_r": "retrievalTime",
            "_co": "comments"}
        annotations = dict()
        for element in annotationElements:
            kv = element.split('=', 2)
            annotationName = urldecode(kv[0])
            if kv[0] in annotationNames:
                annotationName = annotationNames[kv[0]]
            annotations[annotationName] = urldecode(kv[1])
        # Normalise the two date formats seen in the wild.
        for x in ('startTime', 'endTime', 'retrievalTime'):
            if x in annotations:
                if '/' in annotations[x]:
                    annotations[x] = time.strptime(annotations[x], '%d/%m/%Y')
                else:
                    annotations[x] = time.strptime(annotations[x], '%Y-%m-%d %H:%M:%S')
        self.annotations = annotations
        self.deviceId = deviceId
        self.sessionId = sessionId
        self.lastClearTime = self.read_timestamp(lastClearTime)
        self.lastChangeTime = self.read_timestamp(lastChangeTime)
        # 255 means "not set" in the firmware version field.
        self.firmwareVersion = firmwareVersion if firmwareVersion != 255 else 0
        self.loggingStartTime = self.read_timestamp(loggingStartTime)
        self.loggingEndTime = self.read_timestamp(loggingEndTime)
        if metadataFilename is not None:
            with open(metadataFilename, "w") as out:
                out.write(f"Device ID: {self.deviceId}\n")
                out.write(f"Session ID: {self.sessionId}\n")
                out.write(f"Last clear time: {self.lastClearTime}\n")
                out.write(f"Last change time: {self.lastChangeTime}\n")
                out.write(f"Logging start time: {self.loggingStartTime}\n")
                out.write(f"Logging end time: {self.loggingEndTime}\n")
                out.write(f"Firmware version: {self.firmwareVersion}\n")
                out.write("Annotations\n")
                for key in self.annotations.keys():
                    out.write(f"{key}: {self.annotations.get(key)}\n")
    def read_timestamp(self, stamp):
        """Decode a packed 32-bit device timestamp to a time.struct_time,
        or None when the packed fields do not form a valid date."""
        stamp = unpack('I', stamp)[0]
        # bit pattern:  YYYYYYMM MMDDDDDh hhhhmmmm mmssssss
        year = ((stamp >> 26) & 0x3f) + 2000
        month = (stamp >> 22) & 0x0f
        day = (stamp >> 17) & 0x1f
        hours = (stamp >> 12) & 0x1f
        mins = (stamp >> 6) & 0x3f
        secs = (stamp >> 0) & 0x3f
        try:
            t = time.strptime(str(datetime(year,
                                           month,
                                           day,
                                           hours,
                                           mins,
                                           secs)), '%Y-%m-%d %H:%M:%S')
        except ValueError:
            t = None
        return t
def main():
    """Convert the CWA file named on the command line, or prompt for one
    with a file-open dialog when no arguments were given."""
    if len(sys.argv) >= 2:
        parser = argparse.ArgumentParser(description="Convert Continuous Wave Accelerometer format files to CSV")
        parser.add_argument("filename",
                            help="Input filename")
        parser.add_argument("--noheader",
                            help="Suppress headings on columns",
                            action="store_true")
        parser.add_argument("--verbose",
                            help="Verbose output", action="store_true")
        parser.add_argument("--limit",
                            help="Stop after this number of output lines",
                            type=int)
        parser.add_argument("--version",
                            help="Display program version", action="store_true")
        parser.add_argument("--linux",
                            help="Output Linux line endings",
                            action="store_true")
        parser.add_argument("--sg",
                            help="Use standard gravity for units",
                            action="store_true")
        args = parser.parse_args()
        parameters = Parameters(args)
        filePath = args.filename
    else:
        # No arguments: ask for the input file interactively.
        dialogRoot = tk.Tk()
        dialogRoot.withdraw()
        filePath = filedialog.askopenfilename(
            filetypes=[("Continuous Wave Accelerometer (CWA) format", ".CWA")])
        parameters = Parameters()
    converter = CWA(filePath)
    linesWritten = converter(parameters)
    print(f"{linesWritten} lines of output generated")
def cwa(filePath, verbose=False, limit=None, version=False,
        linux=False, sg=False, noheader=False, process=True):
    """ This is an easy to use entry point for other modules

    Converts `filePath` (when `process` is True) and returns the list
    [csvFilename, metadataFilename] of the output file names.
    """
    # Fixed: the local variable was previously also named `cwa`, shadowing
    # this function within its own body.
    converter = CWA(filePath)
    parameters = Parameters()
    parameters.set(verbose, limit, version, linux, sg, noheader)
    if process:
        linesGenerated = converter(parameters)
        print(f"{linesGenerated} lines of output generated")
    return [converter.outputFilename, converter.metadataFilename]
# Allow use both as a script and as an importable module (via cwa()).
if __name__ == "__main__":
    main()
|
#
# test_siteconfig.py
#
# Tests for site config generation (siteconfig.py).
#
# Copyright (C) 2018-2021 by
# Nikhil Ramakrishnan.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
"""Docwriter site config tests.
This module tests the validity of the `yml` configuration
generated by `siteconfig.py`.
"""
import os
import yaml
from docwriter import siteconfig
from docwriter import utils
# Shared SiteConfig instance exercised by the test below.
config = siteconfig.SiteConfig()
# Config vars
site_name = "Foo Bar Test"
site_description = "Test documentation for Foo Bar."
site_author = "Pytest"
toc_filename = "foo-toc.md"
index_filename = "foo-index.md"
# Chapters and the pages (sections) each one contains.
c1_sec = ["c1s1", "c1s2", "c1s3"]
c2_sec = ["c2s1", "c2s2"]
pages = {}
pages['chap1'] = c1_sec
pages['chap2'] = c2_sec
def test_config( tmpdir, caplog ):
    """Build a full site config and check the generated mkdocs.yml is
    valid YAML and that no warnings were logged while building it."""
    utils.output_dir = str( tmpdir )
    # Set site config
    config.set_site_info( site_name, site_description,
                          site_author )
    # Add toc and index
    config.add_single_page( "TOC", toc_filename )
    config.add_single_page( "Index", index_filename )
    # Add chapters and pages
    for chap, parts in pages.items():
        config.start_chapter( chap )
        for sec in parts:
            config.add_chapter_page( sec, sec + ".md" )
        config.end_chapter()
    # Done, Build config
    config.build_config()
    # Open file and parse yml
    filepath = os.path.join( str( tmpdir ), 'mkdocs.yml' )
    # Fixed: the file handle was previously opened without being closed.
    with open( filepath, 'rb' ) as config_file:
        result = config_file.read()
    data = yaml.safe_load(result)
    # Assertions
    assert data is not None
    for record in caplog.records:
        # Strict build - there should be no warnings
        assert record.levelname != 'WARNING'
|
# Copyright 2017 The UAI-SDK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" A very simple MNIST inferencer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import caffe
from uai.arch.caffe_model import CaffeAiUcloudModel
class MnistModel(CaffeAiUcloudModel):
    """ Mnist example model
    """
    def __init__(self, conf):
        super(MnistModel, self).__init__(conf)

    def load_model(self):
        """Load the serialized Caffe net in inference (TEST) mode."""
        self.model = caffe.Net(self.model_arch_file, self.model_weight_file, caffe.TEST)

    def execute(self, data, batch_size):
        """Classify `batch_size` grayscale images given in `data` and
        return a list of newline-terminated digit labels (one per image).
        """
        # The transformer depends only on the (fixed) input blob shape,
        # so build it once instead of once per image.
        transformer = caffe.io.Transformer({'data': self.model.blobs['data'].data.shape})
        transformer.set_transpose('data', (2, 0, 1))
        transformer.set_raw_scale('data', 255)
        ret = []
        for i in range(batch_size):
            im = caffe.io.load_image(data[i], color=False)
            self.model.blobs['data'].data[...] = transformer.preprocess('data', im)
            self.model.forward()
            prob = self.model.blobs['prob'].data[0].flatten()
            # argsort()[-1] is the index of the highest probability class.
            ret_val = str(prob.argsort()[-1]) + '\n'
            ret.append(ret_val)
        return ret
|
from __future__ import annotations
import abc
import datetime
import typing
import pydantic
from src import core
__all__ = ("Todo",)
class Todo(pydantic.BaseModel, abc.ABC):
    """Immutable base model for a to-do item that recurs on a schedule
    defined by subclasses via `current_date`."""
    # Days before the due date that the item should start being displayed.
    advance_days: int
    category: core.TodoCategory
    date_added: datetime.date
    date_completed: typing.Optional[datetime.date]
    description: str
    note: str
    start_date: typing.Optional[datetime.date]
    todo_id: int
    user_id: int

    class Config:
        allow_mutation = False
        anystr_strip_whitespace = True

    def display(self, /, today: typing.Optional[datetime.date] = None) -> bool:
        """Return True when the item should be shown as of `today`
        (defaults to the actual current date)."""
        if today is None:
            today = datetime.date.today()
        current_date = self.current_date(today)
        current_advance_date = current_date - datetime.timedelta(days=self.advance_days)
        # Already completed for this cycle -> hide it.
        if self.date_completed and self.date_completed >= current_advance_date:  # noqa
            return False
        elif today >= current_advance_date:
            return True
        else:
            return False

    @abc.abstractmethod
    def current_date(
        self, /, today: datetime.date = datetime.date.today()
    ) -> datetime.date:
        """Return the next date this item is due, relative to `today`."""
        # NOTE(review): the default above is evaluated once at import time,
        # so a long-running process would see a stale date — overriding
        # implementations should not rely on it.  TODO confirm intent.
        raise NotImplementedError

    def days_until(self, /, today: typing.Optional[datetime.date] = None) -> int:
        """Return the number of days from `today` until the item is due."""
        if today is None:
            today = datetime.date.today()
        return (self.current_date(today) - today).days
|
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import re
from mpi4py import MPI
from pyfr.bases import BaseBasis
from pyfr.inifile import Inifile
from pyfr.util import proxylist, subclass_map
class BaseSystem(object):
    """Abstract base for a PyFR flow-solver system: owns the elements,
    the internal/MPI/boundary interfaces and the backend queues.

    NOTE: this is Python 2 code (`__metaclass__`, `iteritems`, `xrange`).
    """
    __metaclass__ = ABCMeta

    # Relevant derived classes
    elementscls = None
    intinterscls = None
    mpiinterscls = None
    bbcinterscls = None

    # Number of queues to allocate
    _nqueues = None

    def __init__(self, backend, rallocs, mesh, initsoln, nreg, cfg):
        """Load all mesh entities for this rank and prepare the backend
        queues and kernels."""
        self._backend = backend
        self._cfg = cfg
        self._nreg = nreg
        # Load the elements and interfaces from the mesh
        self._load_eles(rallocs, mesh, initsoln)
        self._load_int_inters(rallocs, mesh)
        self._load_mpi_inters(rallocs, mesh)
        self._load_bc_inters(rallocs, mesh)
        # Prepare the queues and kernels
        self._gen_queues()
        self._gen_kernels()

    def _load_eles(self, rallocs, mesh, initsoln):
        """Instantiate the element groups present in the mesh for this
        rank and set their initial conditions."""
        basismap = subclass_map(BaseBasis, 'name')
        # Look for and load each element type from the mesh
        self._elemaps = elemaps = OrderedDict()
        for bname, bcls in basismap.iteritems():
            mk = 'spt_%s_p%d' % (bname, rallocs.prank)
            if mk in mesh:
                elemaps[bname] = self.elementscls(bcls, mesh[mk], self._cfg)
        # Construct a proxylist to simplify collective operations
        self._eles = eles = proxylist(elemaps.values())
        # Set the initial conditions either from a pyfrs file or from
        # explicit expressions in the config file
        if initsoln:
            # Load the config used to produce the solution
            solncfg = Inifile(initsoln['config'].item())
            # Process the solution
            for k, ele in elemaps.iteritems():
                soln = initsoln['soln_%s_p%d' % (k, rallocs.prank)]
                ele.set_ics_from_soln(soln, solncfg)
        else:
            eles.set_ics_from_cfg()
        # Allocate these elements on the backend
        eles.set_backend(self._backend, self._nreg)

    def _load_int_inters(self, rallocs, mesh):
        """Load the rank-internal interfaces from the mesh."""
        lhs, rhs = mesh['con_p%d' % rallocs.prank]
        int_inters = self.intinterscls(self._backend, lhs, rhs, self._elemaps,
                                       self._cfg)
        # Although we only have a single internal interfaces instance
        # we wrap it in a proxylist for consistency
        self._int_inters = proxylist([int_inters])

    def _load_mpi_inters(self, rallocs, mesh):
        """Load one MPI interface per neighbouring partition rank."""
        lhsprank = rallocs.prank
        self._mpi_inters = proxylist([])
        for rhsprank in rallocs.prankconn[lhsprank]:
            rhsmrank = rallocs.pmrankmap[rhsprank]
            interarr = mesh['con_p%dp%d' % (lhsprank, rhsprank)]
            mpiiface = self.mpiinterscls(self._backend, interarr, rhsmrank,
                                         rallocs, self._elemaps, self._cfg)
            self._mpi_inters.append(mpiiface)

    def _load_bc_inters(self, rallocs, mesh):
        """Instantiate a boundary-condition interface for each boundary
        region of this rank found in the mesh."""
        bcmap = subclass_map(self.bbcinterscls, 'type')
        self._bc_inters = proxylist([])
        for f in mesh:
            m = re.match('bcon_(.+?)_p%d' % rallocs.prank, f)
            if m:
                # Get the region name
                rgn = m.group(1)
                # Determine the config file section
                cfgsect = 'soln-bcs-%s' % rgn
                # Instantiate
                bcclass = bcmap[self._cfg.get(cfgsect, 'type')]
                bciface = bcclass(self._backend, mesh[f], self._elemaps,
                                  cfgsect, self._cfg)
                self._bc_inters.append(bciface)

    def _gen_queues(self):
        """Allocate the backend queues (count set by the subclass)."""
        self._queues = [self._backend.queue() for i in xrange(self._nqueues)]

    @abstractmethod
    def _gen_kernels(self):
        pass

    @abstractmethod
    def _get_negdivf(self):
        pass

    def __call__(self, uinbank, foutbank):
        """Evaluate -div(F) of the solution in `uinbank`, writing the
        result into `foutbank`."""
        # Set the banks to use for each element type
        self._eles.scal_upts_inb.active = uinbank
        self._eles.scal_upts_outb.active = foutbank
        # Delegate to our subclass
        self._get_negdivf()
        # Wait for all ranks to finish
        MPI.COMM_WORLD.barrier()

    @property
    def ele_banks(self):
        # One list of matrix banks per element type.
        return [list(b) for b in self._eles.scal_upts_inb]

    @property
    def ele_types(self):
        # Names of the element types present on this rank.
        return list(self._elemaps)

    @property
    def ele_shapes(self):
        # (upts, elements, vars) triple for each element type.
        return [(e.nupts, e.neles, e.nvars) for e in self._eles]

    @property
    def ele_ndofs(self):
        # Total degrees of freedom for each element type.
        return [e.neles*e.nupts*e.nvars for e in self._eles]

    def ele_scal_upts(self, idx):
        """Return the scalar solution matrices for bank `idx`."""
        return list(self._eles.get_scal_upts_mat(idx))
|
# REMINDER: this code needs to be rewritten for the new framework. Remove this comment when the code is fully converted.
import os
import transformers
from lm_eval.base import LM
from lm_eval import utils
class GPT3LM(LM):
    """Language-model wrapper around the OpenAI Completion API."""

    MAX_LENGTH = 2048  # model context length (tokens)

    def __init__(self, engine, truncate=False):
        """
        :param engine: str
            OpenAI API engine (e.g. davinci)
        :param truncate: bool
            Truncate input if too long (if False and input is too long, throw error)
        """
        import openai
        self.engine = engine
        self.tokenizer = transformers.GPT2TokenizerFast.from_pretrained('gpt2')
        # NOTE(review): the flag is stored but not yet honoured by
        # loglikelihood, which always slices to the last window.
        self.truncate = truncate
        # Read from environment variable OPENAI_API_SECRET_KEY
        openai.api_key = os.environ["OPENAI_API_SECRET_KEY"]

    @classmethod
    def create_from_arg_string(cls, arg_string):
        """Build an instance from a "key=value,..." argument string."""
        args = utils.simple_parse_args_string(arg_string)
        # Fixed: a user-supplied `truncate` argument was previously dropped.
        return cls(engine=args.get("engine", "davinci"),
                   truncate=args.get("truncate", False))

    def loglikelihood(self, context, continuation):
        """Return the summed log-probability of `continuation` given
        `context` under the configured engine."""
        # TODO: implement new framework
        import openai
        context_enc = self.tokenizer.encode(context)
        continuation_enc = self.tokenizer.encode(continuation)
        # NOTE(review): the window is hard-coded to 1024 tokens although
        # MAX_LENGTH is 2048 — presumably a leftover; confirm before changing.
        inp = (context_enc + continuation_enc)[-1024:]
        # Number of leading tokens that belong to the context after the
        # left-truncation above.
        ctxlen = len(context_enc) - max(0, len(context_enc) + len(continuation_enc) - 1024)
        response = openai.Completion.create(
            engine=self.engine,
            prompt=inp,
            echo=True,
            max_tokens=0, temperature=0.0,
            logprobs=0,
        )
        logprobs = response.choices[0]["logprobs"]["token_logprobs"]
        continuation_logprobs = logprobs[ctxlen:]
        return sum(continuation_logprobs)
|
import os
import nback.lib.db.mongodb as mongodb_dump
import nback.lib.db.mysql as mysql_dump
import nback.lib.db.postgresql as postgresql_dump
import nback.lib.logger as logger
import nback.lib.notification as notification
import nback.lib.utils as utils
import nback.lib.storage.aws_s3 as aws_s3
import nback.settings as settings
class Backup(logger.Logger):
    """Dump the configured databases, tar/compress the backup directories,
    upload the archive to AWS S3 and email a success notification."""
    # TODO(niklas9):
    # * need to add better error handling, notification should run even
    #   though script crashes at some point (upload, db dump, .. etc)
    # * remove usage of os.system and use subprocess instead
    log_file = settings.BACKUP_LOG_FILE
    ref_class = 'nback'
    # Set per instance: archive name, human-readable size, db dumper list
    filename = None
    filesize = None
    dbs = None
    FILENAME_FMT = '%s-%s.tar.%s'
    NOTI_TIMESTAMP_FMT = '%Y-%m-%d %H:%M'
    TAR_BIN = '/bin/tar'
    GZIP_SUFFIX = 'gz'
    BZIP2_SUFFIX = 'bz2'

    def __init__(self, *args, **kwargs):
        logger.Logger.__init__(self, *args, **kwargs)
        self.filename = self.gen_filename()
        self.log.debug('backup filename set to <%s>' % self.filename)
        # Instantiate one dumper per database engine enabled in settings
        self.dbs = []
        if settings.USE_MONGODB:
            self.dbs.append(mongodb_dump.MongoDBDump())
        if settings.USE_MYSQL:
            self.dbs.append(mysql_dump.MySQLDump())
        if settings.USE_POSTGRESQL:
            self.dbs.append(postgresql_dump.PostgreSQLDump())

    def dump_dbs(self):
        """Run every configured database dumper."""
        if len(self.dbs) == 0:
            self.log.warn('no databases setup to run, skipping..')
            return
        for db in self.dbs:
            db.dump()

    def tar_files(self):
        """Tar and compress the backup dirs plus each dumper's tmp dir into
        self.filename; records the resulting size in self.filesize."""
        # TODO(nandersson):
        # * add support for xz compression
        # * refactor this method, magic variables, more helper methods etc
        self.log.info('taring and compressing files...')
        self.log.debug('compressing with <%s>' %
                       settings.BACKUP_COMPRESSION_ALGO)
        # gzip ('z') is the default; only bzip2 needs a different tar flag
        tar_arg_comp = 'z'
        if settings.BACKUP_COMPRESSION_ALGO == self.BZIP2_SUFFIX:
            tar_arg_comp = 'j'
        tar_args = '-c%sf' % tar_arg_comp
        cmd_raw = '%s %s %s'
        if settings.BACKUP_TAR_IGNORE_FAILED_READ:
            cmd_raw = '%s --ignore-failed-read %s %s'
        if len(settings.BACKUP_DIRS_EXCLUDE) > 0:
            for ed in settings.BACKUP_DIRS_EXCLUDE:
                cmd_raw += ' --exclude=\'%s\'' % ed
        cmd = cmd_raw % (self.TAR_BIN, tar_args, self.filename)
        dirs = []
        for d in settings.BACKUP_DIRS:
            self.log.debug('adding dir <%s>..' % d)
            dirs.append(d)
        for db in self.dbs:
            self.log.debug('adding db dir <%s>..' % db.get_tmp_dir())
            dirs.append(db.get_tmp_dir())
        # Bail out before touching the shell if there is nothing to archive
        if len(dirs) == 0:
            self.log.warn('no dirs to backup, proceeding..')
            return
        # put cmd string together with dirs
        for d in dirs:
            cmd += ' %s' % d
        # NOTE: os.system is shell-injection prone; the values come from
        # trusted settings, but see the TODO above about subprocess.
        self.log.debug('executing cmd <%s>..' % cmd)
        os.system(cmd)
        self.log.debug('cmd complete')
        self.filesize = utils.file_size_fmt(os.path.getsize(self.filename))
        self.log.debug('<%s> saved compressed, <%s>' % (self.filename,
                                                        self.filesize))

    def upload_and_sync(self):
        """Upload the archive to the configured S3 bucket and sync it."""
        # TODO(niklas9):
        # * add base class wrapper for storage, just straight to AWS S3 for now
        s3 = aws_s3.AWSS3(settings.AWS_BUCKET, settings.AWS_ACCESS_KEY_ID,
                          settings.AWS_SECRET_ACCESS_KEY)
        self.log.info('uploading...')
        s3.connect()
        s3.upload(self.filename)
        self.log.info('syncing...')
        s3.sync(self.filename)
        s3.disconnect()

    def gen_filename(self):
        """Return '<server>-<timestamp>.tar.<compression suffix>'."""
        return self.FILENAME_FMT % (settings.BACKUP_SERVER_NAME,
                                    utils.get_timestamp(),
                                    settings.BACKUP_COMPRESSION_ALGO)

    def cleanup(self):
        """Remove the local archive and each dumper's temporary files."""
        self.log.info('cleaning up...')
        for db in self.dbs:
            db.cleanup()
        os.remove(self.filename)
        self.log.debug('tmp files removed')

    def send_notifications(self):
        """Email a success summary to every configured contact."""
        # TODO(niklas9):
        # * make all magic values here part of settings or smth
        timestamp = utils.get_timestamp(fmt=self.NOTI_TIMESTAMP_FMT)
        subject = 'Backup %s successful <%s>' % (settings.BACKUP_SERVER_NAME,
                                                 timestamp)
        # FIX: report the number of dumpers actually run; the old code used
        # len(settings.BACKUP_MYSQL_DBS), ignoring MongoDB/PostgreSQL.
        body = ('%s\n\n%s\nTotal filesize: %s\nDatabases: %d'
                % (subject, self.filename, self.filesize, len(self.dbs)))
        for email in settings.EMAIL_CONTACTS:
            self.log.debug('sending notification to <%s>..' % email)
            notification.Email.send(email, subject, body)
        self.log.debug('all notifications sent')
|
from .swag_tr import train_swag
from .sgld_tr import train_sgld
from .gp_tr import train_gp
from .bbp_tr import train_bbp
from .dun_tr import train_dun |
""" A collection of timeseries pipelines """
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class AlignPandas(BaseEstimator, TransformerMixin):
    """Trim a pandas object so lagged features and horizon targets align.

    Drops the first max(LAGS) rows and the last max(HORIZIONS) rows.
    """
    def __init__(self, LAGS, HORIZIONS):
        self.max_lag = max(LAGS)
        self.max_horizion = max(HORIZIONS)

    def fit(self, x, y=None):
        """No-op; present for sklearn pipeline compatibility."""
        return self

    def transform(self, x):
        """Return *x* with the unalignable head and tail rows removed.

        :raises ValueError: if *x* is not a pandas Series or DataFrame.
        """
        # A horizon of 0 would make the tail slice `[:-0]` empty, so clamp
        # to 1 locally. (The original mutated self.max_horizion here, which
        # silently changed the transformer's state on first use.)
        horizon = self.max_horizion if self.max_horizion != 0 else 1
        if isinstance(x, pd.Series):
            return x.iloc[self.max_lag:-horizon]
        elif isinstance(x, pd.DataFrame):
            return x.iloc[self.max_lag:-horizon, :]
        else:
            raise ValueError('The AlignPandas pipeline uses pandas objects')
class Align(BaseEstimator, TransformerMixin):
    """Trim any sliceable sequence so lags and horizons align."""
    def __init__(self, LAGS, HORIZIONS):
        self.max_lag = max(LAGS)
        self.max_horizion = max(HORIZIONS)

    def fit(self, x, y=None):
        """No-op; present for sklearn pipeline compatibility."""
        return self

    def transform(self, x):
        """Return x[max_lag:-max_horizion] (horizon clamped to at least 1)."""
        # Clamp a zero horizon locally so the `[:-0]` empty-slice edge case
        # is avoided without mutating the transformer's state.
        horizon = self.max_horizion if self.max_horizion != 0 else 1
        return x[self.max_lag:-horizon]
class HourlyCyclicalFeatures(BaseEstimator, TransformerMixin):
    """Encode the hour of day of a datetime index as a sine/cosine pair."""

    hours_in_day = 24

    def __init__(self):
        pass

    def fit(self, x, y=None):
        """No-op; present for sklearn pipeline compatibility."""
        return self

    def transform(self, x):
        """Return a DataFrame with sin_h/cos_h columns for x's hour index."""
        angle = 2 * np.pi * x.index.hour / self.hours_in_day
        out = pd.DataFrame(index=x.index)
        out.loc[:, 'sin_h'] = np.sin(angle)
        out.loc[:, 'cos_h'] = np.cos(angle)
        return out
class HalfHourlyCyclicalFeatures(BaseEstimator, TransformerMixin):
    """Encode the half-hour of day of a datetime index as a sine/cosine pair."""

    hours_in_day = 24

    def __init__(self):
        pass

    def fit(self, x, y=None):
        """No-op; present for sklearn pipeline compatibility."""
        return self

    def transform(self, x):
        """Return a DataFrame with sin_hh/cos_hh columns built from the
        fractional hour (hour + minute/60) of x's index."""
        fractional_hour = x.index.hour + (x.index.minute / 60)
        angle = 2 * np.pi * fractional_hour / self.hours_in_day
        out = pd.DataFrame(index=x.index)
        out.loc[:, 'sin_hh'] = np.sin(angle)
        out.loc[:, 'cos_hh'] = np.cos(angle)
        return out
class CyclicalFeatures(BaseEstimator, TransformerMixin):
    """Generic sine/cosine encoding of a cyclic numeric feature."""

    def __init__(self, max_value):
        # Period of the cycle (e.g. 24 for hours, 7 for weekdays)
        self.max_value = max_value

    def fit(self, x, y=None):
        """No-op; present for sklearn pipeline compatibility."""
        return self

    def transform(self, x):
        """Return a DataFrame with sin/cos columns for x over its period."""
        # NOTE(review): the column names sin_hh/cos_hh are shared with
        # HalfHourlyCyclicalFeatures — confirm downstream code depends on
        # them before renaming.
        angle = 2 * np.pi * x / self.max_value
        out = pd.DataFrame(index=x.index)
        out.loc[:, 'sin_hh'] = np.sin(angle)
        out.loc[:, 'cos_hh'] = np.cos(angle)
        return out
class RollingSum(BaseEstimator, TransformerMixin):
    """
    A rolling sum excluding the current observation.

    nan values that occur at the top of the transformation are
    filled with 0.
    """

    def __init__(self, window):
        self.window = window

    def fit(self, x, y=None):
        """No-op; present for sklearn pipeline compatibility."""
        return self

    def transform(self, x):
        """Rolling sum over *window* rows of the previous observations."""
        # shift(1) excludes the current row from its own window
        shifted = x.shift(1)
        summed = shifted.rolling(window=self.window, min_periods=1).sum()
        return summed.fillna(0)
class WeekendDummy(BaseEstimator, TransformerMixin):
    """
    Creates a dummy column indicating weekday (0) or weekend (1).
    """

    # pandas dt-index .weekday is 5 (Sat) or 6 (Sun) on weekends
    thresh = 5

    def __init__(self):
        pass

    def fit(self, x, y=None):
        """No-op; present for sklearn pipeline compatibility."""
        return self

    def transform(self, x):
        """
        Start from a column of zeros and set weekend rows to one via
        boolean indexing on the datetime index.
        """
        col = 'Weekday/Weekend Dummy'
        dummies = pd.DataFrame(np.zeros(x.shape[0]), index=x.index,
                               columns=[col])
        is_weekend = x.index.weekday >= self.thresh
        dummies.loc[is_weekend, col] = 1
        return dummies
|
import json
def create_user(uid, fname, lname, major, classes, redis_client, isTutor, schedule):
    """Insert a test user's profile, schedule bitmap and class list into redis.

    :param schedule: list of slot indices, expanded to a 42-slot 0/1 bitmap
        via create_schedule before being pushed.
    """
    user_key = "user{}".format(uid)
    redis_client.hset(user_key, key="fname", value=fname)
    redis_client.hset(user_key, key="lname", value=lname)
    redis_client.hset(user_key, key="major", value=major)
    redis_client.hset(user_key, key="isTutor", value=isTutor)
    # FIX: clear any previous schedule so repeated calls do not append
    # duplicate entries (the classes list below already did this, the
    # schedule list did not, making the helper non-idempotent).
    redis_client.delete("schedule{}".format(uid))
    for time in create_schedule(schedule):
        redis_client.rpush("schedule{}".format(uid), time)
    redis_client.delete("classes{}".format(uid))
    for c in classes:
        redis_client.rpush("classes{}".format(uid), c)
def test_search(client, app):
    """Exercise /api/search/get across name, class and schedule filters."""
    redis_client = app.config['RDSCXN']
    url = 'api/search/get'
    # Seed three users; NOTE(review): user 13 has isTutor '0' and is expected
    # to be excluded — presumably the endpoint only returns tutors; confirm.
    create_user(11, 'John', 'Doe', 'Computer Science', ['CS 180', 'CS 111'], redis_client, '1', [1, 15, 37])
    create_user(12, 'Jill', 'Doe', 'Biology', ['Bio 121', 'Bio 35', 'Bio 173'], redis_client, '1', [1, 22, 24])
    create_user(13, 'JDoNot', 'Show', 'Biology', ['Bio 35', 'CS 111'], redis_client, '0', [1, 22, 27])
    # Each case: request payload -> expected list of matching user ids
    search_result({'name': 'J', 'classes': [], 'bytes': create_schedule([])}, [11, 12], client, url)
    search_result({'name': 'Jill', 'classes': ['CS 180'], 'bytes': create_schedule([])}, [11, 12], client, url)
    search_result({'name': 'JD', 'classes': ['CS 111'], 'bytes': create_schedule([])}, [11], client, url)
    search_result({'name': 'JD', 'classes': ['CS 111'], 'bytes': create_schedule([1])}, [11, 12], client, url)
    search_result({'name': '', 'classes': ['CS 111'], 'bytes': create_schedule([1])}, [11, 12], client, url)
    search_result({'name': '', 'classes': [], 'bytes': create_schedule([15, 22])}, [11, 12], client, url)
    # An entirely empty query should return nothing
    search_result({'name': '', 'classes': [], 'bytes': create_schedule([])}, [], client, url)
    search_result({'name': 'Garbage Data', 'classes': ['Unknown classes'], 'bytes': create_schedule([13, 27])}, [], client, url)
    # Name matching is case-insensitive
    search_result({'name': 'jill', 'classes': [], 'bytes': create_schedule([])}, [12], client, url)
def search_result(values, expected, client, url):
    """POST a search payload to *url* and assert it returns *expected*.

    :param values: dict serialized as the JSON request body
    :param expected: list of user ids the search should return
    """
    payload = json.dumps(values)
    response = client.post(url, headers={'Content-Type': 'application/json'},
                           data=payload)
    body = response.json
    assert response.status_code == 200
    # FIX: pass the error message as the assertion message; the original
    # used `assert cond, print(msg)`, and print() returns None, so the
    # failure message was always lost.
    assert not body['error'], body['errMsg']
    assert body['payload'] == expected
def create_schedule(sched_list):
    """Expand a list of slot indices into a 42-slot 0/1 availability bitmap.

    Indices outside 0..41 are silently ignored, matching the original
    range-based implementation.
    """
    slots = set(sched_list)  # O(1) membership instead of O(n) per slot
    return [1 if i in slots else 0 for i in range(42)]
|
from django.db import transaction
from drf_spectacular.openapi import OpenApiParameter, OpenApiTypes
from drf_spectacular.utils import extend_schema
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from baserow.api.applications.errors import ERROR_APPLICATION_DOES_NOT_EXIST
from baserow.api.decorators import validate_body, map_exceptions
from baserow.api.errors import ERROR_USER_NOT_IN_GROUP
from baserow.api.schemas import get_error_schema
from baserow.contrib.database.api.fields.errors import (
ERROR_MAX_FIELD_COUNT_EXCEEDED,
ERROR_MAX_FIELD_NAME_LENGTH_EXCEEDED,
ERROR_RESERVED_BASEROW_FIELD_NAME,
ERROR_INVALID_BASEROW_FIELD_NAME,
)
from baserow.contrib.database.fields.exceptions import (
MaxFieldLimitExceeded,
MaxFieldNameLengthExceeded,
ReservedBaserowFieldNameException,
InvalidBaserowFieldName,
)
from baserow.contrib.database.models import Database
from baserow.contrib.database.table.exceptions import (
TableDoesNotExist,
TableNotInDatabase,
InvalidInitialTableData,
InitialTableDataLimitExceeded,
InitialTableDataDuplicateName,
)
from baserow.contrib.database.table.handler import TableHandler
from baserow.contrib.database.table.models import Table
from baserow.core.exceptions import UserNotInGroup, ApplicationDoesNotExist
from baserow.core.handler import CoreHandler
from .errors import (
ERROR_TABLE_DOES_NOT_EXIST,
ERROR_TABLE_NOT_IN_DATABASE,
ERROR_INVALID_INITIAL_TABLE_DATA,
ERROR_INITIAL_TABLE_DATA_LIMIT_EXCEEDED,
ERROR_INITIAL_TABLE_DATA_HAS_DUPLICATE_NAMES,
)
from .serializers import (
TableSerializer,
TableCreateSerializer,
TableUpdateSerializer,
OrderTablesSerializer,
)
class TablesView(APIView):
    """List (GET) and create (POST) the tables of a database application."""
    permission_classes = (IsAuthenticated,)
    @extend_schema(
        parameters=[
            OpenApiParameter(
                name="database_id",
                location=OpenApiParameter.PATH,
                type=OpenApiTypes.INT,
                description="Returns only tables that are related to the provided "
                "value.",
            )
        ],
        tags=["Database tables"],
        operation_id="list_database_tables",
        description=(
            "Lists all the tables that are in the database related to the "
            "`database_id` parameter if the user has access to the database's group. "
            "A table is exactly as the name suggests. It can hold multiple fields, "
            "each having their own type and multiple rows. They can be added via the "
            "**create_database_table_field** and **create_database_table_row** "
            "endpoints."
        ),
        responses={
            200: TableSerializer(many=True),
            400: get_error_schema(["ERROR_USER_NOT_IN_GROUP"]),
            404: get_error_schema(["ERROR_APPLICATION_DOES_NOT_EXIST"]),
        },
    )
    @map_exceptions(
        {
            ApplicationDoesNotExist: ERROR_APPLICATION_DOES_NOT_EXIST,
            UserNotInGroup: ERROR_USER_NOT_IN_GROUP,
        }
    )
    def get(self, request, database_id):
        """Lists all the tables of a database."""
        database = CoreHandler().get_application(
            database_id, base_queryset=Database.objects
        )
        # Raises UserNotInGroup for outsiders (mapped to a 400 above)
        database.group.has_user(request.user, raise_error=True)
        tables = Table.objects.filter(database=database)
        serializer = TableSerializer(tables, many=True)
        return Response(serializer.data)
    @extend_schema(
        parameters=[
            OpenApiParameter(
                name="database_id",
                location=OpenApiParameter.PATH,
                type=OpenApiTypes.INT,
                description="Creates a table for the database related to the provided "
                "value.",
            )
        ],
        tags=["Database tables"],
        operation_id="create_database_table",
        description=(
            "Creates a new table for the database related to the provided "
            "`database_id` parameter if the authorized user has access to the "
            "database's group."
        ),
        request=TableCreateSerializer,
        responses={
            200: TableSerializer,
            400: get_error_schema(
                [
                    "ERROR_USER_NOT_IN_GROUP",
                    "ERROR_REQUEST_BODY_VALIDATION",
                    "ERROR_INVALID_INITIAL_TABLE_DATA",
                    "ERROR_INITIAL_TABLE_DATA_LIMIT_EXCEEDED",
                    "ERROR_RESERVED_BASEROW_FIELD_NAME",
                    "ERROR_INITIAL_TABLE_DATA_HAS_DUPLICATE_NAMES",
                    "ERROR_INVALID_BASEROW_FIELD_NAME",
                ]
            ),
            404: get_error_schema(["ERROR_APPLICATION_DOES_NOT_EXIST"]),
        },
    )
    @transaction.atomic
    @map_exceptions(
        {
            ApplicationDoesNotExist: ERROR_APPLICATION_DOES_NOT_EXIST,
            UserNotInGroup: ERROR_USER_NOT_IN_GROUP,
            InvalidInitialTableData: ERROR_INVALID_INITIAL_TABLE_DATA,
            InitialTableDataLimitExceeded: ERROR_INITIAL_TABLE_DATA_LIMIT_EXCEEDED,
            MaxFieldLimitExceeded: ERROR_MAX_FIELD_COUNT_EXCEEDED,
            MaxFieldNameLengthExceeded: ERROR_MAX_FIELD_NAME_LENGTH_EXCEEDED,
            InitialTableDataDuplicateName: ERROR_INITIAL_TABLE_DATA_HAS_DUPLICATE_NAMES,
            ReservedBaserowFieldNameException: ERROR_RESERVED_BASEROW_FIELD_NAME,
            InvalidBaserowFieldName: ERROR_INVALID_BASEROW_FIELD_NAME,
        }
    )
    @validate_body(TableCreateSerializer)
    def post(self, request, data, database_id):
        """Creates a new table in a database."""
        database = CoreHandler().get_application(
            database_id, base_queryset=Database.objects
        )
        # NOTE(review): no explicit group check here, unlike get() —
        # presumably create_table performs it; confirm.
        table = TableHandler().create_table(
            request.user, database, fill_example=True, **data
        )
        serializer = TableSerializer(table)
        return Response(serializer.data)
class TableView(APIView):
    """Retrieve (GET), update (PATCH) and delete (DELETE) a single table."""
    permission_classes = (IsAuthenticated,)
    @extend_schema(
        parameters=[
            OpenApiParameter(
                name="table_id",
                location=OpenApiParameter.PATH,
                type=OpenApiTypes.INT,
                description="Returns the table related to the provided value.",
            )
        ],
        tags=["Database tables"],
        operation_id="get_database_table",
        description=(
            "Returns the requested table if the authorized user has access to the "
            "related database's group."
        ),
        responses={
            200: TableSerializer,
            400: get_error_schema(["ERROR_USER_NOT_IN_GROUP"]),
            404: get_error_schema(["ERROR_TABLE_DOES_NOT_EXIST"]),
        },
    )
    @map_exceptions(
        {
            TableDoesNotExist: ERROR_TABLE_DOES_NOT_EXIST,
            UserNotInGroup: ERROR_USER_NOT_IN_GROUP,
        }
    )
    def get(self, request, table_id):
        """Responds with a serialized table instance."""
        table = TableHandler().get_table(table_id)
        # Raises UserNotInGroup for outsiders (mapped to a 400 above)
        table.database.group.has_user(request.user, raise_error=True)
        serializer = TableSerializer(table)
        return Response(serializer.data)
    @extend_schema(
        parameters=[
            OpenApiParameter(
                name="table_id",
                location=OpenApiParameter.PATH,
                type=OpenApiTypes.INT,
                description="Updates the table related to the provided value.",
            )
        ],
        tags=["Database tables"],
        operation_id="update_database_table",
        description=(
            "Updates the existing table if the authorized user has access to the "
            "related database's group."
        ),
        request=TableUpdateSerializer,
        responses={
            200: TableSerializer,
            400: get_error_schema(
                ["ERROR_USER_NOT_IN_GROUP", "ERROR_REQUEST_BODY_VALIDATION"]
            ),
            404: get_error_schema(["ERROR_TABLE_DOES_NOT_EXIST"]),
        },
    )
    @transaction.atomic
    @map_exceptions(
        {
            TableDoesNotExist: ERROR_TABLE_DOES_NOT_EXIST,
            UserNotInGroup: ERROR_USER_NOT_IN_GROUP,
        }
    )
    @validate_body(TableUpdateSerializer)
    def patch(self, request, data, table_id):
        """Updates the values a table instance."""
        # select_for_update row-locks the record for this transaction
        table = TableHandler().update_table(
            request.user,
            TableHandler().get_table(
                table_id,
                base_queryset=Table.objects.select_for_update(),
            ),
            name=data["name"],
        )
        serializer = TableSerializer(table)
        return Response(serializer.data)
    @extend_schema(
        parameters=[
            OpenApiParameter(
                name="table_id",
                location=OpenApiParameter.PATH,
                type=OpenApiTypes.INT,
                description="Deletes the table related to the provided value.",
            )
        ],
        tags=["Database tables"],
        operation_id="delete_database_table",
        description=(
            "Deletes the existing table if the authorized user has access to the "
            "related database's group."
        ),
        responses={
            204: None,
            400: get_error_schema(["ERROR_USER_NOT_IN_GROUP"]),
            404: get_error_schema(["ERROR_TABLE_DOES_NOT_EXIST"]),
        },
    )
    @transaction.atomic
    @map_exceptions(
        {
            TableDoesNotExist: ERROR_TABLE_DOES_NOT_EXIST,
            UserNotInGroup: ERROR_USER_NOT_IN_GROUP,
        }
    )
    def delete(self, request, table_id):
        """Deletes an existing table."""
        TableHandler().delete_table(request.user, TableHandler().get_table(table_id))
        return Response(status=204)
class OrderTablesView(APIView):
    """Reorder the tables of a database (POST)."""
    permission_classes = (IsAuthenticated,)
    @extend_schema(
        parameters=[
            OpenApiParameter(
                name="database_id",
                location=OpenApiParameter.PATH,
                type=OpenApiTypes.INT,
                description="Updates the order of the tables in the database related "
                "to the provided value.",
            ),
        ],
        tags=["Database tables"],
        operation_id="order_database_tables",
        description=(
            "Changes the order of the provided table ids to the matching position that "
            "the id has in the list. If the authorized user does not belong to the "
            "group it will be ignored. The order of the not provided tables will be "
            "set to `0`."
        ),
        request=OrderTablesSerializer,
        responses={
            204: None,
            400: get_error_schema(
                ["ERROR_USER_NOT_IN_GROUP", "ERROR_TABLE_NOT_IN_DATABASE"]
            ),
            404: get_error_schema(["ERROR_APPLICATION_DOES_NOT_EXIST"]),
        },
    )
    # NOTE(review): validate_body is applied outermost here, while
    # TablesView.post/TableView.patch apply it innermost — confirm the
    # decorator ordering is intentional before normalizing it.
    @validate_body(OrderTablesSerializer)
    @transaction.atomic
    @map_exceptions(
        {
            ApplicationDoesNotExist: ERROR_APPLICATION_DOES_NOT_EXIST,
            UserNotInGroup: ERROR_USER_NOT_IN_GROUP,
            TableNotInDatabase: ERROR_TABLE_NOT_IN_DATABASE,
        }
    )
    def post(self, request, data, database_id):
        """Updates to order of the tables in a table."""
        database = CoreHandler().get_application(
            database_id, base_queryset=Database.objects
        )
        TableHandler().order_tables(request.user, database, data["table_ids"])
        return Response(status=204)
|
from estado import *
'''
Created on 12/09/2013
@author: Ronaldo
'''
class turing():
    """Deterministic Turing machine simulator.

    States are estado objects; the tape is a plain string delimited by a
    start marker (simb_inicio) and a blank symbol (simb_branco).
    """
    def __init__(self, alfabeto, estados, est_inicial, alfabeto_aux, simb_inicio, simb_branco):
        """
        :param alfabeto: input alphabet
        :param estados: list of estado objects (a '?' dead state is appended)
        :param est_inicial: name of the initial state
        :param alfabeto_aux: auxiliary (tape) alphabet
        :param simb_inicio: symbol marking the start of the tape
        :param simb_branco: blank symbol appended after the input
        """
        self.alfabeto = alfabeto
        self.estados = estados
        # Sentinel state reached whenever no transition is defined
        self.estados.append(estado('?'))
        self.estado_inicial = est_inicial
        self.alfabeto_aux = alfabeto_aux
        self.simb_inicio = simb_inicio
        self.simb_branco = simb_branco
        self.fita = []
        self.cabeca = 0
        self.estado_atual = self.estado_inicial

    def aceita(self, cadeia, passos=True):
        """Run the machine on *cadeia* and return the tape without delimiters.

        When *passos* is true, print each step and whether the string was
        accepted (user-facing messages are kept in Portuguese as emitted
        originally).
        """
        self.cabeca = 0
        self.fita = self.simb_inicio + cadeia + self.simb_branco
        self.estado_atual = self.estado_inicial
        if passos:
            print("---- Cadeia "+cadeia+" ----")
        # Step until the head falls off either end of the tape
        while 0 <= self.cabeca < len(self.fita):
            saida = self.programa()
            if passos:
                print(saida)
        if passos:
            if self.get_estado(self.estado_atual).is_final:
                print("Cadeia '"+cadeia+"' aceita!")
            else:
                print("Cadeia '"+cadeia+"' rejeitada!")
            print("-----------------"+("-"*len(cadeia)))
        return self.fita[1:-1]

    def programa(self):
        """Execute one transition step and return a trace string."""
        saida = "programa("+self.estado_atual+","+self.fita[self.cabeca]+") = "
        transicao = self.get_estado(self.estado_atual).get_transicao(self.fita[self.cabeca])
        # Check the current symbol belongs to the input alphabet
        if not self.checar_simbolo(self.alfabeto, self.fita[self.cabeca]):
            print(self.fita[self.cabeca]+" nao pertence ao alfabeto!")
            self.cabeca = len(self.fita)  # force the run to halt
        if transicao is not None:
            # If there is something to write, write it
            if transicao[1] is not None:
                if not self.checar_simbolo(self.alfabeto_aux, transicao[1]):
                    print(self.fita[self.cabeca]+" nao pertence ao alfabeto!")
                    self.cabeca = len(self.fita)
                aux = list(self.fita)
                aux.pop(self.cabeca)
                aux.insert(self.cabeca, transicao[1])
                self.fita = ''.join(aux)
            # Move the head: True means right, False means left
            if transicao[2]:
                self.cabeca += 1
            else:
                self.cabeca -= 1
            # Perform the state transition ('?' when no target is given)
            if transicao[3] is not None:
                self.estado_atual = transicao[3]
            else:
                self.estado_atual = '?'
        else:
            # Undefined transition: halt in the dead state
            self.cabeca = len(self.fita)
            self.estado_atual = '?'
        saida += self.estado_atual
        return saida

    def get_estado(self, nome):
        """Return the estado object named *nome*, or None if unknown."""
        for st in self.estados:
            # FIX: the original compared names with 'is' (identity), which
            # only works for interned strings; use equality. Also removed an
            # unreachable `break` after the return.
            if st.get_nome() == nome:
                return st
        return None

    def checar_simbolo(self, alfabeto, simbolo):
        """Return True if *simbolo* is a tape delimiter or is in *alfabeto*."""
        # FIX: the original condition `simbolo is self.simb_inicio or
        # self.simb_branco` was always true whenever simb_branco is truthy
        # (the `or` applied to simb_branco itself, not to a comparison);
        # it also relied on `is` for string comparisons inside the loop.
        if simbolo == self.simb_inicio or simbolo == self.simb_branco:
            return True
        for i in alfabeto:
            if i == simbolo:
                return True
        return False
import numpy as np
from deformations.utility.mesh3d import mesh3d
from deformations.utility.bernstein import get_bernstein_polynomial
def get_min_max(x, *args, **kwargs):
    """Return (min, max) of *x*, forwarding extra arguments to numpy."""
    lo = np.min(x, *args, **kwargs)
    hi = np.max(x, *args, **kwargs)
    return lo, hi
def stu_to_xyz(stu_points, stu_origin, stu_axes):
    """Map points from normalized STU lattice space back into XYZ space."""
    return stu_points * stu_axes + stu_origin
def get_stu_control_points(dims):
    """Control points of a regular (l+1)x(m+1)x(n+1) lattice in STU space,
    flattened to an (N, 3) float32 array."""
    axes = tuple(np.linspace(0, 1, d+1) for d in dims)
    stu_lattice = mesh3d(*axes, dtype=np.float32)
    return np.reshape(stu_lattice, (-1, 3))
def get_control_points(dims, stu_origin, stu_axes):
    """Control point lattice expressed in world (XYZ) coordinates."""
    return stu_to_xyz(get_stu_control_points(dims), stu_origin, stu_axes)
def xyz_to_stu(xyz, origin, stu_axes):
    """Express *xyz* points in the (possibly non-orthogonal) STU frame.

    Uses the classic FFD projection: each coordinate is the component of
    (xyz - origin) along one axis, normalized via the cross product of the
    other two axes. stu_axes may be a (3,) vector of extents (treated as a
    diagonal frame) or a full (3, 3) matrix of axis vectors.
    """
    if stu_axes.shape == (3,):
        stu_axes = np.diag(stu_axes)
    assert(stu_axes.shape == (3, 3))
    s, t, u = stu_axes
    cross_tu = np.cross(t, u)
    cross_su = np.cross(s, u)
    cross_st = np.cross(s, t)
    diff = xyz - origin
    components = [
        np.dot(diff, cross_tu) / np.dot(s, cross_tu),
        np.dot(diff, cross_su) / np.dot(t, cross_su),
        np.dot(diff, cross_st) / np.dot(u, cross_st),
    ]
    return np.stack(components, axis=-1)
def get_stu_params(xyz):
    """Axis-aligned bounding box of *xyz* as (origin, extents).

    Inlines the min/max computation so the function is self-contained.
    """
    minimum = np.min(xyz, axis=0)
    maximum = np.max(xyz, axis=0)
    return minimum, maximum - minimum
def get_stu_deformation_matrix(stu, dims):
    """Bernstein deformation matrix for points *stu* on a lattice of *dims*.

    v is a matrix of shape (l+1, m+1, n+1, 3) with all possible i, j and k
    combinations and 3 as 3 dimensions wrt to stu.
    """
    index_lattice = mesh3d(*(np.arange(0, d+1, dtype=np.int32) for d in dims),
                           dtype=np.int32)
    v = np.reshape(index_lattice, (-1, 3))
    weights = get_bernstein_polynomial(n=np.array(dims, dtype=np.int32),
                                       v=v,
                                       x=np.expand_dims(stu, axis=-2))
    # Product over the three parametric directions gives the trivariate weight
    return np.prod(weights, axis=-1)
def get_deformation_matrix(xyz, dims, stu_origin, stu_axis):
    """Deformation matrix for world-space points: convert to STU then weight."""
    return get_stu_deformation_matrix(xyz_to_stu(xyz, stu_origin, stu_axis),
                                      dims)
def get_ffd(xyz, dims, stu_origin=None, stu_axis=None):
    """Return (deformation matrix, control points) for free-form deformation.

    When no STU frame is supplied, one is derived from the bounding box
    of *xyz*.
    """
    if stu_origin is None or stu_axis is None:
        print("Generating origin and axis")
        stu_origin, stu_axis = get_stu_params(xyz)
    deform = get_deformation_matrix(xyz, dims, stu_origin, stu_axis)
    ctrl_points = get_control_points(dims, stu_origin, stu_axis)
    return deform, ctrl_points
|
import os
import flask
from pyravin.google.oauth import OAuth
from pyravin.google.calendar import Calendar
from pyravin.google.constant import Scope
app = flask.Flask(__name__)
# Note: A secret key is included in the sample so that it works.
# If you use this code in your application, replace this with a truly secret
# key. See https://flask.palletsprojects.com/quickstart/#sessions.
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
# OAuth client configuration; these values are placeholders standing in for
# a real client id/secret and the ngrok redirect URIs of the demo project.
configs = {
    "web": {
        "client_id":"22222222222.apps.googleusercontent.com",
        "project_id":"pyraven",
        "auth_uri":"https://accounts.google.com/o/oauth2/auth",
        "token_uri":"https://oauth2.googleapis.com/token",
        "auth_provider_x509_cert_url":"https://www.googleapis.com/oauth2/v1/certs",
        "client_secret":"4444444444444",
        "redirect_uris":[
            "https://33333.ngrok.io",
            "http://4444.ngrok.io",
            "https://3334433.ngrok.io/oauth2callback",
            "http://444444.ngrok.io/oauth2callback"
        ]
    }
}
# Request identity (openid/email/profile) plus calendar access
scopes = [Scope.OPENID, Scope.USERINFO_EMAIL, Scope.USERINFO_PROFILE, Scope.CALENDAR]
oauth = OAuth(configs, scopes)
def print_index_table():
    """Render the demo landing page as a static HTML table of links."""
    pieces = [
        '<table>',
        '<tr><td><a href="/test">Test an API request</a></td>',
        '<tr><td><a href="/get-calendar-events">Get Calendar Events</a></td>',
        '<tr><td><a href="/create-calendar-event">Create Calendar Event</a></td>',
        '<td>Submit an API request and see a formatted JSON response. ',
        ' Go through the authorization flow if there are no stored ',
        ' credentials for the user.</td></tr>',
        '<tr><td><a href="/authorize">Test the auth flow directly</a></td>',
        '<td>Go directly to the authorization flow. If there are stored ',
        ' credentials, you still might not be prompted to reauthorize ',
        ' the application.</td></tr>',
        '<tr><td><a href="/revoke">Revoke current credentials</a></td>',
        '<td>Revoke the access token associated with the current user ',
        ' session. After revoking credentials, if you go to the test ',
        ' page, you should see an <code>invalid_grant</code> error.',
        '</td></tr>',
        '<tr><td><a href="/clear">Clear Flask session credentials</a></td>',
        '<td>Clear the access token currently stored in the user session. ',
        ' After clearing the token, if you <a href="/test">test the ',
        ' API request</a> again, you should go back to the auth flow.',
        '</td></tr></table>',
    ]
    return ''.join(pieces)
@app.route('/')
def index():
    """Landing page: static table of demo links."""
    return print_index_table()
@app.route('/test')
def test_api_request():
    """Fetch the user's profile, persisting any refreshed credentials."""
    session = flask.session
    if 'credentials' not in session:
        return flask.redirect('authorize')
    oauth.set_credentials(session['credentials'])
    profile = oauth.get_user_info()
    # Store possibly-refreshed tokens back into the session
    session['credentials'] = oauth.get_credentials()
    return flask.jsonify(**profile)
@app.route('/get-calendar-events')
def test_get_calendar_events():
    """List the user's calendar events, persisting refreshed credentials."""
    if 'credentials' not in flask.session:
        return flask.redirect('authorize')
    cal = Calendar()
    cal.set_credentials(flask.session['credentials'])
    upcoming = cal.get_events()
    # Store possibly-refreshed tokens back into the session
    flask.session['credentials'] = cal.get_credentials()
    return flask.jsonify(**{"events": upcoming})
@app.route('/create-calendar-event')
def test_create_calendar_event():
    """Create a demo event (with a Google Meet link) on the primary calendar."""
    if 'credentials' not in flask.session:
        return flask.redirect('authorize')
    cal = Calendar()
    cal.set_credentials(flask.session['credentials'])
    # Static demo payload; conferenceData/conferenceDataVersion together
    # request a Google Meet link for the event.
    event_request = {
        "sendUpdates": "all",
        "conferenceDataVersion": 1,
        "body": {
            "end": {
                "dateTime": "2020-12-05T20:00:00-07:00",
                "timeZone": "America/Los_Angeles"
            },
            "start": {
                "dateTime": "2020-12-05T18:00:00-07:00",
                "timeZone": "America/Los_Angeles"
            },
            "attendees": [
                {"email": "test@clivern.com"},
                {"email": "test@gmail.com"},
                {"email": "test@outlook.com"}
            ],
            "description": "A chance to hear more about Google's developer products.",
            "summary": "Google I/O 2020",
            "location": "800 Howard St., San Francisco, CA 94103",
            "conferenceData": {
                "createRequest": {
                    "conferenceSolutionKey": {"type": "hangoutsMeet"},
                    "requestId": "dghwtet344"
                }
            }
        }
    }
    event = cal.create_event("primary", event_request)
    # Store possibly-refreshed tokens back into the session
    flask.session['credentials'] = cal.get_credentials()
    return flask.jsonify(**{"event": event})
@app.route('/authorize')
def authorize():
    """Kick off the OAuth flow and remember the CSRF state token."""
    redirect_uri = flask.url_for('oauth2callback', _external=True)
    auth_url, state = oauth.get_authorization_url(redirect_uri)
    # Store the state so the callback can verify the auth server response.
    flask.session['state'] = state
    return flask.redirect(auth_url)
@app.route('/oauth2callback')
def oauth2callback():
    """Exchange the auth server response for credentials, then show them."""
    redirect_uri = flask.url_for('oauth2callback', _external=True)
    # The stored state is used to verify the auth server response
    credentials = oauth.fetch_credentials(flask.session['state'],
                                          redirect_uri,
                                          flask.request.url)
    flask.session['credentials'] = credentials
    return flask.redirect(flask.url_for('test_api_request'))
@app.route('/revoke')
def revoke():
    """Revoke the access token stored in the session, if any."""
    if 'credentials' not in flask.session:
        return ('You need to <a href="/authorize">authorize</a> before ' +
                'testing the code to revoke credentials.')
    oauth.set_credentials(flask.session['credentials'])
    revoked = oauth.revoke_credentials()
    if revoked:
        return('Credentials successfully revoked.' + print_index_table())
    return('An error occurred.' + print_index_table())
@app.route('/clear')
def clear_credentials():
    """Drop the credentials from the Flask session, if present."""
    flask.session.pop('credentials', None)
    return ('Credentials have been cleared.<br><br>' +
            print_index_table())
if __name__ == '__main__':
    # When running locally, disable OAuthlib's HTTPs verification.
    # WARNING: development only — never set this in production.
    os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
    # Specify a hostname and port that are set as a valid redirect URI
    # for your API project in the Google API Console.
    app.run('localhost', 8000, debug=True)
|
from examples.exampleRunner import runExample
# Run the realistic supply chain mitigation scenario end to end.
runExample("mitigation_examples/realisticSupplyChain.json")
import json
from django.conf import settings
from django.core.cache import cache
from django.http import JsonResponse
from django.core import serializers
from rest_framework.views import APIView
from Apps.Api.utils import Authtication, VisitThrottle, AuthticationWithoutPackageId
from Apps.ARExperiences.models import ARExperienceModel, ARExperienceAsset
from Apps.ARShowcasesCenter.models import ARShowcasesCenterModel, ARShowcasesAndTagsLinkModel, ARShowcasesTagsModel
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.forms.models import model_to_dict
import logging
# Field whitelists passed to model_to_dict when serializing API responses.
ar_showcase_detail_fields = ['app_uid', 'user_uid', 'arexperience_uid', 'showcase_uid',
                             'showcase_name', 'showcase_brief', 'showcase_icon', 'showcase_header', 'showcase_description']
ar_experience_detail_fields = ['name', 'arexperience_uid']
# Per-platform asset bundle fields attached to an experience payload.
ar_experience_asset_detail_fields = ['android_json', 'android_bundle',
                                     'android_bundle_size', 'ios_json', 'ios_bundle', 'ios_bundle_size']
class ARExperienceView(APIView):
    """Return a single AR experience plus its asset bundle, with caching."""
    authentication_classes = [AuthticationWithoutPackageId, ]
    throttle_classes = [VisitThrottle, ]

    def post(self, request, *args, **kwargs):
        """Look up an experience by uid; results (including misses) are cached."""
        ret = {'code': 200, 'msg': '', 'data': None}
        try:
            arexperience_uid = request._request.POST.get('arexperience_uid')
            cache_key = f"api_{arexperience_uid}_get_arexperience"
            data = cache.get(cache_key)
            if data is None:
                try:
                    arexperience_queryset = ARExperienceModel.objects.get(
                        arexperience_uid=arexperience_uid)
                    arexperience = model_to_dict(
                        arexperience_queryset, ar_experience_detail_fields)
                    arexperience_asset = ARExperienceAsset.objects.get(
                        arexperience_uid=arexperience_uid)
                    arexperience['arexperience_asset'] = model_to_dict(
                        arexperience_asset, ar_experience_asset_detail_fields)
                    data = arexperience
                # FIX: the original wrote `except A or B`, which Python
                # evaluates as `except A` — a missing asset raised
                # ARExperienceAsset.DoesNotExist straight out of this block.
                # Multiple exceptions must be given as a tuple.
                except (ARExperienceModel.DoesNotExist,
                        ARExperienceAsset.DoesNotExist):
                    data = None
                # Negative results are cached too, avoiding repeated lookups
                cache.set(cache_key, data, settings.API_CACHE_EXPIRED)
            ret['data'] = data
            return JsonResponse(ret, status=200)
        except Exception as e:
            logging.exception(e)
            return JsonResponse(ret)
class ARShowcaseView(APIView):
    """List AR showcases for the authenticated user/app, optionally filtered
    by tag, with per-query caching."""
    authentication_classes = [Authtication, ]
    throttle_classes = [VisitThrottle, ]
    def post(self, request, *args, **kwargs):
        # Response envelope returned in all cases
        ret = {'code': 200, 'msg': '', 'data': None}
        try:
            showcase_tag = request._request.POST.get('showcase_tag')
            # Defaults to 1 — presumably the "visible/published" status; confirm
            showcase_status = request._request.POST.get('showcase_status', 1)
            showcase_uids = []
            if showcase_tag is not None:
                # Serve from cache when present; key: api_<user>_<app>_<tag>
                query_by_tag_cache_key = f"api_{request.user.user_uid}_{request.auth[0]}_{showcase_tag}_get_showcaseList"
                showcases = cache.get(query_by_tag_cache_key)
                if showcases is None:
                    # Resolve the showcase uids linked to the requested tag
                    all_tags_queryset = ARShowcasesAndTagsLinkModel.objects.filter(
                        app_uid=request.auth[0], user_uid=request.user.user_uid, tag_name=showcase_tag)
                    for showcase_uid in all_tags_queryset:
                        showcase_uids.append(showcase_uid.showcase_uid)
                    showcases = list(ARShowcasesCenterModel.objects.filter(app_uid=request.auth[0], user_uid=request.user.user_uid, showcase_uid__in=showcase_uids, showcase_status=showcase_status)
                                     .values('app_uid', 'user_uid', 'arexperience_uid',
                                             'showcase_uid', 'showcase_name', 'showcase_brief',
                                             'showcase_icon', 'showcase_not_index_tags'))
                    cache.set(query_by_tag_cache_key, showcases,
                              settings.API_CACHE_EXPIRED)
            else:
                # No tag filter; serve from cache when present; key: api_<user>_<app>
                query_by_app_uid_cache_key = f"api_{request.user.user_uid}_{request.auth[0]}_get_showcaseList"
                showcases = cache.get(query_by_app_uid_cache_key)
                if showcases is None:
                    showcaseQuerysetList = ARShowcasesCenterModel.objects.filter(
                        app_uid=request.auth[0], user_uid=request.user.user_uid, showcase_status=showcase_status).values('app_uid', 'user_uid', 'arexperience_uid',
                                                                                                                        'showcase_uid', 'showcase_name', 'showcase_brief',
                                                                                                                        'showcase_icon', 'showcase_not_index_tags')
                    showcases = list(showcaseQuerysetList)
                    cache.set(query_by_app_uid_cache_key,
                              showcases, settings.API_CACHE_EXPIRED)
            ret['data'] = showcases
            return JsonResponse(ret, safe=False)
        except Exception as e:
            logging.exception(e)
            return JsonResponse(ret)
class ARShowcaseDetailView(APIView):
    """Return full details for one showcase, including its asset bundle sizes."""
    authentication_classes = [Authtication, ]
    throttle_classes = [VisitThrottle, ]

    def post(self, request, *args, **kwargs):
        # response dict
        ret = {'code': 200, 'msg': '', 'data': None}
        if 'showcase_uid' not in request._request.POST:
            ret['msg'] = 'Invalid showcase_uid'
            return JsonResponse(ret)
        showcase_uid = request._request.POST.get('showcase_uid', None)
        cache_key = f"api_{request.user.user_uid}_{request.auth[0]}_{showcase_uid}_get_detail"
        showcase = cache.get(cache_key)
        if showcase is None:
            try:
                record = ARShowcasesCenterModel.objects.get(
                    user_uid=request.user.user_uid, app_uid=request.auth[0],
                    showcase_uid=showcase_uid, showcase_status=1)
                showcase = model_to_dict(record, ar_showcase_detail_fields)
                asset = ARExperienceAsset.objects.get(
                    arexperience_uid=record.arexperience_uid)
                showcase['android_size'] = asset.android_bundle_size
                showcase['ios_size'] = asset.ios_bundle_size
                cache.set(cache_key, showcase, settings.API_CACHE_EXPIRED)
            # BUG FIX: a showcase without an asset row previously raised an
            # uncaught ARExperienceAsset.DoesNotExist (server error); treat it
            # like a missing showcase, mirroring ARExperienceView.
            except (ARShowcasesCenterModel.DoesNotExist, ARExperienceAsset.DoesNotExist):
                showcase = None
        ret['data'] = showcase
        return JsonResponse(ret)
class ARShowcaseTagsView(APIView):
    """List the authenticated app/user's showcase tags ordered by sort weight."""
    authentication_classes = [Authtication, ]
    throttle_classes = [VisitThrottle, ]

    def post(self, request, *args, **kwargs):
        ret = {'code': 200, 'msg': '', 'data': None}
        cache_key = f"api_{request.user.user_uid}_{request.auth[0]}_get_tags"
        tags = cache.get(cache_key)
        if tags is None:
            queryset = ARShowcasesTagsModel.objects.filter(
                app_uid=request.auth[0],
                user_uid=request.user.user_uid,
            ).order_by('tag_sort_weight').values('tag_sort_weight', 'tag_name')
            tags = list(queryset)
            cache.set(cache_key, tags, settings.API_CACHE_EXPIRED)
        ret['data'] = tags
        return JsonResponse(ret)
class ARShowcaseRecommends(APIView):
    """List the recommended (showcase_recommend=True) active showcases."""
    authentication_classes = [Authtication, ]
    throttle_classes = [VisitThrottle, ]

    def post(self, request, *args, **kwargs):
        ret = {'code': 200, 'msg': '', 'data': None}
        try:
            cache_key = f"api_{request.user.user_uid}_{request.auth[0]}_get_recommendList"
            recommends = cache.get(cache_key)
            if recommends is None:
                queryset = ARShowcasesCenterModel.objects.filter(
                    app_uid=request.auth[0],
                    user_uid=request.user.user_uid,
                    showcase_recommend=True,
                    showcase_status=1,
                ).values('app_uid', 'user_uid', 'arexperience_uid',
                         'showcase_uid', 'showcase_name', 'showcase_brief',
                         'showcase_icon', 'showcase_header', 'showcase_not_index_tags')
                recommends = list(queryset)
                cache.set(cache_key, recommends, settings.API_CACHE_EXPIRED)
            ret['data'] = recommends
            return JsonResponse(ret, safe=False)
        except Exception as e:
            logging.exception(e)
            return JsonResponse(ret)
class ARShowcasePublicView(APIView):
    """List all publicly visible (showcase_permission=1) active showcases."""
    authentication_classes = [AuthticationWithoutPackageId, ]
    throttle_classes = [VisitThrottle, ]

    def post(self, request, *args, **kwargs):
        ret = {'code': 200, 'msg': '', 'data': None}
        # NOTE(review): the cache key varies per user/app but the query
        # filters on neither -- confirm the per-caller key is intentional.
        cache_key = f"api_{request.user.user_uid}_{request.auth[0]}_1_1_get_showcaseList"
        showcases = cache.get(cache_key)
        if showcases is None:
            queryset = ARShowcasesCenterModel.objects.filter(
                showcase_permission=1, showcase_status=1,
            ).values('app_uid', 'user_uid', 'arexperience_uid',
                     'showcase_uid', 'showcase_name', 'showcase_brief',
                     'showcase_icon', 'showcase_not_index_tags')
            showcases = list(queryset)
            cache.set(cache_key, showcases, settings.API_CACHE_EXPIRED)
        ret['data'] = showcases
        return JsonResponse(ret)
|
import copy
import csv
import os
import json
from functools import reduce
import collections
from lxml import etree
import nltk
import numpy
import networkx
nltk.download('punkt')
"""
## examples
# parse OAB exam, return generator of OABQuestion instances
oab = parse_xml('/home/bruno/git/oab-exams/OAB/raw/2010-01.xml')
questions = questions_in_tree(oab)
first_q = next(questions)
# parse law XML, return tuple (article-ID, list-of-raw-article-text)
lei = law_articles_in_file('/home/bruno/git/oab-exams/lexml/lei-8906.xml')
leis = all_law_articles_in_path('/home/bruno/git/oab-exams/lexml/')
# create an instance of collection of articles, which processes the
# text in each article, creates a node for each, creates a graph of
# them, and caches their TF-IDF vectors
artcol = ArticleCollection(leis, rm_stopwords=True)
laws = read_laws_into_artcollection('/home/bruno/git/oab-exams/lexml/', False, True) # see code for arguments
# add first question to graph constructed from the articles in artcol
# return the shortest path and distance from the question statement
# to each item
paths_dict = question_paths_in_graph(artcol, first_q)
# shallow question answering justified questions in justify.txt, using
# laws in lexml/ and getting the questions at OAB/raw/
result = sqa_justified_questions('doc/justify.txt', 'lexml/', 'OAB/raw/', rm_stopwords=True, separate=False)
# shallow question answering non-justified questions in an exam
paths = sqa_questions_in_exam('/home/bruno/git/oab-exams/OAB/raw/2016-20a.xml', artcol, max_questions=10)
# calculate paths and write them to json
questions_in_exams_to_json('exams_path', artcol, max_questions=10)
"""
#
## reading XML
def parse_xml(path, parser=etree.XMLParser(remove_blank_text=True)):
    """Parse the XML file at *path* into an lxml ElementTree.

    BUG FIX: *parser* was accepted but never forwarded to ``etree.parse``,
    so ``remove_blank_text`` silently had no effect.
    """
    return etree.parse(path, parser)
def elements_in_tree(tree, element_tag):
    """Yield every element in *tree* whose tag equals *element_tag*."""
    assert isinstance(tree, etree._ElementTree)
    # getiterator() is deprecated in lxml; iter() is the supported equivalent.
    for element in tree.iter(tag=element_tag):
        yield element
#
## reading OAB exams
def get_exam_id(tree):
    """Build an exam identifier like ``"2010-01"`` from the root's attributes."""
    root = tree.getroot()
    return "{}-{}".format(root.get('year'), root.get('edition'))
def get_statement_text(question):
    """Return the raw text of the question's <statement> child."""
    statement = question.find('statement')
    return statement.text
def get_items(question):
    """Return the child elements of the question's <items> element.

    Uses ``list(element)`` instead of the deprecated ``getchildren()``.
    """
    return list(question.find('items'))
def get_correct_item(question):
    """Return the letter of the item flagged correct="true", or None."""
    for item in get_items(question):
        if item.get('correct') == "true":
            return item.get('letter')
    return None
def make_items_dict(items):
    """Map each item element's letter attribute to its raw text."""
    return {item.get('letter'): item.text for item in items}
class OABQuestion():
    """One OAB exam question: metadata, answer items and optional justification."""

    def __init__(self, number, exam, valid, statement, items, justification=None):
        self.number = number                # question number (str)
        self.exam = exam                    # exam id, e.g. "2010-01"
        self.valid = valid                  # correct item letter, or None
        self.statement = statement          # raw statement text
        self.items = items                  # {letter: item text}
        self.justification = justification  # (law-urn, articles) or None

    def str_repr(self):
        """Compact one-line description, also used by __repr__."""
        if self.valid and self.justification:
            return "OAB:{}|Q{}|ans:{}|just:{}".format(self.exam, self.number, self.valid, self.justification)
        if self.valid:
            return "OAB:{}|Q{}|ans:{}|just:{}".format(self.exam, self.number, self.valid, ".")
        return "OAB:{}|Q{}|ans:{}".format(self.exam, self.number, "NULL")

    def __repr__(self):
        return self.str_repr()
def questions_in_tree(tree):
    """Yield an OABQuestion for each <question> element in the exam tree."""
    for element in elements_in_tree(tree, 'question'):
        number = element.get('number')
        exam = get_exam_id(tree)
        answer = get_correct_item(element)
        statement = get_statement_text(element)
        items = make_items_dict(get_items(element))
        yield OABQuestion(number, exam, answer, statement, items)
#
## reading law XML
# lexML namespaces
def namespace_it(namespace, key, element):
    """Qualify *element* in Clark notation: ``{uri}element``."""
    uri = namespace[key]
    return "{{{}}}{}".format(uri, element)
def lazy_articles_in_tree(tree):
    """Yield (article-id, concatenated text) for each lexML <Artigo> element."""
    artigo_tag = namespace_it(tree.getroot().nsmap, None, 'Artigo')
    for artigo in elements_in_tree(tree, artigo_tag):
        yield artigo.get('id'), ''.join(artigo.itertext())
def articles_in_tree(tree):
    """Eagerly collect all (id, text) article pairs from *tree*."""
    return list(lazy_articles_in_tree(tree))
def get_urn(law_xml):
    """Return the URN attribute from the law's Metadado/Identificacao element."""
    assert isinstance(law_xml, etree._ElementTree)
    # fixme http://lxml.de/xpathxslt.html#namespaces-and-prefixes
    nsmap = law_xml.getroot().nsmap
    lookup = "{}/{}".format(namespace_it(nsmap, None, 'Metadado'),
                            namespace_it(nsmap, None, 'Identificacao'))
    return law_xml.find(lookup).get('URN')
def law_articles_in_file(law_path):
    """Parse a lexML file into ``(urn, [(article-id, article-text), ...])``."""
    tree = parse_xml(law_path)
    return (get_urn(tree), articles_in_tree(tree))
def all_law_articles_in_path(laws_path):
    """Parse every .xml law file directly under *laws_path* into a list."""
    # reads all .xml files in laws_path to a list of law_articles
    assert os.path.isdir(laws_path)
    return [law_articles_in_file(entry.path)
            for entry in os.scandir(laws_path)
            if entry.name.endswith(".xml")]
#
## text processing
def is_number(token):
    """True when *token* parses as a float after normalising ',' and 'º'."""
    cleaned = token.replace(',', '.').replace('º', '')
    try:
        float(cleaned)
    except ValueError:
        return False
    return True
def is_punctuation(token):
    """True when *token* is a recognised punctuation character.

    Returns None (falsy) rather than False otherwise, matching the
    original contract.
    """
    punctuation_chars = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`´{|}~§–'
    if token in punctuation_chars:
        return True
def is_stopword(token, language='portuguese'):
    """True when *token* is an NLTK stopword for *language*; None otherwise."""
    stopwords = nltk.corpus.stopwords.words(language)
    if token in stopwords:
        return True
def is_useful(token, rm_stopwords):
    """False for numbers, punctuation and (when *rm_stopwords*) stopwords."""
    stripped = token.strip()
    if is_number(stripped) or is_punctuation(stripped):
        return False
    if rm_stopwords and is_stopword(stripped):
        return False
    return True
def preprocess_text(text, rm_stopwords):
    """Tokenise *text*, lowercase/strip each token and drop useless ones."""
    assert isinstance(rm_stopwords, bool)
    tokens = nltk.tokenize.word_tokenize(text)
    return [tok.lower().strip() for tok in tokens if is_useful(tok, rm_stopwords)]
#
## tf-idf and base graph making
def cosine_similarity(vec1, vec2):
    """Cosine similarity of two vectors; 0 when either has zero norm."""
    norm_product = numpy.linalg.norm(vec1) * numpy.linalg.norm(vec2)
    if norm_product == 0:
        return 0
    return numpy.dot(vec1, vec2) / norm_product
class ArticleCollection(nltk.TextCollection):
    """TF-IDF index over law articles plus an edgeless base graph.

    Wraps nltk.TextCollection (for idf()/vocab()), maps each article id
    ("urn + art-id") to its index, caches one TF-IDF vector per article and
    builds a DiGraph whose nodes are article ids; per-question edges are
    added later by add_temporary_node().
    """
    # source is [(law-urn [(art-id, raw-art-text)+])+]
    def __init__(self, source, rm_stopwords=False, text_preprocessing_fn=preprocess_text, similarity_fn=cosine_similarity):
        assert isinstance(source, list)
        self.rm_stopwords = rm_stopwords
        self._text_preprocessing_fn = text_preprocessing_fn
        self._similarity_fn = similarity_fn
        # map article id to its index
        self.ids, self.raw_texts = self.make_ids_and_raw_texts(source)
        self.laws = [law[0] for law in source]
        # remove law id
        # so that we have useful methods such as .idf(token)
        nltk.TextCollection.__init__(self, list(map(lambda x: text_preprocessing_fn(x, self.rm_stopwords), self.raw_texts)))
        # index tokens to create TF-IDF vector
        self.token_index_dict = {key:ix for ix, key in enumerate(self.vocab().keys())}
        self.vocab_size = len(self.vocab().keys())
        self.tfidf_vectors = [self.tfidf_vectorize(text) for text in self._texts]
        self.size = len(self._texts)
        # graph w/ only the articles as nodes, no edges
        self.base_graph = self.make_base_graph()
    def __repr__(self):
        return "ArticleCollection: {}".format(self.laws)
    def make_ids_and_raw_texts(self, source):
        """Flatten *source* into ({'urn+art_id': index}, [raw_text, ...])."""
        ids = {}
        raw_texts = []
        ix = 0
        for law in source:
            law_id = law[0]
            for article in law[1]:
                art_id = article[0]
                # prefixing the law urn makes article ids globally unique
                art_id = law_id + art_id
                ids[art_id] = ix
                raw_texts.append(article[1])
                ix += 1
        return ids, raw_texts
    def tf_tokens(self, tokens):
        """Per-token term frequency (count / token-list length), in order."""
        count = collections.Counter(tokens)
        length = len(tokens)
        return list(map(lambda x: count[x]/length, tokens))
    def tfidf_vectorize(self, text):
        """TF-IDF vector (vocab-sized numpy array) for a pre-tokenised text."""
        # text must be preprocessed first!
        tfidf_vector = numpy.zeros(self.vocab_size)
        tf_vector = self.tf_tokens(text)
        for ix, token in enumerate(text):
            idf = self.idf(token)
            # idf == 0 means the token appears in every document: no signal
            if idf == 0:
                continue
            tfidf_vector[self.token_index_dict[token]] = tf_vector[ix] * idf
        return tfidf_vector
    def inverse_similarity(self, vec1, vec2):
        """1/similarity, with infinity when the vectors have zero similarity."""
        similarity = self._similarity_fn(vec1, vec2)
        if similarity == 0:
            return numpy.Infinity
        else:
            return 1 / similarity
    def make_base_graph(self):
        """Directed graph with one node per article id and no edges yet."""
        graph = networkx.DiGraph()
        graph.add_nodes_from(self.ids.keys())
        return graph
#
## add questions
def add_temporary_node(graph, artcol, text, label, to_nodes=True):
    """Add a node for *text* (named *label*) with weighted edges to every article.

    artcol is where graph and tfidf-calculation happen; text is the raw
    question statement (preprocessed here) and label is the question number
    as str. to_nodes selects the edge direction: from the new node to the
    article nodes (True) or from the articles to the new node (False).
    Edge weight is the inverse TF-IDF similarity. Returns the graph.
    """
    graph.add_node(label)
    tokens = artcol._text_preprocessing_fn(text, artcol.rm_stopwords)
    label_vector = artcol.tfidf_vectorize(tokens)
    # connect only to article nodes -- the graph may hold other temp nodes
    for node_id, node_ix in artcol.ids.items():
        weight = artcol.inverse_similarity(label_vector, artcol.tfidf_vectors[node_ix])
        if to_nodes:
            graph.add_edge(label, node_id, weight=weight)
        else:
            graph.add_edge(node_id, label, weight=weight)
    return graph
def question_paths_in_graph(article_collection, oab_question):
    """Shortest (distance, path) from the question statement to each item.

    Note that '1' (str) means question one.
    """
    assert isinstance(article_collection, ArticleCollection)
    assert isinstance(oab_question, OABQuestion)
    # work on a copy so the cached base graph is never mutated
    graph = copy.deepcopy(article_collection.base_graph)
    # statement node: edges point towards the articles
    graph = add_temporary_node(graph, article_collection,
                               oab_question.statement,
                               oab_question.number, to_nodes=True)
    paths = {}
    for letter, item_text in oab_question.items.items():
        # item nodes: edges point from the articles to the item
        graph = add_temporary_node(graph, article_collection, item_text,
                                   letter, to_nodes=False)
        paths[letter] = networkx.algorithms.shortest_paths.bidirectional_dijkstra(
            graph, oab_question.number, letter, weight='weight')
    return paths
#
## add justified questions
def read_laws_into_separate_artcol(laws_path, rm_stopwords):
    """Build one ArticleCollection per .xml law file, keyed by its URN."""
    collections_by_urn = {}
    for entry in os.scandir(laws_path):
        if not entry.name.endswith(".xml"):
            continue
        urn, artigos = law_articles_in_file(entry.path)
        collections_by_urn[urn] = ArticleCollection([(urn, artigos)], rm_stopwords)
    return collections_by_urn
def read_laws_into_artcollection(laws_path, separate, rm_stopwords=False):
    """Read every law file: a dict of per-URN collections when *separate*,
    otherwise one combined ArticleCollection."""
    assert os.path.isdir(laws_path)
    if separate:
        return read_laws_into_separate_artcol(laws_path, rm_stopwords)
    return ArticleCollection(all_law_articles_in_path(laws_path), rm_stopwords)
def get_law_artcol(laws, urn, separate):
    """Select the collection for *urn* when separate, else the shared one."""
    return laws[urn] if separate else laws
def find_question(oab_exam, question_nr):
    """Return the question numbered *question_nr* in *oab_exam*, or None."""
    assert isinstance(oab_exam, etree._ElementTree)
    return next((q for q in questions_in_tree(oab_exam)
                 if q.number == question_nr), None)
def sqa_justified_questions(justification_path, laws_path, exams_path, rm_stopwords=False, separate=True):
    """Shallow question answering over the justified questions in a TSV file.

    Each TSV row is: exam filename, question number, justification article,
    justification law URN. Returns {OABQuestion: {item-letter: (distance, path)}}.
    """
    # sqa = shallow question answering
    # justification file must be in the format described in docs.
    assert os.path.isfile(justification_path)
    assert os.path.isdir(exams_path)
    laws = read_laws_into_artcollection(laws_path, separate, rm_stopwords)
    question_paths = {}
    with open(justification_path, 'r') as tsv:
        tsv = csv.reader(tsv, delimiter='\t')
        for row in tsv:
            # row[0]: OAB exam filename
            exam_path = os.path.join(exams_path, row[0] + '.xml')
            oab_exam = parse_xml(exam_path)
            # row[1]: question number
            question = find_question(oab_exam, row[1])
            # row[3]: justification law URN
            artcol = get_law_artcol(laws, row[3], separate)
            # row[2]: justification article
            question.justification = (row[3], row[2])
            paths = question_paths_in_graph(artcol, question)
            question_paths[question] = paths
    return question_paths
def get_minimum_paths(question_paths):
    """For each question, keep the (distance, path) pair with the smallest distance.

    ``question_paths`` maps question -> {item-letter: (distance, path)}.
    On an exact distance tie the left pair is kept and a "can't decide"
    note is appended to the resulting tuple.
    """
    minimum_paths = {}
    for question, item_paths in question_paths.items():
        paths = []
        for item, item_path in item_paths.items():
            paths.append(item_path)
        # pairwise fold: y wins if strictly smaller, x wins if strictly
        # smaller, otherwise tie -> keep x and record the ambiguity
        minimum_path = reduce(lambda x,y: y if x[0]>y[0] else x if x[0] < y[0] else x + ("can't decide between {} and {}".format(x[1],y[1]),), paths)
        minimum_paths[question] = minimum_path
    return minimum_paths
def get_correct_item_paths(question_paths):
    """Keep only each question's correct-item path; skip invalid questions."""
    correct_paths = {}
    for question, item_paths in question_paths.items():
        letter = question.valid
        if not letter:
            continue
        correct_paths[question] = item_paths[letter]
    return correct_paths
def check_justification_correct_items(question_paths):
    """True per question when the article picked for the correct item is
    among the question's justification articles."""
    correct_items = {}
    for question, item_paths in question_paths.items():
        # path tuple is (distance, [statement, article, item]); index 1 of
        # the node list is the selected article
        chosen_article = item_paths[question.valid][1][1]
        urn, articles = question.justification
        expected = [urn + art for art in articles.split(',')]
        correct_items[question] = chosen_article in expected
    return correct_items
#
## assign article to question
def sqa_questions_in_exam(exam_path, artcol, max_questions=-1):
    """Compute item paths for up to *max_questions* questions in one exam file."""
    assert os.path.isfile(exam_path)
    exam = parse_xml(exam_path)
    question_paths = {}
    for ix, question in enumerate(questions_in_tree(exam)):
        if ix == max_questions:
            break
        question_paths[question] = question_paths_in_graph(artcol, question)
    return question_paths
def make_paths_printable(question_paths):
    """Re-key the dict by each question's string form (JSON-serialisable)."""
    return {question.str_repr(): item_paths
            for question, item_paths in question_paths.items()}
def to_json(dictionary, path):
    """Serialise *dictionary* to *path* as indented JSON."""
    with open(path, 'w') as out:
        json.dump(dictionary, out, indent=4)
def questions_in_exams_to_json(exams_path, artcol, max_questions=-1):
    """Run sqa_questions_in_exam over every .xml exam under *exams_path* and
    write all printable paths to results.json in that directory.

    BUG FIX: the results path was built from the loop variable after the
    loop ended (NameError when the directory has no .xml files); since
    os.scandir is non-recursive, every entry's dirname is *exams_path*,
    which is now used directly.
    """
    # make this work with all functions later
    assert os.path.isdir(exams_path)
    paths = {}
    for entry in os.scandir(exams_path):
        if entry.name.endswith(".xml"):
            exam_question_paths = sqa_questions_in_exam(
                entry.path, artcol, max_questions=max_questions)
            paths[entry.name] = make_paths_printable(exam_question_paths)
    result_path = os.path.join(exams_path, 'results.json')
    to_json(paths, result_path)
|
import marshmallow as mm
from emannotationschemas.schemas.base import ReferenceAnnotation
class FlyNeuropil(ReferenceAnnotation):
    """Reference annotation carrying a fly-brain neuropil name."""
    # Required free-text neuropil name.
    neuropil = mm.fields.Str(
        required=True,
        description="neuropil name",
    )
|
# ============================================================================
# 第三章 暖冷房負荷と外皮性能
# 第四節 日射熱取得率
# Ver.11(住宅・住戸の外皮性能の計算プログラム Ver.03~2021.4)
# ----------------------------------------------------------------------------
# 付録B 大部分がガラスで構成されている窓等の開口部における取得日射熱補正係数
# ----------------------------------------------------------------------------
# B.1.1 開口部の上部に日除けが設置されている場合
# ============================================================================
import os
import pandas as pd
import numpy as np
from functools import lru_cache
def calc_f_H_1(region, glass_spec_category, direction, y1, y2, z):
    """Heating-season solar-gain correction factor for an opening,
    direction-independent table method (Eq. (1)).

    Args:
        region (int): energy-efficiency region (1-8)
        glass_spec_category: glass specification category
        direction (str): facade orientation (Japanese compass name)
        y1 (float): vertical distance from shade bottom to opening top (mm)
        y2 (float): opening height (mm)
        z (float): shade overhang depth from the wall surface (mm)

    Returns:
        float: heating-season correction factor f_H
    """
    l1 = get_l1(y1, z)
    l2 = get_l2(y1, y2, z)
    f1 = get_glass_f(region, glass_spec_category, l1, 'H', direction)
    f2 = get_glass_f(region, glass_spec_category, l2, 'H', direction)
    return get_f(f1, f2, y1, y2)
def calc_f_C_1(region, glass_spec_category, direction, y1, y2, z):
    """Cooling-season solar-gain correction factor for an opening,
    direction-independent table method (Eq. (1)).

    Args:
        region (int): energy-efficiency region (1-8)
        glass_spec_category: glass specification category
        direction (str): facade orientation (Japanese compass name)
        y1 (float): vertical distance from shade bottom to opening top (mm)
        y2 (float): opening height (mm)
        z (float): shade overhang depth from the wall surface (mm)

    Returns:
        float: cooling-season correction factor f_C
    """
    l1 = get_l1(y1, z)
    l2 = get_l2(y1, y2, z)
    f1 = get_glass_f(region, glass_spec_category, l1, 'C', direction)
    f2 = get_glass_f(region, glass_spec_category, l2, 'C', direction)
    return get_f(f1, f2, y1, y2)
def get_glass_f(region, glass_spec_category, l, H_or_C, direction):
    """Look up (and linearly interpolate) the solar-gain correction factor
    from the "glass_f" data table.

    Args:
        region (int): energy-efficiency region (1-8)
        glass_spec_category: glass specification category; used as an int
            below (``(glass_spec_category - 1) * 44``) -- NOTE(review): the
            original docstring called this a str; confirm the actual type.
        l (float): normalised shade-geometry parameter (see get_l1/get_l2)
        H_or_C (str): 'H' for heating season, 'C' for cooling season
        direction (str): facade orientation (Japanese compass name)

    Returns:
        float: correction factor, interpolated when *l* falls between two
        tabulated values.

    Raises:
        ValueError: if H_or_C is neither 'H' nor 'C'.
    """
    # each glass category occupies 44 rows: 22 cooling rows then 22 heating rows
    glass_index = (glass_spec_category - 1) * 44
    if H_or_C == 'C':
        HC_index = 0
    elif H_or_C == 'H':
        HC_index = 22
    else:
        raise ValueError(H_or_C)
    dir_dic = {
        '北': 0, '北東': 1, '東': 2, '南東': 3, '南': 4, '南西': 5, '西': 6, '北西': 7
    }
    dir_index = dir_dic[direction]
    index_base = glass_index + HC_index
    # 8 direction columns per region, starting at column 3
    region_index = 3 + (region - 1) * 8
    df = get_glass_f_table()
    # column 2 holds the 22 tabulated l values for this section
    f_cd = df[2][index_base:index_base + 22].values
    index = np.argwhere(f_cd == l)
    if len(index) == 1:
        # exact match in the table: no interpolation needed
        f = float(df[region_index + dir_index][index_base + index[0]])
    else:
        # interpolate linearly between the nearest tabulated l values
        index_a = np.min(np.argwhere(f_cd > l)) - 1
        index_b = np.max(np.argwhere(f_cd < l)) + 1
        la = float(df[2][index_base + index_a])
        lb = float(df[2][index_base + index_b])
        fa = float(df[region_index + dir_index][index_base + index_a])
        fb = float(df[region_index + dir_index][index_base + index_b])
        f = fa + (fb - fa) / (lb - la) * (l - la)
    return f
@lru_cache()
def get_glass_f_table():
    """Load the solar-gain correction table, cached after the first read.

    Returns:
        DataFrame: raw data/glass_f.csv with no header, first two rows skipped.
    """
    csv_path = os.path.join(os.path.dirname(__file__), 'data', 'glass_f.csv')
    return pd.read_csv(csv_path, header=None, skiprows=2)
def get_f(f1, f2, y1=0, y2=None):
    """Combine the two table factors into the opening's correction factor (Eq. (1)).

    Args:
        f1 (float): table factor at parameter l1
        f2 (float): table factor at parameter l2
        y1 (float, optional): shade-bottom-to-opening-top distance (mm); 0 by default
        y2 (float, optional): opening height (mm); only used when y1 != 0

    Returns:
        float: f2 alone when y1 == 0, otherwise the height-weighted combination.
    """
    if y1 == 0:
        return f2
    return (f2 * (y1 + y2) - f1 * y1) / y2
def get_f_H_2(region, direction, y1, y2, z):
    """Heating-season correction factor by orientation (Eq. (2)).

    Args:
        region (int): energy-efficiency region (1-8)
        direction (str): facade orientation (Japanese compass name)
        y1 (float): shade-bottom-to-opening-top distance (mm)
        y2 (float): opening height (mm)
        z (float): shade overhang depth (mm)

    Returns:
        float or None: f_H, capped at 0.72; None for region 8 (undefined).

    Raises:
        ValueError: for a region outside 1-8.
    """
    if region not in (1, 2, 3, 4, 5, 6, 7):
        if region == 8:
            # no heating-season factor is defined for region 8
            return None
        raise ValueError(region)
    if direction in ('南東', '南', '南西'):
        # regions 1-7, SE/S/SW faces (2a)
        return min(0.01 * (5 + 20 * (3 * y1 + y2) / z), 0.72)
    # regions 1-7, all other faces (2b)
    return min(0.01 * (10 + 15 * (2 * y1 + y2) / z), 0.72)
def get_f_C_2(region, direction, y1, y2, z):
    """Cooling-season correction factor by orientation (Eq. (3)).

    Args:
        region (int): energy-efficiency region (1-8)
        direction (str): facade orientation (Japanese compass name)
        y1 (float): shade-bottom-to-opening-top distance (mm)
        y2 (float): opening height (mm)
        z (float): shade overhang depth (mm)

    Returns:
        float or None: f_C, capped at 0.93; None for region 8 with a
        non-SE/S/SW orientation (undefined).

    Raises:
        ValueError: for a region outside 1-8.
    """
    south_faces = ('南東', '南', '南西')
    if region in (1, 2, 3, 4, 5, 6, 7):
        if direction in south_faces:
            # regions 1-7, SE/S/SW faces (3a)
            return min(0.01 * (24 + 9 * (3 * y1 + y2) / z), 0.93)
        # regions 1-7, all other faces (3b)
        return min(0.01 * (16 + 24 * (2 * y1 + y2) / z), 0.93)
    if region == 8:
        if direction in south_faces:
            # region 8, SE/S/SW faces (3c)
            return min(0.01 * (16 + 19 * (2 * y1 + y2) / z), 0.93)
        return None
    raise ValueError(region)
def get_l1(y1, z):
    """Normalised shade-geometry parameter l1 = y1 / z (Eq. (4a)).

    Args:
        y1 (float): shade-bottom-to-opening-top distance (mm)
        z (float): shade overhang depth (mm)
    """
    return y1 / z
def get_l2(y1, y2, z):
    """Normalised shade-geometry parameter l2 = (y1 + y2) / z (Eq. (4b)).

    Args:
        y1 (float): shade-bottom-to-opening-top distance (mm)
        y2 (float): opening height (mm)
        z (float): shade overhang depth (mm)
    """
    return (y1 + y2) / z
|
import os
import numpy as np
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from typing import Any
from typing import Tuple
from cftool.dist import Parallel
# Global configuration for the font-sheet export pipeline.
tight = False    # when True, glyphs are rescaled to fill their cell tightly
padding = 0.1    # fraction of the probe resolution kept as margin
"""
Expected file structure:
-- /path/to/your/fonts/folder
|- font_type0
|- font_file0
|- font_file1
...
|- font_type1
|- font_file0
|- font_file1
...
A typical use case is to convert opentype / truetype fonts:
-- /path/to/your/fonts/folder
|- opentype
|- xxx.otf
|- xxx.otf
...
|- truetype
|- xxx.ttf
|- xxx.ttf
...
"""
fonts_folder = "/path/to/your/fonts/folder"
num_jobs = 32           # parallel worker count
resolution = 512        # exported cell resolution per glyph
test_resolution = 32    # small resolution used to probe font metrics
export_folder = f"export{'-tight' if tight else ''}"
all_folder = os.path.join(export_folder, "all")      # full 13x4 glyph sheets
split_folder = os.path.join(export_folder, "split")  # per-glyph crops
lower = "abcdefghijklmnopqrstuvwxyz"
upper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
alphabet = lower + upper
limit = test_resolution * (1.0 - padding)  # max glyph extent at probe resolution
width = resolution * 13   # sheet layout: 13 columns x 4 rows
height = resolution * 4
os.makedirs(all_folder, exist_ok=True)
os.makedirs(split_folder, exist_ok=True)
def filter_fn(name: str) -> bool:
    """Whether the font *name* should be exported (ZillaSlabHighlight is skipped)."""
    return not name.startswith("ZillaSlabHighlight")
def get_border(mask: np.ndarray, axis: int) -> int:
    """Distance from the leading edge to the first True pixel along *axis*.

    ``np.argmax`` returns 0 both when a lane starts with True and when it
    holds no True at all; or-ing in the first row/column keeps genuine
    zero borders while dropping all-False lanes.

    BUG FIX: ``indices != 0 | first`` parsed as ``indices != (0 | first)``
    because ``|`` binds tighter than ``!=``; the intended filter is
    ``(indices != 0) | first``.
    """
    indices = np.argmax(mask, axis=axis)
    first = mask[0] if axis == 0 else mask[:, 0]
    valid = (indices != 0) | first
    return indices[valid].min().item()
def get_bbox(img: np.ndarray) -> Tuple[int, int, int, int]:
    """Bounding box (left, right, top, bottom) of pixels brighter than 50."""
    mask = img > 50
    flipped_x = mask[:, ::-1]
    flipped_y = mask[::-1]
    left = get_border(mask, axis=1)
    right = mask.shape[1] - get_border(flipped_x, axis=1)
    top = get_border(mask, axis=0)
    bottom = mask.shape[0] - get_border(flipped_y, axis=0)
    return left, right, top, bottom
def get_bbox_from(char: str, font: Any, res: int) -> Tuple[int, int, int, int]:
    """Render *char* on a 2*res square canvas at x=res and return its
    bounding box with the horizontal coordinates shifted back by res."""
    canvas = Image.new("RGB", [res * 2, res * 2])
    ImageDraw.Draw(canvas).text((res, 0), char, font=font)
    grayscale = np.array(canvas.convert("L"))
    left, right, top, bottom = get_bbox(grayscale)
    return left - res, right - res, top, bottom
def get_font(char: str, font_path: str) -> Tuple[Any, float, float]:
    """Choose a font size for *char* and the offsets that centre it in a cell.

    Grows the size until the glyph nearly fills the padded probe resolution,
    optionally rescales for a tight fit, then centres it via its bbox.
    Returns (font at export resolution, x_offset, y_offset) where the
    offsets are in export-resolution pixels.
    """
    font_size = 1.0
    # grow until either glyph dimension reaches the padded probe limit
    while True:
        font = ImageFont.truetype(font_path, int(round(font_size)))
        # NOTE(review): font.getsize is deprecated in recent Pillow
        # (getbbox/getlength replace it) -- confirm the pinned version.
        fw, fh = font.getsize(char)
        if fw >= limit or fh >= limit:
            break
        font_size += 1.0
    left, right, top, bottom = get_bbox_from(char, font, test_resolution)
    ratio = resolution / test_resolution
    if tight:
        # rescale so the larger bbox side exactly fills the padded cell
        span = max(right - left, bottom - top)
        span_ratio = limit / span
        font_size *= span_ratio
        font = ImageFont.truetype(font_path, int(round(font_size)))
        res = int(round(test_resolution * span_ratio))
        left, right, top, bottom = get_bbox_from(char, font, res)
    # offsets that centre the bbox within the probe-resolution cell
    x_offset = 0.5 * (test_resolution - (left + right))
    y_offset = 0.5 * (test_resolution - (top + bottom))
    # scale the font and the offsets up to the export resolution
    font = ImageFont.truetype(font_path, int(round(font_size * ratio)))
    x_offset *= ratio
    y_offset *= ratio
    return font, x_offset, y_offset
def main_task(folder: str, file: str) -> None:
    """Render one font file into a 13x4 sheet of a-z/A-Z glyphs in all_folder.

    Skips filtered fonts and already-exported sheets; any failure is logged
    and swallowed so one bad font does not abort the batch.
    """
    name = os.path.splitext(file)[0]
    if not filter_fn(name):
        return None
    export_file = f"{name}.png"
    export_path = os.path.join(all_folder, export_file)
    if os.path.isfile(export_path):
        # already exported on a previous run
        return None
    try:
        x_offsets = set()
        y_offsets = set()
        path = os.path.join(folder, file)
        image = Image.new("RGB", [width, height])
        draw = ImageDraw.Draw(image)
        for i in range(52):
            # 13 columns x 4 rows; lowercase fills rows 0-1, uppercase 2-3
            ix, iy = i % 13, i // 13
            x, y = ix * resolution, iy * resolution
            char = (lower if i < 26 else upper)[i % 26]
            font, x_offset, y_offset = get_font(char, path)
            x_offsets.add(x_offset)
            y_offsets.add(y_offset)
            draw.text((x + x_offset, y + y_offset), char, font=font)
        if max(len(x_offsets), len(y_offsets)) <= 5:
            # NOTE(review): sheets whose glyphs produce at most 5 distinct
            # offsets are NOT saved -- presumably to discard degenerate /
            # fixed-metric fonts; confirm this threshold is intended.
            return None
        image.save(export_path)
    except Exception as err:
        print(f"> failed to export {file} : {err}")
def main() -> None:
    """Collect all opentype/truetype files, shuffle them, and export sheets in parallel."""
    folders = []
    files = []
    for font_type in ["opentype", "truetype"]:
        type_folder = os.path.join(fonts_folder, font_type)
        for file_name in sorted(os.listdir(type_folder)):
            folders.append(type_folder)
            files.append(file_name)
    # shuffle so slow/broken fonts spread evenly across workers
    order = np.random.permutation(len(folders))
    folders = [folders[ix] for ix in order]
    files = [files[ix] for ix in order]
    Parallel(num_jobs).grouped(main_task, folders, files)
def split_task(file: str) -> None:
    """Slice one exported 13x4 sheet into 52 per-character images."""
    name = os.path.splitext(file)[0]
    name_folder = os.path.join(split_folder, name)
    os.makedirs(name_folder, exist_ok=True)
    sheet = np.array(Image.open(os.path.join(all_folder, file)))
    for row in range(4):
        for col in range(13):
            char = alphabet[row * 13 + col]
            x, y = col * resolution, row * resolution
            crop = sheet[y: y + resolution, x: x + resolution]
            Image.fromarray(crop).save(os.path.join(name_folder, f"{char}.png"))
def split() -> None:
    """Split every exported sheet in all_folder, in parallel."""
    Parallel(num_jobs).grouped(split_task, os.listdir(all_folder))
if __name__ == "__main__":
main()
split()
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import configparser
from transport.settings import IAMAdminClientSettings
from transport import utils
from airavata.api.error.ttypes import TException
# Module-level logger mirroring everything to the console at DEBUG level.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# create console handler with a higher log level
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
# create formatter and add it to the handler
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handler to the logger
logger.addHandler(handler)
class IAMAdminClient(object):
    def __init__(self, configuration_file_location=None):
        """Load IAM admin settings and connect a pooled profile-service client.

        Parameters:
        - configuration_file_location: optional path to a settings file.
        """
        self.iam_admin_settings = IAMAdminClientSettings(configuration_file_location)
        # NOTE(review): _load_settings is not defined in the visible part of
        # this file -- confirm it exists on a base class or elsewhere.
        self._load_settings(configuration_file_location)
        self.iam_admin_client_pool = utils.initialize_iam_admin_client(
            self.iam_admin_settings.PROFILE_SERVICE_HOST,
            self.iam_admin_settings.PROFILE_SERVICE_PORT,
            self.iam_admin_settings.PROFILE_SERVICE_SECURE)
def set_up_gateway(self, authz_token, gateway):
"""
Parameters:
- authz_token
- gateway
"""
try:
return self.iam_admin_client_pool.setUpGateway(authz_token, gateway)
except TException:
logger.exception("Error occurred in set_up_gateway, ", TException)
raise
def is_username_available(self, authz_token, username):
"""
Parameters:
- authz_token
- username
"""
try:
return self.iam_admin_client_pool.isUsernameAvailable(authz_token, username)
except TException:
logger.exception("Error occurred in is_username_available, ", TException)
raise
def register_user(self, authz_token, username, email_address, first_name, last_name, new_password):
"""
Parameters:
- authz_token
- username
- email_address
- first_name
- last_name
- new_password
"""
try:
return self.iam_admin_client_pool.registerUser(authz_token, username, email_address,
first_name, last_name, new_password)
except TException:
logger.exception("Error occurred in register_user, ", TException)
raise
def enable_user(self, authz_token, username):
"""
Parameters:
- authz_token
- username
"""
try:
return self.iam_admin_client_pool.enableUser(authz_token, username)
except TException:
logger.exception("Error occurred in enable_user, ", TException)
raise
def is_user_enabled(self, authz_token, username):
"""
Parameters:
- authzToken
- username
"""
try:
return self.iam_admin_client_pool.isUserEnabled(authz_token, username)
except TException:
logger.exception("Error occurred in is_user_enabled, ", TException)
raise
def is_user_exist(self, authz_token, username):
"""
Parameters:
- authzToken
- username
"""
try:
return self.iam_admin_client_pool.isUserExist(authz_token, username)
except TException:
logger.exception("Error occurred in is_user_exist, ", TException)
raise
def get_user(self, authz_token, username):
"""
Parameters:
- authzToken
- username
"""
try:
return self.iam_admin_client_pool.getUser(authz_token, username)
except TException:
logger.exception("Error occurred in get_user, ", TException)
raise
def get_users(self, authz_token, offset, limit, search):
"""
Parameters:
- authzToken
- offset
- limit
- search
"""
try:
return self.iam_admin_client_pool.getUsers(authz_token, offset, limit, search)
except TException:
logger.exception("Error occurred in get_users, ", TException)
raise
def reset_user_password(self, authz_token, username, new_password):
"""
Parameters:
- authzToken
- username
- newPassword
"""
try:
return self.iam_admin_client_pool.resetUserPassword( authz_token, username, new_password)
except TException:
logger.exception("Error occurred in reset_user_password, ", TException)
raise
def find_users(self, authz_token, email, user_id):
"""
Parameters:
- authzToken
- email
- userId
"""
try:
return self.iam_admin_client_pool.findUsers(authz_token, email, user_id)
except TException:
logger.exception("Error occurred in find_users, ", TException)
raise
def update_user_profile(self, authz_token, user_details):
"""
Parameters:
- authzToken
- userDetails
"""
try:
return self.iam_admin_client_pool.updateUserProfile(authz_token, user_details)
except TException:
logger.exception("Error occurred in update_user_profile, ", TException)
raise
def delete_user(self, authz_token, username):
"""
Parameters:
- authzToken
- username
"""
try:
return self.iam_admin_client_pool.deleteUser(authz_token, username)
except TException:
logger.exception("Error occurred in delete_user, ", TException)
raise
def add_role_to_user(self, authz_token, username, role_name):
"""
Parameters:
- authzToken
- username
- roleName
"""
try:
return self.iam_admin_client_pool.addRoleToUser(authz_token, username, role_name)
except TException:
logger.exception("Error occurred in add_role_to_user, ", TException)
raise
def remove_role_from_user(self, authz_token, username, role_name):
"""
Parameters:
- authzToken
- username
- roleName
"""
try:
return self.iam_admin_client_pool.removeRoleFromUser(authz_token, username, role_name)
except TException:
logger.exception("Error occurred in remove_role_from_user, ", TException)
raise
def get_users_with_role(self, authz_token, role_name):
"""
Parameters:
- authzToken
- roleName
"""
try:
return self.iam_admin_client_pool.getUsersWithRole(authz_token, role_name)
except TException:
logger.exception("Error occurred in create_group, ", TException)
raise
def _load_settings(self, configuration_file_location):
if configuration_file_location is not None:
config = configparser.ConfigParser()
config.read(configuration_file_location)
self.iam_admin_settings.PROFILE_SERVICE_HOST = config.get('ProfileServer', 'PROFILE_SERVICE_HOST')
self.iam_admin_settings.PROFILE_SERVICE_PORT = config.getint('ProfileServer', 'PROFILE_SERVICE_PORT')
self.iam_admin_settings.PROFILE_SERVICE_SECURE = config.getboolean('ProfileServer',
'PROFILE_SERVICE_SECURE')
|
import torch
import logging
from datasets import INT2CHAR, FILE_START, FILE_END, PAD, CharSequenceToTensor
logger = logging.getLogger("genpyseq")
def generate_charseq(
        nn, prime_str=FILE_START, max_window_size=None, print_output=True,
        max_generate_len=1000, temperature=None):
    """Sample a character sequence from a trained character-level network.

    Args:
        nn: trained model; must provide ``init_hidden(batch)`` and be
            callable as ``nn(input_tensor, hidden) -> (output, hidden)``.
        prime_str: priming text; FILE_START is prepended if missing.
        max_window_size: if set, the hidden state is rebuilt every step from
            a sliding window of this many trailing characters.
        print_output: echo the prime and each generated character to stdout.
        max_generate_len: hard cap on the number of generated characters.
        temperature: if set, sample the next character from the softened
            output distribution; otherwise take the argmax character.

    Returns:
        List of characters: the prime followed by the generated continuation
        (including the terminating FILE_END/PAD character if one was hit).
    """
    logger.info(" • max window size: {}".format(max_window_size))
    logger.info(" • temperature: {}".format(temperature))
    logger.info(" • max generate length: {}".format(max_generate_len))
    logger.info("Generating sequence.")
    if not prime_str.startswith(FILE_START):
        prime_str = FILE_START + prime_str
    hidden = nn.init_hidden(1)
    input_seq = list(prime_str)
    if print_output:
        print("~~~~~~~~~Prime~~~~~~~~~")
        print("".join(input_seq))
        print("~~~~Prime+Predicted~~~~")
        print("".join(input_seq))
    window_size = len(input_seq)
    if max_window_size is not None:
        window_size = max_window_size
    charseq_to_tensor = CharSequenceToTensor()
    # use priming sequence to construct the hidden state
    # NOTE(review): input_tensor holds only the last `window_size` characters
    # of the prime, but the loop below runs len(input_seq) steps; if
    # max_window_size is smaller than the prime length, narrow() will go out
    # of range -- confirm intended usage.
    input_tensor, _ = charseq_to_tensor(
        (input_seq[-window_size:], input_seq[-window_size:]))
    for i in range(len(input_seq)):
        output, hidden = nn(input_tensor.narrow(0, i, 1), hidden)
    # predict until max_len or FILE_END/PAD character is reached
    predicted = input_seq[:]
    for i in range(max_generate_len):
        if temperature is not None:
            # Sample from the network as a multinomial distribution
            output_dist = output.data.view(-1).div(temperature).exp()
            top_i = torch.multinomial(output_dist, 1)[0]
            char = INT2CHAR[top_i.item()]
        else:
            # greedy decoding: take the single most probable character
            _, pred_char_idx = output.topk(1)
            char = INT2CHAR[pred_char_idx.item()]
        if print_output:
            print(char, end="")
        predicted.append(char)
        if char in [FILE_END, PAD]:
            return predicted
        # full start to end continuation of hidden state
        if max_window_size is None:
            input_tensor, _ = charseq_to_tensor(((char,), (char,)))
            output, hidden = nn(input_tensor, hidden)
        else:
            # reconstruct hidden state based on sliding window
            hidden = nn.init_hidden(1)
            input_tensor, _ = charseq_to_tensor(
                (predicted[-window_size:], predicted[-window_size:]))
            seq_len, _, _ = input_tensor.size()
            for h_i in range(seq_len):
                output, hidden = nn(input_tensor.narrow(0, h_i, 1), hidden)
    if print_output:
        print("~~max_gen_len reached~~")
    return predicted
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rally_openstack import consts
from rally.task import scenario
from rally.task import types
from rally.task import validation
import vm
@types.convert(smallest_image={"type": "glance_image"}, smallest_flavor={"type": "nova_flavor"})
@validation.add(
    "image_valid_on_flavor", flavor_param="smallest_flavor", image_param="smallest_image"
)
@validation.add(
    "required_services", services=[consts.Service.NEUTRON,
                                   consts.Service.NOVA]
)
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(
    context={
        "cleanup@openstack": ["neutron", "nova"],
        "keypair@openstack": {},
        "allow_ssh@openstack": None,
    },
    name="BrowbeatPlugin.dynamic_workload_min",
    platform="openstack",
)
class DynamicWorkloadMin(vm.VMDynamicScenario):
    """Minimal dynamic VM workload: per-iteration security group and router,
    then the subset of sub-workloads selected by ``workloads``."""

    def run(
            self, smallest_image, smallest_flavor, ext_net_id, num_vms_to_create_with_fip,
            num_create_vms, num_vms_to_migrate, num_stop_start_vms, num_delete_vms,
            workloads="all", router_create_args=None, network_create_args=None,
            subnet_create_args=None, **kwargs):
        """Run the selected workloads.

        ``workloads`` is either "all" or a comma-separated subset of
        create_delete_servers / migrate_servers /
        swap_floating_ips_between_servers / stop_start_servers.
        """
        workloads_list = workloads.split(",")
        self.security_group = self.create_sec_group_with_icmp_ssh()
        self.log_info("security group {} created for this iteration".format(self.security_group))
        # bug fix: router_create_args defaults to None but was subscripted
        # unconditionally below, crashing with a TypeError on the default.
        router_create_args = router_create_args or {}
        router_create_args["name"] = self.generate_random_name()
        router_create_args["tenant_id"] = self.context["tenant"]["id"]
        router_create_args.setdefault(
            "external_gateway_info", {"network_id": ext_net_id, "enable_snat": True}
        )
        self.router = self._create_router(router_create_args)
        self.log_info("router {} created for this iteration".format(self.router))
        self.keypair = self.context["user"]["keypair"]
        self.ext_net_name = self.clients("neutron").show_network(ext_net_id)["network"][
            "name"]
        if workloads == "all" or "create_delete_servers" in workloads_list:
            self.boot_servers(smallest_image, smallest_flavor, num_create_vms,
                              subnet_create_args=subnet_create_args)
            self.delete_random_servers(num_delete_vms)
        if(workloads == "all" or "migrate_servers" in workloads_list or
           "swap_floating_ips_between_servers" in workloads_list or
           "stop_start_servers" in workloads_list):
            self.boot_servers_with_fip(smallest_image, smallest_flavor, ext_net_id,
                                       num_vms_to_create_with_fip,
                                       network_create_args, subnet_create_args, **kwargs)
        if workloads == "all" or "migrate_servers" in workloads_list:
            self.migrate_servers_with_fip(num_vms_to_migrate)
        if workloads == "all" or "swap_floating_ips_between_servers" in workloads_list:
            self.swap_floating_ips_between_servers()
        if workloads == "all" or "stop_start_servers" in workloads_list:
            self.stop_start_servers_with_fip(num_stop_start_vms)
|
"""
Defines the character objects
"""
from random import randint
# set the level boundaries
LEVEL_BOUNDS = [
    [0, 5],
    [5, 10],
    [10, 25],
    [25, 50],
    [50, 100],
    [100, 250],
]

# Stat names that grow with experience, in the order they are rolled.
_SKILL_NAMES = ("password", "hardware", "software", "antivirus", "network", "server")

# Per-tier randint ranges used the first time a stat is initialized.
# Column order: disaster, then the six skills in _SKILL_NAMES order.
# None means the skill is still locked at that tier and is reset to 0.
_TIER_SKILLS = [
    ((75, 90), (1, 10), (1, 10), None, None, None, None),
    ((50, 75), (10, 25), (10, 25), (1, 10), (1, 10), None, None),
    ((25, 50), (25, 50), (25, 50), (10, 25), (10, 25), (1, 10), None),
    ((10, 25), (50, 75), (50, 75), (25, 50), (25, 50), (10, 25), (1, 10)),
    ((1, 10), (75, 100), (75, 100), (50, 75), (50, 75), (25, 50), (10, 25)),
]


class ItTech:
    """Defines the player character."""

    def __init__(self, name, level):
        """Initializes the character's stats."""
        # basic stats
        self.name = name
        self.level = level
        self.exp = 0
        self.is_manager = False
        self.charisma = randint(1, 100)
        # skill stats start locked and are unlocked tier by tier
        self.password = 0
        self.hardware = 0
        self.software = 0
        self.antivirus = 0
        self.network = 0
        self.server = 0
        # chance of disasterous outcome
        self.disaster = 0
        # apply appropriate skill levels
        self.skill_load()

    def __str__(self):
        """Defines the string response."""
        rank = "IT Manager" if self.is_manager else "IT Helpdesk Technician"
        return self.name + " is level " + str(self.level) \
            + " with " + str(self.exp) + " experience points." \
            + "\nRank: " + rank \
            + "\nCharisma: " + str(self.charisma) + "%" \
            + "\nChance of disaster: " + str(self.disaster) + "%" \
            + "\nPassword reset skill: " + str(self.password) + "%" \
            + "\nHardware skill: " + str(self.hardware) + "%" \
            + "\nSoftware skill: " + str(self.software) + "%" \
            + "\nAntivirus skill: " + str(self.antivirus) + "%" \
            + "\nNetwork skill: " + str(self.network) + "%" \
            + "\nServer skill: " + str(self.server) + "%\n"

    def __repr__(self):
        """Displays the technician's name when printing lists."""
        return self.name

    def add_exp(self, exp):
        """
        Adds experience points and levels up when the current bracket's
        experience cap (the next bracket's upper bound) is reached.
        """
        self.exp += exp
        for i in range(0, 5):
            if LEVEL_BOUNDS[i][0] <= self.level <= LEVEL_BOUNDS[i][1]:
                # max experience for a level is the next bracket's upper bound
                max_exp = LEVEL_BOUNDS[i + 1][1]
                # level up if appropriate and reset experience
                if self.exp >= max_exp:
                    self.level += 1
                    self.exp -= max_exp
                    print(self.name + ": Level up!")
                    # increase skill stats
                    self.skill_load()
                # bug fix: boundary levels (5, 10, 25, 50) fall inside two
                # adjacent brackets; without this break the level-up check
                # ran a second time against the next bracket.
                break

    def _current_tier(self):
        """Return the LEVEL_BOUNDS index for self.level, or None when the
        level is above every handled tier."""
        if LEVEL_BOUNDS[0][0] <= self.level <= LEVEL_BOUNDS[0][1]:
            return 0
        for i in range(1, 5):
            if LEVEL_BOUNDS[i][0] < self.level <= LEVEL_BOUNDS[i][1]:
                return i
        return None

    def skill_load(self):
        """
        Initializes and balances skill load; applies improvements with each
        level up.  Decomposed from five near-identical branches into one
        table-driven pass (randint call order per tier is preserved).
        """
        tier = self._current_tier()
        if tier is not None:
            disaster_range, *skill_ranges = _TIER_SKILLS[tier]
            # disaster shrinks once set; otherwise start at the tier's range
            if self.disaster:
                self.disaster -= randint(1, 3)
            else:
                self.disaster = randint(*disaster_range)
            for skill, init_range in zip(_SKILL_NAMES, skill_ranges):
                if init_range is None:
                    # still locked at this tier
                    setattr(self, skill, 0)
                elif getattr(self, skill):
                    # already unlocked: small incremental improvement
                    setattr(self, skill, getattr(self, skill) + randint(1, 3))
                else:
                    # first unlock: roll within the tier's range
                    setattr(self, skill, randint(*init_range))
        # max and min handler
        if self.disaster < 0:
            self.disaster = 0
        for skill in _SKILL_NAMES:
            if getattr(self, skill) > 100:
                setattr(self, skill, 100)
class Customer:
    """Defines a customer seeking IT support."""

    def __init__(self, name, level, issue_type, issue):
        """Initialize customer identity, issue details and patience."""
        # basics
        self.name = name
        self.level = level
        # attributes
        self.issue_type = issue_type
        self.issue = issue
        self.patience = randint(1, 100)
        # experience granted to the technician on success: a random value
        # inside the customer's level bracket, with the lower bound raised
        # to at least 1
        for lower, upper in LEVEL_BOUNDS[:5]:
            if lower <= self.level <= upper:
                self.exp = randint(max(lower, 1), upper)

    def __str__(self):
        """Return a multi-line summary of the customer."""
        return (
            f"{self.name} is level {self.level}"
            f"\nIssue type: {self.issue_type}"
            f"\nIssue: {self.issue}"
            f"\nPatience: {self.patience}%"
            f"\nExperience available: {self.exp}\n"
        )

    def __repr__(self):
        """Displays the customer's name when printing lists."""
        return self.name

    def lose_patience(self):
        """Reduces the patience value while waiting for service."""
        self.patience -= randint(1, 3)
|
import logging
import cupy as cp
import numpy as np
__all__ = ["RescaleIntensity"]
logger = logging.getLogger(__name__)
class RescaleIntensity(object):
    """
    Return image after stretching or shrinking its intensity levels. The
    algorithm is based on the implementation in `skimage`_.

    .. _skimage:
        https://scikit-image.org/docs/dev/api/skimage.exposure.html
    """

    def __call__(self, data, in_range="image", out_range="dtype"):
        """Rescale *data* from *in_range* to *out_range* on the GPU.

        Args:
            data: CuPy array to rescale.
            in_range/out_range: "image" (data min/max), "dtype"
                (limits of the array dtype), a dtype, or an explicit
                ``(min, max)`` tuple.
        """
        im, iM = RescaleIntensity.as_min_max(data, in_range)
        om, oM = RescaleIntensity.as_min_max(data, out_range, clip_neg=(im >= 0))
        data = data.clip(im, iM)

        @cp.fuse
        def _ops(data, im, iM, om, oM):
            # normalize to [0, 1]
            data = (data - im) / (iM - im)
            # scale to the output range
            data = data * (oM - om) + om
            return data

        return _ops(data, im, iM, om, oM)

    @staticmethod
    def as_min_max(data, range_value, clip_neg=False):
        """
        Return intensity range based on desired value type.

        Args:
            data (np.ndarray): Input data.
            range_value (str/np.dtype/(float, float)): The image range is
                configured by this parameter.
            clip_neg (bool, optional): If True, clip the negative range.

        Returns:
            :rtype: (np.float32, np.float32)

        Raises:
            TypeError: If this function cannot resolve the intensity range pair.
        """
        if range_value == "image":
            # bug fix: np.asscalar was removed in NumPy 1.23; ndarray.item()
            # is the supported replacement.
            m, M = data.min().item(), data.max().item()
        elif isinstance(range_value, tuple):
            # explicit (min, max) pair
            m, M = range_value
        else:
            if range_value == "dtype":
                dtype = np.dtype(data.dtype).type
            else:
                dtype = range_value
            if issubclass(dtype, np.integer):
                info = np.iinfo(dtype)
                m, M = info.min, info.max
            elif issubclass(dtype, np.floating):
                m, M = -1.0, 1.0
            else:
                raise TypeError("unknown data type")
        m = 0 if clip_neg else m
        m, M = np.float32(m), np.float32(M)
        return m, M
if __name__ == "__main__":
    """
    rescale_intensity = RescaleIntensity()
    arr = cp.array([51, 102, 153], dtype=np.uint8)
    print(arr)
    print(type(arr))
    out = rescale_intensity(arr)
    print(out)
    print(type(out))
    print()
    arr = 1.0 * arr
    print(arr)
    print(type(arr))
    out = rescale_intensity(arr)
    print(out)
    print(type(out))
    print()
    print(arr)
    print(type(arr))
    out = rescale_intensity(arr, in_range=(0, 255))
    print(out)
    print(type(out))
    print()
    print(arr)
    print(type(arr))
    out = rescale_intensity(arr, in_range=(0, 102))
    print(out)
    print(type(out))
    print()
    arr = np.array([51, 102, 153], dtype=np.uint8)
    print(arr)
    print(type(arr))
    out = rescale_intensity(arr, out_range=(0, 127))
    print(out)
    print(type(out))
    print()
    """
    # Benchmark + consistency check: run the same 10-pass rescale on CPU
    # (skimage) and GPU (this module) and require near-identical results.
    import imageio
    from numpy.testing import assert_array_almost_equal

    # fixture: a 3-D TIFF stack read from the working directory
    image = imageio.volread("cell_in.tif")
    print("{}, {}".format(image.shape, image.dtype))
    from utoolbox.util.decorator import timeit

    @timeit
    def cpu(image):
        # reference path: 10 alternating passes through skimage
        from skimage.exposure import rescale_intensity
        for _ in range(10):
            result = rescale_intensity(image, out_range=(0, 1))
            result, image = image, result
        return image
    result1 = cpu(image)

    @timeit
    def gpu(image):
        # same 10 alternating passes via this module's RescaleIntensity
        rescale_intensity = RescaleIntensity()
        image = cp.asarray(image)
        for _ in range(10):
            result = rescale_intensity(image, out_range=(0, 1))
            result, image = image, result
        return cp.asnumpy(image)
    result2 = gpu(image)
    imageio.volwrite("cell_out.tif", result2)
    # the two implementations must agree to float precision
    assert_array_almost_equal(result1, result2)
|
import tensorflow as tf
from tensorflow.python.framework import graph_util
class BaseModel:
    """Base class for TF1-style models.

    Owns the global-step and current-epoch counter variables and provides
    checkpoint / frozen-protobuf persistence helpers.  Subclasses must
    implement build_model() and init_saver().
    """

    def __init__(self, config, logger):
        self.config = config
        self.logger = logger
        # init the global step
        self.init_global_step()
        # init the epoch counter
        self.init_current_epoch()

    # save function that saves the checkpoint in the path defined in the config file
    def save_checkpoint(self, sess):
        """Save the session to config["checkpoint_dir"] tagged with the global step."""
        self.logger.logging("flow", "Saving model...")
        self.saver.save(sess, self.config["checkpoint_dir"], self.global_step_tensor)
        self.logger.logging("flow", "Model saved")

    def save_to_protobuf(self, sess, output_node_node, model_path):
        """Freeze the graph at *output_node_node* and write the GraphDef to *model_path*.

        Bug fix: the original had a duplicated assignment
        (``constant_graph = constant_graph = ...``) and an unused
        ``var_list`` local; both removed.
        """
        constant_graph = graph_util.convert_variables_to_constants(
            sess, sess.graph_def, [output_node_node])
        with tf.gfile.FastGFile(model_path, mode='wb') as f:
            f.write(constant_graph.SerializeToString())
        self.logger.logging("flow", "model:{} in protobuf format is saved.".format(model_path))

    # load latest checkpoint from the experiment path defined in the config file
    def load_checkpoint(self, sess):
        """Restore the most recent checkpoint, logging success or failure."""
        latest_checkpoint = tf.train.latest_checkpoint(self.config["checkpoint_dir"])
        if latest_checkpoint:
            self.logger.logging("flow", "Loading model checkpoint {} ...\n".format(latest_checkpoint))
            self.saver.restore(sess, latest_checkpoint)
            self.logger.logging("flow", "model checkpoint loaded.")
        else:
            self.logger.logging("flow", "loading checkpoint failed.")

    # just initialize a tensorflow variable to use it as epoch counter
    def init_current_epoch(self):
        with tf.variable_scope('current_epoch'):
            self.current_epoch_tensor = tf.Variable(0, trainable=False, name='current_epoch')
            self.increment_current_epoch_tensor = tf.assign(
                self.current_epoch_tensor, self.current_epoch_tensor + 1)

    # just initialize a tensorflow variable to use it as global step counter
    def init_global_step(self):
        # DON'T forget to add the global step tensor to the tensorflow trainer
        with tf.variable_scope('global_step'):
            self.global_step_tensor = tf.Variable(0, trainable=False, name='global_step')

    def init_saver(self):
        # just copy the following line in your child class
        # self.saver = tf.train.Saver(max_to_keep=self.config.max_to_keep)
        raise NotImplementedError

    def build_model(self):
        raise NotImplementedError
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 NII.
#
# invenio-iiif-manifest is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""API tests."""
from flask import Flask
from invenio_iiif_manifest import InvenioIIIFManifest
from invenio_iiif_manifest.api import can_generate, can_preview, \
generate_identifier_for_invenio_iiif, generate_identifier_pid_key, \
generate_iiif_manifest
def test_can_generate(app, images_meta1, images_meta2, docx_meta):
    """Manifest generation is possible for image metadata but not for docx."""
    assert can_generate(images_meta1) is True
    assert can_generate(images_meta2) is True
    assert can_generate(docx_meta) is False
def test_can_preview(app, image_path, docx_path):
    """IIIF preview is available for image files but not for docx files."""
    assert can_preview(image_path) is True
    assert can_preview(docx_path) is False
def test_generate_identifier_pid_key(app, images_meta1, pid):
    """Flask-IIIF identifier is built as '<pid>:<filename>'."""
    identifier = generate_identifier_pid_key(pid(), images_meta1[0])
    assert identifier == "1:jpgfile.jpg"
def test_generate_identifier_for_invenio_iiif(app, images_meta1):
    """Invenio-IIIF identifier is built as 'bucket:version:filename'."""
    identifier = generate_identifier_for_invenio_iiif(images_meta1[0])
    assert identifier == "object_bucket:object_version:jpgfile.jpg"
# If you want to below test, you have to do "flask run" and remove comments
# out.
# def test_generate_iiif_manifest(app, pid1_meta_on_db):
# """Test iiif manifest"""
# record_meta = pid1_meta_on_db['_deposit']
# images_meta = pid1_meta_on_db['_files']
# pid = 1
# generate_iiif_manifest(pid, record_meta, images_meta)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 15 21:13:04 2018
@author: HuyNguyen
"""
import pandas
from bs4 import BeautifulSoup
import requests
from copy import deepcopy
import re
import numpy
def get_page(page_link):
    """Download *page_link* and return the raw response body as bytes."""
    response = requests.get(page_link)
    return response.content
def get_table(page):
    """Parse *page* HTML and return the <table class="eventTable"> element."""
    soup = BeautifulSoup(page, "html5lib")
    return soup.find("table", {"class": "eventTable"})
# start from here
def get_odd_table(match):
    """Scrape the correct-score odds table for *match* into a list of rows.

    Each row is [scoreline name, odds cell text, ...]; the catch-all
    "Any Other Score" row is skipped.
    """
    body = get_table(get_page(match)).find("tbody")
    result_array = []
    for table_row in body.find_all("tr"):
        cells = table_row.find_all("td")
        outcome = cells[0].text
        if outcome == "Any Other Score":
            continue
        result_array.append([outcome] + [cell.text for cell in cells[1:]])
    return result_array
def odd_to_prob(odd):
    """Convert a fractional odds string to an implied probability.

    "a/b" yields b/(a+b); a bare integer "a" is treated as "a/1";
    an empty value yields 0.
    """
    if not odd:
        return 0
    if "/" in odd:
        numerator, denominator = (int(part) for part in odd.split("/"))
    else:
        numerator, denominator = int(odd), 1
    return denominator / (numerator + denominator)
def get_prob_table(odd_table):
    """Turn scraped odds rows into a DataFrame of implied probabilities.

    Adds a header row, converts every odds cell via odd_to_prob and splits
    each "Team W-L" scoreline into winner / winner_score / loser_score.
    """
    rows = deepcopy(odd_table)
    header = (["name"]
              + [f"bookie{i}" for i in range(1, len(rows[0]))]
              + ["winner", "winner_score", "loser_score"])
    rows = [header] + rows
    score_pattern = re.compile(r"([A-Za-z ]+) (\d+)-(\d+)")
    for row in rows[1:]:
        # convert every odds cell to a probability
        row[1:] = [odd_to_prob(cell) for cell in row[1:]]
        # split the "Team 2-1" scenario label
        parsed = score_pattern.match(row[0])
        winner, winner_score, loser_score = parsed.group(1, 2, 3)
        row += [winner, int(winner_score), int(loser_score)]
    data = pandas.DataFrame(rows)
    data.columns = data.iloc[0]
    return data.iloc[1:]
def get_bookie_prob_table(prob_table):
    """Normalize every bookie column so its probabilities sum to 1.

    Columns whose probabilities sum to 0 (no odds offered) are left as-is.
    """
    table = prob_table
    for column in table:
        if 'bookie' not in column:
            continue
        total = sum(table[column])
        if total != 0:
            table[column] = table[column] / total
    return table
def adj_prob_table(bookie_prob_table, adj_team, adj_score):
    """Re-label each scoreline after handicapping *adj_team* by *adj_score* goals.

    Adds a ``margin`` column (winner_score - loser_score - adj_score) and an
    ``adj_result`` column naming the outcome after the handicap.  Rows won by
    the other team are left unchanged.

    Bug fix: the original iterated ``range(1, len(table))`` against the
    frame's *labels* with ``.at``, which silently skipped the last row
    (labels run 1..N after get_prob_table drops the header).  Iterating
    ``table.index`` visits every row regardless of labeling.
    """
    table = bookie_prob_table
    teams = list(table.winner.unique())
    teams.remove(adj_team)
    teams.remove("Draw")
    other_team = teams[0]
    table["margin"] = table["winner_score"] - table["loser_score"] - adj_score
    table["adj_result"] = table["winner"]
    for i in table.index:
        if table.at[i, "winner"] == other_team:
            continue
        margin = table.at[i, "margin"]
        if margin == 0:
            table.at[i, "adj_result"] = "Draw"
        elif margin < 0:
            table.at[i, "adj_result"] = other_team
        elif margin > 0:
            table.at[i, "adj_result"] = adj_team
        else:
            # unreachable for integer scores; kept as a loud failure
            print(margin, i)
            raise
    return table
def get_result(adj_prob):
    """Aggregate bookie probabilities per adjusted outcome and renormalize.

    For each distinct adj_result, sums every bookie column over the matching
    rows, averages those per-bookie sums, then rescales the averages so the
    returned dict of outcome -> probability sums to 1.
    """
    table = adj_prob
    final_result = {}
    for scenario in table.adj_result.unique():
        matching = table[table.adj_result == scenario]
        bookie_sums = [sum(matching[col]) for col in matching if "bookie" in col]
        final_result[scenario] = numpy.mean(bookie_sums)
    total = sum(final_result.values())
    for scenario in final_result:
        final_result[scenario] = final_result[scenario] / total
    return final_result
# use this function
def calculate_result(match, adj_team, adj_score):
    """End-to-end pipeline: scrape the correct-score odds for *match*, apply a
    handicap of *adj_score* against *adj_team*, print and return the results.

    Returns the intermediate tables too so callers can inspect each stage.
    """
    link = f"https://www.oddschecker.com/football/world-cup/{match}/correct-score"
    odd_table = get_odd_table(link)
    prob_table = get_prob_table(odd_table)
    bookie_prob_table = get_bookie_prob_table(prob_table)
    adj_prob = adj_prob_table(bookie_prob_table, adj_team, adj_score)
    result = get_result(adj_prob)
    for scenario, probability in result.items():
        print(scenario, probability)
    return odd_table, prob_table, bookie_prob_table, adj_prob, result
# odd_table ,prob_table, bookie_prob_table, adj_prob, result = calculate_result("costa-rica-v-serbia", "Costa Rica", 1.0)
|
## @file taskList.py
# @author Arkin Modi, Leon So, Timothy Choy
# @brief Dict of Tasks
# @date Mar 21, 2020
from task import *
from dict import Dict
## @brief Dict of Tasks
#  @details Inherits all behavior from the project's Dict ADT unchanged;
#  exists to give task collections their own type name.
class TaskList(Dict):
    pass
# Generated by Django 2.1.1 on 2018-10-06 13:40
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: renames the Language model's `lang`
    # field to `name`.  Do not hand-edit generated migrations.

    dependencies = [
        ('catalog', '0003_auto_20181006_1706'),
    ]

    operations = [
        migrations.RenameField(
            model_name='language',
            old_name='lang',
            new_name='name',
        ),
    ]
|
from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.contrib.postgres.fields import JSONField
from polls.models import Poll
class Profile(models.Model):
    """Per-user poll voting and rating state.

    ``voted`` and ``rated`` are JSON mappings holding two lists of poll ids
    under the '+' and '-' keys (see create_user_profile for the initial
    shape).  The four public toggle methods are thin wrappers over one
    shared helper — they were previously four copy-pasted bodies.
    """
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    voted = JSONField(default=dict)
    rated = JSONField(default=dict)

    def __str__(self):
        return self.user.username

    def _toggle(self, poll_id, store, sign, do, undo, undo_opposite):
        """Toggle *poll_id* in ``store[sign]`` and keep Poll counters in sync.

        *do*, *undo* and *undo_opposite* are Poll method names
        (e.g. 'voteYes' / 'unvoteYes' / 'unvoteNo').
        """
        poll = Poll.objects.get(pk=poll_id)
        opposite = '-' if sign == '+' else '+'
        if poll_id in store[sign]:
            # second click on the same button: retract the vote/rating
            store[sign].remove(poll_id)
            getattr(poll, undo)()
        else:
            if poll_id in store[opposite]:
                # switching sides: retract the opposite entry first
                store[opposite].remove(poll_id)
                getattr(poll, undo_opposite)()
            store[sign].append(poll_id)
            getattr(poll, do)()
        self.save()

    def voteYes(self, poll_id):
        """Toggle a 'yes' vote on the poll."""
        self._toggle(poll_id, self.voted, '+', 'voteYes', 'unvoteYes', 'unvoteNo')

    def voteNo(self, poll_id):
        """Toggle a 'no' vote on the poll."""
        self._toggle(poll_id, self.voted, '-', 'voteNo', 'unvoteNo', 'unvoteYes')

    def rateLike(self, poll_id):
        """Toggle a 'like' rating on the poll."""
        self._toggle(poll_id, self.rated, '+', 'rateLike', 'unrateLike', 'unrateDislike')

    def rateDislike(self, poll_id):
        """Toggle a 'dislike' rating on the poll."""
        self._toggle(poll_id, self.rated, '-', 'rateDislike', 'unrateDislike', 'unrateLike')
def create_user_profile(sender, instance, created, **kwargs):
    """post_save hook: create an empty Profile for every newly created User."""
    if created:
        # initialize both JSON stores with empty '+'/'-' lists
        Profile.objects.create(user=instance, voted={'+' : [], '-': []}, rated={'+' : [], '-': []})
# register the hook so every new User automatically gets a Profile
post_save.connect(create_user_profile, sender=User)
|
import functools
from log import log
@log
class RetryOnException:
    """Decorator that retries the wrapped callable when it raises.

    ``retries`` is the number of attempts; the final failure is re-raised.
    NOTE(review): a negative value never reaches 0 and retries forever —
    confirm whether that is intended.  ``self.logger`` is supplied by the
    @log class decorator.
    """

    def __init__(self, retries):
        self._retries = retries

    def __call__(self, function):
        functools.update_wrapper(self, function)

        def wrapper(*args, **kwargs):
            self.logger.info(f"Retries: {self._retries}")
            while self._retries != 0:
                try:
                    # bug fix: the original had an unreachable
                    # `self._retries = 0` statement after this return
                    return function(*args, **kwargs)
                except Exception as err:
                    self.logger.info(f"Error occured: {err}")
                    self._retries -= 1
                    # re-raises `err` once the retry budget is exhausted
                    self._raise_on_condition(self._retries, err)
        return wrapper

    def _raise_on_condition(self, retries, exception):
        """Re-raise *exception* when retries hit 0, else just log the count."""
        if retries == 0:
            raise exception
        else:
            self.logger.info(f"Retries: {retries}")
|
# 3D Spatial Transformer Network
## Introduction
This is the TensorFlow implementation for 2D & 3D Spatial Transformer Network in
[1] Jaderberg M, Simonyan K, et al. Spatial transformer networks[C]. International Conference on Neural Information Processing Systems. MIT Press, 2016:2017-2025.
Re-implemented by Lizhen Duan, Jie Li
Reference: [1] https://github.com/daviddao/spatial-transformer-tensorflow
[2] the code by Xinchen Yan, Arkanath Pathak, Jasmine Hsu, Honglak Lee [I lost the code link, but I know they refer to Orginal implementation in Torch (https://github.com/xcyan/nips16_PTN)]
#### How to use
run cluttered_3Dminist.py for 3D data
run cluttered_minist.py for 2D data
### Installation
* TensorFlow
* matplotlib
### Dataset
The 2D MNIST data is in data/mnist_sequence1_sample_5distortions5x5.npz
The 3D MNIST data is in data/3d-minist/full_dataset_vectors.h5
|
import sys
import warnings
# Strip any sys.path entries that a ROS-generated __init__ injected for
# 'sawyer_control', warning once per removed entry.
matching_entries = [entry for entry in sys.path if 'sawyer_control' in entry]
for entry in matching_entries:
    warnings.warn("Undoing ros generated __init__ for parallel until a better fix is found")
    sys.path.remove(entry)
|
import datetime
from models.base import Base
from models.const import CommonStatus
from peewee import CharField, DateTimeField, IntegerField
class UserPhoneDAO(Base):
    """Peewee DAO mapping a user to a phone number (table ``user_phone``)."""
    user_id = IntegerField(index=True)
    phone = CharField(index=True)
    status = IntegerField(index=True, default=CommonStatus.NORMAL)
    created_at = DateTimeField(default=datetime.datetime.now)
    # NOTE(review): updated_at is only set on row creation; update_status()
    # below does not refresh it — confirm whether that is intended.
    updated_at = DateTimeField(default=datetime.datetime.now)

    class Meta:
        table_name = 'user_phone'

    @classmethod
    def get_by_user_id(cls, user_id: int) -> 'UserPhoneDAO':
        """Return the row for *user_id* (peewee ``get`` raises when absent)."""
        return cls.get(cls.user_id == user_id)

    @classmethod
    def get_by_phone(cls, phone: str) -> 'UserPhoneDAO':
        """Return the row for *phone* (peewee ``get`` raises when absent)."""
        return cls.get(cls.phone == phone)

    def update_status(self, status: int) -> None:
        """Set *status* on this row and persist immediately."""
        self.status = status
        self.save()
|
import json
import logging
import os
import falcon
import requests
from weather import get_weather
# Bot credentials come from the environment; this raises KeyError at import
# time if TELEGRAM_BOT_TOKEN is unset (failing fast is intentional here).
TOKEN = os.environ['TELEGRAM_BOT_TOKEN']
# All Telegram Bot API calls are made relative to this URL.
BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'
class GetMeResource(object):
    """Falcon resource proxying Telegram's getMe endpoint."""

    def on_get(self, req, resp):
        """Forward getMe upstream and mirror status, content type and body."""
        upstream = requests.get(BASE_URL + 'getMe')
        resp.status = str(upstream.status_code) + ' ' + upstream.reason
        resp.content_type = upstream.headers['content-type']
        resp.body = upstream.text
class SetWebhookResource(object):
    """Falcon resource registering this service's webhook URL with Telegram."""

    def on_get(self, req, resp):
        """Forward setWebhook (required ``url`` query param) and mirror the reply."""
        params = {'url': req.get_param('url', True)}
        upstream = requests.get(BASE_URL + 'setWebhook', params=params)
        resp.status = str(upstream.status_code) + ' ' + upstream.reason
        resp.content_type = upstream.headers['content-type']
        resp.body = upstream.text
class WebhookResource(object):
    """Falcon resource receiving Telegram webhook updates via POST."""

    def send_message(self, chat_id, text, reply_id):
        """Send *text* to *chat_id* as a reply to message *reply_id*."""
        if not text:
            logging.error('no text specified')
            return
        params = {'chat_id': str(chat_id),
                  'text': text.encode('utf-8'),
                  'reply_to_message_id': str(reply_id), }
        result = requests.get(BASE_URL + 'sendMessage', params=params)
        # Log the contents of the response.
        logging.info(result.text)

    def on_post(self, req, resp):
        """Handle an incoming Telegram update.

        A shared location triggers a weather report; /hello and /weather
        commands get canned replies; anything else is ignored.
        """
        if req.content_length in (None, 0):
            # Nothing to do
            return
        # Read the request body.
        body = req.stream.read()
        if not body:
            raise falcon.HTTPBadRequest('Empty request body',
                                        'A valid JSON document is required.')
        try:
            content = json.loads(body.decode('utf-8'))
        except (ValueError, UnicodeDecodeError):
            raise falcon.HTTPError(falcon.HTTP_753,
                                   'Malformed JSON',
                                   'Could not decode the request body. The '
                                   'JSON was incorrect or not encoded as '
                                   'UTF-8.')
        # Log the contents of the request.
        logging.info(content)
        # NOTE(review): assumes every update carries a 'message' key;
        # edited_message/channel_post updates would raise KeyError — confirm.
        update_id = content['update_id']
        message = content['message']
        message_id = message.get('message_id')
        date = message.get('date')
        text = message.get('text')
        fr = message.get('from')
        sender = fr.get('first_name', 'stranger')
        chat = message['chat']
        chat_id = chat['id']
        if 'location' in message:
            # Get the user's location.
            latitude = message.get('location')['latitude']
            longitude = message.get('location')['longitude']
            logging.info("Location: {}, {}".format(latitude, longitude))
            w = get_weather(latitude, longitude)
            if w:
                text = ('{}\nTemperature: {} ºC\n'
                        'Humidity: {}%\nPressure: {} hPa').format(
                    w.description, w.temperature, w.humidity, w.pressure)
                self.send_message(chat_id, text, message_id)
            else:
                self.send_message(
                    chat_id,
                    'I cannot find weather information for that location.',
                    message_id)
            return
        if not text:
            logging.info('no text')
            return
        if text.startswith('/'):
            if text == '/hello':
                self.send_message(
                    chat_id, 'Hello {0}'.format(sender), message_id)
            elif text == '/weather':
                self.send_message(
                    chat_id, 'Please share your location with me!', message_id)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.