# coding=utf-8
from flask import Blueprint,jsonify,current_app,request,session
house_blueprint=Blueprint('house',__name__)
from models import Area,Facility,House,HouseImage,User,Order
import json
from status_code import *
from qiniu_sdk import put_qiniu
# Fetch area information, with Redis caching
def get_areas():
area_dict_list=current_app.redis.get('area_list')
if not area_dict_list:
area_list=Area.query.all()
area_dict_list=[area.to_dict() for area in area_list]
current_app.redis.set('area_list',json.dumps(area_dict_list))
else:
        # Values are stored in Redis as strings, so deserialize after reading
area_dict_list=json.loads(area_dict_list)
return area_dict_list
# Fetch facility information, with Redis caching
def get_facilities():
facility_dict_list=current_app.redis.get('facility_list')
if not facility_dict_list:
facility_list=Facility.query.all()
facility_dict_list=[facility.to_dict() for facility in facility_list]
current_app.redis.set('facility_list',json.dumps(facility_dict_list))
else:
facility_dict_list=json.loads(facility_dict_list)
return facility_dict_list
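# get_areas() and get_facilities() above follow the same cache-aside pattern;
# a minimal generic sketch (get_cached_list is a hypothetical helper, not part
# of the original code):
def get_cached_list(cache_key, model):
    cached = current_app.redis.get(cache_key)
    if cached:
        # Redis returns the cached JSON as a string, so deserialize it
        return json.loads(cached)
    rows = [obj.to_dict() for obj in model.query.all()]
    current_app.redis.set(cache_key, json.dumps(rows))
    return rows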
@house_blueprint.route('/area_facility',methods=['GET'])
def newhouse():
    # Query areas
area_dict_list=get_areas()
    # Query facilities
facility_dict_list=get_facilities()
    # Build the result and return it
return jsonify(area=area_dict_list,facility=facility_dict_list)
@house_blueprint.route('/image',methods=['POST'])
def newhouse_image():
    # Receive the house id
house_id=request.form.get('house_id')
    # Receive the uploaded image
f1=request.files.get('house_image')
    # Save the image to Qiniu cloud storage
url=put_qiniu(f1)
    # Save the image record
image=HouseImage()
image.house_id=house_id
image.url=url
image.add_update()
    # Use this image as the house's default image if none is set
house=House.query.get(house_id)
if not house.index_image_url:
house.index_image_url=url
house.add_update()
    # Return the image URL
return jsonify(code=RET.OK,url=current_app.config.get('QINIU_URL')+url)
@house_blueprint.route('/',methods=['POST'])
def newhouse_save():
    # Receive the form data
params=request.form.to_dict()
facility_ids=request.form.getlist('facility')
    # Validate the data
    # Create the house object and save it
house=House()
house.user_id=session['user_id']
house.area_id=params.get('area_id')
house.title=params.get('title')
house.price=params.get('price')
house.address=params.get('address')
house.room_count=params.get('room_count')
house.acreage=params.get('acreage')
house.beds=params.get('beds')
house.unit=params.get('unit')
house.capacity=params.get('capacity')
house.deposit=params.get('deposit')
house.min_days=params.get('min_days')
house.max_days=params.get('max_days')
    # Look up facility objects by their ids
if facility_ids:
facility_list=Facility.query.filter(Facility.id.in_(facility_ids)).all()
house.facilities=facility_list
house.add_update()
    # Return the result
return jsonify(code=RET.OK,house_id=house.id)
@house_blueprint.route('/',methods=['GET'])
def myhouse():
user_id=session['user_id']
user=User.query.get(user_id)
if user.id_name:
        # Real-name verification is complete; query the current user's houses
house_list=House.query.filter(House.user_id==user_id).order_by(House.id.desc())
house_list2=[]
for house in house_list:
house_list2.append(house.to_dict())
return jsonify(code=RET.OK,hlist=house_list2)
else:
        # Real-name verification is not complete
return jsonify(code=RET.USERERR)
@house_blueprint.route('/<int:id>',methods=['GET'])
def house_detail(id):
    # Query the house
house=House.query.get(id)
    # Query the facilities
facility_list=get_facilities()
    # If the house was published by the currently logged-in user, hide the booking button
booking=1
if 'user_id' in session:
if house.user_id==session['user_id']:
booking=0
return jsonify(house=house.to_full_dict(),facility_list=facility_list,booking=booking)
@house_blueprint.route('/index',methods=['GET'])
def index():
    # Check whether a user is logged in
code = RET.DATAERR
user_name=''
if 'user_id' in session:
user=User.query.filter_by(id=session['user_id']).first()
user_name=user.name
code=RET.OK
    # Return the 5 newest houses
hlist=House.query.order_by(House.id.desc())[:5]
hlist2=[house.to_dict() for house in hlist]
    # Look up area information
alist=get_areas()
return jsonify(code=code,name=user_name,hlist=hlist2,alist=alist)
@house_blueprint.route('/search',methods=['GET'])
def search():
    # Receive the query parameters (renamed from `dict`, which shadows the builtin)
    args=request.args
    area_id=int(args.get('aid'))
    begin_date=args.get('sd')
    end_date=args.get('ed')
    sort_key=args.get('sk')
    # Filter by area
hlist=House.query.filter(House.area_id==area_id)
    # Users cannot book their own listings, so exclude houses published by the current user
if 'user_id' in session:
hlist=hlist.filter(House.user_id!=(session['user_id']))
    # Filter by date range: exclude houses with conflicting (non-rejected) orders
order_list=Order.query.filter(Order.status!='REJECTED')
    # Case 1: the order lies entirely inside the requested range (subsumed by cases 3 and 4):
# order_list1=Order.query.filter(Order.begin_date>=begin_date,Order.end_date<=end_date)
    # Case 2: the order spans the whole requested range
order_list2=order_list.filter(Order.begin_date<begin_date,Order.end_date>end_date)
    # Case 3: the order ends inside the requested range
order_list3=order_list.filter(Order.end_date>=begin_date,Order.end_date<=end_date)
    # Case 4: the order begins inside the requested range
order_list4=order_list.filter(Order.begin_date>=begin_date,Order.begin_date<=end_date)
    # Collect the house ids from conflicting orders
house_ids=[order.house_id for order in order_list2]#[1,2,3]
for order in order_list3:
house_ids.append(order.house_id)
for order in order_list4:
if order.house_id not in house_ids:
house_ids.append(order.house_id)
hlist=hlist.filter(House.id.notin_(house_ids))
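    # Note (a sketch, not in the original code): cases 2-4 above are together
    # equivalent to the single interval-overlap test
    #     order_list.filter(Order.begin_date <= end_date, Order.end_date >= begin_date)
    # i.e. an order conflicts whenever its date range overlaps the requested one.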
    # Sort order; defaults to newest first
sort=House.id.desc()
if sort_key=='booking':
sort=House.order_count.desc()
elif sort_key=='price-inc':
sort=House.price.asc()
elif sort_key=='price-des':
sort=House.price.desc()
hlist=hlist.order_by(sort)
hlist2=[]
for house in hlist:
hlist2.append(house.to_dict())
    # Fetch area information if requested
if request.args.get('area','0')=='1':
alist=get_areas()
else:
alist=[]
return jsonify(hlist=hlist2,alist=alist)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import asyncio
import os
from pyppeteer.launcher import launch
from syncer import sync
from wdom.document import get_document
from wdom import server
from ..base import TestCase
server_config = server.server_config
class PyppeteerTestCase(TestCase):
if os.getenv('TRAVIS', False):
wait_time = 0.1
else:
wait_time = 0.05
@classmethod
def setUpClass(cls):
cls.browser = sync(launch(args=['--no-sandbox']))
cls.page = sync(cls.browser.newPage())
@classmethod
def tearDownClass(cls):
sync(cls.browser.close())
def setUp(self):
super().setUp()
self.doc = get_document()
self.root = self.get_elements()
self.doc.body.prepend(self.root)
self.server = server.start_server(port=0)
self.address = server_config['address']
self.port = server_config['port']
self.url = 'http://{}:{}'.format(self.address, self.port)
sync(self.page.goto(self.url))
self.element = sync(self.get_element_handle(self.root))
def tearDown(self):
server.stop_server(self.server)
super().tearDown()
import time
time.sleep(0.01)
def get_elements(self):
raise NotImplementedError
async def get_element_handle(self, elm):
result = await self.page.querySelector(
'[wdom_id="{}"]'.format(elm.wdom_id))
return result
async def get_text(self, elm=None):
elm = elm or self.element
result = await self.page.evaluate('(elm) => elm.textContent', elm)
return result
async def get_attribute(self, name, elm=None):
elm = elm or self.element
result = await self.page.evaluate(
'(elm) => elm.getAttribute("{}")'.format(name), elm)
return result
async def wait(self, timeout=None):
timeout = timeout or self.wait_time
_t = timeout / 10
for _ in range(10):
await asyncio.sleep(_t)
async def wait_for_element(self, elm):
await self.page.waitForSelector(
'[wdom_id="{}"]'.format(elm.wdom_id),
{'timeout': 100},
)
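# A minimal usage sketch (hypothetical subclass, not part of the original
# suite): concrete test cases implement get_elements() to build the DOM tree
# under test.
#
#     from wdom.tag import Div
#
#     class ExampleTestCase(PyppeteerTestCase):
#         def get_elements(self):
#             return Div('hello')
#
#         def test_text(self):
#             self.assertEqual(sync(self.get_text()), 'hello')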
|
# Copyright 2016 - 2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Wen Guan, <wen.guan@cern.ch>, 2016-2017
# - Vincent Garonne, <vincent.garonne@cern.ch>, 2016
# - Joaquin Bogado <jbogado@linti.unlp.edu.ar>, 2018
"""
Methods for accessing object stores.
"""
import boto
import boto.s3.connection
import logging
import traceback
import urlparse
from dogpile.cache import make_region
from dogpile.cache.api import NoValue
from rucio.common import config
from rucio.common import exception
logging.getLogger("boto").setLevel(logging.WARNING)
logging.getLogger("boto.s3.connection").setLevel(logging.WARNING)
REGION = make_region().configure('dogpile.cache.memcached',
expiration_time=3600,
arguments={'url': "127.0.0.1:11211", 'distributed_lock': True})
# For local testing, override the memcached region above with an in-memory backend
REGION = make_region().configure('dogpile.cache.memory',
expiration_time=3600)
def _get_credentials(rse, endpoint):
"""
Pass an endpoint and return its credentials.
:param endpoint: URL endpoint string.
:param rse: RSE name.
:returns: Dictionary of credentials.
"""
key = '%s_%s' % (rse, endpoint)
result = REGION.get(key)
if type(result) is NoValue:
try:
logging.debug("Loading account credentials")
result = config.get_rse_credentials(None)
if result and rse in result:
result = result[rse]
result['is_secure'] = result['is_secure'][endpoint]
REGION.set(key, result)
else:
raise Exception("Failed to load account credentials")
logging.debug("Loaded account credentials")
except KeyError as e:
raise exception.CannotAuthenticate('RSE %s endpoint %s not in rse account cfg: %s' % (rse, endpoint, e))
except:
raise exception.RucioException("Failed to load credentials for RSE(%s) endpoint(%s), error: %s" % (rse, endpoint, traceback.format_exc()))
return result
def _get_connection(rse, endpoint):
"""
Pass an endpoint and return a connection to object store.
:param rse: RSE name.
:param endpoint: URL endpoint string.
:returns: Connection object.
"""
key = "connection:%s_%s" % (rse, endpoint)
result = REGION.get(key)
if type(result) is NoValue:
try:
logging.debug("Creating connection object")
result = None
credentials = _get_credentials(rse, endpoint)
if 'access_key' in credentials and credentials['access_key'] and \
'secret_key' in credentials and credentials['secret_key'] and \
'is_secure' in credentials and credentials['is_secure'] is not None:
parsed = urlparse.urlparse(endpoint)
hostname = parsed.netloc.partition(':')[0]
port = parsed.netloc.partition(':')[2]
result = boto.connect_s3(aws_access_key_id=credentials['access_key'],
aws_secret_access_key=credentials['secret_key'],
host=hostname,
port=int(port),
is_secure=credentials['is_secure'],
calling_format=boto.s3.connection.OrdinaryCallingFormat())
REGION.set(key, result)
logging.debug("Created connection object")
else:
raise exception.CannotAuthenticate("Either access_key, secret_key or is_secure is not defined for RSE %s endpoint %s" % (rse, endpoint))
except exception.RucioException as e:
raise e
except:
raise exception.RucioException("Failed to get connection for RSE(%s) endpoint(%s), error: %s" % (rse, endpoint, traceback.format_exc()))
return result
def _get_bucket(rse, endpoint, bucket_name, operation='read'):
"""
    Pass an endpoint and bucket name and return the bucket object.
    :param rse: RSE name.
    :param endpoint: URL endpoint string.
    :param bucket_name: Bucket name.
    :param operation: 'read' or 'write'.
    :returns: Bucket object.
"""
key = "%s:%s:%s" % (rse, endpoint, bucket_name)
result = REGION.get(key)
if type(result) is NoValue:
try:
logging.debug("Creating bucket object")
result = None
conn = _get_connection(rse, endpoint)
bucket = conn.get_bucket(bucket_name)
if operation == 'read':
if bucket is None:
raise exception.SourceNotFound('Bucket %s not found on %s' % (bucket_name, rse))
else:
result = bucket
REGION.set(key, result)
else:
result = conn.create_bucket(bucket_name)
REGION.set(key, result)
except exception.RucioException as e:
raise e
except:
raise exception.RucioException("Failed to get bucket on RSE(%s), error: %s" % (rse, traceback.format_exc()))
return result
def _get_endpoint_bucket_key(url):
"""
Parse URL.
:param url: URL string.
:returns: endpoint, bucket, key.
"""
try:
parsed = urlparse.urlparse(url)
scheme = parsed.scheme
hostname = parsed.netloc.partition(':')[0]
port = parsed.netloc.partition(':')[2]
endpoint = ''.join([scheme, '://', hostname, ':', port])
while '//' in parsed.path:
parsed = parsed._replace(path=parsed.path.replace('//', '/'))
path = parsed.path
if path.startswith('/'):
path = path[1:]
bucket_name = path.split('/')[0]
key_name = path.replace(bucket_name + '/', '')
return endpoint, bucket_name, key_name
except:
raise exception.RucioException("Failed to parse url %s, error: %s" % (url, traceback.format_exc()))
def connect(rse, url):
"""
connect to RSE.
:param url: URL string.
:param rse: RSE name.
"""
try:
endpoint, bucket_name, key_name = _get_endpoint_bucket_key(url)
conn = _get_connection(rse, endpoint)
conn.create_bucket(bucket_name)
except:
raise exception.RucioException("Failed to connect url %s, error: %s" % (url, traceback.format_exc()))
def get_signed_urls(urls, rse, operation='read'):
"""
Pass list of urls and return their signed urls.
:param urls: A list of URL string.
:param rse: RSE name.
:returns: Dictionary of Signed URLs.
"""
result = {}
for url in urls:
try:
endpoint, bucket_name, key_name = _get_endpoint_bucket_key(url)
signed_url = None
if operation == 'read':
# signed_url = conn.generate_url(3600, 'GET', bucket_name, key_name, query_auth=True, force_http=False)
bucket = _get_bucket(rse, endpoint, bucket_name)
key = bucket.get_key(key_name)
if key is None:
signed_url = exception.SourceNotFound('Key %s not found on %s' % (key_name, endpoint))
else:
try:
signed_url = key.generate_url(3600, 'GET', query_auth=True, merge_meta=False, force_http=False)
except TypeError:
# merge_meta option is not supported
signed_url = key.generate_url(3600, 'GET', query_auth=True, force_http=False)
else:
conn = _get_connection(rse, endpoint)
_get_bucket(rse, endpoint, bucket_name, operation='write')
signed_url = conn.generate_url(3600, 'PUT', bucket_name, key_name, query_auth=True, force_http=False)
result[url] = signed_url
except boto.exception.S3ResponseError as e:
if e.status in [404, 403]:
result[url] = exception.DestinationNotAccessible(e)
else:
result[url] = exception.ServiceUnavailable(e)
except exception.RucioException as e:
result[url] = e
except:
result[url] = exception.RucioException("Failed to get signed url for %s, error: %s" % (url, traceback.format_exc()))
return result
def get_metadata(urls, rse):
"""
Pass list of urls and return their metadata.
:param urls: A list of URL string.
:param rse: RSE name.
    :returns: Dictionary of metadata.
"""
result = {}
for url in urls:
try:
endpoint, bucket_name, key_name = _get_endpoint_bucket_key(url)
bucket = _get_bucket(rse, endpoint, bucket_name)
metadata = None
key = bucket.get_key(key_name)
if key is None:
metadata = exception.SourceNotFound('Key %s not found on %s' % (key_name, endpoint))
else:
metadata = {'filesize': key.size}
result[url] = metadata
except boto.exception.S3ResponseError as e:
if e.status in [404, 403]:
raise exception.DestinationNotAccessible(e)
else:
raise exception.ServiceUnavailable(e)
except exception.RucioException as e:
result[url] = e
except:
result[url] = exception.RucioException("Failed to get metadata for %s, error: %s" % (endpoint, traceback.format_exc()))
return result
def _delete_keys(bucket, keys):
"""
Delete objects in the same bucket.
:param bucket: Bucket object.
:param keys: List of keys.
    :returns: Dictionary of {'status': status, 'output': output}.
"""
result = {}
status = -1
output = None
try:
deleted_result = bucket.delete_keys(keys)
for deleted in deleted_result.deleted:
result[deleted.key] = {'status': 0, 'output': None}
for error in deleted_result.errors:
result[error.key] = {'status': -1, 'output': error.message}
except:
status = -1
output = "Failed to delete keys, error: %s" % (traceback.format_exc())
for key in keys:
if key not in result:
result[key] = {'status': status, 'output': output}
return result
def delete(urls, rse):
"""
Delete objects.
:param urls: A list of URL string.
:param rse: RSE name.
    :returns: Dictionary of {'status': status, 'output': output}.
"""
result = {}
bucket_keys = {}
for url in urls:
try:
endpoint, bucket_name, key_name = _get_endpoint_bucket_key(url)
bucket_key = '%s+%s' % (endpoint, bucket_name)
if bucket_key not in bucket_keys:
bucket_keys[bucket_key] = {}
bucket_keys[bucket_key][key_name] = url
except:
result[url] = {'status': -1, 'output': "Failed to delete url: %s, error: %s" % (url, traceback.format_exc())}
for bucket_key in bucket_keys:
try:
endpoint, bucket_name = bucket_key.split('+')
bucket = _get_bucket(rse, endpoint, bucket_name)
ret = _delete_keys(bucket, list(bucket_keys[bucket_key].keys()))
for key in ret:
result[bucket_keys[bucket_key][key]] = ret[key]
except:
ret = {'status': -1, 'output': "Failed to delete url: %s, error: %s" % (url, traceback.format_exc())}
for key in list(bucket_keys[bucket_key].keys()):
url = bucket_keys[bucket_key][key]
if url not in result:
result[url] = ret
return result
def delete_dir(url_prefix, rse):
"""
Delete objects starting with prefix.
:param url_prefix: URL string.
:param rse: RSE name.
:returns {'status': status, 'output': output}
"""
try:
endpoint, bucket_name, key_name = _get_endpoint_bucket_key(url_prefix)
bucket = _get_bucket(rse, endpoint, bucket_name)
i = 0
keys = []
for key in bucket.list(prefix=key_name):
keys.append(key.name)
i += 1
if i == 1000:
ret = _delete_keys(bucket, keys)
for ret_key in ret:
if ret[ret_key]['status'] != 0:
return ret[ret_key]['status'], ret[ret_key]['output']
i = 0
keys = []
if len(keys):
ret = _delete_keys(bucket, keys)
for ret_key in ret:
if ret[ret_key]['status'] != 0:
return ret[ret_key]['status'], ret[ret_key]['output']
return 0, None
except:
return -1, "Failed to delete dir: %s, error: %s" % (url_prefix, traceback.format_exc())
def rename(url, new_url, rse):
"""
Rename object.
:param url: URL string.
:param new_url: URL string.
:param rse: RSE name.
"""
try:
endpoint, bucket_name, key_name = _get_endpoint_bucket_key(url)
bucket = _get_bucket(rse, endpoint, bucket_name)
key = bucket.get_key(key_name)
if key is None:
raise exception.SourceNotFound('Key %s not found on %s' % (key_name, endpoint))
new_endpoint, new_bucket_name, new_key_name = _get_endpoint_bucket_key(new_url)
if endpoint != new_endpoint:
raise exception.RucioException("New endpont %s is different with old endpoint %s, cannot rename to different OS" % (new_endpoint, endpoint))
key.copy(new_bucket_name, new_key_name)
key.delete()
except boto.exception.S3ResponseError as e:
if e.status in [404, 403]:
raise exception.DestinationNotAccessible(e)
else:
raise exception.ServiceUnavailable(e)
except exception.RucioException as e:
raise e
except:
raise exception.RucioException("Failed to get metadata for %s, error: %s" % (endpoint, traceback.format_exc()))
|
"""
#' The linearized matrix L
#'
#' Function computes the derivative of the model with respect to the between subject variability
#' terms in the model (b's and bocc's) evaluated at
#' a defined point
#' (b_ind and bocc_ind).
#'
#' @inheritParams mf3
#' @param bpop The fixed effects parameter values. Supplied as a vector.
#' @param b_ind The point at which to evaluate the derivative
#' @param bocc_ind The point at which to evaluate the derivative
#' @param poped_db A PopED database.
#'
#' @return As a list:
#' \item{y}{A matrix of size (samples per individual x number of random effects)}
#' \item{poped_db}{A PopED database}
#' @example tests/testthat/examples_fcn_doc/warfarin_optimize.R
#' @example tests/testthat/examples_fcn_doc/examples_LinMatrixL.R
#' @export
#' @keywords internal
## Function translated automatically using 'matlab.to.r()'
## Author: Caiya Zhang, Yuchen Zheng
"""
import numpy as np
from project.gradff import gradff
from project.gradfg import gradfg
def LinMatrixL(model_switch, xt_ind, x, a, bpop, b_ind, bocc_ind, poped_db):
if poped_db["parameters"]["NumRanEff"] == 0:
y = 0
else:
returnArgs = gradff(model_switch, xt_ind, x, a, bpop, b_ind, bocc_ind, poped_db)
grad_ff_tmp = returnArgs[0]
poped_db = returnArgs[1]
y = np.matmul(grad_ff_tmp, gradfg(x, a, bpop, b_ind, bocc_ind, poped_db))
return {"y": y, "poped_db": poped_db}
|
POWER = 0xFF02FD
RED = 0xFF1AE5
GREEN = 0xFF9A65
BLUE = 0xFFA25D
BRIGHTEN = 0xFF3AC5
DIM = 0xFFBA45
WHITE = 0xFF22DD
# TODO Add more supported IR codes
|
from __future__ import division, unicode_literals
from past.utils import old_div
import math
import time
from util import hook, urlnorm, timesince, http
title_length = 80
expiration_period = 60 * 60 * 24 # 1 day
ignored_urls = [urlnorm.normalize("http://google.com")]
# We have a bunch of much better plugins to handle these
ignored_title_urls = [
'vine.co/',
'reddit.com',
'imdb.com',
'liveleak.com',
'twitter.com',
'youtube.com',
'vimeo.com',
'tinyurl.com',
'rottentomatoes.com',
'steampowered.com',
'steamcommunity.com',
'//t.co',
'youtu.be/',
'yooouuutuuube'
]
def db_init(db):
db.execute("create table if not exists urlhistory"
"(chan, url, nick, time)")
db.commit()
def insert_history(db, chan, url, nick):
db.execute("insert into urlhistory(chan, url, nick, time) "
"values(?,?,?,?)", (chan, url, nick, time.time()))
db.commit()
def get_history(db, chan, url):
db.execute("delete from urlhistory where time < ?",
(time.time() - expiration_period,))
return db.execute("select nick, time from urlhistory where "
"chan=? and url=? order by time desc", (chan, url)).fetchall()
def get_title(url):
for ignored_url in ignored_title_urls:
if ignored_url in url:
return None
try:
html = http.get_html(url)
title = html.xpath('/html/head/title')[0]
title = title.text.strip()
except:
return None
return (title[:title_length] + '..') if len(title) > title_length else title
def nicklist(nicks):
nicks.sort(key=lambda n: n.lower())
if len(nicks) <= 2:
return ' and '.join(nicks)
else:
return ', and '.join((', '.join(nicks[:-1]), nicks[-1]))
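# For example, nicklist(['carol', 'alice', 'bob']) returns
# "alice, bob, and carol", and nicklist(['alice', 'bob']) returns
# "alice and bob".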
def format_reply(url, history):
title = get_title(url)
if not history:
return title
last_nick, recent_time = history[0]
last_time = timesince.timesince(recent_time)
hour_span = math.ceil(old_div((time.time() - history[-1][1]), 3600))
hour_span = '%.0f hours' % hour_span if hour_span > 1 else 'hour'
hlen = len(history)
ordinal = ["once", "twice", "%d times" % hlen][min(hlen, 3) - 1]
if len(dict(history)) == 1:
last = "last linked %s ago" % last_time
else:
last = "last linked by %s %s ago" % (last_nick, last_time)
if title:
title = "%s - " % (title)
else:
title = ""
return "%sthat url has been posted %s in the past %s by %s (%s)." % (
title,
ordinal,
hour_span,
nicklist([h[0] for h in history]),
last
)
@hook.regex(r'([a-zA-Z]+://|www\.)[^ ]+')
def urlinput(match, nick='', chan='', db=None, bot=None):
db_init(db)
url = urlnorm.normalize(match.group())
if url not in ignored_urls:
url = url
history = get_history(db, chan, url)
insert_history(db, chan, url, nick)
inp = match.string.lower()
for name in dict(history):
if name.lower() in inp: # person was probably quoting a line
return # that had a link. don't remind them.
return format_reply(url, history)
|
# Generated by Django 2.0.5 on 2018-05-10 08:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inscript', '0002_auto_20180506_1716'),
]
operations = [
migrations.AlterField(
model_name='course',
name='lab_slot',
field=models.CharField(blank=True, choices=[('MAVI7', 'MaVi 7/3'), ('MAVI8', 'MaVi 8+/3'), ('MAVI10', 'MaVi 10/3'), ('MAVI11', 'MaVi 11+/3'), ('MAVI13', 'MaVi 13/3'), ('MAVI14', 'MaVi 14+/3'), ('MAVI16', 'MaVi 16/3'), ('MAVI18', 'MaVi 18/3'), ('LUJU7', 'LuJu 7/3'), ('LUJU8', 'LuJu 8+/3'), ('LUJU10', 'LuJu 10/3'), ('LUJU11', 'LuJu 11+/3'), ('LUJU13', 'LuJu 13/3'), ('LUJU14', 'LuJu 14+/3'), ('LUJU16', 'LuJu 16/3'), ('LUJU18', 'LuJu 18/3'), ('MI7', 'Mi 7/6'), ('MI10', 'Mi 10/6'), ('MI13', 'Mi 13/6'), ('MI14', 'Mi 14+/6')], default='', max_length=10),
),
migrations.AlterField(
model_name='course',
name='time_slot',
field=models.CharField(choices=[('MAVI7', 'MaVi 7/3'), ('MAVI8', 'MaVi 8+/3'), ('MAVI10', 'MaVi 10/3'), ('MAVI11', 'MaVi 11+/3'), ('MAVI13', 'MaVi 13/3'), ('MAVI14', 'MaVi 14+/3'), ('MAVI16', 'MaVi 16/3'), ('MAVI18', 'MaVi 18/3'), ('LUJU7', 'LuJu 7/3'), ('LUJU8', 'LuJu 8+/3'), ('LUJU10', 'LuJu 10/3'), ('LUJU11', 'LuJu 11+/3'), ('LUJU13', 'LuJu 13/3'), ('LUJU14', 'LuJu 14+/3'), ('LUJU16', 'LuJu 16/3'), ('LUJU18', 'LuJu 18/3'), ('MI7', 'Mi 7/6'), ('MI10', 'Mi 10/6'), ('MI13', 'Mi 13/6'), ('MI14', 'Mi 14+/6')], max_length=10),
),
migrations.AlterField(
model_name='teacher',
name='telephone',
field=models.CharField(blank=True, default='', max_length=15),
),
]
|
#!/usr/bin/env python3
import tvm
import math
from tvm import relay
from tvm.relay.testing.lstm import lstm_cell, get_workload
import numpy as np
import sys
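# Usage (a sketch; the script name is whatever this file is saved as):
#     python3 gen_lstm_data.py <num_hidden> <output_file>
# It writes randomly initialized LSTM inputs and weights to <output_file>,
# then appends the reference output computed by the relay VM executor.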
def generate_random_tensor(ty):
return tvm.nd.array(np.random.uniform(-1.0, 1.0, tuple([int(i) for i in ty.shape])).astype(ty.dtype))
def main(argv):
dtype = 'float32'
num_hidden = int(argv[1])
batch_size = 1
input_type = relay.TensorType((batch_size, num_hidden), dtype)
state_type = relay.TupleType([input_type, input_type])
weight_type = relay.TensorType((4*num_hidden, num_hidden), dtype)
bias_type = relay.TensorType((4*num_hidden,), dtype)
# inputs = relay.Var('inputs', input_type)
# states = relay.Var('states', state_type)
# cell_state = relay.Var('cell_state', input_type)
# hidden_state = relay.Var('hidden_state', input_type)
# i2h_weight = relay.Var('i2h_weight', weight_type)
# i2h_bias = relay.Var('i2h_bias', bias_type)
# h2h_weight = relay.Var('h2h_weight', weight_type)
# h2h_bias = relay.Var('h2h_bias', bias_type)
# mod = tvm.IRModule()
# mod['lstm'] = lstm_cell(num_hidden)
# mod['main'] = relay.Function([inputs, cell_state, hidden_state,
# i2h_weight, i2h_bias, h2h_weight, h2h_bias],
# mod.get_global_var('lstm')(inputs, relay.Tuple([cell_state, hidden_state]),
# i2h_weight, i2h_bias, h2h_weight, h2h_bias))
mod, p = get_workload(batch_size, num_hidden)
ex = relay.create_executor('vm', mod=mod, ctx=tvm.cpu(), target='llvm')
i_val = generate_random_tensor(input_type)
cell_val = np.zeros((batch_size, num_hidden), np.float32)
hidden_val = np.zeros((batch_size, num_hidden), np.float32)
i2h_w_val = generate_random_tensor(weight_type)
i2h_b_val = generate_random_tensor(bias_type)
h2h_w_val = generate_random_tensor(weight_type)
h2h_b_val = generate_random_tensor(bias_type)
# order: i_sz, o_sz, input, cell, hidden, i2h_weight, h2h_weight, i2h_bias, h2h_bias
f = open(argv[2], 'wb')
f.write(num_hidden.to_bytes(4, 'little'))
f.write(num_hidden.to_bytes(4, 'little'))
i_val.asnumpy().tofile(f)
cell_val.tofile(f)
hidden_val.tofile(f)
i2h_w_val.asnumpy().tofile(f)
h2h_w_val.asnumpy().tofile(f)
i2h_b_val.asnumpy().tofile(f)
h2h_b_val.asnumpy().tofile(f)
print("Wrote %d bytes" % f.tell())
print("inputs:", i_val)
print("cell:", cell_val)
print("hidden:", hidden_val)
print("i2h_weights:", i2h_w_val)
print("h2h_weights:", h2h_w_val)
print("i2h_bias:", i2h_b_val)
print("h2h_bias:", h2h_b_val)
# i2h_dense = np.add(i2h_w_val.asnumpy().dot(i_val.asnumpy()[0]), i2h_b_val.asnumpy())
# h2h_dense = np.add(h2h_w_val.asnumpy().dot(hidden_val[0]), h2h_b_val.asnumpy())
# print("i2h dense: ", i2h_dense)
# print("h2h dense: ", h2h_dense)
# comb_dense = np.add(i2h_dense, h2h_dense)
# print("combined dense:", comb_dense)
# def sig(x):
# return (1.0 / (1.0 + math.exp(-x)))
# vsig = np.vectorize(sig)
# in_gate = vsig(comb_dense[:num_hidden])
# forget_gate = vsig(comb_dense[num_hidden:num_hidden*2])
# in_trx = np.tanh(comb_dense[num_hidden*2:num_hidden*3])
# out_gate = vsig(comb_dense[num_hidden*3:])
# next_c = np.add(np.multiply(forget_gate, cell_val), np.multiply(in_gate, in_trx))
# next_h = np.multiply(out_gate, np.tanh(next_c))
# print("next_c:", next_c)
# print("next_h:", next_h)
out = ex.evaluate()(i_val, i2h_w_val, i2h_b_val, h2h_w_val, h2h_b_val)
print("output: ", out)
out.asnumpy().tofile(f)
print("Wrote %d bytes" % f.tell())
f.close()
if __name__ == '__main__':
main(sys.argv)
|
from .queued_run_coordinator_daemon import QueuedRunCoordinatorDaemon
|
from app import app
import os
from flask import render_template, flash, redirect, url_for
from app.forms import LoginForm
from config import Config
@app.route('/', methods=['GET'])
@app.route('/index', methods=['GET'])
def index():
"""
service = get_service()
spreadsheet_id = os.environ["GOOGLE_SPREADSHEET_ID"]
range_name = os.environ["GOOGLE_CELL_RANGE"]
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheet_id, range=range_name).execute()
values = result.get('values', [])
"""
result = Config.SERVICE.spreadsheets().values().get(
spreadsheetId=Config.SHEET, range=Config.RANGE_SITUAZIONE).execute()
values = result.get('values', [])
return render_template('index.html', values=values)
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
flash('Login requested for user {}, remember_me={}'.format(
form.username.data, form.remember_me.data))
return redirect(url_for('index'))
return render_template('login.html', title='Sign In', form=form)
|
# lesson3/exercises.py
# Variables
#
# This file contains exercises about Python variables.
# Variables
print("What's a variable?")
# 1) What's a variable?
# Creating web apps, games, and search engines all involve storing and working
# with different types of data.
# They do so using variables. A variable stores a piece of data, and gives it
# a specific name.
# For example:
result = 10
print("5 + 5 = {}".format(result))
# The variable result now stores the number 10. This is called an "assignment".
# Note the ordering of the code: the variable result is on the left side of
# the =, and the number 10 is on the right.
# There are some restrictions on variable names.
# Variable names can't contain spaces. Instead we use underscores ("_").
# They also shouldn't contain special characters like "%".
# Try uncommenting each of the lines below and see what happens.
# Which lines work, which don't?
# my variable = 5
# my_variable = 10
# 9ds = 15
# ds9 = 42
# %correct = 100
# Exercise 1:
# Now, create a variable called my_age and set its value to your current age.
# Finally, print the variable.
# 2) Variable types
print("\nVariable types")
# Numbers are one data type we use in programming. There are many other types
# used in programming. In this lesson, we will learn about four of them.
# The first data type is called an int and you used it to represent the value
# in the my_age variable from exercise 1.
# As the name suggests, ints represent integer numbers like 0, 1, 2, -5, etc.
# Another data type is called a float. Floats also represent numbers, but more
# specifically, they represent decimal numbers, like 1.23, 3.14, -1.00, etc.
# In Python, 5 is an integer, but 5.0 is a float.
# You can use variables to store floats like this:
pi = 3.1415
print("The value of pi is {}".format(pi))
# A third data type is called a boolean.
# A boolean is like a light switch. It can only have two values. Just like a
# light switch can only be on or off, a boolean can only be True or False.
# You can use variables to store booleans like this:
a = True
b = False
print("a is {}, but b is {}".format(a, b))
# Note that both True and False have their first letter capitalized.
# Finally, the last data type we will learn in this lesson is the string.
# A string is used to represent words, phrases or characters. Strings are
# always defined either with a single quote or a double quote.
my_first_string = "Hello World!"
my_second_string = 'Hello World!'
print("This is my first string")
print(my_first_string)
print("This is my second string")
print(my_second_string)
print("They are the same. Woah!")
# The difference between the two is that using double quotes makes it easy to
# include apostrophes.
mystring = "Don't worry about apostrophes"
# Exercise 2
# Create the following variables.
# my_name with the value of your name
# my_age with the value of your age
# ice_cream_price with the value of the price of ice cream (if you don't know, guessing is totally fine)
# like_rain with the value of whether you like rain or not
# Uncomment the line below to print your variables.
# print("Hi. I am {} and I am {} years old. Ice cream now costs {} reais and the fact that i like rain is {}".format(my_name, my_age, ice_cream_price, like_rain))
# Which types were the variables you created?
# 3. Operations with variables.
# You learned how to do arithmetic in lesson 2. Now, you will see that we can
# do the same with variables. Take a look at the file arithmetics.py for some
# exercises about variable arithmetics.
# 4. Reassigning variables
# Now you know how to use variables to store values.
# Say my_int = 7. This doesn't have to be the value of the variable forever.
# You can change the value of a variable by assigning it to a new value, or
# "reassigning" it.
# Exercise 3
# Change the value of my_int from 7 to 3.
# my_int is set to 7 below. What do you think
# will happen if we reset it to 3 and print the result?
my_int = 7
# Change the value of my_int to 3!
# ADD YOUR CODE HERE!
# This line will print the variable my_int to the console:
print("my_int is not 7 anymore, it's {}".format(my_int))
# 5. Variables in terms of other variables.
# Not all variables need to be assigned to constant values. They can also be
# assigned to other variables or expressions. Take a look at the file
# variables.py and fill in the blanks.
# You can also reassign a variable in terms of itself.
x = 15 # This line assigns x to the value of 15.
x = x + 1 # This line computes the expression x+1, and assigns its value to the
# variable x. So, what's the final value of x? Print it and see it
# for yourself.
x += 1 # This line is another way of writing x = x + 1 in Python. What's the
# value of x after this?
# Exercise 4: Bill calculator.
# Take a look at the file bill_calculator.py and fill in the missing code.
# Exercise 5: Area of a circle.
# Take a look at the file areas.py and write your own code!
|
__source__ = 'https://leetcode.com/problems/score-of-parentheses/'
# Time: O(N)
# Space: O(1)
#
# Description: Leetcode # 856. Score of Parentheses
#
# Given a balanced parentheses string S,
# compute the score of the string based on the following rule:
#
# () has score 1
# AB has score A + B, where A and B are balanced parentheses strings.
# (A) has score 2 * A, where A is a balanced parentheses string.
#
#
# Example 1:
#
# Input: "()"
# Output: 1
# Example 2:
#
# Input: "(())"
# Output: 2
# Example 3:
#
# Input: "()()"
# Output: 2
# Example 4:
#
# Input: "(()(()))"
# Output: 6
#
#
# Note:
#
# S is a balanced parentheses string, containing only ( and ).
# 2 <= S.length <= 50
#
import unittest
# 20ms 100%
class Solution(object):
def scoreOfParentheses(self, S):
"""
:type S: str
:rtype: int
"""
ans = bal = 0
for i, x in enumerate(S):
if x == '(':
bal += 1
else:
bal -= 1
if S[i-1] == '(':
ans += 1 << bal
return ans
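# An alternative stack-based solution (a sketch mirroring Approach 2 in the
# notes below; not part of the original submission):
class SolutionStack(object):
    def scoreOfParentheses(self, S):
        stack = [0]  # score of the current frame
        for c in S:
            if c == '(':
                stack.append(0)
            else:
                v = stack.pop()
                stack[-1] += max(2 * v, 1)
        return stack.pop()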
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/score-of-parentheses/solution/
#
Approach 1: Divide and Conquer
Complexity Analysis
Time Complexity: O(N^2), where N is the length of S. An example worst case is (((((((....))))))).
Space Complexity: O(N), the size of the implied call stack.
# 4ms 82.83%
class Solution {
public int scoreOfParentheses(String S) {
return F(S, 0, S.length());
}
private int F(String S, int i, int j) {
//Score of balanced string S[i:j]
int ans = 0, bal = 0;
// Split string into primitives
for (int k = i; k < j; ++k) {
bal += S.charAt(k) == '(' ? 1 : -1;
if (bal == 0) {
if (k - i == 1) ans++;
else ans += 2 * F(S, i+1, k);
i = k+1;
}
}
return ans;
}
}
Approach 2: Stack
Complexity Analysis
Time Complexity: O(N), where N is the length of S.
Space Complexity: O(N), the size of the stack.
# For example, when counting (()(())), our stack will look like this:
#
# [0, 0] after parsing (
# [0, 0, 0] after (
# [0, 1] after )
# [0, 1, 0] after (
# [0, 1, 0, 0] after (
# [0, 1, 1] after )
# [0, 3] after )
# [6] after )
# 4ms 82.83%
class Solution {
public int scoreOfParentheses(String S) {
Stack<Integer> stack = new Stack();
stack.push(0); // The score of the current frame
for (char c: S.toCharArray()) {
if (c == '(') stack.push(0);
else {
int v = stack.pop();
int w = stack.pop();
stack.push(w + Math.max(2 * v, 1));
}
}
return stack.pop();
    }
}
Approach 3: Count Cores
Complexity Analysis
Time Complexity: O(N), where N is the length of S.
Space Complexity: O(1)
# 3ms 100%
# #1. For every ) that immediately follows a (, the answer is 1 << balance,
# as balance is the number of exterior set of parentheses that contains this core.
class Solution {
public int scoreOfParentheses(String S) {
int ans = 0, bal = 0;
for (int i = 0; i < S.length(); ++i) {
if (S.charAt(i) == '(') bal++;
else {
bal--;
if (S.charAt(i - 1) == '(') {
ans += 1 << bal;
}
}
}
return ans;
}
}
'''
|
from django.shortcuts import render
from vianeyRest.models import Usuario,Materia,Persona
from vianeyRest.serializers import UsuarioSerializer,MateriaSerializer,PersonaSerializer
from rest_framework import generics
# Create your views here.
class UsuarioList(generics.ListCreateAPIView):
queryset = Usuario.objects.all()
serializer_class = UsuarioSerializer
class UsuarioDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Usuario.objects.all()
serializer_class = UsuarioSerializer
class MateriaList(generics.ListCreateAPIView):
queryset = Materia.objects.all()
serializer_class = MateriaSerializer
class MateriaDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Materia.objects.all()
serializer_class = MateriaSerializer
class PersonaList(generics.ListCreateAPIView):
queryset = Persona.objects.all()
serializer_class = PersonaSerializer
class PersonaDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Persona.objects.all()
serializer_class = PersonaSerializer
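# A sketch of how these views are typically routed (hypothetical urls.py,
# not part of this file):
#
#     from django.urls import path
#     from vianeyRest import views
#
#     urlpatterns = [
#         path('usuarios/', views.UsuarioList.as_view()),
#         path('usuarios/<int:pk>/', views.UsuarioDetail.as_view()),
#     ]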
|
import datetime
import os
from typing import Dict, List, Optional, Tuple, Union
from prompt_toolkit.clipboard import pyperclip
from pydantic import BaseModel
# config_file = os.path.join(os.path.expanduser("~"), r".v_bank.config")
store_file = os.path.join(os.path.expanduser("~"), ".v_bank_store.json")
class V(BaseModel):
main_key: str
value: str
keys: List[str] = []
_id: datetime.datetime = datetime.datetime.now()
def __repr__(self):
return f"V(main_key={self.main_key}, value={self.value}, ex_keys={self.keys})"
def __str__(self):
return repr(self)
class Bank(BaseModel):
v_set: List[V] = list()
keys: Dict[str, V] = dict()
last: Optional[V] = None
def store(self) -> str:
with open(store_file, "w+", encoding="utf-8") as f:
f.write(self.json(indent=4))
return store_file
@classmethod
def read(cls) -> "Bank":
if os.path.exists(store_file):
return cls.parse_file(store_file)
else:
return cls()
def set_key(self, fields: Tuple[str, ...], force: bool = False) -> Union[str, V]:
if len(fields) == 2:
main_key, value = fields
key = None
elif len(fields) == 3:
key, main_key, value = fields
        else:  # len(fields) > 3 or len(fields) < 2
return "Error: invalid number of fields"
if key in self.keys:
self._delete_v(self.keys[key], force=force)
account = V(main_key=main_key, value=value)
self.v_set.append(account)
if key:
self.keys[key] = account
account.keys.append(key)
        return account
def get(self, key: str) -> Optional[V]:
if self.last and key == self.last.main_key:
pyperclip.pyperclip.copy(self.last.value)
return self.last
if key in self.keys:
stuff = self.keys[key]
            pyperclip.pyperclip.copy(stuff.value)  # copy the stored value, matching the other lookup paths
self.last = stuff
return stuff
else:
for stuff in self.v_set:
if key == stuff.main_key:
pyperclip.pyperclip.copy(stuff.value)
self.last = stuff
return stuff
return None
def find(self, key: str) -> List[V]:
res = []
for _key in self.keys:
if key in _key:
res.append(self.keys[_key])
for account in self.v_set:
if key in account.main_key or key in account.value:
res.append(account)
return res
def delete(self, key: str, force: bool = False) -> str:
if key in self.keys:
self._delete_v(self.keys[key], force)
return "delete"
else:
for stuff in self.v_set:
if key == stuff.main_key or key in stuff.value:
self._delete_v(stuff, force)
return "delete"
def _delete_v(self, stuff: V, force: bool = False) -> bool:
if force or input(f"Are you sure to delete {stuff}? (y/n) ") == "y":
self.v_set.remove(stuff)
for key in stuff.keys:
del self.keys[key]
self.last = None
return True
return False
def clean(self, force: bool = False) -> str:
if force or input("Are you sure to clean value-bank ? (y/n) ") == "y":
self.v_set.clear()
self.keys.clear()
self.last = None
with open(store_file, "w+", encoding="utf-8") as f:
f.write("{}")
return store_file
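# A minimal usage sketch (hypothetical values, not part of the original
# module):
#
#     bank = Bank.read()
#     bank.set_key(("github", "token123"))        # (main_key, value)
#     bank.set_key(("gh", "github", "token123"))  # (extra key, main_key, value)
#     bank.get("gh")    # copies the stored value to the clipboard
#     bank.store()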
|
# TODO: Properly compile this before running on other computers
import pyximport; pyximport.install()
from reversialphazero.command_line_interface import CommandLineInterface
def main():
weights_file = './final-long-running-test/checkpoint-00062.zip'
nn_name = 'reversialphazero.distributed_8_by_8.neural_network.SimpleNeuralNetwork'
command_line_interface = CommandLineInterface(ai_client=True, nn_class_name=nn_name,
ai_client_weights_file=weights_file, name='AI-Client')
command_line_interface.parse_args()
command_line_interface.execute()
if __name__ == "__main__":
main()
|
class TestReport2Splunk(object):
def test_pass(self):
assert True
def test_fail(self):
assert False
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# SKS.SKS.get_interfaces
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinterfaces import IGetInterfaces
from noc.core.text import parse_table
class Script(BaseScript):
name = "SKS.SKS.get_interfaces"
interface = IGetInterfaces
rx_port = re.compile(
r"^(?P<port>(?:Gi|Te|Po)\S+)\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+"
r"(?P<oper_status>Up|Down|Not Present)",
re.MULTILINE | re.IGNORECASE,
)
rx_port1 = re.compile(
r"^(?P<port>(?:Gi|Te|Po)\S+)\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+" r"(?P<admin_status>Up|Down)",
re.MULTILINE | re.IGNORECASE,
)
rx_descr = re.compile(
r"^(?P<port>(?:Gi|Te|Po)\S+)\s+(?P<descr>.+)$", re.MULTILINE | re.IGNORECASE
)
rx_vlan = re.compile(
r"^\s+(?P<vlan_id>\d+)\s+\S+\s+(?P<type>Untagged|Tagged)\s+" r"(?P<membership>\S+)\s*\n",
re.MULTILINE,
)
rx_vlan_ipif = re.compile(
r"^(?P<address>\S+)\s+vlan\s*(?P<vlan_id>\d+)\s+" r"(?:Static|DHCP)\s+Valid"
)
rx_mac = re.compile(r"^System MAC Address:\s+(?P<mac>\S+)", re.MULTILINE)
rx_enabled = re.compile(
r"^\s*(?P<port>(?:Gi|Te|Po)\S+)\s+Enabled", re.MULTILINE | re.IGNORECASE
)
rx_lldp = re.compile(r"^(?P<port>(?:Gi|Te|Po)\S+)\s+(?:Rx|Tx)", re.MULTILINE | re.IGNORECASE)
rx_iface = re.compile(
r"^(?P<ifname>\S+\d) is( administratively)? "
r"(?P<admin_status>up|down), "
r"line protocol is (?P<oper_status>up|down)\s*\n"
r"^\s+Ifindex is (?P<snmp_ifindex>\d+).*\n"
r"(^\s+Description: (?P<descr>.+?)\s*\n)?"
r"^\s+Hardware is (?P<hardware>\S+)"
r"(, [Aa]ddress is (?P<mac>\S+)\s*\(.+\))?\s*\n"
r"(^\s+Interface address is (?P<ip>\S+)\s*\n)?"
r"^\s+MTU (?P<mtu>\d+) bytes.+\n"
r"(^\s+Encapsulation .+\n)?"
r"(^\s+Members in this Aggregator: (?P<agg_list>.+)\n)?",
re.MULTILINE,
)
IFTYPES = {
"100BASE-TX": "physical",
"Giga-TX": "physical",
"Giga-FX": "physical",
"Giga-FX-SFP": "physical",
"Giga-Combo-TX": "physical",
"Giga-Combo-FX": "physical",
"10Giga-FX": "physical",
"EtherSVI": "SVI",
"PortAggregator": "aggregated",
"Null": "null",
}
def get_gvrp(self):
try:
v = self.cli("show gvrp configuration")
if "GVRP Feature is currently Disabled" not in v:
return self.rx_enabled.findall(v)
except self.CLISyntaxError:
return []
return []
def get_stp(self):
try:
v = self.cli("show spanning-tree")
return self.rx_enabled.findall(v)
except self.CLISyntaxError:
return []
def get_ctp(self):
try:
v = self.cli("show loopback-detection")
if "Loopback detection: Disabled" not in v:
return self.rx_enabled.findall(v)
except self.CLISyntaxError:
return []
return []
def get_lldp(self):
try:
v = self.cli("show lldp configuration")
if "LLDP state: Enabled" in v:
return self.rx_lldp.findall(v)
except self.CLISyntaxError:
return []
return []
def get_old_sks(self, c):
interfaces = []
descr = []
adm_status = []
switchport_support = True
gvrp = self.get_gvrp()
stp = self.get_stp()
ctp = self.get_ctp()
lldp = self.get_lldp()
for line in c.split("\n"):
match = self.rx_descr.match(line.strip())
if match:
if match.group("port") == "Port":
continue
descr += [match.groupdict()]
for line in self.cli("show interfaces configuration").split("\n"):
match = self.rx_port1.match(line.strip())
if match:
adm_status += [match.groupdict()]
for match in self.rx_port.finditer(self.cli("show interfaces status")):
ifname = match.group("port")
if ifname.startswith("Po"):
iftype = "aggregated"
else:
iftype = "physical"
for i in adm_status:
if ifname == i["port"]:
st = bool(i["admin_status"] == "Up")
break
iface = {
"name": ifname,
"type": iftype,
"admin_status": st,
"oper_status": match.group("oper_status") == "Up",
"enabled_protocols": [],
"subinterfaces": [],
}
if ifname in gvrp:
iface["enabled_protocols"] += ["GVRP"]
if ifname in stp:
iface["enabled_protocols"] += ["STP"]
if ifname in ctp:
iface["enabled_protocols"] += ["CTP"]
if ifname in lldp:
iface["enabled_protocols"] += ["LLDP"]
sub = {
"name": ifname,
"admin_status": st,
"oper_status": match.group("oper_status") == "Up",
"enabled_afi": ["BRIDGE"],
"tagged_vlans": [],
}
for i in descr:
if ifname == i["port"]:
iface["description"] = i["descr"]
sub["description"] = i["descr"]
break
if switchport_support:
                # Firmware 1.5.11.3 supports "show interfaces switchport", but 1.5.3 does not
try:
s = self.cli("show interfaces switchport %s" % ifname)
for match1 in self.rx_vlan.finditer(s):
vlan_id = match1.group("vlan_id")
if match1.group("membership") == "System":
continue
if match1.group("type") == "Untagged":
sub["untagged_vlan"] = int(vlan_id)
else:
sub["tagged_vlans"] += [int(vlan_id)]
except self.CLISyntaxError:
self.logger.info("Model not supported switchport information")
switchport_support = False
iface["subinterfaces"] += [sub]
interfaces += [iface]
mac = self.scripts.get_chassis_id()[0]["first_chassis_mac"]
for line in self.cli("show ip interface").split("\n"):
match = self.rx_vlan_ipif.match(line.strip())
if match:
ifname = "vlan" + match.group("vlan_id")
iface = {
"name": ifname,
"type": "SVI",
"admin_status": True,
"oper_status": True,
"mac": mac,
"subinterfaces": [
{
"name": ifname,
"admin_status": True,
"oper_status": True,
"mac": mac,
"enabled_afi": ["IPv4"],
"ipv4_addresses": [match.group("address")],
"vlan_ids": [int(match.group("vlan_id"))],
}
],
}
interfaces += [iface]
# Not implemented
"""
for l in self.cli("show ipv6 interface").split("\n"):
continue
"""
return interfaces
def get_new_sks(self):
interfaces = []
for match in self.rx_iface.finditer(self.cli("show interface")):
iface = {
"name": match.group("ifname"),
"type": self.IFTYPES[match.group("hardware")],
"admin_status": match.group("admin_status") == "up",
"oper_status": match.group("oper_status") == "up",
"snmp_ifindex": match.group("snmp_ifindex"),
}
sub = {
"name": match.group("ifname"),
"admin_status": match.group("admin_status") == "up",
"oper_status": match.group("oper_status") == "up",
"mtu": match.group("mtu"),
}
if iface["type"] == "physical":
sub["enabled_afi"] = ["BRIDGE"]
c = self.cli("show vlan interface %s" % iface["name"])
t = parse_table(c, allow_wrap=True, n_row_delim=",")
for i in t:
if i[1] == "Access":
sub["untagged_vlan"] = int(i[4])
elif i[1] == "Trunk":
sub["untagged_vlan"] = int(i[2])
sub["tagged_vlans"] = self.expand_rangelist(i[3])
else:
# Need more examples
raise self.NotSupportedError()
if iface["type"] == "aggregated" and match.group("agg_list"):
for i in match.group("agg_list").split():
ifname = self.profile.convert_interface_name(i)
for agg_iface in interfaces:
if agg_iface["name"] == ifname:
agg_iface["aggregated_interface"] = iface["name"]
break
if iface["name"].startswith("VLAN"):
sub["vlan_ids"] = [iface["name"][4:]]
if match.group("descr"):
iface["description"] = match.group("descr")
sub["description"] = match.group("descr")
if match.group("mac"):
iface["mac"] = match.group("mac")
sub["mac"] = match.group("mac")
if match.group("ip"):
sub["ip_addresses"] = [match.group("ip")]
sub["enabled_afi"] = ["IPv4"]
iface["subinterfaces"] = [sub]
interfaces += [iface]
return interfaces
def execute_cli(self):
try:
c = self.cli("show interfaces description")
except self.CLISyntaxError:
c = None
if c:
interfaces = self.get_old_sks(c)
else:
interfaces = self.get_new_sks()
return [{"interfaces": interfaces}]
|
from flask import Flask, request, jsonify
from keras.preprocessing import image
from itertools import compress
from io import BytesIO
from keras_applications import inception_v3
import base64, json, requests, numpy as np
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
@app.route('/hello/', methods=['GET', 'POST'])
def hello_world():
return 'Hello, World!'
@app.route('/homeappliance/predict/', methods=['POST'])
def home_appliance():
img = image.img_to_array(image.load_img(BytesIO(base64.b64decode(request.form['b64'])),
target_size=(224, 224))) / 255.
payload = {
"instances": [{'input_image': img.tolist()}]
}
r = requests.post('http://163.122.226.25:9001/v1/models/ApplianceDamageAnalyzer:predict', json=payload)
classes = ['building', 'minor', 'moderate', 'nodamage', 'severe', 'vehicle']
pred = json.loads(r.content.decode('utf-8'))
return jsonify(inception_v3.decode_predictions(np.array(pred['predictions'])))
@app.route('/vehiclebuilding/predict/', methods=['POST'])
def vehicle_building():
img = image.img_to_array(image.load_img(BytesIO(base64.b64decode(request.form['b64'])),
target_size=(224, 224))) / 255.
payload = {
"instances": [{'input_image': img.tolist()}]
}
r = requests.post('http://163.122.226.25:9000/v1/models/DamageAnalyzer:predict', json=payload)
classes = ['building', 'minor', 'moderate', 'nodamage', 'severe', 'vehicle']
pred = json.loads(r.content.decode('utf-8'))
# filtr = np.vectorize(lambda x: 1 if x > 0.5 else 0)(pred['predictions'])[0]
# response = {'predicitons': list(compress(classes, filtr))}
sorted_preds = list(zip(classes, np.array(pred['predictions'][0]).tolist()))
sorted_preds.sort(key=lambda x: -x[1])
return jsonify(sorted_preds)
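# A client-side sketch for exercising these endpoints (hypothetical host and
# file name):
#
#     import base64, requests
#     with open('damage.jpg', 'rb') as fh:
#         b64 = base64.b64encode(fh.read()).decode()
#     r = requests.post('http://localhost:5000/vehiclebuilding/predict/',
#                       data={'b64': b64})
#     print(r.json())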
|
import pyaudio
import numpy as np
from math import pi, sin
from pygame import midi
from itertools import count
midi.init()
mi = midi.Input(device_id=midi.get_default_input_id())
st = pyaudio.PyAudio().open(44100, 1, pyaudio.paInt16, output=True, frames_per_buffer=256)
try:
    # nd maps a MIDI note number to a generator yielding that note's sine samples
    nd = {}
    while True:
        # Mix 256 samples from every active oscillator and write them to the stream
        if nd:
            st.write(np.int16([sum(int(next(osc) * 32767) for _, osc in nd.items())
                               for _ in range(256)]).tobytes())
        if mi.poll():
            for (s, n, v, _), _ in mi.read(16):
                if s == 0x80 and n in nd:  # note-off: drop the oscillator
                    del nd[n]
                elif s == 0x90 and n not in nd:  # note-on: start a sine generator
                    nd[n] = (sin(c) * v * 0.1 / 127
                             for c in count(0, (2 * pi * midi.midi_to_frequency(n)) / 44100))
except KeyboardInterrupt:
    mi.close()
    st.close()
|
lower_bound = '236491'
upper_bound = '713787'
# first formulate password rules
def test_increase(password: str) -> bool:
return password == ''.join(sorted(password))
def test_length(password: str) -> bool:
return len(password) == 6
def test_range(password: str) -> bool:
return int(password) in range(int(lower_bound), int(upper_bound)+1)
def test_adjacency(password: str) -> bool:
pw_list = list(password)
last_digit = password[0]
matches = {}
digit_counter = 1
for digit in pw_list[1:len(pw_list)]:
if digit == last_digit:
digit_counter += 1
matches[digit] = digit_counter
else:
digit_counter = 1
last_digit = digit
if len(matches) == 0:
return False
result = any(value == 2 for value in matches.values())
return result
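# An equivalent, more compact adjacency check using itertools.groupby
# (a sketch, not part of the original solution): among the monotonically
# increasing candidates, part 2 needs at least one run of exactly two
# identical adjacent digits.
from itertools import groupby

def test_adjacency_groupby(password: str) -> bool:
    return any(len(list(group)) == 2 for _, group in groupby(password))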
if __name__ == '__main__':
possible_combinations = [str(i).zfill(6) for i in range(int(lower_bound), int(upper_bound)+1)]
solution_sorted = list(filter(test_increase, possible_combinations))
solution_adjacent = list(filter(test_adjacency, solution_sorted))
print(len(solution_adjacent))
|
# -*- coding: utf-8 -*-
from setuptools import setup
packages = \
['snsql',
'snsql._ast',
'snsql._ast.expressions',
'snsql.reader',
'snsql.sql',
'snsql.sql._mechanisms',
'snsql.sql.parser',
'snsql.sql.reader',
'snsql.xpath',
'snsql.xpath.parser']
package_data = \
{'': ['*']}
install_requires = \
['PyYAML>=5.4.1,<6.0.0',
'antlr4-python3-runtime==4.8',
'graphviz>=0.17,<0.18',
'opendp>=0.3.0,<0.4.0',
'pandasql>=0.7.3,<0.8.0']
setup_kwargs = {
'name': 'smartnoise-sql',
'version': '0.2.1.1',
'description': 'Differentially Private SQL Queries',
'long_description': '[](https://opensource.org/licenses/MIT) [](https://www.python.org/)\n\n<a href="https://smartnoise.org"><img src="https://github.com/opendp/smartnoise-sdk/raw/main/images/SmartNoise/SVG/Logo%20Mark_grey.svg" align="left" height="65" vspace="8" hspace="18"></a>\n\n## SmartNoise SQL\n\nDifferentially private SQL queries. Tested with:\n* PostgreSQL\n* SQL Server\n* Spark\n* Pandas (SQLite)\n* PrestoDB\n\nSmartNoise is intended for scenarios where the analyst is trusted by the data owner. SmartNoise uses the [OpenDP](https://github.com/opendp/opendp) library of differential privacy algorithms.\n\n## Installation\n\n```\npip install smartnoise-sql\n```\n\n## Querying a Pandas DataFrame\n\nUse the `from_df` method to create a private reader that can issue queries against a pandas dataframe.\n\n```python\nimport snsql\nfrom snsql import Privacy\nimport pandas as pd\nprivacy = Privacy(epsilon=1.0, delta=0.01)\n\ncsv_path = \'PUMS.csv\'\nmeta_path = \'PUMS.yaml\'\n\npums = pd.read_csv(csv_path)\nreader = snsql.from_df(pums, privacy=privacy, metadata=meta_path)\n\nresult = reader.execute(\'SELECT sex, AVG(age) AS age FROM PUMS.PUMS GROUP BY sex\')\n```\n\n## Querying a SQL Database\n\nUse `from_connection` to wrap an existing database connection.\n\n```python\nimport snsql\nfrom snsql import Privacy\nimport psycopg2\n\nprivacy = Privacy(epsilon=1.0, delta=0.01)\nmeta_path = \'PUMS.yaml\'\n\npumsdb = psycopg2.connect(user=\'postgres\', host=\'localhost\', database=\'PUMS\')\nreader = snsql.from_connection(pumsdb, privacy=privacy, metadata=meta_path)\n\nresult = reader.execute(\'SELECT sex, AVG(age) AS age FROM PUMS.PUMS GROUP BY sex\')\n```\n\n## Communication\n\n- You are encouraged to join us on [GitHub Discussions](https://github.com/opendp/opendp/discussions/categories/smartnoise)\n- Please use [GitHub Issues](https://github.com/opendp/smartnoise-sdk/issues) for bug reports and feature requests.\n- For other requests, including security issues, please contact us at [smartnoise@opendp.org](mailto:smartnoise@opendp.org).\n\n## Releases and Contributing\n\nPlease let us know if you encounter a bug by [creating an issue](https://github.com/opendp/smartnoise-sdk/issues).\n\nWe appreciate all contributions. Please review the [contributors guide](../contributing.rst). We welcome pull requests with bug-fixes without prior discussion.\n\nIf you plan to contribute new features, utility functions or extensions to this system, please first open an issue and discuss the feature with us.',
'author': 'SmartNoise Team',
'author_email': 'smartnoise@opendp.org',
'maintainer': None,
'maintainer_email': None,
'url': 'https://smartnoise.org',
'packages': packages,
'package_data': package_data,
'install_requires': install_requires,
'python_requires': '>3.6,<3.11',
}
setup(**setup_kwargs)
|
import numpy as np
import sys
import os
# Handle import of module fluxions differently if module
# is being loaded as __main__ or a module in a package.
if __name__ == '__main__':
cwd = os.getcwd()
os.chdir('../..')
import fluxions as fl
os.chdir(cwd)
else:
import fluxions as fl
# *************************************************************************************************
def report_success():
"""Report that a test was successful"""
test_name = sys._getframe(1).f_code.co_name
print(f'{test_name:25}: **** PASS ****')
def test_basic_usage():
"""Test basic usage of Fluxions objects"""
# Create a variable, x
x = fl.Var('x', 1.0)
#f0 = x - 1
f0 = x - 1
assert(f0.val({'x':1}) == 0)
assert(f0.diff({'x':1}) == 1)
var_tbl = {'x':1}
seed_tbl = {'x':1}
val, diff = f0(var_tbl, seed_tbl)
assert val == 0
assert diff == 1
assert repr(f0) == "Subtraction(Var(x, 1.0), Const(1.0))"
# f1(x) = 5x
f1 = 5 * x
assert(f1.shape() == (1, 1))
# Evaluate f1(x) at the bound value of x
assert(f1() == (5.0, 5.0))
assert(f1(None)==(5.0, 5.0))
assert(f1(1,1) == (5.0, 5.0))
assert(f1(np.array(1),np.array(1)) == (5.0, 5.0))
# Evaluate f1(x) using function calling syntax
assert(f1(2) == (10.0, 5.0))
# Evaluate f1(x) using dictionary binding syntax
assert(f1.val({'x':2}) == 10)
assert(f1.diff({'x':2}) == 5)
assert(f1({'x':2}) == (10.0, np.array([5.])))
assert repr(f1) == "Multiplication(Var(x, 1.0), Const(5.0))"
# f2(x) = 1 + (x * x)
f2 = 1 + x * x
assert(f2(4.0) == (17.0, 8.0))
assert(f2.val({'x':2}) == 5)
assert(f2.diff({'x':3}) == 6)
# f3(x) = (1 + x)/(x * x)
f3 = (1 + x) / (x * x)
assert(f3.val({'x':2}) == 0.75)
assert(f3.diff({'x':2}) == -0.5)
assert repr(f3) == "Division(Addition(Var(x, 1.0), Const(1.0)), Multiplication(Var(x, 1.0), Var(x, 1.0)))"
# f4(x) = (1 + 5x)/(x * x)
f4 = (1 + 5 * x) / (x * x)
assert(f4.val({'x':2}) == 2.75)
assert(f4.diff({'x':2}) == -1.5)
# Take a power
f5 = fl.Power(x, 2)
assert(f5.val(8) == 64)
assert(f5.diff(8) == 16)
assert(f5() == (1.0, 2.0))
assert(f5(1) == (1.0, 2.0))
assert(f5({'x':1}) == (1.0, 2.0))
assert repr(f5) == "Power(Var(x, 1.0), 2)"
#check assignment
a = fl.Fluxion()
b = fl.Unop(a)
c = fl.Var('x')
assert(c.diff(0) == 1)
assert(c.diff({'x':1}) == 1)
assert(c.diff({'x':1},{'x':2}) == 2)
assert(np.array_equal(c.diff({'x':1,'y':1},{'x':2,'y':1}), np.array([[2., 0.]])))
assert(c(1)==(1, np.array([1])))
#check division
f6 = 1/x
assert(f6.val({'x':1,'y':1}) == 1)
assert(np.array_equal(f6.diff({'x':1,'y':1}),np.array([[-1., 0.]])))
#check subtraction and division
f7 = (1 - x + 1 - 1) / ((x * x)/1)
assert(f7.val({'x':2}) == -0.25)
assert(f7.diff({'x':2}) == 0)
# check negation
f8 = -x
assert(f8.val({'x':2}) == -2)
assert(f8.diff({'x':2}) == -1)
y = fl.Var('y')
f9 = -(x * y)
assert(f9.val({'x':-2, 'y':3}) == 6)
val, diff = f9(1,1,1,1)
assert(val == np.array([[-1.]]))
assert(val == np.array([[-1., -1.]])).all()
# Report results
report_success()
def test_basics_vectors():
"""Test using Fluxions objects with vector inputs"""
# Create some vectors
n = 10
xs = np.expand_dims(np.linspace(0,1,num=n), axis=1)
ys = np.linspace(1,2,num=n)
ys_ex = np.expand_dims(np.linspace(1,2,num=n), axis=1)
# Create variables x and y bound to vector values
x = fl.Var('x', xs)
y = fl.Var('y', ys)
# f1(x) = 5x
f1 = 5 * x
assert(f1.val(xs) == 5*xs).all()
assert(f1.diff({'x':xs}) == 5.0*np.ones(np.shape(xs))).all()
val,diff = f1(ys)
assert(val == 5.0*ys_ex).all()
assert(diff == 5.0*np.ones(np.shape(xs))).all()
# f2(x) = 1 + (x * x)
f2 = 1 + x * x
assert(f2.val({'x':xs}) == 1 + np.power(xs,2)).all()
assert(f2.diff({'x':xs}) == 2.0*xs).all()
# f3(y) = (1 + y)/(y * y)
f3 = (1 + y) / (y * y)
assert(f3.val({'y':ys}) == np.divide(1+ys_ex,np.power(ys_ex,2))).all()
assert np.isclose(f3.diff({'y':ys_ex}), np.divide(-2-ys_ex,np.multiply(np.power(ys_ex,2),ys_ex))).all()
# f(x) = (1 + 5x)/(x * x)
f4 = (1 + 5*x) / (x * x)
assert(f4.val({'x':ys}) == np.divide(1+5*ys_ex,np.power(ys_ex,2))).all()
assert np.isclose(f4.diff({'x':ys}),np.divide(-2-5*ys_ex,np.multiply(np.power(ys_ex,2),ys_ex))).all()
# f5(x,y) = 5x+y
f5 = 5 * x + y
var_tbl_scalar = {'x':2, 'y':3}
var_tbl_vector = {'x':xs, 'y':xs}
assert(f5.val(var_tbl_scalar) == 13)
assert(f5.diff(var_tbl_scalar) == np.array([5, 1])).all()
assert(f5.val(var_tbl_vector) == 5*xs + xs).all()
assert(f5.diff(var_tbl_vector) == np.asarray([np.array([5, 1])]*n)).all()
# f(x,y) = 5xy
f6 = 5 * x * y
assert(f6.val(var_tbl_scalar) == 30)
assert(f6.diff(var_tbl_scalar) == np.array([15, 10])).all()
assert(f6.val(var_tbl_vector) == np.multiply(5*xs,xs)).all()
assert(f6.diff(var_tbl_vector) == np.transpose([5*xs,5*xs])).all()
# f(x,y,z) = 3x+2y+z
z = fl.Var('z')
f7 = 3 * x + 2 * y + z
var_tbl_scalar = {'x':1,'y':1,'z':1}
assert(f7.val(var_tbl_scalar) == 6)
assert(f7.diff(var_tbl_scalar) == np.array([3, 2, 1])).all()
var_tbl_vector = {'x':xs,'y':xs,'z':xs}
assert(f7.val(var_tbl_vector) == 3*xs + 2*xs + xs).all()
assert(f7.diff(var_tbl_vector) == np.asarray([np.array([3, 2, 1])]*10)).all()
var_tbl_vector = {'z':xs}
f7.val(var_tbl_vector)
assert(f7.val(var_tbl_vector) == 3*xs + 2*xs + xs+2).all()
# f(x,y,z) = (3x+2y+z)/xyz
f8 = (x * 3 + 2 * y + z)/(x * y * z)
assert(f8.val(var_tbl_scalar) == 6)
assert(f8.diff(var_tbl_scalar) == np.array([-3., -4., -5.])).all()
    # Rebind 'x', 'y', and 'z' to the values in ys (slightly tricky!)
var_tbl_vector = {'x':ys,'y':ys,'z':ys}
assert(f8.val(var_tbl_vector) == (3*ys_ex + 2*ys_ex + ys_ex)/(ys_ex*ys_ex*ys_ex)).all()
assert np.isclose(f8.diff(var_tbl_vector),
np.transpose([-3*ys/np.power(ys,4), -4*ys/np.power(ys,4), -5*ys/np.power(ys,4)])).all()
#f(x,y) = xy
f9 = y*x
assert(f9.val({'x':0,'y':0,'z':1})==0).all()
assert(f9.diff({'x':0,'y':0,'z':1})==np.asarray([np.array([0, 0, 0])])).all()
# Report results
report_success()
# Run the test
test_basic_usage()
test_basics_vectors()
|
"""
MPP Solar Inverter Command Library
Library of utilities and helpers for MPP Solar PIP-4048MS inverters
mpputils.py
"""
import logging
from .mppcommands import mppCommands
from .mppcommands import NoDeviceError
logger = logging.getLogger()
def getVal(_dict, key, ind=None):
if key not in _dict:
return ""
if ind is None:
return _dict[key]
else:
return _dict[key][ind]
class mppUtils:
"""
MPP Solar Inverter Utility Library
"""
def __init__(self, serial_device=None, baud_rate=2400):
if (serial_device is None):
raise NoDeviceError("A serial device must be supplied, e.g. /dev/ttyUSB0")
self.mp = mppCommands(serial_device, baud_rate)
self._serial_number = None
def getKnownCommands(self):
return self.mp.getKnownCommands()
def getResponseDict(self, cmd):
return self.mp.execute(cmd).response_dict
def getResponse(self, cmd):
return self.mp.execute(cmd).response
def getSerialNumber(self):
if self._serial_number is None:
response = self.mp.execute("QID").response_dict
self._serial_number = response["serial_number"][0]
return self._serial_number
def getFullStatus(self):
"""
Helper function that returns all the status data
"""
status = {}
# serial_number = self.getSerialNumber()
data = self.mp.execute("Q1").response_dict
data.update(self.mp.execute("QPIGS").response_dict) # TODO: check if this actually works...
        # Need to get 'Parallel' info, but don't know what the parallel number for the correct inverter is...
# parallel_data = self.mp.getResponseDict("QPGS0")
# This 'hack' only works for 2 inverters in parallel.
# if parallel_data['serial_number'][0] != self.getSerialNumber():
# parallel_data = self.mp.getResponseDict("QPGS1")
# status_data.update(parallel_data)
items = ['SCC Flag', 'AllowSccOnFlag', 'ChargeAverageCurrent', 'SCC PWM temperature',
'Inverter temperature', 'Battery temperature', 'Transformer temperature',
'Fan lock status', 'Fan PWM speed', 'SCC charge power', 'Sync frequency',
'Inverter charge status', 'AC Input Voltage', 'AC Input Frequency',
'AC Output Voltage', 'AC Output Frequency', 'AC Output Apparent Power',
'AC Output Active Power', 'AC Output Load', 'BUS Voltage', 'Battery Voltage',
'Battery Charging Current', 'Battery Capacity', 'Inverter Heat Sink Temperature',
'PV Input Current for Battery', 'PV Input Voltage', 'Battery Voltage from SCC',
'Battery Discharge Current']
for item in items:
key = '{}'.format(item).lower().replace(" ", "_")
status[key] = {"value": data[key][0], "unit": data[key][1]}
# Still have 'Device Status' from QPIGS
# Still have QPGSn
return status
def getSettings(self):
"""
Query inverter for all current settings
"""
# serial_number = self.getSerialNumber()
default_settings = self.mp.execute("QDI").response_dict
current_settings = self.mp.execute("QPIRI").response_dict
flag_settings = self.mp.execute("QFLAG").response_dict
# current_settings.update(flag_settings) # Combine current and flag settings dicts
settings = {}
# {"Battery Bulk Charge Voltage": {"unit": "V", "default": 56.4, "value": 57.4}}
items = ["Battery Type", "Output Mode", "Battery Bulk Charge Voltage", "Battery Float Charge Voltage",
"Battery Under Voltage", "Battery Redischarge Voltage", "Battery Recharge Voltage", "Input Voltage Range",
"Charger Source Priority", "Max AC Charging Current", "Max Charging Current", "Output Source Priority",
"AC Output Voltage", "AC Output Frequency", "PV OK Condition", "PV Power Balance",
"Buzzer", "Power Saving", "Overload Restart", "Over Temperature Restart", "LCD Backlight", "Primary Source Interrupt Alarm",
"Record Fault Code", "Overload Bypass", "LCD Reset to Default", "Machine Type", "AC Input Voltage", "AC Input Current",
"AC Output Current", "AC Output Apparent Power", "AC Output Active Power", "Battery Voltage", "Max Parallel Units"]
for item in items:
key = '{}'.format(item).lower().replace(" ", "_")
settings[key] = {"value": getVal(current_settings, key, 0),
"unit": getVal(current_settings, key, 1),
"default": getVal(default_settings, key, 0)}
for key in flag_settings:
_key = '{}'.format(key).lower().replace(" ", "_")
settings[_key]['value'] = getVal(flag_settings, key, 0)
return settings
|
with open("input.txt") as f:
pub_1, pub_2 = map(int, f.read().splitlines())
mod = 20201227
# Brute-force the loop size: repeatedly transform subject number 7 until the
# value matches one of the public keys, then transform the *other* public key
# that many times to recover the shared encryption key.
loop_size, value, pub = 0, 1, None
while not pub:
value = (value * 7) % mod
loop_size += 1
if value == pub_1:
pub = pub_2
elif value == pub_2:
pub = pub_1
value = 1
for _ in range(loop_size):
value = (value * pub) % mod
print("Part 1:", value)
print("Part 2: No part 2!")
|
#!/usr/local/bin/python3
# coding: utf-8
# YYeTsBot - share_excel.py
# 12/18/21 19:21
#
__author__ = "Benny <benny.think@gmail.com>"
import openpyxl
import pathlib
import sys
web_path = pathlib.Path(__file__).parent.parent.resolve().as_posix()
sys.path.append(web_path)
from Mongo import Mongo
from tqdm import tqdm
from utils import ts_date
wb = openpyxl.open("aliyun.xlsx")
data = {}
for ws in wb.worksheets:
    for line in range(1, ws.max_row + 1):
        name = ws.cell(line, 1).value
        link = ws.cell(line, 2).value
data[name] = link
template = {
"username": "Benny",
"ip": "127.0.0.1",
"date": "",
"browser": "cli",
"content": "",
"resource_id": 234,
"type": "parent"
}
col = Mongo().db["comment"]
share_doc = {
"status": 1.0,
"info": "OK",
"data": {
"info": {
"id": 234,
"cnname": "网友分享",
"enname": "",
"aliasname": "",
"channel": "share",
"channel_cn": "",
"area": "",
"show_type": "",
"expire": "1610401225",
"views": 0
},
"list": []
}
}
Mongo().db["yyets"].update_one({"data.info.id": 234}, {"$set": share_doc}, upsert=True)
for name, link in tqdm(data.items()):
template["content"] = f"{name}\n{link}"
template["date"] = ts_date()
col.insert_one(template.copy())
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# An application of decorators
__author__ = 'sphenginx'
def funA(fn):
print("C语言中文网")
    fn()  # execute the fn argument that was passed in
print("http://c.biancheng.net")
return "装饰器函数的返回值"
@funA
def funB():
print("学习 Python")
if __name__ == '__main__':
print(funB)
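# Expected output: funA runs at decoration time, printing its two lines and
# executing funB's body; funB is then bound to funA's return value (a string),
# so the final print shows that string:
#   C语言中文网
#   Learning Python
#   http://c.biancheng.net
#   return value of the decorator function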
|
# Generated by Django 3.1.12 on 2021-07-01 20:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("polio", "0015_auto_20210630_2051"),
]
operations = [
migrations.CreateModel(
name="Config",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("slug", models.SlugField(unique=True)),
("content", models.JSONField()),
("created_at", models.DateTimeField(auto_now_add=True, db_index=True)),
("updated_at", models.DateTimeField(auto_now=True)),
],
),
]
|
import subprocess
import json
import yaml
from ckan_cloud_operator import logs
class DeisCkanInstanceSolr(object):
def __init__(self, instance):
self.instance = instance
self.solr_spec = self.instance.spec.solrCloudCollection
def update(self):
self.instance.annotations.update_status('solr', 'created', lambda: self._update(), force_update=True)
def delete(self):
collection_name = self.solr_spec['name']
print(f'Deleting solrcloud collection {collection_name}')
from ckan_cloud_operator.providers.solr import manager as solr_manager
solr_manager.delete_collection(collection_name)
def get_replication_factor(self):
from ckan_cloud_operator.providers.solr import manager as solr_manager
return solr_manager.get_replication_factor()
def get_num_shards(self):
from ckan_cloud_operator.providers.solr import manager as solr_manager
return solr_manager.get_num_shards()
def get(self):
from ckan_cloud_operator.providers.solr import manager as solr_manager
collection_name = self.instance.spec.solrCloudCollection['name']
return solr_manager.get_collectoin_status(collection_name)
def is_ready(self):
return self.get().get('ready')
def _update(self):
status = self.get()
if status['ready']:
schema_name = status['schemaName']
schema_version = status['schemaVersion']
logs.info(f'Using existing solr schema: {schema_name} {schema_version}')
elif 'configName' in self.solr_spec:
config_name = self.solr_spec['configName']
from ckan_cloud_operator.providers.solr import manager as solr_manager
solr_manager.create_collection(status['collection_name'], config_name)
else:
raise NotImplementedError(f'Unsupported solr cloud collection spec: {self.solr_spec}')
|
import sys
import os
import time
import pylidc as pl
import numpy as np
from sklearn.utils import shuffle
import better_exceptions
import pickle
from numba import jit
from sklearn.feature_extraction import image
import keras
import random
from tqdm import tqdm
import cProfile
import unet_3d
from model import get_model, get_unet_model
import datetime
from resnet3d import Resnet3DBuilder
from keras.optimizers import Adam
import multiprocessing as mp
import ctypes
from functools import partial
from contextlib import closing
from multiprocessing import Pool
# turn off future warnings
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
sys.path.append('../')
sys.path.append('../..')
sys.path.append('/home/toosyou/projects/LungTumor')
import data_util
from preprocessing import scan_index_split
from generate_batch import get_scan, get_patches, extract_patch
from keras_retinanet.callbacks import RedirectModel
LIDC_IDRI_NP_PREFIX = '/mnt/ext/lidc_idri_np'
LIDC_IDRI_BATCHES_PREFIX = '/mnt/ext3/lidc_idri_batches'
def batch_generatorV3(set, batch_size, negative_ratio=0.9, n_batch_per_scan=10, n_scan_bundle=5):
# load positive patches
positive_patches = np.load(os.path.join(LIDC_IDRI_BATCHES_PREFIX, set, 'positive.npy'), mmap_mode='r')
# load all detections from fast_detection_model
all_detections = pickle.load(open(os.path.join(LIDC_IDRI_BATCHES_PREFIX, set, 'all_detections.pl'), 'rb'))
indexes = scan_index_split(1018)[{'train': 0, 'valid': 1, 'test': 2}[set]]
while True:
# load random scan
negative_Xs, negative_ys = list(), list()
for i in range(n_scan_bundle):
index_scan = np.random.choice(indexes)
volume, lung_mask, nodule_mask, layer_probability = get_scan(index_scan)
if volume is None:
continue
for index_detection in np.random.randint(len(all_detections[index_scan]), size=int(n_batch_per_scan*batch_size*negative_ratio)):
d = all_detections[index_scan][index_detection] # [x0, y0, x1, y1, z, score]
x, y, z = int((d[0]+d[2])/2), int((d[1]+d[3])/2), int(d[4])
patch, label = extract_patch(volume, nodule_mask, x, y, z)
if patch is None:
continue
# normalize
negative_Xs.append(patch)
negative_ys.append([label, 1-label])
negative_Xs, negative_ys = np.array(negative_Xs), np.array(negative_ys)
for i in range(n_batch_per_scan*n_scan_bundle):
# randomly choose positive patches
positive_X = positive_patches[np.random.randint(positive_patches.shape[0], size=int(batch_size*(1.-negative_ratio))), ...]
positive_y = np.array([[1, 0]]*positive_X.shape[0])
negative_indexes = np.random.randint(negative_Xs.shape[0], size=int(batch_size*negative_ratio))
negative_X = negative_Xs[negative_indexes, ...]
negative_y = negative_ys[negative_indexes, ...]
# generate batch
X = np.append(negative_X, positive_X, axis=0)
y = np.append(negative_y, positive_y, axis=0)
X = (X - 418.) / 414. # normalize
yield shuffle(X, y)
def batch_generatorV2(set, batch_size, negative_ratio=0.9, n_batch_per_scan=10, n_scan_bundle=5):
# load positive patches
positive_patches = np.load(os.path.join(LIDC_IDRI_BATCHES_PREFIX, set, 'positive.npy'))
indexes = scan_index_split(1018)[{'train': 0, 'valid': 1, 'test': 2}[set]]
while True:
# load random scan
        negative_Xs, negative_ys = np.ndarray((0, 64, 64, 16, 1), dtype=float), np.ndarray((0, 2), dtype=float)
for i in range(n_scan_bundle):
volume, lung_mask, nodule_mask, layer_probability = get_scan(np.random.choice(indexes))
if volume is None:
continue
tmp_Xs, tmp_ys = get_patches(volume=volume,
size=int(batch_size*negative_ratio*n_batch_per_scan),
is_positive=False,
lung_mask=lung_mask,
nodule_mask=nodule_mask,
layer_probability=layer_probability,
patch_size=(64, 64, 16))
negative_Xs = np.append(tmp_Xs, negative_Xs, axis=0)
negative_ys = np.append(tmp_ys, negative_ys, axis=0)
for i in range(n_batch_per_scan*n_scan_bundle):
# randomly choose positive patches
positive_X = positive_patches[np.random.randint(positive_patches.shape[0], size=int(batch_size*(1.-negative_ratio))), ...]
positive_y = np.array([[1, 0]]*positive_X.shape[0])
negative_indexes = np.random.randint(negative_Xs.shape[0], size=int(batch_size*negative_ratio))
negative_X = negative_Xs[negative_indexes, ...]
negative_y = negative_ys[negative_indexes, ...]
# generate batch
X = np.append(negative_X, positive_X, axis=0)
y = np.append(negative_y, positive_y, axis=0)
X = (X - 418.) / 414. # normalize
yield shuffle(X, y)
def batch_generator(set, batch_size):
X_path = os.path.join(LIDC_IDRI_BATCHES_PREFIX, set, 'X')
y_path = os.path.join(LIDC_IDRI_BATCHES_PREFIX, set, 'y')
all_files = os.listdir(X_path)
while True:
filename = np.random.choice(all_files)
        X_filename = os.path.join(X_path, filename)
        y_filename = os.path.join(y_path, filename)
        X = np.load(X_filename)
y = np.load(y_filename)
for i in range(X.shape[0] // batch_size):
indexes = np.random.randint(X.shape[0], size=batch_size)
yield X[indexes], y[indexes]
del X
del y
if __name__ == '__main__':
# model, training_model = get_unet_model()
# model, training_model = get_model()
model = Resnet3DBuilder.build_resnet_50((64, 64, 16, 1), 2)
training_model = keras.utils.multi_gpu_model(model)
training_model.compile(optimizer=Adam(amsgrad=True), loss='binary_crossentropy', metrics=['accuracy'])
model.summary()
callbacks=[
RedirectModel(keras.callbacks.ModelCheckpoint(
os.path.join(
'./model_checkpoints',
'{epoch:02d}.h5'
),
verbose=1,
), model),
keras.callbacks.TensorBoard(
log_dir='./logs/' + datetime.datetime.now().strftime('%Y%m%d%H%M')
),
# keras.callbacks.ReduceLROnPlateau(
# monitor='val_loss',
# factor=0.1,
# patience=3
# )
]
train_generator = batch_generatorV2('train', 128, n_batch_per_scan=20, negative_ratio=0.8)
valid_generator = batch_generatorV2('valid', 128, n_batch_per_scan=20, negative_ratio=0.8)
training_model.fit_generator(train_generator,
steps_per_epoch=1024,
epochs=100,
validation_data=valid_generator,
validation_steps=100,
use_multiprocessing=False,
# workers=4,
callbacks=callbacks)
|
# -*- coding: utf-8 -*-
'''
dburi object (parses database connection URIs)
created by HanFei on 19/2/28
'''
import re
import os
import urllib.parse
default_port = {
'mongodb': 27017,
'mysql': 3306,
'postgres': 5432,
'redis': 6379,
'hbase': 9090,
'elasticsearch':9200
}
def parse_extra(query):
    qs = dict((k, v if len(v) > 1 else v[0])
              for k, v in urllib.parse.parse_qs(query).items())
    return qs
def parse_db_str(conn_str):
pattern = re.compile(r'''
(?P<name>[\w\+]+)://
(?:
(?P<user>[^:/]*)
(?::(?P<passwd>[^/]*))?
@)?
(?:
(?P<host>[^/:]*)
(?::(?P<port>[^/]*))?
)?
(?:/(?P<db>\w*))?
(?:\?(?P<extra>(.*)))?
'''
, re.X)
m = pattern.match(conn_str)
if m is not None:
components = m.groupdict()
if components['extra']:
for extra_k,extra_v in parse_extra(components['extra']).items():
components[extra_k] = extra_v
if components['db'] is not None:
tokens = components['db'].split('?', 2)
components['db'] = tokens[0]
if components['passwd'] is not None:
components['passwd'] = urllib.parse.unquote(components['passwd'])
name = components['name']
if components['port'] is None:
components['port'] = default_port[name]
else:
components['port'] = int(components['port'])
result = {}
for key, value in components.items():
if value:
result[key] = value
return result
    else:
        raise ValueError(
            "Could not parse rfc1738 URL from string '%s', the format should match 'dialect+driver://username:password@host:port/database'" % conn_str)
|
import numpy as np
from pyhack.py_runko_aux import *
from pyhack.boris import boris_rp, boris_iter
def vv_pos(tile,dtf=1):
c = tile.cfl
cont = tile.get_container(0)
pos = py_pos(cont)
vel = py_vel(cont)*c
E,B = py_em(cont)
nq = pos.shape[0]
dims = pos.shape[1]
    g = gui(c,vel)[:,np.newaxis]
    # half-step velocity kick, then a full-step position drift
    v_half = vel + dtf/2 * cont.q * (E+np.cross(vel*g/c,B))
    pos = pos + dtf*v_half*gui(c,v_half)[:,np.newaxis]
tile.delete_all_particles()
for i in range(0,nq):
cont.add_particle(pos[i,:],vel[i,:]/c,1.0)
E_old = np.copy(E)
return E_old
def vv_vel(tile,dtf=1,E_old=0):
c = tile.cfl
cont = tile.get_container(0)
pos = py_pos(cont)
vel = py_vel(cont)*c
E,B = py_em(cont)
Eh = (E+E_old)/2
nq = pos.shape[0]
dims = pos.shape[1]
# vel = boris_rp(vel,Eh,B,c,cont.q,dtf=dtf)
ginv = gui(c,vel+dtf*0.5*(cont.q*Eh))
vel = boris_iter(vel,Eh,B,dtf,c,0,ginv,cont.q)
tile.delete_all_particles()
for i in range(0,nq):
cont.add_particle(pos[i,:],vel[i,:]/c,1.0)
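# Example usage inside a particle-update loop (a sketch; `tile` is assumed to
# be a runko tile with a single particle container):
#   E_old = vv_pos(tile)               # position drift, returns the old E field
#   vv_vel(tile, E_old=E_old)          # velocity kick using the time-averaged E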
|
"""
file: donkey_sim.py
author: Tawn Kramer
date: 2018-08-31
"""
import base64
import logging
import math
import os
import time
import types
from io import BytesIO
from typing import Any, Callable, Dict, List, Tuple, Union
import numpy as np
from PIL import Image
from gym_donkeycar.core.fps import FPSTimer
from gym_donkeycar.core.message import IMesgHandler
from gym_donkeycar.core.sim_client import SimClient
logger = logging.getLogger(__name__)
class DonkeyUnitySimContoller:
def __init__(self, conf: Dict[str, Any]):
logger.setLevel(conf["log_level"])
self.address = (conf["host"], conf["port"])
self.handler = DonkeyUnitySimHandler(conf=conf)
self.client = SimClient(self.address, self.handler)
def set_car_config(
self,
body_style: str,
body_rgb: Tuple[int, int, int],
car_name: str,
font_size: int,
) -> None:
self.handler.send_car_config(body_style, body_rgb, car_name, font_size)
def set_cam_config(self, **kwargs) -> None:
self.handler.send_cam_config(**kwargs)
def set_reward_fn(self, reward_fn: Callable) -> None:
self.handler.set_reward_fn(reward_fn)
def set_episode_over_fn(self, ep_over_fn: Callable) -> None:
self.handler.set_episode_over_fn(ep_over_fn)
def wait_until_loaded(self) -> None:
time.sleep(0.1)
while not self.handler.loaded:
logger.warning("waiting for sim to start..")
time.sleep(1.0)
logger.info("sim started!")
def reset(self) -> None:
self.handler.reset()
def get_sensor_size(self) -> Tuple[int, int, int]:
return self.handler.get_sensor_size()
def take_action(self, action: np.ndarray):
self.handler.take_action(action)
def observe(self) -> Tuple[np.ndarray, float, bool, Dict[str, Any]]:
return self.handler.observe()
def quit(self) -> None:
self.client.stop()
def exit_scene(self) -> None:
self.handler.send_exit_scene()
def render(self, mode: str) -> None:
pass
def is_game_over(self) -> bool:
return self.handler.is_game_over()
def calc_reward(self, done: bool) -> float:
return self.handler.calc_reward(done)
class DonkeyUnitySimHandler(IMesgHandler):
def __init__(self, conf: Dict[str, Any]):
self.conf = conf
self.SceneToLoad = conf["level"]
self.loaded = False
self.max_cte = conf["max_cte"]
self.timer = FPSTimer()
# sensor size - height, width, depth
self.camera_img_size = conf["cam_resolution"]
self.image_array = np.zeros(self.camera_img_size)
self.image_array_b = None
self.last_obs = self.image_array
self.time_received = time.time()
self.last_received = self.time_received
self.hit = "none"
self.cte = 0.0
self.x = 0.0
self.y = 0.0
self.z = 0.0
self.speed = 0.0
self.missed_checkpoint = False
self.dq = False
self.over = False
self.client = None
self.fns = {
"telemetry": self.on_telemetry,
"scene_selection_ready": self.on_scene_selection_ready,
"scene_names": self.on_recv_scene_names,
"car_loaded": self.on_car_loaded,
"cross_start": self.on_cross_start,
"race_start": self.on_race_start,
"race_stop": self.on_race_stop,
"DQ": self.on_DQ,
"ping": self.on_ping,
"aborted": self.on_abort,
"missed_checkpoint": self.on_missed_checkpoint,
"need_car_config": self.on_need_car_config,
"collision_with_starting_line": self.on_collision_with_starting_line,
}
self.gyro_x = 0.0
self.gyro_y = 0.0
self.gyro_z = 0.0
self.accel_x = 0.0
self.accel_y = 0.0
self.accel_z = 0.0
self.vel_x = 0.0
self.vel_y = 0.0
self.vel_z = 0.0
self.lidar = []
# car in Unity lefthand coordinate system: roll is Z, pitch is X and yaw is Y
self.roll = 0.0
self.pitch = 0.0
self.yaw = 0.0
# variables required for lidar points decoding into array format
self.lidar_deg_per_sweep_inc = 1
self.lidar_num_sweep_levels = 1
self.lidar_deg_ang_delta = 1
self.last_lap_time = 0.0
self.current_lap_time = 0.0
self.starting_line_index = -1
self.lap_count = 0
def on_connect(self, client: SimClient) -> None:
logger.debug("socket connected")
self.client = client
def on_disconnect(self) -> None:
logger.debug("socket disconnected")
self.client = None
def on_abort(self, message: Dict[str, Any]) -> None:
self.client.stop()
def on_need_car_config(self, message: Dict[str, Any]) -> None:
logger.info("on need car config")
self.loaded = True
self.send_config(self.conf)
def on_collision_with_starting_line(self, message: Dict[str, Any]) -> None:
if self.current_lap_time == 0.0:
self.current_lap_time = message["timeStamp"]
self.starting_line_index = message["starting_line_index"]
elif self.starting_line_index == message["starting_line_index"]:
time_at_crossing = message["timeStamp"]
self.last_lap_time = float(time_at_crossing - self.current_lap_time)
self.current_lap_time = time_at_crossing
self.lap_count += 1
lap_msg = f"New lap time: {round(self.last_lap_time, 2)} seconds"
logger.info(lap_msg)
@staticmethod
def extract_keys(dict_: Dict[str, Any], list_: List[str]) -> Dict[str, Any]:
return_dict = {}
for key in list_:
if key in dict_:
return_dict[key] = dict_[key]
return return_dict
def send_config(self, conf: Dict[str, Any]) -> None:
if "degPerSweepInc" in conf:
raise ValueError("LIDAR config keys were renamed to use snake_case name instead of CamelCase")
logger.info("sending car config.")
        # both ways work; car_config shouldn't interfere with other config, so keeping the two alternatives
self.set_car_config(conf)
if "car_config" in conf.keys():
self.set_car_config(conf["car_config"])
logger.info("done sending car config.")
if "cam_config" in conf.keys():
cam_config = self.extract_keys(
conf["cam_config"],
[
"img_w",
"img_h",
"img_d",
"img_enc",
"fov",
"fish_eye_x",
"fish_eye_y",
"offset_x",
"offset_y",
"offset_z",
"rot_x",
"rot_y",
"rot_z",
],
)
self.send_cam_config(**cam_config)
logger.info(f"done sending cam config. {cam_config}")
if "cam_config_b" in conf.keys():
cam_config_b = self.extract_keys(
conf["cam_config_b"],
[
"img_w",
"img_h",
"img_d",
"img_enc",
"fov",
"fish_eye_x",
"fish_eye_y",
"offset_x",
"offset_y",
"offset_z",
"rot_x",
"rot_y",
"rot_z",
],
)
self.send_cam_config(**cam_config_b, msg_type="cam_config_b")
logger.info(f"done sending cam config B. {cam_config_b}")
self.image_array_b = np.zeros(self.camera_img_size)
if "lidar_config" in conf.keys():
if "degPerSweepInc" in conf:
raise ValueError("LIDAR config keys were renamed to use snake_case name instead of CamelCase")
lidar_config = self.extract_keys(
conf["lidar_config"],
[
"deg_per_sweep_inc",
"deg_ang_down",
"deg_ang_delta",
"num_sweeps_levels",
"max_range",
"noise",
"offset_x",
"offset_y",
"offset_z",
"rot_x",
],
)
self.send_lidar_config(**lidar_config)
logger.info(f"done sending lidar config., {lidar_config}")
# what follows is needed in order not to break older conf
cam_config = self.extract_keys(
conf,
[
"img_w",
"img_h",
"img_d",
"img_enc",
"fov",
"fish_eye_x",
"fish_eye_y",
"offset_x",
"offset_y",
"offset_z",
"rot_x",
"rot_y",
"rot_z",
],
)
if cam_config != {}:
self.send_cam_config(**cam_config)
logger.info(f"done sending cam config. {cam_config}")
logger.warning(
"""This way of passing cam_config is deprecated,
please wrap the parameters in a sub-dictionary with the key 'cam_config'.
Example: GYM_CONF = {'cam_config':"""
+ str(cam_config)
+ "}"
)
lidar_config = self.extract_keys(
conf,
[
"deg_per_sweep_inc",
"deg_ang_down",
"deg_ang_delta",
"num_sweeps_levels",
"max_range",
"noise",
"offset_x",
"offset_y",
"offset_z",
"rot_x",
],
)
if lidar_config != {}:
self.send_lidar_config(**lidar_config)
logger.info(f"done sending lidar config., {lidar_config}")
logger.warning(
"""This way of passing lidar_config is deprecated,
please wrap the parameters in a sub-dictionary with the key 'lidar_config'.
Example: GYM_CONF = {'lidar_config':"""
+ str(lidar_config)
+ "}"
)
def set_car_config(self, conf: Dict[str, Any]) -> None:
if "body_style" in conf:
self.send_car_config(
conf["body_style"],
conf["body_rgb"],
conf["car_name"],
conf["font_size"],
)
def set_racer_bio(self, conf: Dict[str, Any]) -> None:
if "bio" in conf:
self.send_racer_bio(
conf["racer_name"],
conf["car_name"],
conf["bio"],
conf["country"],
conf["guid"],
)
def on_recv_message(self, message: Dict[str, Any]) -> None:
if "msg_type" not in message:
logger.warn("expected msg_type field")
return
msg_type = message["msg_type"]
logger.debug("got message :" + msg_type)
if msg_type in self.fns:
self.fns[msg_type](message)
else:
logger.warning(f"unknown message type {msg_type}")
# ------- Env interface ---------- #
def reset(self) -> None:
logger.debug("reseting")
self.send_reset_car()
self.timer.reset()
time.sleep(1)
self.image_array = np.zeros(self.camera_img_size)
self.image_array_b = None
self.last_obs = self.image_array
self.time_received = time.time()
self.last_received = self.time_received
self.hit = "none"
self.cte = 0.0
self.x = 0.0
self.y = 0.0
self.z = 0.0
self.speed = 0.0
self.over = False
self.missed_checkpoint = False
self.dq = False
self.gyro_x = 0.0
self.gyro_y = 0.0
self.gyro_z = 0.0
self.accel_x = 0.0
self.accel_y = 0.0
self.accel_z = 0.0
self.vel_x = 0.0
self.vel_y = 0.0
self.vel_z = 0.0
self.lidar = []
self.current_lap_time = 0.0
self.last_lap_time = 0.0
self.lap_count = 0
# car
self.roll = 0.0
self.pitch = 0.0
self.yaw = 0.0
def get_sensor_size(self) -> Tuple[int, int, int]:
return self.camera_img_size
def take_action(self, action: np.ndarray) -> None:
self.send_control(action[0], action[1])
def observe(self) -> Tuple[np.ndarray, float, bool, Dict[str, Any]]:
while self.last_received == self.time_received:
time.sleep(0.001)
self.last_received = self.time_received
observation = self.image_array
done = self.is_game_over()
reward = self.calc_reward(done)
info = {
"pos": (self.x, self.y, self.z),
"cte": self.cte,
"speed": self.speed,
"hit": self.hit,
"gyro": (self.gyro_x, self.gyro_y, self.gyro_z),
"accel": (self.accel_x, self.accel_y, self.accel_z),
"vel": (self.vel_x, self.vel_y, self.vel_z),
"lidar": (self.lidar),
"car": (self.roll, self.pitch, self.yaw),
"last_lap_time": self.last_lap_time,
"lap_count": self.lap_count,
}
# Add the second image to the dict
if self.image_array_b is not None:
info["image_b"] = self.image_array_b
# self.timer.on_frame()
return observation, reward, done, info
def is_game_over(self) -> bool:
return self.over
# ------ RL interface ----------- #
def set_reward_fn(self, reward_fn: Callable[[], float]):
"""
allow users to set their own reward function
"""
self.calc_reward = types.MethodType(reward_fn, self)
logger.debug("custom reward fn set.")
def calc_reward(self, done: bool) -> float:
# Normalization factor, real max speed is around 30
# but only attained on a long straight line
max_speed = 10
if done:
return -1.0
if self.cte > self.max_cte:
return -1.0
# Collision
if self.hit != "none":
return -2.0
# going fast close to the center of lane yields best reward
return (1.0 - (self.cte / self.max_cte) ** 2) * (self.speed / max_speed)
# ------ Socket interface ----------- #
def on_telemetry(self, message: Dict[str, Any]) -> None:
img_string = message["image"]
image = Image.open(BytesIO(base64.b64decode(img_string)))
# always update the image_array as the observation loop will hang if not changing.
self.image_array = np.asarray(image)
self.time_received = time.time()
if "image_b" in message:
img_string_b = message["image_b"]
image_b = Image.open(BytesIO(base64.b64decode(img_string_b)))
self.image_array_b = np.asarray(image_b)
if "pos_x" in message:
self.x = message["pos_x"]
self.y = message["pos_y"]
self.z = message["pos_z"]
if "speed" in message:
self.speed = message["speed"]
if "gyro_x" in message:
self.gyro_x = message["gyro_x"]
self.gyro_y = message["gyro_y"]
self.gyro_z = message["gyro_z"]
if "accel_x" in message:
self.accel_x = message["accel_x"]
self.accel_y = message["accel_y"]
self.accel_z = message["accel_z"]
if "vel_x" in message:
self.vel_x = message["vel_x"]
self.vel_y = message["vel_y"]
self.vel_z = message["vel_z"]
if "roll" in message:
self.roll = message["roll"]
self.pitch = message["pitch"]
self.yaw = message["yaw"]
# Cross track error not always present.
# Will be missing if path is not setup in the given scene.
# It should be setup in the 4 scenes available now.
if "cte" in message:
self.cte = message["cte"]
if "lidar" in message:
self.lidar = self.process_lidar_packet(message["lidar"])
# don't update hit once session over
if self.over:
return
if "hit" in message:
self.hit = message["hit"]
self.determine_episode_over()
def on_cross_start(self, message: Dict[str, Any]) -> None:
logger.info(f"crossed start line: lap_time {message['lap_time']}")
def on_race_start(self, message: Dict[str, Any]) -> None:
logger.debug("race started")
def on_race_stop(self, message: Dict[str, Any]) -> None:
logger.debug("race stoped")
def on_missed_checkpoint(self, message: Dict[str, Any]) -> None:
logger.info("racer missed checkpoint")
self.missed_checkpoint = True
def on_DQ(self, message: Dict[str, Any]) -> None:
logger.info("racer DQ")
self.dq = True
def on_ping(self, message: Dict[str, Any]) -> None:
"""
        no reply needed at this point. The server sends these as a keep-alive to make sure clients haven't gone away.
"""
pass
def set_episode_over_fn(self, ep_over_fn: Callable[[], bool]):
"""
        allow users to define their own episode-over function
"""
self.determine_episode_over = types.MethodType(ep_over_fn, self)
logger.debug("custom ep_over fn set.")
def determine_episode_over(self):
        # The first few frames after start can report a very large CTE when the
        # car is just slightly behind the path. We ignore those.
if math.fabs(self.cte) > 2 * self.max_cte:
pass
elif math.fabs(self.cte) > self.max_cte:
logger.debug(f"game over: cte {self.cte}")
self.over = True
elif self.hit != "none":
logger.debug(f"game over: hit {self.hit}")
self.over = True
elif self.missed_checkpoint:
logger.debug("missed checkpoint")
self.over = True
elif self.dq:
logger.debug("disqualified")
self.over = True
# Disable reset
if os.environ.get("RACE") == "True":
self.over = False
def on_scene_selection_ready(self, message: Dict[str, Any]) -> None:
logger.debug("SceneSelectionReady")
self.send_get_scene_names()
def on_car_loaded(self, message: Dict[str, Any]) -> None:
logger.debug("car loaded")
self.loaded = True
# Enable hand brake, so the car doesn't move
self.send_control(0, 0, 1.0)
self.on_need_car_config({})
def on_recv_scene_names(self, message: Dict[str, Any]) -> None:
if message:
names = message["scene_names"]
logger.debug(f"SceneNames: {names}")
print("loading scene", self.SceneToLoad)
if self.SceneToLoad in names:
self.send_load_scene(self.SceneToLoad)
else:
raise ValueError(f"Scene name {self.SceneToLoad} not in scene list {names}")
def send_control(self, steer: float, throttle: float, brake: float = 0.0) -> None:
"""
Send command to simulator.
:param steer: desired steering
:param throttle: desired throttle
        :param brake: whether to activate the hand brake
            (can be a continuous value)
"""
if not self.loaded:
return
msg = {
"msg_type": "control",
"steering": str(steer),
"throttle": str(throttle),
"brake": str(brake),
}
self.queue_message(msg)
def send_reset_car(self) -> None:
msg = {"msg_type": "reset_car"}
self.queue_message(msg)
def send_get_scene_names(self) -> None:
msg = {"msg_type": "get_scene_names"}
self.queue_message(msg)
def send_load_scene(self, scene_name: str) -> None:
msg = {"msg_type": "load_scene", "scene_name": scene_name}
self.queue_message(msg)
def send_exit_scene(self) -> None:
msg = {"msg_type": "exit_scene"}
self.queue_message(msg)
def send_car_config(
self,
body_style: str = "donkey",
body_rgb: Tuple[int, int, int] = (255, 255, 255),
car_name: str = "car",
font_size: int = 100,
):
"""
# body_style = "donkey" | "bare" | "car01" | "f1" | "cybertruck"
# body_rgb = (128, 128, 128) tuple of ints
# car_name = "string less than 64 char"
"""
assert isinstance(body_style, str)
assert isinstance(body_rgb, list) or isinstance(body_rgb, tuple)
assert len(body_rgb) == 3
assert isinstance(car_name, str)
assert isinstance(font_size, int) or isinstance(font_size, str)
msg = {
"msg_type": "car_config",
"body_style": body_style,
"body_r": str(body_rgb[0]),
"body_g": str(body_rgb[1]),
"body_b": str(body_rgb[2]),
"car_name": car_name,
"font_size": str(font_size),
}
self.blocking_send(msg)
time.sleep(0.1)
def send_racer_bio(self, racer_name: str, car_name: str, bio: str, country: str, guid: str) -> None:
# body_style = "donkey" | "bare" | "car01" choice of string
# body_rgb = (128, 128, 128) tuple of ints
# car_name = "string less than 64 char"
# guid = "some random string"
msg = {
"msg_type": "racer_info",
"racer_name": racer_name,
"car_name": car_name,
"bio": bio,
"country": country,
"guid": guid,
}
self.blocking_send(msg)
time.sleep(0.1)
def send_cam_config(
self,
msg_type: str = "cam_config",
img_w: int = 0,
img_h: int = 0,
img_d: int = 0,
img_enc: Union[str, int] = 0, # 0 is default value
fov: int = 0,
fish_eye_x: float = 0.0,
fish_eye_y: float = 0.0,
offset_x: float = 0.0,
offset_y: float = 0.0,
offset_z: float = 0.0,
rot_x: float = 0.0,
rot_y: float = 0.0,
rot_z: float = 0.0,
) -> None:
"""Camera config
set any field to Zero to get the default camera setting.
offset_x moves camera left/right
offset_y moves camera up/down
offset_z moves camera forward/back
rot_x will rotate the camera
        with fish_eye_x/y == 0.0 you get no distortion
img_enc can be one of JPG|PNG|TGA
"""
msg = {
"msg_type": msg_type,
"fov": str(fov),
"fish_eye_x": str(fish_eye_x),
"fish_eye_y": str(fish_eye_y),
"img_w": str(img_w),
"img_h": str(img_h),
"img_d": str(img_d),
"img_enc": str(img_enc),
"offset_x": str(offset_x),
"offset_y": str(offset_y),
"offset_z": str(offset_z),
"rot_x": str(rot_x),
"rot_y": str(rot_y),
"rot_z": str(rot_z),
}
self.blocking_send(msg)
time.sleep(0.1)
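    # Example (a sketch; values are illustrative):
    #   handler.send_cam_config(img_w=160, img_h=120, img_d=3, fov=90)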
def send_lidar_config(
self,
deg_per_sweep_inc: float = 2.0,
deg_ang_down: float = 0.0,
deg_ang_delta: float = -1.0,
num_sweeps_levels: int = 1,
max_range: float = 50.0,
noise: float = 0.5,
offset_x: float = 0.0,
offset_y: float = 0.5,
offset_z: float = 0.5,
rot_x: float = 0.0,
):
"""Lidar config
offset_x moves lidar left/right
the offset_y moves lidar up/down
the offset_z moves lidar forward/back
deg_per_sweep_inc : as the ray sweeps around, how many degrees does it advance per sample (int)
deg_ang_down : what is the starting angle for the initial sweep compared to the forward vector
deg_ang_delta : what angle change between sweeps
num_sweeps_levels : how many complete 360 sweeps (int)
        max_range : the max distance at which we will register a hit
noise : what is the scalar on the perlin noise applied to point position
        Here are some sample settings that simulate a more sophisticated lidar:
msg = '{ "msg_type" : "lidar_config",
"degPerSweepInc" : "2.0", "degAngDown" : "25", "degAngDelta" : "-1.0",
"numSweepsLevels" : "25", "maxRange" : "50.0", "noise" : "0.2",
"offset_x" : "0.0", "offset_y" : "1.0", "offset_z" : "1.0", "rot_x" : "0.0" }'
        And here are some sample settings that simulate a simple RpLidar A2 single-level horizontal scan.
msg = '{ "msg_type" : "lidar_config", "degPerSweepInc" : "2.0",
"degAngDown" : "0.0", "degAngDelta" : "-1.0", "numSweepsLevels" : "1",
"maxRange" : "50.0", "noise" : "0.4",
"offset_x" : "0.0", "offset_y" : "0.5", "offset_z" : "0.5", "rot_x" : "0.0" }'
"""
msg = {
"msg_type": "lidar_config",
"degPerSweepInc": str(deg_per_sweep_inc),
"degAngDown": str(deg_ang_down),
"degAngDelta": str(deg_ang_delta),
"numSweepsLevels": str(num_sweeps_levels),
"maxRange": str(max_range),
"noise": str(noise),
"offset_x": str(offset_x),
"offset_y": str(offset_y),
"offset_z": str(offset_z),
"rot_x": str(rot_x),
}
self.blocking_send(msg)
time.sleep(0.1)
self.lidar_deg_per_sweep_inc = float(deg_per_sweep_inc)
self.lidar_num_sweep_levels = int(num_sweeps_levels)
self.lidar_deg_ang_delta = float(deg_ang_delta)
def process_lidar_packet(self, lidar_info: List[Dict[str, float]]) -> np.ndarray:
point_per_sweep = int(360 / self.lidar_deg_per_sweep_inc)
points_num = round(abs(self.lidar_num_sweep_levels * point_per_sweep))
reconstructed_lidar_info = [-1 for _ in range(points_num)] # we chose -1 to be the "None" value
if lidar_info is not None:
for point in lidar_info:
rx = point["rx"]
ry = point["ry"]
d = point["d"]
x_index = round(abs(rx / self.lidar_deg_per_sweep_inc))
y_index = round(abs(ry / self.lidar_deg_ang_delta))
reconstructed_lidar_info[point_per_sweep * y_index + x_index] = d
return np.array(reconstructed_lidar_info)
def blocking_send(self, msg: Dict[str, Any]) -> None:
if self.client is None:
logger.debug(f"skipping: \n {msg}")
return
logger.debug(f"blocking send \n {msg}")
self.client.send_now(msg)
def queue_message(self, msg: Dict[str, Any]) -> None:
if self.client is None:
logger.debug(f"skipping: \n {msg}")
return
logger.debug(f"sending \n {msg}")
self.client.queue_message(msg)
|
from django.test import TestCase
from django.contrib.auth.models import User
class ProfileMethodTests(TestCase):
def setUp(self):
user = User.objects.create_user(
username='lichun',
email='i@lichun.me',
password='lichun_password',
)
user.profile.url = 'https://lichun.me/'
user.save()
def test_get_profile(self):
user = User.objects.get(username='lichun')
url = user.profile.get_url()
self.assertEqual(url, 'https://lichun.me/')
picture_url = user.profile.get_picture()
self.assertEqual(picture_url, '/static/img/user.png')
screen_name = user.profile.get_screen_name()
self.assertEqual(screen_name, 'lichun')
|
"""
Module `chatette_qiu.cli.interactive_command.save_command`.
Contains the strategy class that represents the interactive mode command
`save` which writes a template file that, when parsed, would make a parser
that is in the state of the current parser.
"""
from __future__ import print_function
import io
from chatette_qiu.cli.interactive_commands.command_strategy import CommandStrategy
class SaveCommand(CommandStrategy):
usage_str = 'save <template-file-path>'
def execute(self, facade):
if len(self.command_tokens) < 2:
self.print_wrapper.error_log("Missing some arguments\nUsage: " +
self.usage_str)
return
template_filepath = self.command_tokens[1]
parser = facade.parser
with io.open(template_filepath, 'w+') as f:
for intent_name in parser.intent_definitions:
intent = parser.intent_definitions[intent_name]
print(intent.get_template_description(), file=f)
print(file=f)
for alias_name in parser.alias_definitions:
alias = parser.alias_definitions[alias_name]
print(alias.get_template_description(), file=f)
print(file=f)
for slot_name in parser.slot_definitions:
slot = parser.slot_definitions[slot_name]
print(slot.get_template_description(), file=f)
self.print_wrapper.write("Template file successfully written.")
# Override abstract methods
def execute_on_unit(self, facade, unit_type, unit_name, variation_name=None):
raise NotImplementedError()
def finish_execution(self, facade):
raise NotImplementedError()
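# Example: in the interactive prompt, `save backup.chatette` (file name is
# illustrative) writes every intent, alias and slot definition currently loaded
# in the parser to that file.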
|
from django.urls import path
from django.views.generic import TemplateView
app_name = 'frontend'
urlpatterns = [
path('', TemplateView.as_view(template_name='frontend/index.html'), name='index'),
]
|
"""Classical Checkpoints classes implementations
"""
# main imports
import os
import logging
import numpy as np
# module imports
from macop.callbacks.base import Callback
from macop.utils.progress import macop_text, macop_line
class BasicCheckpoint(Callback):
"""
    BasicCheckpoint is used for loading previous computations and starting again from the saved checkpoint
Attributes:
algo: {:class:`~macop.algorithms.base.Algorithm`} -- main algorithm instance reference
every: {int} -- checkpoint frequency used (based on number of evaluations)
filepath: {str} -- file path where checkpoints will be saved
"""
def run(self):
"""
Check if necessary to do backup based on `every` variable
"""
# get current best solution
solution = self.algo.result
currentEvaluation = self.algo.getGlobalEvaluation()
# backup if necessary
if currentEvaluation % self._every == 0:
logging.info("Checkpoint is done into " + self._filepath)
solution_data = ""
solutionSize = len(solution.data)
for index, val in enumerate(solution.data):
solution_data += str(val)
if index < solutionSize - 1:
solution_data += ' '
line = str(currentEvaluation) + ';' + solution_data + ';' + str(
solution.fitness) + ';\n'
# check if file exists
if not os.path.exists(self._filepath):
with open(self._filepath, 'w') as f:
f.write(line)
else:
with open(self._filepath, 'a') as f:
f.write(line)
def load(self):
"""
Load last backup line of solution and set algorithm state (best solution and evaluations) at this backup
"""
if os.path.exists(self._filepath):
logging.info('Load best solution from last checkpoint')
with open(self._filepath) as f:
# get last line and read data
lastline = f.readlines()[-1]
data = lastline.split(';')
# get evaluation information
globalEvaluation = int(data[0])
if self.algo.getParent() is not None:
self.algo.getParent().setEvaluation(globalEvaluation)
else:
self.algo.setEvaluation(globalEvaluation)
# get best solution data information
solution_data = list(map(int, data[1].split(' ')))
if self.algo.result is None:
self.algo.result = self.algo.initialiser()
self.algo.result.data = np.array(solution_data)
self.algo.result.fitness = float(data[2])
macop_line(self.algo)
macop_text(self.algo,
f'Checkpoint found from `{self._filepath}` file.')
macop_text(
self.algo,
f'Restart algorithm from evaluation {self.algo.getEvaluation()}.'
)
else:
macop_text(
self.algo,
'No backup found... Start running algorithm from evaluation 0.'
)
logging.info(
"Can't load backup... Backup filepath not valid in Checkpoint")
macop_line(self.algo)
class ContinuousCheckpoint(Callback):
"""
    ContinuousCheckpoint is used for loading previous computations and starting again from the saved checkpoint (continuous solutions only)
Attributes:
algo: {:class:`~macop.algorithms.base.Algorithm`} -- main algorithm instance reference
every: {int} -- checkpoint frequency used (based on number of evaluations)
filepath: {str} -- file path where checkpoints will be saved
"""
def run(self):
"""
Check if necessary to do backup based on `every` variable
"""
# get current best solution
solution = self.algo.result
currentEvaluation = self.algo.getGlobalEvaluation()
# backup if necessary
if currentEvaluation % self._every == 0:
logging.info("Checkpoint is done into " + self._filepath)
solution_data = ""
solutionSize = len(solution.data)
for index, val in enumerate(solution.data):
solution_data += str(val)
if index < solutionSize - 1:
solution_data += ' '
line = str(currentEvaluation) + ';' + solution_data + ';' + str(
solution.fitness) + ';\n'
# check if file exists
if not os.path.exists(self._filepath):
with open(self._filepath, 'w') as f:
f.write(line)
else:
with open(self._filepath, 'a') as f:
f.write(line)
def load(self):
"""
Load last backup line of solution and set algorithm state (best solution and evaluations) at this backup
"""
if os.path.exists(self._filepath):
logging.info('Load best solution from last checkpoint')
with open(self._filepath) as f:
# get last line and read data
lastline = f.readlines()[-1]
data = lastline.split(';')
# get evaluation information
globalEvaluation = int(data[0])
if self.algo.getParent() is not None:
self.algo.getParent().setEvaluation(globalEvaluation)
else:
self.algo.setEvaluation(globalEvaluation)
# get best solution data information
solution_data = list(map(float, data[1].split(' ')))
if self.algo.result is None:
self.algo.result = self.algo.initialiser()
self.algo.result.data = np.array(solution_data)
self.algo.result.fitness = float(data[2])
macop_line(self.algo)
macop_text(self.algo,
f'Checkpoint found from `{self._filepath}` file.')
macop_text(
self.algo,
f'Restart algorithm from evaluation {self.algo.getEvaluation()}.'
)
else:
macop_text(
self.algo,
'No backup found... Start running algorithm from evaluation 0.'
)
logging.info(
"Can't load backup... Backup filepath not valid in Checkpoint")
macop_line(self.algo)
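# Example usage (a sketch; `algo` is an assumed Algorithm instance and the
# constructor signature follows the attributes used above):
#   checkpoint = BasicCheckpoint(every=100, filepath='data/checkpoint.csv')
#   algo.addCallback(checkpoint)
#   checkpoint.load()  # resume from the last saved evaluation, if any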
|
import torch
import torch.nn as nn
from torch.nn.modules import activation
import lightconvpoint.nn as lcp_nn
from lightconvpoint.nn.conv_fkaconv import FKAConv as conv
from lightconvpoint.nn.pool import max_pool
from lightconvpoint.nn.sampling_knn import sampling_knn_quantized as sampling_knn
from lightconvpoint.nn.sampling import sampling_quantized as sampling, sampling_apply_on_data
from lightconvpoint.nn.knn import knn
class ResidualBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size):
super().__init__()
self.cv0 = nn.Conv1d(in_channels, in_channels//2, 1)
self.bn0 = nn.BatchNorm1d(in_channels//2)
self.cv1 = conv(in_channels//2, in_channels//2, kernel_size, bias=False)
self.bn1 = nn.BatchNorm1d(in_channels//2)
self.cv2 = nn.Conv1d(in_channels//2, out_channels, 1)
self.bn2 = nn.BatchNorm1d(out_channels)
self.activation = nn.ReLU()
self.shortcut = nn.Conv1d(in_channels, out_channels, 1) if in_channels != out_channels else nn.Identity()
def batched_index_select(self, input, dim, index):
"""Gather input with respect to the index tensor."""
index_shape = index.shape
views = [input.shape[0]] + [
1 if i != dim else -1 for i in range(1, len(input.shape))
]
expanse = list(input.shape)
expanse[0] = -1
expanse[dim] = -1
index = index.view(views).expand(expanse)
return torch.gather(input, dim, index).view(
input.size(0), -1, index_shape[1], index_shape[2]
)
def forward(self, x, pos, support_points, indices):
x_short = x
x = self.activation(self.bn0(self.cv0(x)))
x = self.activation(self.bn1(self.cv1(x, pos, support_points, indices)))
x = self.bn2(self.cv2(x))
if x_short.shape[2] != x.shape[2]:
x_short = max_pool(x_short, indices)
x_short = self.shortcut(x_short)
return self.activation(x + x_short)
class FKAConv(nn.Module):
def __init__(self, in_channels, out_channels, segmentation=False, hidden=64, w2v_size=600):
super().__init__()
self.segmentation = segmentation
self.cv0 = conv(in_channels, hidden, 16)
self.bn0 = nn.BatchNorm1d(hidden)
self.resnetb01 = ResidualBlock(hidden, hidden, 16)
self.resnetb10 = ResidualBlock(hidden, 2*hidden, 16)
self.resnetb11 = ResidualBlock(2*hidden, 2*hidden, 16)
self.resnetb20 = ResidualBlock(2*hidden, 4*hidden, 16)
self.resnetb21 = ResidualBlock(4*hidden, 4*hidden, 16)
self.resnetb30 = ResidualBlock(4*hidden, 8*hidden, 16)
self.resnetb31 = ResidualBlock(8*hidden, 8*hidden, 16)
self.resnetb40 = ResidualBlock(8*hidden, 16*hidden, 16)
self.resnetb41 = ResidualBlock(16*hidden, 16*hidden, 16)
if self.segmentation:
self.cv3d = nn.Conv1d(24*hidden, 8 * hidden, 1)
self.bn3d = nn.BatchNorm1d(8 * hidden)
self.cv2d = nn.Conv1d(12 * hidden, 4 * hidden, 1)
self.bn2d = nn.BatchNorm1d(4 * hidden)
self.cv1d = nn.Conv1d(6 * hidden, 2 * hidden, 1)
self.bn1d = nn.BatchNorm1d(2 * hidden)
self.cv0d = nn.Conv1d(3 * hidden, hidden, 1)
self.bn0d = nn.BatchNorm1d(hidden)
self.fcout = nn.Conv1d(hidden, out_channels, 1)
self.fcout_gen = nn.Conv1d(hidden, out_channels, 1)
self.fcout_baseline = nn.Conv1d(hidden, w2v_size,1)
self.fcout_basline_convex1_relu = nn.ReLU()
self.fcout_basline_convex2_relu = nn.ReLU()
self.fcout_baseline_convex1 = nn.Conv1d(hidden, 256,1)
self.fcout_baseline_convex2 = nn.Conv1d(256,512,1)
self.fcout_baseline_convex3 = nn.Conv1d(512,w2v_size,1)
else:
self.fcout = nn.Linear(1024, out_channels)
self.dropout = nn.Dropout(0.5)
self.activation = nn.ReLU()
def compute_indices(self, pos):
ids0, _ = sampling_knn(pos, 16, ratio=1)
ids10_support, support1 = sampling(pos, ratio=0.25, return_support_points=True)
ids10 = sampling_apply_on_data(ids0, ids10_support)
ids11, _ = sampling_knn(support1, 16, ratio=1)
ids20_support, support2 = sampling(support1, ratio=0.25, return_support_points=True)
ids20 = sampling_apply_on_data(ids11, ids20_support)
ids21, _ = sampling_knn(support2, 16, ratio=1)
ids30_support, support3 = sampling(support2, ratio=0.25, return_support_points=True)
ids30 = sampling_apply_on_data(ids21, ids30_support)
ids31, _ = sampling_knn(support3, 16, ratio=1)
ids40_support, support4 = sampling(support3, ratio=0.25, return_support_points=True)
ids40 = sampling_apply_on_data(ids31, ids40_support)
ids41, _ = sampling_knn(support4, 16, ratio=1)
indices = [ids0, ids10, ids11, ids20, ids21, ids30, ids31, ids40, ids41]
support_points = [support1, support2, support3, support4]
if self.segmentation:
ids3u = knn(support4, support3, 1)
ids2u = knn(support3, support2, 1)
ids1u = knn(support2, support1, 1)
ids0u = knn(support1, pos, 1)
indices = indices + [ids3u, ids2u, ids1u, ids0u]
return None, indices, support_points
def forward_with_features(self, x, pos, support_points=None, indices=None, gen_forward = False, backbone = False):
if (support_points is None) or (indices is None):
_, indices, support_points = self.compute_indices(pos)
if self.segmentation:
ids0, ids10, ids11, ids20, ids21, ids30, ids31, ids40, ids41, ids3u, ids2u, ids1u, ids0u = indices
else:
ids0, ids10, ids11, ids20, ids21, ids30, ids31, ids40, ids41 = indices
support1, support2, support3, support4 = support_points
x0 = self.activation(self.bn0(self.cv0(x, pos, pos, ids0)))
x0 = self.resnetb01(x0, pos, pos, ids0)
x1 = self.resnetb10(x0, pos, support1, ids10)
x1 = self.resnetb11(x1, support1, support1, ids11)
x2 = self.resnetb20(x1, support1, support2, ids20)
x2 = self.resnetb21(x2, support2, support2, ids21)
x3 = self.resnetb30(x2, support2, support3, ids30)
x3 = self.resnetb31(x3, support3, support3, ids31)
x4 = self.resnetb40(x3, support3, support4, ids40)
x4 = self.resnetb41(x4, support4, support4, ids41)
if self.segmentation:
xout = sampling_apply_on_data(x4, ids3u, dim=2)
xout = self.activation(self.bn3d(self.cv3d(torch.cat([xout, x3], dim=1))))
xout = sampling_apply_on_data(xout, ids2u, dim=2)
xout = self.activation(self.bn2d(self.cv2d(torch.cat([xout, x2], dim=1))))
xout = sampling_apply_on_data(xout, ids1u, dim=2)
xout = self.activation(self.bn1d(self.cv1d(torch.cat([xout, x1], dim=1))))
xout = sampling_apply_on_data(xout, ids0u, dim=2)
xout = self.activation(self.bn0d(self.cv0d(torch.cat([xout, x0], dim=1))))
if backbone:
#Return after backbone
return xout
else:
if gen_forward:
xout = self.fcout_gen(xout)
else:
xout = self.fcout(xout)
else:
xout = x4.mean(dim=2)
xout = self.dropout(xout)
xout = self.fcout(xout)
return xout
def forward(self, x, pos, support_points=None, indices=None, gen_forward=False, backbone = False):
if x is None:
return self.compute_indices(pos)
else:
            return self.forward_with_features(x, pos, support_points, indices, gen_forward=gen_forward, backbone=backbone)
def backbone(self, x, pos, support_points, indices = None, gen_forward=False):
if x is None:
return self.compute_indices(pos)
else:
return self.forward_with_features(x, pos, support_points, indices, gen_forward=gen_forward, backbone=True)
def training_generative(self, xout):
return self.fcout_gen(xout)
def freeze_bn(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm1d):
m.eval()
    def freeze_backbone_fcout(self):
        # freeze everything except the generative head
        for name, param in self.named_parameters():
            param.requires_grad = name.startswith("fcout_gen")
    def get_1x_lr_params(self):
        for name, param in self.named_parameters():
            if not name.startswith("fcout_gen"):
                if param.requires_grad:
                    yield param
    def get_10x_lr_params(self):
        for name, param in self.named_parameters():
            if name.startswith("fcout_gen"):
                if param.requires_grad:
                    yield param
|
"""
This paver file is intended to help with the release process as much as
possible. It relies on virtualenv to generate 'bootstrap' environments as
independent from the user system as possible (e.g. to make sure the sphinx doc
is built against the built numpy, not an installed one).
Building a fancy dmg from scratch
=================================
Clone the numpy-macosx-installer git repo from github into the source tree
(numpy-macosx-installer should be in the same directory as setup.py). Then, do
as follows::
git clone git://github.com/cournape/macosx-numpy-installer
# remove build dir, and everything generated by previous paver calls
# (included generated installers). Use with care !
paver nuke
    paver bootstrap && source bootstrap/bin/activate
# Installing numpy is necessary to build the correct documentation (because
# of autodoc)
python setupegg.py install
paver dmg
Building a simple (no-superpack) windows installer from wine
============================================================
It assumes that blas/lapack are in c:\local\lib inside drive_c. It builds
Python 2.5 and Python 2.6 installers.
paver bdist_wininst_simple
You will have to configure your wine python locations (WINE_PYS).
The superpack requires all the atlas libraries for every arch to be installed
(see SITECFG), and can then be built as follows::
paver bdist_superpack
Building changelog + notes
==========================
Assumes you have git and the binaries/tarballs in installers/::
paver write_release
paver write_note
This automatically puts the checksum into NOTES.txt and writes the Changelog,
which can be uploaded to sourceforge.
TODO
====
- the script is messy, lots of global variables
- make it more easily customizable (through command line args)
- missing targets: install & test, sdist test, debian packaging
- fix bdist_mpkg: we build the same source twice -> how to make sure we use
the same underlying python for egg install in venv and for bdist_mpkg
"""
import os
import sys
import subprocess
import re
import shutil
try:
    from hashlib import md5
except ImportError:
    # Python < 2.5 fallback
    from md5 import md5
import distutils
try:
from paver.tasks import VERSION as _PVER
if not _PVER >= '1.0':
raise RuntimeError("paver version >= 1.0 required (was %s)" % _PVER)
except ImportError, e:
raise RuntimeError("paver version >= 1.0 required")
import paver
import paver.doctools
import paver.path
from paver.easy import options, Bunch, task, needs, dry, sh, call_task
setup_py = __import__("setup")
FULLVERSION = setup_py.FULLVERSION
# Wine config for win32 builds
WINE_SITE_CFG = ""
if sys.platform == "darwin":
WINE_PY25 = "/Applications/Darwine/Wine.bundle/Contents/bin/wine /Users/david/.wine/drive_c/Python25/python.exe"
WINE_PY26 = "/Applications/Darwine/Wine.bundle/Contents/bin/wine /Users/david/.wine/drive_c/Python26/python.exe"
else:
WINE_PY25 = "/home/david/.wine/drive_c/Python25/python.exe"
WINE_PY26 = "/home/david/.wine/drive_c/Python26/python.exe"
WINE_PYS = {'2.6' : WINE_PY26, '2.5': WINE_PY25}
SUPERPACK_BUILD = 'build-superpack'
SUPERPACK_BINDIR = os.path.join(SUPERPACK_BUILD, 'binaries')
# Where to put built documentation (where it will be picked up for copying
# into the binaries)
PDF_DESTDIR = paver.path.path('build') / 'pdf'
HTML_DESTDIR = paver.path.path('build') / 'html'
DOC_ROOT = paver.path.path("doc")
DOC_SRC = DOC_ROOT / "source"
DOC_BLD = DOC_ROOT / "build"
DOC_BLD_LATEX = DOC_BLD / "latex"
# Source of the release notes
RELEASE = 'doc/release/1.3.0-notes.rst'
# Start/end of the log (from git)
LOG_START = 'tags/1.2.0'
LOG_END = 'master'
# Virtualenv bootstrap stuff
BOOTSTRAP_DIR = "bootstrap"
BOOTSTRAP_PYEXEC = "%s/bin/python" % BOOTSTRAP_DIR
BOOTSTRAP_SCRIPT = "%s/bootstrap.py" % BOOTSTRAP_DIR
DMG_CONTENT = paver.path.path('numpy-macosx-installer') / 'content'
# Where to put the final installers, as put on sourceforge
RELEASE_DIR = 'release'
INSTALLERS_DIR = os.path.join(RELEASE_DIR, 'installers')
# XXX: fix this in a sane way
MPKG_PYTHON = {"25": "/Library/Frameworks/Python.framework/Versions/2.5/bin/python",
"26": "/Library/Frameworks/Python.framework/Versions/2.6/bin/python"}
options(sphinx=Bunch(builddir="build", sourcedir="source", docroot='doc'),
virtualenv=Bunch(script_name=BOOTSTRAP_SCRIPT,packages_to_install=["sphinx==0.6.1"]),
wininst=Bunch(pyver="2.5", scratch=True))
# Bootstrap stuff
@task
def bootstrap():
"""create virtualenv in ./install"""
install = paver.path.path(BOOTSTRAP_DIR)
if not install.exists():
install.mkdir()
call_task('paver.virtual.bootstrap')
sh('cd %s; %s bootstrap.py' % (BOOTSTRAP_DIR, sys.executable))
@task
def clean():
"""Remove build, dist, egg-info garbage."""
d = ['build', 'dist', 'numpy.egg-info']
for i in d:
paver.path.path(i).rmtree()
(paver.path.path('doc') / options.sphinx.builddir).rmtree()
@task
def clean_bootstrap():
paver.path.path('bootstrap').rmtree()
@task
@needs('clean', 'clean_bootstrap')
def nuke():
"""Remove everything: build dir, installers, bootstrap dirs, etc..."""
d = [SUPERPACK_BUILD, INSTALLERS_DIR]
for i in d:
paver.path.path(i).rmtree()
# NOTES/Changelog stuff
def compute_md5():
released = paver.path.path(INSTALLERS_DIR).listdir()
checksums = []
for f in released:
        m = md5(open(f, 'rb').read())
checksums.append('%s %s' % (m.hexdigest(), f))
return checksums
def write_release_task(filename='NOTES.txt'):
source = paver.path.path(RELEASE)
target = paver.path.path(filename)
if target.exists():
target.remove()
source.copy(target)
ftarget = open(str(target), 'a')
    ftarget.write("""
Checksums
=========
""")
ftarget.writelines(['%s\n' % c for c in compute_md5()])
def write_log_task(filename='Changelog'):
st = subprocess.Popen(
['git', 'svn', 'log', '%s..%s' % (LOG_START, LOG_END)],
stdout=subprocess.PIPE)
out = st.communicate()[0]
a = open(filename, 'w')
a.writelines(out)
a.close()
@task
def write_release():
write_release_task()
@task
def write_log():
write_log_task()
# Doc stuff
@task
def html(options):
"""Build numpy documentation and put it into build/docs"""
# Don't use paver html target because of numpy bootstrapping problems
subprocess.check_call(["make", "html"], cwd="doc")
builtdocs = paver.path.path("doc") / options.sphinx.builddir / "html"
HTML_DESTDIR.rmtree()
builtdocs.copytree(HTML_DESTDIR)
@task
def latex():
"""Build numpy documentation in latex format."""
subprocess.check_call(["make", "latex"], cwd="doc")
@task
@needs('latex')
def pdf():
def build_pdf():
subprocess.check_call(["make", "all-pdf"], cwd=str(DOC_BLD_LATEX))
dry("Build pdf doc", build_pdf)
PDF_DESTDIR.rmtree()
PDF_DESTDIR.makedirs()
user = DOC_BLD_LATEX / "numpy-user.pdf"
user.copy(PDF_DESTDIR / "userguide.pdf")
ref = DOC_BLD_LATEX / "numpy-ref.pdf"
ref.copy(PDF_DESTDIR / "reference.pdf")
def tarball_name(type='gztar'):
root = 'numpy-%s' % FULLVERSION
if type == 'gztar':
return root + '.tar.gz'
elif type == 'zip':
return root + '.zip'
raise ValueError("Unknown type %s" % type)
@task
def sdist():
# To be sure to bypass paver when building sdist... paver + numpy.distutils
# do not play well together.
sh('python setup.py sdist --formats=gztar,zip')
# Copy the superpack into installers dir
if not os.path.exists(INSTALLERS_DIR):
os.makedirs(INSTALLERS_DIR)
for t in ['gztar', 'zip']:
source = os.path.join('dist', tarball_name(t))
target = os.path.join(INSTALLERS_DIR, tarball_name(t))
shutil.copy(source, target)
#------------------
# Wine-based builds
#------------------
SSE3_CFG = {'BLAS': r'C:\local\lib\yop\sse3', 'LAPACK': r'C:\local\lib\yop\sse3'}
SSE2_CFG = {'BLAS': r'C:\local\lib\yop\sse2', 'LAPACK': r'C:\local\lib\yop\sse2'}
NOSSE_CFG = {'BLAS': r'C:\local\lib\yop\nosse', 'LAPACK': r'C:\local\lib\yop\nosse'}
SITECFG = {"sse2" : SSE2_CFG, "sse3" : SSE3_CFG, "nosse" : NOSSE_CFG}
def internal_wininst_name(arch, ismsi=False):
"""Return the name of the wininst as it will be inside the superpack (i.e.
with the arch encoded."""
if ismsi:
ext = '.msi'
else:
ext = '.exe'
return "numpy-%s-%s%s" % (FULLVERSION, arch, ext)
def wininst_name(pyver, ismsi=False):
"""Return the name of the installer built by wininst command."""
    # Yeah, the name logic is hardcoded in distutils. We have to reproduce
    # it here
if ismsi:
ext = '.msi'
else:
ext = '.exe'
name = "numpy-%s.win32-py%s%s" % (FULLVERSION, pyver, ext)
return name
def bdist_wininst_arch(pyver, arch, scratch=True):
"""Arch specific wininst build."""
if scratch:
paver.path.path('build').rmtree()
if not os.path.exists(SUPERPACK_BINDIR):
os.makedirs(SUPERPACK_BINDIR)
_bdist_wininst(pyver, SITECFG[arch])
source = os.path.join('dist', wininst_name(pyver))
target = os.path.join(SUPERPACK_BINDIR, internal_wininst_name(arch))
if os.path.exists(target):
os.remove(target)
os.rename(source, target)
def superpack_name(pyver, numver):
"""Return the filename of the superpack installer."""
return 'numpy-%s-win32-superpack-python%s.exe' % (numver, pyver)
def prepare_nsis_script(pyver, numver):
if not os.path.exists(SUPERPACK_BUILD):
os.makedirs(SUPERPACK_BUILD)
tpl = os.path.join('tools/win32build/nsis_scripts', 'numpy-superinstaller.nsi.in')
source = open(tpl, 'r')
target = open(os.path.join(SUPERPACK_BUILD, 'numpy-superinstaller.nsi'), 'w')
installer_name = superpack_name(pyver, numver)
cnt = "".join(source.readlines())
cnt = cnt.replace('@NUMPY_INSTALLER_NAME@', installer_name)
for arch in ['nosse', 'sse2', 'sse3']:
cnt = cnt.replace('@%s_BINARY@' % arch.upper(),
internal_wininst_name(arch))
target.write(cnt)
@task
def bdist_wininst_nosse(options):
"""Build the nosse wininst installer."""
bdist_wininst_arch(options.wininst.pyver, 'nosse', scratch=options.wininst.scratch)
@task
def bdist_wininst_sse2(options):
"""Build the sse2 wininst installer."""
bdist_wininst_arch(options.wininst.pyver, 'sse2', scratch=options.wininst.scratch)
@task
def bdist_wininst_sse3(options):
"""Build the sse3 wininst installer."""
bdist_wininst_arch(options.wininst.pyver, 'sse3', scratch=options.wininst.scratch)
@task
@needs('bdist_wininst_nosse', 'bdist_wininst_sse2', 'bdist_wininst_sse3')
def bdist_superpack(options):
"""Build all arch specific wininst installers."""
prepare_nsis_script(options.wininst.pyver, FULLVERSION)
subprocess.check_call(['makensis', 'numpy-superinstaller.nsi'],
cwd=SUPERPACK_BUILD)
# Copy the superpack into installers dir
if not os.path.exists(INSTALLERS_DIR):
os.makedirs(INSTALLERS_DIR)
source = os.path.join(SUPERPACK_BUILD,
superpack_name(options.wininst.pyver, FULLVERSION))
target = os.path.join(INSTALLERS_DIR,
superpack_name(options.wininst.pyver, FULLVERSION))
shutil.copy(source, target)
@task
@needs('clean', 'bdist_wininst')
def bdist_wininst_simple():
"""Simple wininst-based installer."""
_bdist_wininst(pyver=options.wininst.pyver)
def _bdist_wininst(pyver, cfg_env=WINE_SITE_CFG):
subprocess.check_call([WINE_PYS[pyver], 'setup.py', 'build', '-c', 'mingw32', 'bdist_wininst'], env=cfg_env)
#-------------------
# Mac OS X installer
#-------------------
def macosx_version():
if not sys.platform == 'darwin':
raise ValueError("Not darwin ??")
st = subprocess.Popen(["sw_vers"], stdout=subprocess.PIPE)
out = st.stdout.readlines()
    ver = re.compile(r"ProductVersion:\s+([0-9]+)\.([0-9]+)\.([0-9]+)")
for i in out:
m = ver.match(i)
if m:
return m.groups()
def mpkg_name():
maj, min = macosx_version()[:2]
pyver = ".".join([str(i) for i in sys.version_info[:2]])
return "numpy-%s-py%s-macosx%s.%s.mpkg" % \
(FULLVERSION, pyver, maj, min)
def dmg_name():
maj, min = macosx_version()[:2]
pyver = ".".join([str(i) for i in sys.version_info[:2]])
return "numpy-%s-py%s-macosx%s.%s.dmg" % \
(FULLVERSION, pyver, maj, min)
@task
def bdist_mpkg():
call_task("clean")
pyver = "".join([str(i) for i in sys.version_info[:2]])
sh("%s setupegg.py bdist_mpkg" % MPKG_PYTHON[pyver])
@task
@needs("bdist_mpkg", "pdf")
def dmg():
pyver = ".".join([str(i) for i in sys.version_info[:2]])
dmg_n = dmg_name()
dmg = paver.path.path('numpy-macosx-installer') / dmg_n
if dmg.exists():
dmg.remove()
# Clean the image source
content = DMG_CONTENT
content.rmtree()
content.mkdir()
# Copy mpkg into image source
mpkg_n = mpkg_name()
mpkg_tn = "numpy-%s-py%s.mpkg" % (FULLVERSION, pyver)
mpkg_source = paver.path.path("dist") / mpkg_n
mpkg_target = content / mpkg_tn
mpkg_source.copytree(content / mpkg_tn)
# Copy docs into image source
#html_docs = HTML_DESTDIR
#html_docs.copytree(content / "Documentation" / "html")
pdf_docs = DMG_CONTENT / "Documentation"
pdf_docs.rmtree()
pdf_docs.makedirs()
user = PDF_DESTDIR / "userguide.pdf"
user.copy(pdf_docs / "userguide.pdf")
ref = PDF_DESTDIR / "reference.pdf"
ref.copy(pdf_docs / "reference.pdf")
# Build the dmg
cmd = ["./create-dmg", "--window-size", "500", "500", "--background",
"art/dmgbackground.png", "--icon-size", "128", "--icon", mpkg_tn,
"125", "320", "--icon", "Documentation", "375", "320", "--volname", "numpy",
dmg_n, "./content"]
subprocess.check_call(cmd, cwd="numpy-macosx-installer")
@task
def simple_dmg():
# Build the dmg
image_name = "numpy-%s.dmg" % FULLVERSION
image = paver.path.path(image_name)
    image.remove()
    # `builddir` was previously undefined here; assume the image content
    # comes from the 'build' directory.
    builddir = paver.path.path('build')
    cmd = ["hdiutil", "create", image_name, "-srcdir", str(builddir)]
sh(" ".join(cmd))
@task
def write_note_changelog():
write_release_task(os.path.join(RELEASE_DIR, 'NOTES.txt'))
write_log_task(os.path.join(RELEASE_DIR, 'Changelog'))
|
import cv2
import matplotlib.pyplot as plt
# Load the image, keep an RGB copy for display, and threshold a grayscale
# version to find contours.
img = cv2.imread('/home/pi/book/dataset/4.2.07.tiff', 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)  # img is RGB at this point
ret, thresh = cv2.threshold(gray, 75, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, contours, -1, (0, 0, 255), 2)  # (0, 0, 255) is blue in RGB
original = cv2.imread('/home/pi/book/dataset/4.2.07.tiff', 1)
original = cv2.cvtColor(original, cv2.COLOR_BGR2RGB)
output = [original, img]
titles = ['Original', 'Contours']
for i in range(2):
plt.subplot(1, 2, i+1)
plt.imshow(output[i])
plt.title(titles[i])
plt.axis('off')
plt.show()
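# Version-portable variant (added for illustration): cv2.findContours
# returns (image, contours, hierarchy) on OpenCV 3.x but only
# (contours, hierarchy) on 2.x and 4.x; slicing off the last two return
# values works on all of these versions.
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)[-2:]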
|
'''256 color terminal handling
ALL_COLORS is a dict mapping color indexes to the ANSI escape code.
ALL_COLORS is essentially a "palette".
Note that ALL_COLORS is a dict that (mostly) uses integers as the keys.
Yeah, that is weird. In theory, the keys could also be other identifiers,
although that isn't used much at the moment. So, yeah, could/should just
be a list/array.
Also note that the ordering of the color indexes is fairly arbitrary.
It mostly follows the order of the ANSI escape codes. But it is
important to note that the order doesn't mean much. For example,
the colors for index 154 and 155 may or may not be related in any way.
This module also defines some convenience names for common keys of
ALL_COLORS.
The first 8 terminal colors:
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE
The terminal reset index:
RESET_SEQ_INDEX
The default color index:
DEFAULT_COLOR_IDX
'''
# hacky ansi color stuff
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
NUMBER_OF_BASE_COLORS = 8
ALL_COLORS = {}
# See https://en.wikipedia.org/wiki/ANSI_escape_code#8-bit
#: Used to determine what color index we start from.
#:
#: The xterm256 color indexes are:
#:
#: 0-7 normal term colors
#:
#: 8-15 bright term colors
#:
#: 16-231 are the rest from a 6x6x6 (216 color) rgb cube
#:
#: 16, 17 are black and very dark blue, so they are skipped since they are hard to read.
#:
#: 232-255 are the grays (white to gray to black); they are also skipped, which is why END_OF_THREAD_COLORS is 231.
RGB_COLOR_OFFSET = 16 + 2
# FIXME: needs to learn to support dark/light themes
START_OF_THREAD_COLORS = RGB_COLOR_OFFSET
END_OF_THREAD_COLORS = 231
NUMBER_OF_THREAD_COLORS = END_OF_THREAD_COLORS - RGB_COLOR_OFFSET
BASE_COLORS = {x: "\033[38;5;%dm" % x for x in range(NUMBER_OF_BASE_COLORS)}
# escape sequence format, e.g. "\x1b[38;5;231m"
THREAD_COLORS = {x: "\033[38;5;%dm" % x
                 for x in range(START_OF_THREAD_COLORS, END_OF_THREAD_COLORS)}
ALL_COLORS.update(BASE_COLORS)
ALL_COLORS.update(THREAD_COLORS)
#: The total number of colors once excluded and skipped colors are
#: accounted for. The color mappers use this to know what number to
#: modulus (%) by to figure out the color bucket.
NUMBER_OF_ALL_COLORS = len(ALL_COLORS) - RGB_COLOR_OFFSET
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = BASE_COLORS.keys()
#: Some named colors that map to the first 8 terminal colors
NAMED_COLOR_IDXS = {'BLACK': BLACK,
'RED': RED,
'GREEN': GREEN,
'YELLOW': YELLOW,
'BLUE': BLUE,
'MAGENTA': MAGENTA,
'CYAN': CYAN,
'WHITE': WHITE
}
#: The index for a 'reset'
RESET_SEQ_IDX = 256
ALL_COLORS[RESET_SEQ_IDX] = RESET_SEQ
#: The index for the 'default' color
DEFAULT_COLOR_IDX = 257
#: The default color (white)
DEFAULT_COLOR = NAMED_COLOR_IDXS['WHITE']
ALL_COLORS[DEFAULT_COLOR_IDX] = ALL_COLORS[DEFAULT_COLOR]
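# Minimal usage sketch (added for illustration): look up an escape
# sequence by index, wrap some text with it, and reset the terminal.
def colorize(text, color_idx=DEFAULT_COLOR_IDX):
    # Unknown indexes fall back to the default color.
    seq = ALL_COLORS.get(color_idx, ALL_COLORS[DEFAULT_COLOR_IDX])
    return '%s%s%s' % (seq, text, ALL_COLORS[RESET_SEQ_IDX])
# e.g. print(colorize('warning', NAMED_COLOR_IDXS['RED']))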
|
import torch
import pdb
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .beam_search import CaptionGenerator
__DEBUG__ = False
def process_lengths(input):
    # the input sequence should be [batch x word]
    max_length = input.size(1) - 1  # remove START
    # count the padding zeros per row; both branches must subtract the
    # count from max_length
    zero_counts = (
        input.data.eq(0).sum(1).squeeze()
        if isinstance(input, Variable)
        else input.eq(0).sum(1).squeeze()
    )
    lengths = list(max_length - zero_counts)
    lengths = [
        min(max_length, length + 1) for length in lengths
    ]  # add an additional word for EOS
    return lengths
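# Worked example (added for illustration) for process_lengths: given a
# zero-padded batch with START in column 0, e.g.
#   input = torch.LongTensor([[1, 5, 7, 0, 0],
#                             [1, 2, 0, 0, 0]])
# max_length is 4, the rows contain 2 and 3 padding zeros, so the raw
# lengths are 2 and 1; after adding one slot for EOS the result is [3, 2].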
def process_lengths_sort(input, include_inv=False, cuda=True):
# the input sequence should be in [batch x word]
max_length = input.size(1) - 1 # remove additional START
lengths = list(max_length - input.data.eq(0).sum(1, keepdim=False))
lengths = [(i, lengths[i]) for i in range(len(lengths))]
lengths.sort(key=lambda p: p[1], reverse=True)
feat_id = [lengths[i][0] for i in range(len(lengths))]
lengths = [
min(max_length, lengths[i][1] + 1) for i in range(len(lengths))
] # add additional word for EOS
if include_inv:
inv_id = torch.LongTensor(len(lengths))
for i, i_id in enumerate(feat_id):
inv_id[i_id] = i
if cuda:
return (
torch.LongTensor(feat_id).cuda(),
lengths,
torch.LongTensor(inv_id).cuda(),
)
else:
return torch.LongTensor(feat_id), lengths, torch.LongTensor(inv_id)
else:
if cuda:
return torch.LongTensor(feat_id).cuda(), lengths
else:
return torch.LongTensor(feat_id), lengths
class Abstract_Gen_Model(nn.Module):
def __init__(self, vocab, opt):
super(Abstract_Gen_Model, self).__init__()
self.vocab = vocab
self.start = vocab.index("START") if "START" in vocab else None
self.end = vocab.index("EOS")
self.unk = vocab.index("UNK")
self.classifier = nn.Linear(opt["dim_h"], len(self.vocab), bias=False)
self.embedder = nn.Embedding(len(self.vocab), opt["dim_embedding"])
self.opt = opt
if opt["share_weight"]:
assert (
opt["dim_embedding"] == opt["dim_h"]
), "If share_weight is set, dim_embedding == dim_h required!"
            self.embedder.weight = (
                self.classifier.weight
            )  # tie the embedding weights to the final classifier
        # initialization
torch.nn.init.uniform_(self.embedder.weight, -0.25, 0.25)
class SAMI(Abstract_Gen_Model): # Single Answer and Multiple Image
def __init__(self, vocab, opt):
super(SAMI, self).__init__(vocab, opt)
self.rnn = nn.LSTM(
opt["dim_embedding"] + opt["dim_v"],
opt["dim_h"],
num_layers=opt["nb_layers"],
batch_first=True,
)
def forward(self, v_feat, a_feat, questions):
"""
        The answer embedding is fed at the first step, and the image
        embedding, concatenated with the word embedding, is fed into the
        model at every step.
"""
# prepare the data
batch_size = questions.size(0)
max_length = questions.size(1)
new_ids, lengths, inv_ids = process_lengths_sort(
questions.cpu().data, include_inv=True
)
new_ids = Variable(new_ids).detach()
inv_ids = Variable(inv_ids).detach()
padding_size = questions.size(1) - lengths[0]
questions = questions.index_select(0, new_ids)
v_feat = v_feat.index_select(0, new_ids)
a_feat = a_feat.index_select(0, new_ids)
embeddings = self.embedder(questions)
v_feat = v_feat.unsqueeze(1).expand(batch_size, max_length, self.opt["dim_v"])
embeddings = torch.cat(
[embeddings, v_feat], 2
) # each time step, input image and word embedding
a_feat = a_feat.unsqueeze(1)
_, hidden_feat = self.rnn(a_feat)
packed_embeddings = pack_padded_sequence(
embeddings, lengths, batch_first=True
) # add additional image feature
feats, _ = self.rnn(packed_embeddings, hidden_feat)
        if __DEBUG__:
            print("[Generation Module] RNN feature.std():", feats.data.std())
pred = self.classifier(feats[0])
        pred = pad_packed_sequence(PackedSequence(pred, feats[1]), batch_first=True)
pred = pred[0].index_select(0, inv_ids)
if padding_size > 0:
pred = torch.cat(
[
pred,
Variable(
torch.zeros(batch_size, padding_size, pred.size(2)).type_as(
pred.data
)
).detach(),
],
1,
)
return pred
def generate(self, v_feat, a_feat):
batch_size = v_feat.size(0)
max_length = self.opt["nseq"] if "nseq" in self.opt else 20
# x = Variable(torch.ones(1, batch_size,).type(torch.LongTensor) * self.start, volatile=True).cuda() # <start>
output = Variable(
torch.zeros(max_length, batch_size).type(torch.LongTensor)
).cuda()
scores = torch.zeros(batch_size)
flag = torch.ones(batch_size)
input_x = a_feat.unsqueeze(1)
_, hidden_feat = self.rnn(input_x) # initialize the LSTM
x = Variable(
torch.ones(batch_size, 1).type(torch.LongTensor) * self.start,
requires_grad=False,
).cuda() # <start>
v_feat = v_feat.unsqueeze(1)
input_x = torch.cat([self.embedder(x), v_feat], 2)
for i in range(max_length):
output_feature, hidden_feat = self.rnn(input_x, hidden_feat)
output_t = self.classifier(
output_feature.view(batch_size, output_feature.size(2))
)
            output_t = F.log_softmax(output_t, dim=1)
logprob, x = output_t.max(1)
output[i] = x
scores += logprob.cpu().data * flag
flag[x.cpu().eq(self.end).data] = 0
if flag.sum() == 0:
break
input_x = torch.cat([self.embedder(x.view(-1, 1)), v_feat], 2)
return output.transpose(0, 1)
class Baseline(Abstract_Gen_Model):
def __init__(self, vocab, opt):
super(Baseline, self).__init__(vocab, opt)
self.rnn = nn.LSTM(
opt["dim_embedding"],
opt["dim_h"],
num_layers=opt["nb_layers"],
batch_first=True,
)
def forward(self, va_feat, questions):
        # image feature transform
batch_size = va_feat.size(0)
new_ids, lengths, inv_ids = process_lengths_sort(
questions.cpu().data, include_inv=True
)
new_ids = Variable(new_ids).detach()
inv_ids = Variable(inv_ids).detach()
        # padding needed to restore the original (max) sequence length
padding_size = questions.size(1) - lengths[0]
questions = questions.index_select(0, new_ids)
        if __DEBUG__:
            print("[Generation Module] input feat.std():", va_feat.std())
embeddings = self.embedder(questions)
va_feat = va_feat.index_select(0, new_ids).unsqueeze(1)
_, hidden_feat = self.rnn(va_feat)
packed_embeddings = pack_padded_sequence(
embeddings, lengths, batch_first=True
) # add additional image feature
feats, _ = self.rnn(packed_embeddings, hidden_feat)
        if __DEBUG__:
            print("[Generation Module] RNN feature.std():", feats.data.std())
pred = self.classifier(feats[0])
pred = pad_packed_sequence(PackedSequence(pred, feats[1]), batch_first=True)
pred = pred[0].index_select(0, inv_ids)
if (
padding_size > 0
        ):  # to make sure the sizes of different batches match
pred = torch.cat(
[
pred,
Variable(
torch.zeros(batch_size, padding_size, pred.size(2)).type_as(
pred.data
)
).detach(),
],
1,
)
return pred
def generate(self, va_feat):
batch_size = va_feat.size(0)
max_length = self.opt["nseq"] if "nseq" in self.opt else 20
# x = Variable(torch.ones(1, batch_size,).type(torch.LongTensor) * self.start, volatile=True).cuda() # <start>
output = Variable(
torch.zeros(max_length, batch_size).type(torch.LongTensor)
).cuda()
scores = torch.zeros(batch_size)
flag = torch.ones(batch_size)
input_x = va_feat.unsqueeze(1)
_, hidden_feat = self.rnn(input_x) # initialize the LSTM
x = Variable(
torch.ones(batch_size, 1).type(torch.LongTensor) * self.start,
requires_grad=False,
).cuda() # <start>
input_x = self.embedder(x)
for i in range(max_length):
output_feature, hidden_feat = self.rnn(input_x, hidden_feat)
output_t = self.classifier(
output_feature.view(batch_size, output_feature.size(2))
)
output_t = F.log_softmax(output_t, dim=1)
logprob, x = output_t.max(1)
output[i] = x
scores += logprob.cpu().data * flag
flag[x.cpu().eq(self.end).data] = 0
if flag.sum() == 0:
break
input_x = self.embedder(x.view(-1, 1))
return output.transpose(0, 1)
def beam_search(
self,
va_feat,
beam_size=3,
max_caption_length=20,
length_normalization_factor=0.0,
include_unknown=False,
):
batch_size = va_feat.size(0)
assert (
batch_size == 1
), "Currently, the beam search only support batch_size == 1"
input_x = va_feat.unsqueeze(1)
_, hidden_feat = self.rnn(input_x) # initialize the LSTM
x = Variable(
torch.ones(batch_size, 1).type(torch.LongTensor) * self.start, volatile=True
).cuda() # <start>
input_x = self.embedder(x)
cap_gen = CaptionGenerator(
embedder=self.embedder,
rnn=self.rnn,
classifier=self.classifier,
eos_id=self.end,
include_unknown=include_unknown,
unk_id=self.unk,
beam_size=beam_size,
max_caption_length=max_caption_length,
length_normalization_factor=length_normalization_factor,
batch_first=True,
)
sentences, score = cap_gen.beam_search(input_x, hidden_feat)
sentences = [" ".join([self.vocab[idx] for idx in sent]) for sent in sentences]
return sentences
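# Usage sketch (added for illustration; the vocab, opt values, and feature
# dimensions are assumed, and the .cuda() calls inside generate() require
# a GPU):
# vocab = ['START', 'EOS', 'UNK', 'what', 'is', 'this']
# opt = {'dim_h': 512, 'dim_embedding': 512, 'dim_v': 2048,
#        'nb_layers': 1, 'share_weight': False, 'nseq': 20}
# model = Baseline(vocab, opt).cuda()
# va_feat = Variable(torch.randn(4, 512)).cuda()
# word_ids = model.generate(va_feat)  # LongTensor of shape [4 x nseq]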
|
w = 12
x = 3
y = 2
z = 4
y < z # no chaining, with True answer
z < y # no chaining, with False answer
x < z > y # should be same as (3 < 4 and 4 > 2), True answer
x < z > w # should be same as (3 < 4 and 4 > 12), False answer
(x < z) > y # not a chain: (x < z) is True, then True > 2, i.e. 1 > 2, False answer
y < x < z < w # 4-operand chain, True answer
y < z < x < w # 4-operand chain, False answer
y < z < x + 2 < w # 4-operand chain with a nested expression, True answer
w & (2 ** 4) != 0 # != binds tighter than &, so this is w & ((2 ** 4) != 0) == 12 & True == 0
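# Added sketch: in a chained comparison each operand is evaluated at most
# once, left to right, and evaluation short-circuits on the first False.
def val(n):
    print('evaluating', n)
    return n
val(2) < val(3) < val(4) # prints all three operands once, True
val(3) < val(2) < val(4) # val(4) is never evaluated, False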
|
from sys import argv
def print_even_odd_zero(d):
if d == 0:
print("I'm Zero.")
elif d % 2:
print("I'm Odd.")
else:
print("I'm Even.")
try:
if len(argv) == 1:
pass
elif len(argv) > 2:
raise Exception
else:
d = int(argv[1])
print_even_odd_zero(d)
except Exception:
print('ERROR')
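# Example invocations (added; the script name is assumed):
#   $ python even_odd_zero.py 0      ->  I'm Zero.
#   $ python even_odd_zero.py 7      ->  I'm Odd.
#   $ python even_odd_zero.py 1 2    ->  ERROR (too many arguments)
#   $ python even_odd_zero.py abc    ->  ERROR (int() raises ValueError)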
|
import time
from enum import Enum
from typing import Dict, Optional, Union
class StateChange(Enum):
BECAME_OPEN = 'became_open'
BECAME_THROTTLED = 'became_throttled'
STILL_THROTTLED = 'still_throttled'
STILL_OPEN = 'still_open'
STATE_CHANGE_MAP = {
# Key: (did change, current state)
(True, True): StateChange.BECAME_OPEN,
(True, False): StateChange.BECAME_THROTTLED,
(False, True): StateChange.STILL_OPEN,
(False, False): StateChange.STILL_THROTTLED,
}
class Rate:
__slots__ = ('rate', 'period', 'rate_per_period')
def __init__(self, rate: int, period: Union[float, int] = 1) -> None:
self.rate = float(rate)
self.period = float(period)
if self.rate < 0:
raise ValueError(f'`rate` must be >= 0 (not {self.rate!r})')
if self.period <= 0:
raise ValueError(f'`period` must be > 0 (not {self.period!r})')
self.rate_per_period = (self.rate / self.period)
def __repr__(self) -> str:
return f'<Rate {self.rate:f} per {self.period:f}>'
class TickResult:
"""
The result of a `.tick()` operation.
If the `.state` value is True, the event should be allowed;
if it is False, the event should not be allowed.
(As a shorthand, `TickResult`s are truthy iff `.state` is True.)
To find out whether the `.tick()` operation caused the state to change,
and how, the `.did_change` value can be accessed; to find out the exact
change state (as a `StateChange` value), it's available as `.state_change`.
"""
__slots__ = ('state', 'did_change')
def __init__(self, state: bool, did_change: bool) -> None:
self.state = bool(state)
self.did_change = bool(did_change)
@property
def state_change(self) -> StateChange:
return STATE_CHANGE_MAP[(self.did_change, self.state)]
def __bool__(self) -> bool:
return self.state
def __repr__(self) -> str:
state_text = ('throttled' if not self.state else 'open')
return f'<TickResult: {state_text} (change: {self.state_change})>'
class RateLimiter:
"""
A token bucket -based rate limiter.
See: https://en.wikipedia.org/wiki/Token_bucket
Loosely based on https://stackoverflow.com/a/668327/51685
"""
#: The clock to use for RateLimiters. Should return seconds (or whatever is
#: the `period` of the RateLimiter) as a floating-point number.
#: By default, the high-resolution performance counter is used.
#: This can be overwritten, or overridden in subclasses.
clock = (time.perf_counter if hasattr(time, 'perf_counter') else time.time)
__slots__ = ('rate', 'allow_underflow', 'last_check', 'allowance', 'current_state')
def __init__(self, rate: Rate, allow_underflow: bool = False) -> None:
"""
:param rate: The Rate for this RateLimiter.
:param allow_underflow: Whether to allow underflow for the limiter, i.e.
whether subsequent ticks during throttling may cause
the "token counter", as it were, to go negative.
"""
self.rate = rate
self.allow_underflow = bool(allow_underflow)
self.last_check = None # type: Optional[float]
self.allowance = None # type: Optional[float]
self.current_state = None # type: Optional[bool]
@classmethod
def from_per_second(cls, per_second: int, allow_underflow: bool = False) -> "RateLimiter":
return cls(rate=Rate(rate=per_second), allow_underflow=allow_underflow)
def _tick(self) -> bool:
# https://github.com/python/mypy/issues/6910
current = self.clock() # type: float # type: ignore[misc]
        if self.current_state is None:
            # First tick: start from a full allowance. current_state stays
            # None so tick() can record the initial state without flagging
            # a change.
            self.last_check = current
            self.allowance = self.rate.rate
last_check = self.last_check # type: float # type: ignore[assignment]
time_passed = current - last_check
self.last_check = current
self.allowance += time_passed * self.rate.rate_per_period # type: ignore[operator]
self.allowance = min(self.allowance, self.rate.rate) # Do not allow allowance to grow unbounded
throttled = (self.allowance < 1)
if self.allow_underflow or not throttled:
self.allowance -= 1
return (not throttled)
def reset(self) -> None:
"""
Reset the rate limiter to an open state.
"""
self.current_state = self.allowance = self.last_check = None
def tick(self) -> TickResult:
"""
Tick the rate limiter, i.e. when a new event should be processed.
:return: Returns a TickResult; see that class's documentation for information.
"""
new_state = self._tick()
if self.current_state is None:
self.current_state = new_state
did_change = (new_state is not self.current_state)
self.current_state = new_state
return TickResult(new_state, did_change)
def __repr__(self) -> str:
state_text = ('throttled' if not self.current_state else 'open')
return f'<RateLimiter {state_text} (allowance {self.allowance}, rate {self.rate})>'
class MultiRateLimiter:
"""
Wraps multiple RateLimiters in a map.
"""
rate_limiter_class = RateLimiter
allow_underflow = False
def __init__(self, default_limit: Rate, per_name_limits: Optional[Dict[str, Rate]] = None) -> None:
self.limiters = {} # type: Dict[str, RateLimiter]
self.default_limit = default_limit
self.per_name_limits = dict(per_name_limits or {})
assert isinstance(default_limit, Rate)
assert all(isinstance(l, Rate) for l in self.per_name_limits.values())
def tick(self, name: str) -> TickResult:
"""
Tick a named RateLimiter.
:param name: Name of the limiter.
:return: TickResult for the limiter.
"""
return self.get_limiter(name).tick()
def reset(self, name: str) -> bool:
"""
Reset (i.e. delete) a named RateLimiter.
:param name: Name of the limiter.
:return: True if the limiter was found and deleted.
"""
return bool(self.limiters.pop(name, None))
def get_limiter(self, name: str) -> RateLimiter:
"""
Get (or instantiate) a named RateLimiter.
:param name: Name of the limiter.
"""
limiter = self.limiters.get(name)
if not limiter:
limiter = self.limiters[name] = self.rate_limiter_class(
rate=self.get_rate(name),
allow_underflow=self.allow_underflow,
)
return limiter
def get_rate(self, name: str) -> Rate:
"""
Get the RateLimit for a named RateLimiter.
This function is a prime candidate for overriding in a subclass.
:param name: Name of the limiter.
"""
return self.per_name_limits.get(name, self.default_limit)
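# Usage sketch (added for illustration): allow two events per second and
# watch the limiter throttle the third immediate tick.
if __name__ == '__main__':
    limiter = RateLimiter.from_per_second(2)
    for i in range(4):
        result = limiter.tick()
        print(i, bool(result), result.state_change)
    # Expected: ticks 0 and 1 are open, ticks 2 and 3 are throttled.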
|
#!/usr/bin/env python
import os
import pprint
# ------------------------------------------------------------------------------
#
# This example demonstrates various utilities to inspect, print, trigger stack
# traces and exceptions with RU.
#
# ------------------------------------------------------------------------------
import radical.utils as ru
# ------------------------------------------------------------------------------
# helper method
def raise_something():
print('%s wants an exception' % ru.get_caller_name())
raise RuntimeError('oops')
# ------------------------------------------------------------------------------
# print current stack trace
def inner_1(arg_1, arg_2): # pylint: disable=W0613
ru.print_stacktrace()
# ------------------------------------------------------------------------------
# get current stack trace as list (to store to disk or print or whatever)
def inner_2(arg_1, arg_2): # pylint: disable=W0613
st = ru.get_stacktrace()
pprint.pprint(st)
# ------------------------------------------------------------------------------
# print an exception trace, pointing to the origin of the exception
def inner_3(arg_1, arg_2): # pylint: disable=W0613
try:
raise_something()
except Exception:
ru.print_exception_trace()
# ------------------------------------------------------------------------------
# print the name of the calling class and method
def inner_4(arg_1, arg_2): # pylint: disable=W0613
print(ru.get_caller_name())
# ------------------------------------------------------------------------------
# trigger exception for integration testing etc.
def inner_5(arg_1, arg_2): # pylint: disable=W0613
os.environ['RU_RAISE_ON_TEST'] = '3'
for i in range(10):
print(i)
ru.raise_on('test')
print()
os.environ['RU_RAISE_ON_RAND'] = 'RANDOM_10'
for i in range(100):
try:
ru.raise_on('rand')
except Exception:
print('raised on %d' % i)
# ------------------------------------------------------------------------------
#
def outer(arg):
print('--------------------------------')
inner_1(arg, 'bar')
print('--------------------------------')
inner_2(arg, 'bar')
print('--------------------------------')
inner_3(arg, 'bar')
print('--------------------------------')
inner_4(arg, 'bar')
print('--------------------------------')
inner_5(arg, 'bar')
print('--------------------------------')
# ------------------------------------------------------------------------------
#
if __name__ == '__main__':
outer('foo')
# ------------------------------------------------------------------------------
|
"""Fitting Models to Fit data with."""
import numpy as np
from scipy.integrate import odeint
from scipy.special import erf, erfc
from scipy.stats import norm, skewnorm
from iminuit import Minuit, describe
import sys
import yaml
import logging
from .utils.static import sfgn
thismodule = sys.modules[__name__]
logger = logging.getLogger(__name__)
flatten = lambda l: [item for sublist in l for item in sublist]
#logger.setLevel(logging.DEBUG)
def read_fit_results(fname):
with open(fname) as ifile:
        fit_results = yaml.safe_load(ifile)  # safe_load: plain data only
return fit_results
def model_fit_record(
model,
record,
kwargs_select_y,
kwargs_select_x,
kwargs_select_yerr=None,
kwargs_model=None,
run=False,
):
"""Make a model using selected data from SfgRecrod.
**Parameters**:
- **model**: String, Class name of the model to use
- **record**: SfgRecord obj to select data from
- **kwargs_ydata**: kwargs to select y data with
- **kwargs_xdata**: kwargs to select x data with
- **kwargs_model**: kwargs to pass to model
- **kwargs_yerr**: kwargs to select yerr with
**Keywords:**
- **run**: Actually run the fit
**Returns:**
A model obj for the fit.
"""
if not kwargs_model:
kwargs_model = {}
#if not kwargs_select_yerr:
# raise NotImplementedError('Models without errorbar not implemented yet')
logger.debug('Selecting model data from record with:')
logger.debug('ydata :{}'.format(kwargs_select_y))
xdata = record.select(**kwargs_select_x).squeeze()
ydata = record.select(**kwargs_select_y).squeeze()
if not kwargs_select_yerr:
yerr = np.ones_like(ydata)
else:
yerr = record.sem(**kwargs_select_yerr).squeeze()
logger.debug('Setting model with:')
logger.debug('xdata: {}'.format(xdata))
logger.debug('ydata: {}'.format(ydata))
logger.debug('yerr: {}'.format(yerr))
    logger.debug('kwargs_model: {}'.format(kwargs_model))
model = getattr(thismodule, model)(xdata, ydata, yerr, **kwargs_model)
if run:
fit_model(
model, # print_matrix=print_matrix
)
return model
def make_model_fit(
model,
xdata,
ydata,
yerr=None,
fit=False,
print_matrix=True,
model_kwargs={}
):
"""Generig interface for model fits.
**Arguments:**
- **model**: String name of model class
- **xdata**: x-data to the model
- **ydata**: y-data to model
**Keywords:**
- **yerr**: yerr to model
    - **fit**: boolean, whether to run the fit
- **model_kwargs**: Keywords passed to model during creation
"""
model = getattr(thismodule, model)(xdata, ydata, yerr, **model_kwargs)
if fit:
fit_model(
model, print_matrix=print_matrix
)
return model
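# Usage sketch (added for illustration): fit a Gaussian to synthetic data.
# GaussianModelM is defined further below in this module; the `fitarg`
# values are assumed starting guesses.
# x = np.linspace(-5, 5, 101)
# y = 2 * norm.pdf(x, 0.5, 1.2) + 0.1 + np.random.normal(0, 0.01, x.size)
# model = make_model_fit(
#     'GaussianModelM', x, y, fit=True, print_matrix=False,
#     model_kwargs={'fitarg': {'A': 1, 'mu': 0, 'sigma': 1, 'c': 0}})
# print(model.p)  # fitted [A, mu, sigma, c]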
def fit_model(model, minos=False, print_matrix=True):
"""Function to run migrad minimizations.
**Arguments:**
- **model**: Model instance to run migrad of.
**Keywords:**
- **minos**: Boolean, If Errors should be calculated with minos.
Slow but more precise error estimation of the fit parameters.
"""
model.minuit.migrad() # Run the minimization
if minos:
model.minuit.minos()
model.minuit.migrad()
if print_matrix:
try:
model.minuit.print_matrix()
except ValueError:
pass
def normalize_trace(model, shift_mu=False, scale_amp=False, shift_heat=False, scale_x=None):
"""Normalize trace.
model: model to work on
shift_mu: Schift by mu value of fit
scale_amp: Scale by realtive height between heat and minimum.
shift_heat: Make heat value equal
returns shiftet data arrays with:
"""
mu = 0
if shift_mu:
mu = model.minuit.fitarg['mu']
offset = 0
if shift_heat:
offset = 1-model.yfit_sample[-1]
scale = 1
if scale_amp:
x_mask = np.where((model.xsample-mu>0) & (model.xsample-mu<1000))
scale = 1-offset-model.yfit_sample[x_mask].min()
xdata = model.xdata - mu
ydata = (model.ydata+offset-1)/scale+1
yerr = model.yerr/scale
xsample = model.xsample - mu
yfit_sample = (model.yfit_sample+offset-1)/scale+1
if scale_x:
xdata = scale_x * xdata
xsample = scale_x * xsample
return xdata, ydata, yerr, xsample, yfit_sample
class Fitter():
def __init__(
self,
xdata=None,
ydata=None,
sigma=None,
fitarg={},
box_coords=None,
roi=None,
name='',
ignore_errors=False,
**kwargs
):
"""Base Class to fit with Minuit.
- **ignore_errors**:
Optional if given, sigmas will get ignored during the fit.
**fitarg**: Dictionary gets passed to minuit.
and sets the starting parameters.
**kwargs:**
Get passed to minuit. Most important is
"""
self.xdata = xdata
self.ydata = ydata
self.sigma = sigma # y-errors.
self.cov = None # The covariance of the fit
# Coordinates of the fit result box in fig coordinates
self._box_coords = box_coords
        self._box_str_format = '{:2}: {:8.3g} $\\pm$ {:6.1g}\n'
if not roi:
roi = slice(None)
self.roi = roi
self._pnames = None
self._xsample_num = 400
self.name = name
self.ignore_errors=ignore_errors
# Buffer for figures
self.figures = {}
kwargs.setdefault('pedantic', False)
# Minuit is used for fitting. This makes self.chi2 the fit function
#logger.info(self.chi2)
#kwargs['forced_parameters'] = self.parameters
logger.info(fitarg)
logger.info(kwargs)
self.minuit = Minuit(self.chi2, **fitarg, **kwargs)
def _setup_fitter_kwargs(self, fitarg, kwargs=None):
"""Setup initial fitter kwargs
Use this to pass default fitargs and parameter names to Minuit.
This allows to initialize a Model class with only a fitfunc and no
boilerplate chi2 function.
"""
# This gurantees correct oder and names of fitparameters
# we start at 1 because running value (x or t) must be skipped
self.parameter_names = describe(self.fit_func)[1:]
# The oder of parameters is important
fitarg['forced_parameters'] = self.parameter_names
if not kwargs:
kwargs = {}
if not kwargs.get('fitarg'):
kwargs['fitarg'] = {}
kwargs['fitarg'] = {**fitarg, **kwargs['fitarg']}
        # TODO: add a check that fitarg and parameter_names fit together
return kwargs
def chi2(self, *args, **kwargs):
"""Sum of distance of data and fit. Weighted by uncertainty of data."""
return np.sum(
(
(self.ydata - self.fit_func(self.xdata, *args, **kwargs)) /
self.sigma
)**2
)
def fit_func(self):
"""Fit function that must be implemented by child classes."""
raise NotImplementedError
@property
def parameters(self):
return describe(self.fit_func)[1:]
@property
def box_coords(self):
"""Coordinades for the fit results box."""
if not self._box_coords:
return self.xdata.mean(), self.ydata.mean()
return self._box_coords
def draw_text_box(self, box_coords=None, **kwargs):
"""Draw a textbox on current axes."""
from matplotlib.pyplot import text
if not box_coords:
box_coords = self.box_coords
text(*box_coords, self.box_str, **kwargs)
@property
def p(self):
"""Parameters of the Fit."""
#return self.minuit.args
return [self.minuit.fitarg[param] for param in self.minuit.parameters]
@property
def box_str(self):
"""String to place on the plot. Showing Fit Statistics."""
text = ''
for name, value in zip(self.minuit.parameters, self.minuit.args):
text += self._box_str_format.format(
name, value, self.minuit.errors[name]
)
return text
@property
def xdata(self):
"""X data for the fit."""
return self._xdata[self.roi]
@xdata.setter
def xdata(self, value):
if len(np.shape(value)) != 1:
            raise IndexError('Shape of xdata is not of dim 1')
self._xdata = value
@property
def ydata(self):
"""Y data for the fit."""
return self._ydata[self.roi]
@ydata.setter
def ydata(self, value):
if len(np.shape(value)) != 1:
            raise IndexError('Shape of ydata is not of dim 1')
self._ydata = value
@property
def sigma(self):
"""Error of the ydata for the fit."""
if isinstance(self._sigma, type(None)):
return np.ones_like(self.ydata)
if self.ignore_errors:
return np.ones_like(self.ydata)
return self._sigma[self.roi]
@sigma.setter
def sigma(self, value):
self._sigma = value
if isinstance(value, type(None)):
self._sigma = np.ones_like(self._ydata)
elif len(np.shape(value)) != 1:
            raise IndexError('Shape of yerr is not of dim 1')
        if np.any(value == 0):
            pos = np.where(value == 0)
            logger.warning('Zero value within uncertainty.')
            logger.warning('Zero values @ {}'.format(pos))
@property
def yerr(self):
"""Error of the ydata for the fit."""
return np.array(self.sigma)
def fit_res(self, x):
"""Fit function wit fit result parameters"""
return self.fit_func(x, *self.p)
@property
def x_edges(self):
"""Edges of the x data of the fit."""
return self.xdata[0], self.xdata[-1]
@property
def y_edges(self):
"""Edges of the y data of the fit."""
return self.ydata[0], self.ydata[-1]
@property
def xsample(self):
"""A sampled version of the xdata. `Fitter._xsample_num` is the number
of samples.
`Fitter.xsample` is used to generate a smooth plot of the fitting curve.
"""
return np.linspace(self.xdata[0], self.xdata[-1], self._xsample_num)
@property
def ysample(self):
"""Y vales of the fit function sampled with `Fitter.xsample`."""
return self.yfit_sample
@property
def y_fit(self):
"""Y vales of the fit function sampled with `Fitter.xsample`."""
return self.yfit_sample
@property
def yfit_sample(self):
"""Y vales of the fit function sampled with `Fitter.xsample`."""
return self.fit_res(self.xsample)
@property
def fitarg(self):
"""Minuit fitargs."""
return self.minuit.fitarg
def plot(self, kwargs_data=None, kwargs_fit=None):
"""Function to easily look at a plot. Not very flexible. But usefull during
interactive sessions.
"""
import matplotlib.pyplot as plt
if not kwargs_data:
kwargs_data = {}
kwargs_data.setdefault('x', self.xdata)
kwargs_data.setdefault('y', self.ydata)
kwargs_data.setdefault('yerr', self.yerr)
kwargs_data.setdefault('fmt', 'o')
if not kwargs_fit:
kwargs_fit = {}
kwargs_fit.setdefault('x', self.xsample)
kwargs_fit.setdefault('y', self.ysample)
kwargs_fit.setdefault('color', 'r')
plt.errorbar(**kwargs_data)
plt.plot(**kwargs_fit)
    def save(self, fpath):
        """Save the fit (fitarg) to a YAML file."""
        with open(fpath, 'w') as ofile:
            yaml.dump(self.fitarg, ofile)
@property
def kwargs(self):
"""Dict containing the most important kwargs of the Model."""
return {
'xdata' : self.xdata.tolist(),
'ydata' : self.ydata.tolist(),
'sigma': self.sigma.tolist(),
'fitarg' : self.fitarg,
}
@property
def dict(self):
"""Dict containing class name and most important kwargs."""
return {
'name': self.__class__.__name__,
'module': self.__module__,
'kwargs': self.kwargs
}
class GaussianModelM(Fitter):
def __init__(self, *args, **kwargs):
        ''' Fit a Gaussian model using Minuit.
**args**/**kwargs:**
Get passed to `sfg2d.models.Fitter`. Options are:
- **xdata**: array of x data points
- **ydata**: array of y data points
- **sigma**: Array of y data errors
- **fitarg**: Dictionary with fit conditions.
Each parameter has an entry with its name `'parameter'`
`'error_parameter'` `'fix_parameter'` and `'limit_parameter'`
- **box_coords**: Coordinates of the fit result box in data coordinates.
- **roi**: Slice. Region of interest of the data.
This subregion will be used for fitting.
- **name**: Str, Name to describe the Model.
'''
kwargs = self._setup_fitter_kwargs(
{'A': 1, 'mu':0, 'sigma': 1, 'c': 0,},
kwargs
)
Fitter.__init__(self, *args, **kwargs)
self._box_str_format = '{:5}: {:7.3g} $\\pm$ {:6.1g}\n'
def fit_func(self, x, A, mu, sigma, c):
"""Guassian function
A: amplitude
mu: position
sigma: std deviation
c : offset
"""
# Minuit passes negative values for sigma
# and these values lead to failures of the fitting
if sigma < 0:
return 0
return A * norm.pdf(x, mu, sigma) + c
class GaussianModelN(Fitter):
def __init__(self, *args, parameter_dict=None, **kwargs):
        ''' Fit a Gaussian model using Minuit.
**args**/**kwargs:**
Get passed to `sfg2d.models.Fitter`. Options are:
- **parameter_dict**: Dict of parameters for gaussian fit.
- **xdata**: array of x data points
- **ydata**: array of y data points
- **sigma**: Array of y data errors
- **fitarg**: Dictionary with fit conditions.
Each parameter has an entry with its name `'parameter'`
`'error_parameter'` `'fix_parameter'` and `'limit_parameter'`
- **box_coords**: Coordinates of the fit result box in data coordinates.
- **roi**: Slice. Region of interest of the data.
This subregion will be used for fitting.
- **name**: Str, Name to describe the Model.
'''
self._parameter_names = None
self._parameter_dict_fitarg = None
self._pmn = ['A', 'mu', 'sigma', 'c']
        # Number the parameters per peak (A0, A1, ..., mu0, mu1, ...)
if not parameter_dict:
raise NotImplementedError('Must have parameter dict currently')
self.parameter_dict = parameter_dict
if not kwargs:
kwargs = {}
kwargs['forced_parameters'] = self.parameter_names
kwargs['fitarg'] = self.parameter_dict_fitarg
Fitter.__init__(self, *args, **kwargs)
self._box_str_format = '{:5}: {:7.3g} $\\pm$ {:6.1g}\n'
@property
def parameter_names(self):
if isinstance(self._parameter_names, type(None)):
ret = []
for name in self._pmn:
pos = 0
for value in self.parameter_dict[name]:
ret.append('%s%d'%(name, pos))
pos += 1
self._parameter_names = ret
return self._parameter_names
@property
def parameter_dict_fitarg(self):
"""Creates a numbered dictionary that can be used as fitargs
dict to create the fit function."""
if isinstance(self._parameter_dict_fitarg, type(None)):
ret = {}
for pm in self._pmn:
values = self.parameter_dict[pm]
pos = 0
for value in values:
ret['%s%d'%(pm,pos)] = value
pos += 1
self._parameter_dict_fitarg = ret
return self._parameter_dict_fitarg
    def _params_from_parameter_dict(self):
        # parameter_dict is keyed by the base names ('A', 'mu', ...), so
        # iterate those; the ordering matches the numbered parameter names.
        ret = []
        for name in self._pmn:
            ret.extend(self.parameter_dict[name])
        return np.array(ret)
def fit_func(self, x, *params):
"""
Gaussian functions.
Pass parameters as list. Sorting of parameters is:
A0, A1,.. mu0, mu1,... sigma0, sigma1,....c0,c1,....
"""
# Minuit passes negative values for sigma
# and these values lead to failures of the fitting
i = len(params)//4
pparams = np.reshape(params, (4, i)).T
ret = np.zeros_like(x)
for _p in pparams:
ret += self._gaussian(x, *_p)
return ret
def _gaussian(self, x, A, mu, sigma, c):
"""Gaussian function"""
if sigma < 0:
return 0
return A * norm.pdf(x, mu, sigma) + c
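# Usage sketch (added for illustration): GaussianModelN numbers each
# parameter per peak, so a two-peak model is configured with lists. The
# values below are assumed starting guesses; xdata/ydata are assumed
# 1-dim arrays.
# parameter_dict = {'A': [1, 0.5], 'mu': [-2, 2],
#                   'sigma': [1, 1], 'c': [0, 0]}
# model = GaussianModelN(xdata, ydata, parameter_dict=parameter_dict)
# model.parameter_names  # ['A0', 'A1', 'mu0', 'mu1', 'sigma0', ...]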
class LorenzianModel(Fitter):
"""
    N Lorentzian peaks and a non-resonant background to fit SFG
    spectra with.
"""
def __init__(self, *args, n_lorenzians=1, **kwargs):
        # Must define forced_parameters because iminuit's parameter auto
        # discovery fails for sfgn as fit function
self.n_lorenzians = n_lorenzians
_fitarg = {k: 0 for k in flatten([('amp_%i'%i, 'pos_%i'%i, 'width_%i'%i) for i in range(n_lorenzians)])}
_fitarg = {'nr': 0, 'phase': 0, **_fitarg}
self.parameter_names = list(_fitarg.keys())
kwargs['forced_parameters'] = self.parameter_names
        # If no fitarg is defined, we define a minimal set and use
        # sane parameter defaults.
        # This breaks if n_lorenzians is wrong; currently the user
        # has to take care to use it correctly.
fitarg = kwargs.get('fitarg')
if not fitarg:
kwargs['fitarg'] = _fitarg
Fitter.__init__(self, *args, **kwargs)
def fit_func(self, x, *args, **kwargs):
return sfgn(x, *args, **kwargs)
@property
def kwargs(self):
"""n_lorenzians is needed for model to work."""
ret = super().kwargs
ret['n_lorenzians'] = self.n_lorenzians
return ret
class SkewedNormal(Fitter):
def __init__(self, *args, **kwargs):
Fitter.__init__(self, *args, **kwargs)
self._box_str_format = '{:5}: {:7.3g} $\\pm$ {:6.1g}\n'
    def fit_func(self, x, A, mu, sigma, kurt, c):
return A * skewnorm.pdf(x, kurt, mu, sigma) + c
class FourLevelMolKinM(Fitter):
def __init__(
self,
*args,
gSigma=150,
N0=[1, 0, 0, 0],
rtol=1.09012e-9,
atol=1.49012e-9,
full_output=True,
**kwargs
):
"""4 Level Model Fitter.
To use set following `kwargs`
`xdata`, `ydata` and `fitarg`. Optinal pass `sigma` for y errors.
**Arguments:**
- **N0**: Boundary condition of the DGL
- **rtol**: Precision parameter of the DGL
- **atol**: Precision parameter of the DGL
- **full_output**: Weather to get full_output of the DGL Solver.
Usefull for debugging. atol and rtol
**args**/**kwargs:**
Get passed to `sfg2d.models.Fitter`. Options are:
- **xdata**: array of x data points
- **ydata**: array of y data points
- **sigma**: Array of y data errors
- **fitarg**: Dictionary with fit conditions.
Each parameter has an entry with its name `'parameter'`
`'error_parameter'` `'fix_parameter'` and `'limit_parameter'`
- **box_coords**: Coordinates of the fit result box in data coordinates.
- **roi**: Slice. Region of interest of the data.
This subregion will be used for fitting.
- **name**: Str, Name to describe the Model.
"""
        self.gSigma = gSigma  # width of the excitation
        self.rtol = rtol  # precision of the numerical integrator
        self.atol = atol
        # Starting conditions of the populations, not to be confused with
        # the starting conditions of the plot
self.N0 = N0
self.full_output = full_output
self.infodict = None # Infodict return of the Odeint.
kwargs = self._setup_fitter_kwargs(
{'s': 1, 't1': 1, 't2': 0.7, 'c': 1, 'mu': 0,},
kwargs
)
Fitter.__init__(self, *args, **kwargs)
def ext_gaus(self, t, mu, sigma):
"""Gausian excitation function.
Due to historic reasons its not a strict gausian, but something
very cloe to it. The Igor Code is:
1/sqrt(pi)/coeff1*exp(-(coeff0-x)^2/coeff1^2)
The here wanted sigma is sqrt(2)*sigma of a normal gaussian
and then its also normalized. If you have FWHM, then sigma
is sigma = FWHM/(2*sqrt(log(2)))
"""
return 1 / np.sqrt(np.pi) / sigma * np.exp(-((mu-t)/sigma)**2)
# The Physical Water model
def dgl(self, N, t, ext_func, s, t1, t2):
"""Dgl of the 4 Level DGL system.
**Arguments:**
- **N**: deg 4 array
Population of the 4 levels respectively
- **t**: float
time
- **ext_func**: exictation function in time.
Time profile of the pump laser.
Function of t. Usaully a gaussian function.
- **s**: scaling factor of the pump laser.
- **t1**: Time constant of first level
- **t2**: Time constant of second level.
**Returns:**
Derivatives of the system. As 4 dim array.
"""
        # This is the ODE written as a matrix multiplication:
        # dNdt = A x N
        # A is the constructing matrix of the ODE
# and N is a 4-level vector with (N0, N1, N2, N3)
# as the population of the states at time t.
# dNdt is the state wise derivative of N
# See https://en.wikipedia.org/wiki/Matrix_differential_equation
A = np.array([
[-s * ext_func(t), s * ext_func(t), 0, 0],
[s * ext_func(t), -s * ext_func(t) - 1/t1, 0, 0],
[0, 1 / t1, -1 / t2, 0],
[0, 0, 1 / t2, 0],
], dtype=np.float64)
dNdt = A.dot(N)
return dNdt
def fit_func(self, t, s, t1, t2, c, mu):
"""
Function we use to fit.
**Arguments:**
- **t**: time
        - **s**: Gaussian amplitude
        - **t1**: Lifetime of the first state
        - **t2**: Lifetime of the second (intermediate) state
        - **c**: Coefficient of the third (heat) state
        - **mu**: Position of the pump pulse, the zero.
        **Returns**
        The bleach of the water model
        and the matrix with the populations"""
N = self.population(
t,
lambda t: self.ext_gaus(t, mu, self.gSigma),
s,
t1,
t2
).T
        return ((N[0] - N[1] + N[2] + c * N[3])**2) / (self.N0[0]**2)
def population(self, t, *args, **kwargs):
"""Numerical solution to the 4 Level DGL-Water system.
**Arguments:**
- **t**: array of time values
**Args**:
Arguments of the dgl function
- **ext_func**: Function of excitation.
- **s**: scalar factor for the pump
- **t1**: Live time of the first exited state
- **t2**: livetime of the intermediate state.
**kwargs**:
Get passe to differential equation solver odeing
**Returns**
(len(t), 4) shaped array with the 4 entires beeing the population
of the N0 t0 N3 levels of the system
"""
ret = odeint(
            func=self.dgl,  # the ODE of the 4-level water system
            y0=self.N0,  # starting conditions of the ODE
            t=t,  # time as parameter
            args=args,  # arguments of the dgl function
            # Dfun=self.jac,  # the Jacobian of the ODE; it is optional.
            # The precision parameters for the numerical ODE solver.
rtol=self.rtol,
atol=self.atol,
full_output=self.full_output,
**kwargs,
)
if self.full_output:
ret, self.infodict = ret
return ret
def jac(self, N, t, ext_func, s, t1, t2):
"""Jacobean matrix of the DGL."""
# In this case the Jacobean Matrix is euqal the
# Consturcting matrix of the DGL.
# So it doesn't help much. It just speeds up the thing
# a little.
A = np.array([
[-s * ext_func(t), s * ext_func(t), 0, 0],
[s * ext_func(t), -s * ext_func(t) - 1/t1, 0, 0],
[0, 1 / t1, -1 / t2, 0],
[0, 0, 1 / t2, 0],
], dtype=np.float64)
return A
def fit_populations(self, t):
s, t1, t2, c1, mu = self.p
return self.population(
t,
lambda t: self.ext_gaus(t, mu, self.gSigma),
s,
t1,
t2
)
def start_population(self, t):
s, t1, t2, c1, mu = self.p0
return self.population(
t,
lambda t: self.ext_gaus(t, mu, self.gSigma),
s,
t1,
t2
)
def save(self, fname):
"""Save FourLevelMolKin results."""
parameter_dict = {
'gSigma': self.gSigma,
'rtol': self.rtol,
'atol': self.atol,
'N0': self.N0,
}
super().__save__(fname, parameter_dict)
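# Usage sketch (added for illustration): solve the four-level populations
# directly, without fitting. All parameter values here are assumed.
# t = np.linspace(-1000, 5000, 200)
# model = FourLevelMolKinM(
#     xdata=t, ydata=np.ones_like(t),
#     fitarg={'s': 1, 't1': 700, 't2': 500, 'c': 1, 'mu': 0})
# N = model.population(
#     t, lambda t: model.ext_gaus(t, 0, model.gSigma), 1, 700, 500)
# N.shape  # (200, 4): populations of levels N0..N3 at each time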
class Crosspeak(FourLevelMolKinM):
def __init__(
self,
*args,
N0=[1, 0, 0, 0, 0],
**kwargs
):
"""4 Level Model based crosspeak fitter.
"""
FourLevelMolKinM.__init__(self, *args, N0=N0, **kwargs)
def matrix(self, t, t1, teq, tup, tdown, ext_func, s):
"""Matrix to construct the DGL"""
return np.array([
[-s * ext_func(t), -s * ext_func(t), 0, 0, 0],
[s * ext_func(t), -s * ext_func(t)-1/tup-1/t1, 1/tdown, 0, 0],
[0, 1/tup, -1/tdown, 0, 0],
[0, 1/t1, 0, -1/teq, 0],
[0, 0, 0, 1/teq, 0]
], dtype=np.float64)
def dgl(self, N, *args):
"""Matrix form of the DGL"""
dNdt = self.matrix(*args).dot(N)
return dNdt
def fit_func(self, t, t1, teq, tup, tdown, mu, gSigma, s, c):
"""Function that is used for fitting the data.
"""
N = self.population(
t,
t1,
teq,
tup,
tdown,
lambda t: self.ext_gaus(t, mu, gSigma),
s,
).T
# On Pump vibration
y0 = (N[0] + c * N[3] - N[1])**2 / self.N0[0]**2
# Off Pump vibration
y1 = (N[0] + c * N[3] - N[2])**2 / self.N0[0]**2
# Fit function is two dimensional because input data consist of two
# traces.
return np.array([y0, y1])
@property
def ydata(self):
return self._ydata
@ydata.setter
def ydata(self, value):
self._ydata = np.array(value)
@property
def sigma(self):
"""Error of the ydata for the fit."""
if isinstance(self._sigma, type(None)):
return np.ones_like(self.ydata)
if self.ignore_errors:
return np.ones_like(self.ydata)
return self._sigma[self.roi]
@sigma.setter
def sigma(self, value):
        if isinstance(value, type(None)):
            self._sigma = np.ones_like(self._ydata)
            return
        if np.any(value == 0):
            # Zero errors would blow up chi2; fall back to ignoring the
            # errorbars altogether.
            from warnings import warn
            warn('Passed uncertainty has a 0 value.\nIgnoring errorbars.\n{}'.format(value))
            self.ignore_errors = True
        self._sigma = value
class SingleLifetime(Fitter):
def __init__(
self,
*args,
fit_func_dtype=np.float64,
**kwargs
):
"""Fitting Model with convolution of single exponential and gaussian.
**Arguments**:
- **xsample**: Optional
Stepping size of the convolution. Default minimal
difference of xdata and in the range of xdata.
- **xsample_ext**: Boundary effects of the convolution make int necesarry to,
add additional Datapoints to the xsample data. By default 10% are
added.
- **fit_func_dtype**: The exponential function in the fitfunc can become
very huge. To cope with that one can set the dtype of the fit func.
**args**/**kwargs:**
Get passed to `sfg2d.models.Fitter`. Options are:
- **xdata**: array of x data points
- **ydata**: array of y data points
- **sigma**: Array of y data errors
- **fitarg**: Dictionary with fit conditions.
Each parameter has an entry with its name `'parameter'`
`'error_parameter'` `'fix_parameter'` and `'limit_parameter'`
- **box_coords**: Coordinates of the fit result box in data coordinates.
- **roi**: Slice. Region of interest of the data.
This subregion will be used for fitting.
- **name**: Str, Name to describe the Model.
"""
self.fit_func_dtype = fit_func_dtype
kwargs = self._setup_fitter_kwargs(
{'A': 1, 't1':1000, 'c': 0, 'mu': 0, 'sigma': 200 },
kwargs
)
Fitter.__init__(self, *args, **kwargs)
def fit_func(self, t, A, t1, c, mu, ofs, sigma):
"""Result of a convolution of Gausian an exponential recovery.
This function is the Analytically solution to the convolution of:
f = (- A*exp(-t/tau) + c)*UnitStep(t)
g = Gausian(t, mu, sigma)
result = Convolve(f, g)
**Arguments:**
- **t**: array of times
- **A**: Amplitude of the recovery
- **t1**: Livetime of the recovery
- **c**: Convergence of the recovery
        - **mu**: Temporal position of the pulse
- **ofs**: Global offset factor
- **sigma**: Width of the gaussian
"""
        # This dtype hack is needed because the exponential can become very large.
return 1/2 * (
c + c * erf((t - mu)/(np.sqrt(2) * sigma)) -
A * np.exp(((sigma**2 - 2 * t * t1 + 2 * mu * t1)/(2 * t1**2)),
dtype=self.fit_func_dtype) *
erfc((sigma**2 - t * t1 + mu * t1)/(np.sqrt(2) * sigma * t1))
) + ofs
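# --- Illustrative sanity check (not part of the original module) -----------
# Compares the closed-form fit_func above (with mu=0, ofs=0) against a
# brute-force numerical convolution of (c - A*exp(-t/t1))*UnitStep(t) with a
# unit-area gaussian. Grid spacing and tolerance are assumptions of this
# sketch.
def _check_single_lifetime_convolution(A=1.0, t1=5.0, c=0.3, sigma=0.4):
    import numpy as np
    from scipy.special import erf, erfc

    t = np.linspace(-20, 20, 4001)
    dt = t[1] - t[0]
    f = (c - A * np.exp(-t / t1)) * (t >= 0)
    g = np.exp(-t**2 / (2 * sigma**2)) / (np.sqrt(2 * np.pi) * sigma)
    numeric = np.convolve(f, g, mode='same') * dt

    analytic = 0.5 * (
        c + c * erf(t / (np.sqrt(2) * sigma))
        - A * np.exp((sigma**2 - 2 * t * t1) / (2 * t1**2))
        * erfc((sigma**2 - t * t1) / (np.sqrt(2) * sigma * t1))
    )
    # Compare away from the grid edges, where the discrete convolution is valid.
    inner = slice(500, -500)
    return np.allclose(numeric[inner], analytic[inner], atol=1e-3)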
class ThreeLevelMolkin(Fitter):
def __init__(
self,
*args,
gSigma=150,
N0=[1, 0, 0],
rtol=1.09012e-9,
atol=1.49012e-9,
full_output=True,
**kwargs
):
"""
**args**/**kwargs:**
Get passed to `sfg2d.models.Fitter`. Options are:
- **xdata**: array of x data points
- **ydata**: array of y data points
- **sigma**: Array of y data errors
- **fitarg**: Dictionary with fit conditions.
Each parameter has an entry with its name `'parameter'`
`'error_parameter'` `'fix_parameter'` and `'limit_parameter'`
- **box_coords**: Coordinates of the fit result box in data coordinates.
- **roi**: Slice. Region of interest of the data.
This subregion will be used for fitting.
- **name**: Str, Name to describe the Model.
"""
Fitter.__init__(self, *args, **kwargs)
self.gSigma = gSigma  # Width of the excitation.
self.rtol = rtol  # Precision of the numerical integrator.
self.atol = atol
# Starting conditions of the populations, not to be confused with the starting conditions of the plot.
self.N0 = N0
self.full_output = full_output
self.infodict = None # Infodict return of the Odeint.
def ext_gaus(self, t, mu, sigma):
"""Gausian excitation function.
Due to historic reasons its not a strict gausian, but something
very cloe to it. The Igor Code is:
1/sqrt(pi)/coeff1*exp(-(coeff0-x)^2/coeff1^2) """
return 1 / np.sqrt(np.pi) / sigma * np.exp(-((mu-t)/sigma)**2)
def dgl(self, N, t, ext_func, s, t1):
"""DGL of the three level system.
Parameters
----------
N: deg 3 Array with initial populations of the levels
typically [1, 0, 0]
t: float
time
ext_func: excitation function of the laser. Typically a gaussian.
Function of t.
s: scaling factor of the pump laser.
t1: Livetime of the excited state.
Returns
-------
Dericatives of the system as 3dim array.
"""
A = np.array([
[-s*ext_func(t), s*ext_func(t), 0],
[s*ext_func(t), -s*ext_func(t) - 1/t1, 0],
[0, 1/t1, 0]
], dtype=np.float64)
dNdt = A.dot(N)
return dNdt
def population(self, t, ext_func, s, t1, **kwargs):
"""Nummerical solution of the DGL.
Parameters
----------
t: array if times
ext_func: excitation function. Depends on t.
s: scaling factor of the pump.
t1: livetime of the first state.
Returns
-------
Populations of the 3 levels at the times t.
"""
ret = odeint(
func=self.dgl, # the ODE system of the three-level water model
y0=self.N0, # Starting conditions of the DGL
t=t,
args=(ext_func, s, t1),
rtol=self.rtol,
atol=self.atol,
full_output=self.full_output,
**kwargs,
)
if self.full_output:
ret, self.infodict = ret
return ret
def fit_populations(self, t):
s, t1, c1, mu = self.p
return self.population(
t,
lambda t: self.ext_gaus(t, mu, self.gSigma),
s,
t1,
)
def fit_func(self, t, s, t1, c, mu):
"""
Function we use to fit.
parameters
----------
t: time
s: Gaussian Amplitude
t1: Livetime of first state
c: Coefficient of third(Heat) state
scale: Scaling factor at the very end
Returns:
The bleach of the water model
and the Matrix with the populations"""
N = self.population(
t,
lambda t: self.ext_gaus(t, mu, self.gSigma),
s,
t1,
).T
return ((N[0] + c * N[2] - N[1])**2) / (self.N0[0]**2)
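# --- Illustrative usage sketch (assumed parameter values) -------------------
# Integrates the three-level ODE system defined above and verifies that the
# total population is conserved: every loss term of one level shows up as a
# gain term of another, so the column sums of the rate matrix vanish.
def _check_three_level_conservation():
    import numpy as np
    from scipy.integrate import odeint

    def ext_gaus(t, mu=0.0, sigma=0.15):
        return 1 / np.sqrt(np.pi) / sigma * np.exp(-((mu - t) / sigma) ** 2)

    def dgl(N, t, s, t1):
        e = s * ext_gaus(t)
        A = np.array([
            [-e, e, 0],
            [e, -e - 1 / t1, 0],
            [0, 1 / t1, 0],
        ])
        return A.dot(N)

    t = np.linspace(-1, 10, 400)
    N = odeint(dgl, [1, 0, 0], t, args=(0.5, 2.0))
    return np.allclose(N.sum(axis=1), 1.0, atol=1e-6)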
class TwoExponentials(Fitter):
def __init__(self, *args, **kwargs):
"""Two exponentials convoluted with gaussian. Dont use this. Its has
a causality problem.
"""
Fitter.__init__(self, *args, **kwargs)
self.N0 = [1, 0, 0, 0]
def fit_func(self, x, Amp, Aheat, t1, t2, offs, pwidth, mu):
"""Analytical solution to the four level system with gaussian excitation pulse."""
e1 = np.exp((0.5*((t2**-2.)*((pwidth**2)+((-2.*(x*t2))+(2.*(mu*t2)))))))
e2 = np.exp((0.5*((t1**-2.)*((pwidth**2)+((-2.*(x*t1))+(2.*(mu*t1)))))))
er_1 = ((((2.**-0.5)*(((pwidth**2)+(mu*t2))-(x*t2)))/t2)/pwidth)
er_2 = ((((2.**-0.5)*(((pwidth**2)+(mu*t1))-(x*t1)))/t1)/pwidth)
er_3 = (((2.**-0.5)*(x-mu))/pwidth)
aux0=(e1)*(erfc(er_1));
aux1=(e2)*(erfc(er_2));
aux2=Amp*(((offs+(offs*(erf(er_3))))-(Aheat*aux0))-aux1);
output=0.5*aux2+1
# Due to exp overflow, NaNs can occur. However, they only happen at
# negative times, where the population is still 1.
output[np.isnan(output)] = 1
# The +1 above shifts the result to the correct baseline population.
return output
class FourLevel(Fitter):
"""Analytical Solution to the 4 Level Model.
The Conzept for the solution was taken from: (doi:10.1021/jp003158e) Lock,
A. J.; Woutersen, S. & Bakker, H. J.
"""
def __init__(self, *args, **kwargs):
# Autodiscovery of iminuit doesn't work with implicit
# variable definitions. Thus we must specify the parameters
# and their names explicitly. We also define some sane defaults
# that should be updated by the user. The order of the arguments
# matters, because iminuit infers parameter names from the signature.
kwargs = self._setup_fitter_kwargs(
{'Amp': 1, 't1': 1, 't2': 0.7, 'c': 1, 'sigma':0.2, 'mu': 0,},
kwargs
)
Fitter.__init__(self, *args, **kwargs)
self.N_total = 1  # Total number of initial oscillators (renamed so it does not shadow the method N below).
def N(self, t, t1, t2, N10, N20=0, N30=0):
"""Populations of the solution to the 4 level model.
This is only true for t>0.
**Parameters:**
- **t**: Time points to calculated population of
- **t1**: Lifetime of first excited state
- **t2**: Lifetime of intermediate (heat) state
- **N10**: Fraction of initialy excited oszillators 0<N10<1
- **N20**: Fraction of initialy excited oszillators in heated state
- **N30**: Fraction if initialy excited oszillators in final state
**Returns:**
Tuple of N0, N1, N2, N3 at times t
"""
N1 = N10 * np.exp(-t / t1)
N2 = (N20 * np.exp(-t / t2) +
N10 * t2 / (t1 - t2) * (np.exp(-t / t1) - np.exp(-t / t2)))
N3 = (N20 * (1 - np.exp(-t / t2)) + N30 +
N10 * (1 - (t1 * np.exp(-t / t1) - t2 * np.exp(-t / t2)) / (t1 - t2)))
N0 = self.N_total - N1 - N2 - N3
return N0, N1, N2, N3
def fit_func(self, t, Amp, t1, t2, c, mu, sigma):
"""Function for the time dependency of pump-probe sfg data.
Function is derived by analytically solving the 4 level system and
subsequent convolution with a gaussian excitation function of the
model. Initial state is N0=1. All other states are empty.
This exact implementation has a problem when t1 == t2 exactly; due to
numerical constraints this must be avoided.
If the difference instead of the ratio is used, the function stays the
same due to the distributivity of the convolution and the fact that a
gaussian convolved with a constant gives that constant; therefore only 1 needs to be subtracted.
**Arguments**:
- **t**: Array of Time values. Usually given by experiment.
- **Amp**: Amplitude of the excitation pulse. Determines the fraction
of oscillators excited by the excitation pulse.
- **t1**: Lifetime of the first excited vibrational state in units of
**t**
- **t2**: Lifetime of the second excited vibrational state in units of
**t**
- **c**: Scaling factor of final (heated) state. Used to account for
spectral differences induced by residual heat.
- **mu**: Temporal position of the pump pulse in units of **t**.
- **sigma**: Temporal width of pump pulse in units of **t**.
**Returns**
Modeled result as deduced from the 4 level system for the given array
of **t** time values.
"""
pi=np.pi;
#a0 = erf((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/sigma))
def mysqrt(x): return np.sqrt(x)
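# What follows is machine-generated common-subexpression output of the
# analytic convolution (hence the aux0..aux110 naming and the line
# continuations); it is kept verbatim for numerical fidelity.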
aux0=sigma*((t1**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma))))));
aux1=sigma*((t1**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma))))));
aux2=sigma*((t1**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma))))));
aux3=(((t1-t2)**2))*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma)))));
aux4=sigma*(t1*(t2*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma)))))));
aux5=sigma*(t1*(t2*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma)))))));
aux6=sigma*(t1*(t2*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma)))))));
aux7=sigma*((t2**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma))))));
aux8=sigma*((t2**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma))))));
aux9=sigma*((t2**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma))))));
aux10=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t1))-(((2.**-0.5)*\
t)/sigma);
aux11=(np.exp((((0.5*((sigma**2)*(t1**-2.)))+(mu/t1))-(t/t1))))*((\
mysqrt((2.*pi)))*(sigma*((t1**2)*(-1.+(erf(aux10))))));
aux12=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t1))-(((2.**-0.5)*\
t)/sigma);
aux13=(np.exp((((0.5*((sigma**2)*(t1**-2.)))+(mu/t1))-(t/t1))))*((\
mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf(aux12)))))));
aux14=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t1))-(((2.**-0.5)*\
t)/sigma);
aux15=(np.exp((((0.5*((sigma**2)*(t1**-2.)))+(mu/t1))-(t/t1))))*((\
mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf(aux14)))))));
aux16=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t1))-(((2.**-0.5)*\
t)/sigma);
aux17=(np.exp((((0.5*((sigma**2)*(t1**-2.)))+(mu/t1))-(t/t1))))*((\
mysqrt((2.*pi)))*(sigma*((t2**2)*(-1.+(erf(aux16))))));
aux18=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t1))-(((2.**-0.5)*\
t)/sigma);
aux19=(np.exp((((0.5*((sigma**2)*(t1**-2.)))+(mu/t1))-(t/t1))))*((\
mysqrt((2.*pi)))*(sigma*((t2**2)*(-1.+(erf(aux18))))));
aux20=((((2.**-0.5)*mu)/sigma)+(((mysqrt(2.))*sigma)/t1))-(((2.**-0.5)\
*t)/sigma);
aux21=(np.exp(((2.*((sigma**2)*(t1**-2.)))+(((2.*mu)/t1)+((-2.*t)/t1))\
)))*((mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf(aux20)))))));
aux22=((((2.**-0.5)*mu)/sigma)+(((mysqrt(2.))*sigma)/t1))-(((2.**-0.5)\
*t)/sigma);
aux23=(np.exp(((2.*((sigma**2)*(t1**-2.)))+(((2.*mu)/t1)+((-2.*t)/t1))\
)))*((mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf(aux22)))))));
aux24=((((2.**-0.5)*mu)/sigma)+(((mysqrt(2.))*sigma)/t1))-(((2.**-0.5)\
*t)/sigma);
aux25=(np.exp(((2.*((sigma**2)*(t1**-2.)))+(((2.*mu)/t1)+((-2.*t)/t1))\
)))*((mysqrt((2.*pi)))*(sigma*((t2**2)*(-1.+(erf(aux24))))));
aux26=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t2))-(((2.**-0.5)*\
t)/sigma);
aux27=(np.exp((((0.5*((sigma**2)*(t2**-2.)))+(mu/t2))-(t/t2))))*((\
mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf(aux26)))))));
aux28=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t2))-(((2.**-0.5)*\
t)/sigma);
aux29=(np.exp((((0.5*((sigma**2)*(t2**-2.)))+(mu/t2))-(t/t2))))*((\
mysqrt((2.*pi)))*(sigma*((t2**2)*(-1.+(erf(aux28))))));
aux30=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t2))-(((2.**-0.5)*\
t)/sigma);
aux31=(np.exp((((0.5*((sigma**2)*(t2**-2.)))+(mu/t2))-(t/t2))))*((\
mysqrt((2.*pi)))*(sigma*((t2**2)*(-1.+(erf(aux30))))));
aux32=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t2))-(((2.**-0.5)*\
t)/sigma);
aux33=(np.exp((((0.5*((sigma**2)*(t2**-2.)))+(mu/t2))-(t/t2))))*((\
mysqrt((2.*pi)))*(sigma*((t2**2)*(-1.+(erf(aux32))))));
aux34=(0.5*((sigma**2)*(t1**-2.)))+((mu/t1)+((0.5*((sigma**2)*(t2**-2.\
)))+((mu/t2)+(((sigma**2)/t2)/t1))));
aux35=(((2.**-0.5)*mu)/sigma)+((((2.**-0.5)*sigma)/t1)+(((2.**-0.5)*\
sigma)/t2));
aux36=(mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf((aux35-(((2.**-0.5)*t)/sigma))))))));
aux37=(0.5*((sigma**2)*(t1**-2.)))+((mu/t1)+((0.5*((sigma**2)*(t2**-2.\
)))+((mu/t2)+(((sigma**2)/t2)/t1))));
aux38=(((2.**-0.5)*mu)/sigma)+((((2.**-0.5)*sigma)/t1)+(((2.**-0.5)*\
sigma)/t2));
aux39=(mysqrt((2.*pi)))*(sigma*((t2**2)*(-1.+(erf((aux38-(((2.**-0.5)*t)/sigma)))))));
aux40=(0.5*((sigma**2)*(t1**-2.)))+((mu/t1)+((0.5*((sigma**2)*(t2**-2.\
)))+((mu/t2)+(((sigma**2)/t2)/t1))));
aux41=(((2.**-0.5)*mu)/sigma)+((((2.**-0.5)*sigma)/t1)+(((2.**-0.5)*\
sigma)/t2));
aux42=(mysqrt((2.*pi)))*(sigma*((t2**2)*(-1.+(erf((aux41-(((2.**-0.5)*t)/sigma)))))));
aux43=((((2.**-0.5)*mu)/sigma)+(((mysqrt(2.))*sigma)/t2))-(((2.**-0.5)\
*t)/sigma);
aux44=(np.exp(((2.*((sigma**2)*(t2**-2.)))+(((2.*mu)/t2)+((-2.*t)/t2))\
)))*((mysqrt((2.*pi)))*(sigma*((t2**2)*(-1.+(erf(aux43))))));
aux45=t1*(t2*(erfc(((((2.**-0.5)*(((sigma**2)+(mu*t1))-(t*t1)))/t1)/\
sigma))));
aux46=(np.exp((0.5*((t1**-2.)*((sigma**2)+((2.*(mu*t1))+(-2.*(t*t1))))\
))))*((mysqrt((2.*pi)))*(sigma*aux45));
aux47=t1*(t2*(erfc(((((2.**-0.5)*(((sigma**2)+(mu*t1))-(t*t1)))/t1)/\
sigma))));
aux48=(np.exp((0.5*((t1**-2.)*((sigma**2)+((2.*(mu*t1))+(-2.*(t*t1))))\
))))*((mysqrt((2.*pi)))*(sigma*aux47));
aux49=(t2**2)*(erfc(((((2.**-0.5)*(((sigma**2)+(mu*t1))-(t*t1)))/t1)/\
sigma)));
aux50=(np.exp((0.5*((t1**-2.)*((sigma**2)+((2.*(mu*t1))+(-2.*(t*t1))))\
))))*((mysqrt((2.*pi)))*(sigma*aux49));
aux51=t1*(t2*(erfc(((((2.**-0.5)*(((sigma**2)+(mu*t2))-(t*t2)))/t2)/\
sigma))));
aux52=(np.exp((0.5*((t2**-2.)*((sigma**2)+((2.*(mu*t2))+(-2.*(t*t2))))\
))))*((mysqrt((2.*pi)))*(sigma*aux51));
aux53=(t2**2)*(erfc(((((2.**-0.5)*(((sigma**2)+(mu*t2))-(t*t2)))/t2)/\
sigma)));
aux54=(np.exp((0.5*((t2**-2.)*((sigma**2)+((2.*(mu*t2))+(-2.*(t*t2))))\
))))*((mysqrt((2.*pi)))*(sigma*aux53));
aux55=(3.*(Amp*aux46))+((Amp*(c*aux48))+((-2.*(Amp*aux50))+((Amp*(c*\
aux52))+(Amp*aux54))));
aux56=(-2.*((Amp**2)*(c*((np.exp(((aux40-(t/t2))-(t/t1))))*aux42))))+(\
((Amp**2)*(c*aux44))+aux55);
aux57=((Amp**2)*((c**2)*((np.exp(((aux34-(t/t2))-(t/t1))))*aux36)))+((\
2.*((Amp**2)*((np.exp(((aux37-(t/t2))-(t/t1))))*aux39)))+aux56);
aux58=((Amp**2)*aux29)+((-2.*((Amp**2)*(c*aux31)))+(((Amp**2)*((c**2)*\
aux33))+aux57));
aux59=(2.*((Amp**2)*(c*aux23)))+((-2.*((Amp**2)*aux25))+((2.*((Amp**2)\
*(c*aux27)))+aux58));
aux60=(-2.*((Amp**2)*aux17))+((2.*((Amp**2)*(c*aux19)))+((2.*((Amp**2)\
*aux21))+aux59));
aux61=((Amp**2)*((c**2)*aux11))+((3.*((Amp**2)*aux13))+((-2.*((Amp**2)\
*(c*aux15)))+aux60));
aux62=((Amp**2)*((c**2)*((mysqrt((0.5*pi)))*aux8)))+((Amp*(c*((\
mysqrt((2.*pi)))*aux9)))+aux61);
aux63=(2.*((Amp**2)*(c*((mysqrt((2.*pi)))*aux6))))+(((Amp**2)*((\
mysqrt((0.5*pi)))*aux7))+aux62);
aux64=(2.*(Amp*((mysqrt((2.*pi)))*aux4)))+((-2.*(Amp*(c*((mysqrt((\
2.*pi)))*aux5))))+aux63);
aux65=(Amp*(c*((mysqrt((2.*pi)))*aux2)))+(((mysqrt((0.5*pi)))*(\
sigma*aux3))+aux64);
aux66=((Amp**2)*((mysqrt((0.5*pi)))*aux0))+(((Amp**2)*((c**2)*((\
mysqrt((0.5*pi)))*aux1)))+aux65);
aux67=(t2**2)*(erfc(((((2.**-0.5)*(((sigma**2)+(mu*t2))-(t*t2)))/t2)/\
sigma)));
aux68=(np.exp((0.5*((t2**-2.)*((sigma**2)+((2.*(mu*t2))+(-2.*(t*t2))))\
))))*((mysqrt((2.*pi)))*(sigma*aux67));
aux69=t1*(t2*(erfc(((((2.**-0.5)*(((sigma**2)+(mu*t2))-(t*t2)))/t2)/\
sigma))));
aux70=(np.exp((0.5*((t2**-2.)*((sigma**2)+((2.*(mu*t2))+(-2.*(t*t2))))\
))))*((mysqrt((2.*pi)))*(sigma*aux69));
aux71=(t1**2)*(erfc(((((2.**-0.5)*(((sigma**2)+(mu*t1))-(t*t1)))/t1)/\
sigma)));
aux72=(np.exp((0.5*((t1**-2.)*((sigma**2)+((2.*(mu*t1))+(-2.*(t*t1))))\
))))*((mysqrt((2.*pi)))*(sigma*aux71));
aux73=(t1**2)*(erfc(((((2.**-0.5)*(((sigma**2)+(mu*t1))-(t*t1)))/t1)/\
sigma)));
aux74=(np.exp((0.5*((t1**-2.)*((sigma**2)+((2.*(mu*t1))+(-2.*(t*t1))))\
))))*((mysqrt((2.*pi)))*(sigma*aux73));
aux75=((((2.**-0.5)*mu)/sigma)+(((mysqrt(2.))*sigma)/t2))-(((2.**-0.5)\
*t)/sigma);
aux76=(np.exp(((2.*((sigma**2)*(t2**-2.)))+(((2.*mu)/t2)+((-2.*t)/t2))\
)))*((mysqrt((0.5*pi)))*(sigma*((t2**2)*(-1.+(erf(aux75))))));
aux77=((((aux66-(Amp*(c*aux68)))-(Amp*aux70))-(Amp*(c*aux72)))-(Amp*\
aux74))-((Amp**2)*((c**2)*aux76));
aux78=((((2.**-0.5)*mu)/sigma)+(((mysqrt(2.))*sigma)/t2))-(((2.**-0.5)\
*t)/sigma);
aux79=(np.exp(((2.*((sigma**2)*(t2**-2.)))+(((2.*mu)/t2)+((-2.*t)/t2))\
)))*((mysqrt((0.5*pi)))*(sigma*((t2**2)*(-1.+(erf(aux78))))));
aux80=(0.5*((sigma**2)*(t1**-2.)))+((mu/t1)+((0.5*((sigma**2)*(t2**-2.)))+((mu/t2)+(((sigma**2)/t2)/t1))));
aux81=(((2.**-0.5)*mu)/sigma)+((((2.**-0.5)*sigma)/t1)+(((2.**-0.5)*\
sigma)/t2));
aux82=(mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf((aux81-(((2.**-0.5)*t)/sigma))))))));
aux83=(aux77-((Amp**2)*aux79))-((Amp**2)*((np.exp(((aux80-(t/t2))-(t/\
t1))))*aux82));
aux84=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t2))-(((2.**-0.5)*\
t)/sigma);
aux85=(np.exp((((0.5*((sigma**2)*(t2**-2.)))+(mu/t2))-(t/t2))))*((\
mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf(aux84)))))));
aux86=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t2))-(((2.**-0.5)*\
t)/sigma);
aux87=(np.exp((((0.5*((sigma**2)*(t2**-2.)))+(mu/t2))-(t/t2))))*((\
mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf(aux86)))))));
aux88=((((2.**-0.5)*mu)/sigma)+(((mysqrt(2.))*sigma)/t1))-(((2.**-0.5)\
*t)/sigma);
aux89=(np.exp(((2.*((sigma**2)*(t1**-2.)))+(((2.*mu)/t1)+((-2.*t)/t1))\
)))*((mysqrt((2.*pi)))*(sigma*((t1**2)*(-1.+(erf(aux88))))));
aux90=((aux83-((Amp**2)*((c**2)*aux85)))-((Amp**2)*aux87))-((Amp**2)*(\
c*aux89));
aux91=((((2.**-0.5)*mu)/sigma)+(((mysqrt(2.))*sigma)/t1))-(((2.**-0.5)\
*t)/sigma);
aux92=(np.exp(((2.*((sigma**2)*(t1**-2.)))+(((2.*mu)/t1)+((-2.*t)/t1))\
)))*((mysqrt((0.5*pi)))*(sigma*((t1**2)*(-1.+(erf(aux91))))));
aux93=((((2.**-0.5)*mu)/sigma)+(((mysqrt(2.))*sigma)/t1))-(((2.**-0.5)\
*t)/sigma);
aux94=(np.exp(((2.*((sigma**2)*(t1**-2.)))+(((2.*mu)/t1)+((-2.*t)/t1))\
)))*((mysqrt((0.5*pi)))*(sigma*((t1**2)*(-1.+(erf(aux93))))));
aux95=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t1))-(((2.**-0.5)*\
t)/sigma);
aux96=(np.exp((((0.5*((sigma**2)*(t1**-2.)))+(mu/t1))-(t/t1))))*((\
mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf(aux95)))))));
aux97=((aux90-((Amp**2)*((c**2)*aux92)))-((Amp**2)*aux94))-((Amp**2)*(\
(c**2)*aux96));
aux98=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t1))-(((2.**-0.5)*\
t)/sigma);
aux99=(np.exp((((0.5*((sigma**2)*(t1**-2.)))+(mu/t1))-(t/t1))))*((\
mysqrt((2.*pi)))*(sigma*((t1**2)*(-1.+(erf(aux98))))));
aux100=sigma*((t2**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*\
t)/sigma))))));
aux101=sigma*((t2**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*\
t)/sigma))))));
aux102=((aux97-((Amp**2)*aux99))-((Amp**2)*(c*((mysqrt((2.*pi)))*\
aux100))))-(Amp*((mysqrt((2.*pi)))*aux101));
aux103=sigma*(t1*(t2*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)\
/sigma)))))));
aux104=sigma*(t1*(t2*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)\
/sigma)))))));
aux105=(aux102-((Amp**2)*((c**2)*((mysqrt((2.*pi)))*aux103))))-((\
Amp**2)*((mysqrt((2.*pi)))*aux104));
aux106=sigma*((t1**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*\
t)/sigma))))));
aux107=sigma*((t1**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*\
t)/sigma))))));
aux108=(aux105-((Amp**2)*(c*((mysqrt((2.*pi)))*aux106))))-(Amp*((\
mysqrt((2.*pi)))*aux107));
aux109=(((t1-t2)**2))*(-1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*\
t)/sigma)))));
aux110=((2.*pi)**-0.5)*(((t1-t2)**-2.)*(aux108-((mysqrt((0.5*pi)\
))*(sigma*aux109))));
output=aux110/sigma;
return output
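# --- Cross-check sketch (illustrative parameters) ---------------------------
# For t > 0 the closed-form populations in FourLevel.N solve
# dN1/dt = -N1/t1 and dN2/dt = N1/t1 - N2/t2; this sketch verifies the two
# nontrivial ones against a direct numerical integration.
def _check_four_level_populations(t1=1.3, t2=0.6, N10=0.4):
    import numpy as np
    from scipy.integrate import odeint

    t = np.linspace(0, 10, 400)
    N1 = N10 * np.exp(-t / t1)
    N2 = N10 * t2 / (t1 - t2) * (np.exp(-t / t1) - np.exp(-t / t2))

    def rhs(N, t):
        return [-N[0] / t1, N[0] / t1 - N[1] / t2]

    num = odeint(rhs, [N10, 0.0], t)
    return (np.allclose(num[:, 0], N1, atol=1e-6)
            and np.allclose(num[:, 1], N2, atol=1e-6))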
class FourLevelDifference(FourLevel):
"""This is the fit model for the four level model if difference instead
of ratio is used. The only difference is that we need to subtract -1. This
is due to two things. First, the convolution is distributive, second convolution
of -1 with gaussian is -1. Therefore this is the correct and most simple solution.
"""
def fit_func(self, t, Amp, t1, t2, c, mu, sigma):
return super().fit_func(t, Amp, t1, t2, c, mu, sigma) - 1
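# --- Numerical illustration of the docstring's claim ------------------------
# Convolving (signal - 1) with a unit-area gaussian equals convolving the
# signal and then subtracting 1: the convolution is distributive, and a
# constant convolved with a normalized kernel is unchanged. Grid and
# tolerance values are assumptions of this sketch.
def _check_difference_commutes_with_convolution(sigma=0.3):
    import numpy as np

    t = np.linspace(-10, 10, 2001)
    dt = t[1] - t[0]
    g = np.exp(-t**2 / (2 * sigma**2)) / (np.sqrt(2 * np.pi) * sigma)
    signal = 1 - 0.5 * np.exp(-np.clip(t, 0, None)) * (t >= 0)

    a = np.convolve(signal - 1, g, mode='same') * dt
    b = np.convolve(signal, g, mode='same') * dt - 1
    inner = slice(200, -200)  # stay away from boundary artifacts
    return np.allclose(a[inner], b[inner], atol=1e-3)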
|
# -*- coding: utf-8 -*-
import requests, requests.utils, pickle
import httplib
import sys
import pprint
from bs4 import BeautifulSoup
import re, shutil, xml.dom.minidom, json
import netrc
import os.path, time
import random
from optparse import OptionParser
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer
import threading
s = requests.Session()
s.headers.update({'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.94 Safari/537.36',
'Connection': 'keep-alive',
'Content-type': 'application/x-www-form-urlencoded'})
def debugReq(r):
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(r.status_code)
# pp.pprint(r.request.__dict__)
# print >>sys.stderr, r.text
print >> sys.stderr, s.cookies.get_dict()
real_send = False
http_port = 5678
uuid = ''
redirect_uri = ''
base_uri = ''
skey = ''
wxsid = ''
wxuin = ''
pass_ticket = ''
deviceId = 'e000000000000000'
BaseRequest = {}
SyncKey = []
ContactList = []
ChatContactList = []
My = {}
def getUUID():
global uuid
# url = "https://login.weixin.qq.com/jslogin"
# payload = {
# 'redirect_uri':'https://wx.qq.com/cgi-bin/mmwebwx-bin/webwxnewloginpage',
# 'appid': 'wx782c26e4c19acffb',
# 'fun': 'new',
# 'lang': 'zh_CN',
# '_': int(time.time()),
# }
# headers = {'content-type': 'text/javascript'}
# r = s.get(url, data = payload, headers = headers)
url = "https://login.weixin.qq.com/jslogin?appid=wx782c26e4c19acffb&redirect_uri=https%3A%2F%2Fwx.qq.com%2Fcgi-bin%2Fmmwebwx-bin%2Fwebwxnewloginpage&fun=new&lang=zh_CN&_=" + str(
int(time.time()))
r = s.get(url)
# debugReq(r)
# window.QRLogin.code = 200; window.QRLogin.uuid = "oZwt_bFfRg==";
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)"'
pm = re.search(regx, r.text)
code = pm.group(1)
uuid = pm.group(2)
if code == '200':
return True
return False
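# Illustrative helper (not used by the flow above): the jslogin endpoint
# replies with a single line of JavaScript, which the regex in getUUID picks
# apart. The uuid value below is the sample already quoted in the comment
# above.
def _example_parse_jslogin():
    sample = 'window.QRLogin.code = 200; window.QRLogin.uuid = "oZwt_bFfRg=="'
    pm = re.search(r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)"', sample)
    return pm.group(1), pm.group(2)  # ('200', 'oZwt_bFfRg==')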
def getQRImage():
path = os.path.join(os.getcwd(), "qrcode.jpg")
url = "https://login.weixin.qq.com/qrcode/" + uuid
r = s.get(url, stream=True)
# debugReq(r)
if r.status_code == 200:
with open(path, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
import socket
ip = [l
for l in
([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2]
if not ip.startswith("127.")][:1],
[[(sc.connect(('8.8.8.8', 80)), sc.getsockname()[0], sc.close())
for sc in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]])
if l][0][0]
print "[+] Please open http://" + ip + ":" + str(http_port) + "/qrcode.jpg or open " + path
time.sleep(1)
def waitForLogin():
global redirect_uri, base_uri
url = "https://login.weixin.qq.com/cgi-bin/mmwebwx-bin/login?loginicon=true&uuid=%s&tip=0&r=-1746241605&_=%s" % (
uuid, int(time.time()))
r = s.get(url, stream=True)
# debugReq(r)
# print r.text
data = r.text
regx = r'window.code=(\d+);'
pm = re.search(regx, data)
code = pm.group(1)
if code == '201':  # scanned, not yet confirmed
print('[+] Scan success, please click confirm on your mobile phone')
tip = 0
elif code == '200':  # logged in
print('[+] Logging in ...')
regx = r'window.redirect_uri="(\S+?)";'
pm = re.search(regx, data)
redirect_uri = pm.group(1) + "&fun=new&version=v2"
base_uri = redirect_uri[:redirect_uri.rfind('/')]
# # Mapping between push_uri and base_uri (order matters) (yes, it really is that quirky..)
# services = [
# ('wx2.qq.com', 'webpush2.weixin.qq.com'),
# ('qq.com', 'webpush.weixin.qq.com'),
# ('web1.wechat.com', 'webpush1.wechat.com'),
# ('web2.wechat.com', 'webpush2.wechat.com'),
# ('wechat.com', 'webpush.wechat.com'),
# ('web1.wechatapp.com', 'webpush1.wechatapp.com'),
# ]
# push_uri = base_uri
# for (searchUrl, pushUrl) in services:
# if base_uri.find(searchUrl) >= 0:
# push_uri = 'https://%s/cgi-bin/mmwebwx-bin' % pushUrl
# break
elif code == '408':  # timeout
pass
return code
def login():
global skey, wxsid, wxuin, pass_ticket, BaseRequest
r = s.get(redirect_uri)
# debugReq(r)
# print r.text
data = r.text.encode('utf-8')
doc = xml.dom.minidom.parseString(data)
root = doc.documentElement
for node in root.childNodes:
if node.nodeName == 'skey':
skey = node.childNodes[0].data
elif node.nodeName == 'wxsid':
wxsid = node.childNodes[0].data
elif node.nodeName == 'wxuin':
wxuin = node.childNodes[0].data
elif node.nodeName == 'pass_ticket':
pass_ticket = node.childNodes[0].data
if not all((skey, wxsid, wxuin, pass_ticket)):
return False
BaseRequest = {
'Uin': int(wxuin),
'Sid': wxsid.encode('unicode_escape'),
'Skey': skey.encode('unicode_escape'),
'DeviceID': deviceId,
}
return True
def responseState(func, BaseResponse):
ErrMsg = BaseResponse['ErrMsg']
Ret = BaseResponse['Ret']
if Ret != 0:
print('func: %s, Ret: %d, ErrMsg: %s' % (func, Ret, ErrMsg))
if Ret != 0:
return False
return True
def webwxinit():
global My, SyncKey
url = base_uri + "/webwxinit?r=-1746916482&lang=zh_CN&pass_ticket=" + pass_ticket
payload = {'BaseRequest': BaseRequest}
headers = {'ContentType': 'application/json; charset=UTF-8'}
r = s.post(url, json=payload, headers=headers)
# debugReq(r)
# print r.text
data = r.text.encode('unicode_escape').decode('string_escape')
dic = json.loads(data)
My = dic['User']
SyncKey = dic['SyncKey']
state = responseState('webwxinit', dic['BaseResponse'])
return state
def webwxsendmsg(friend, content):
clientMsgId = str(int(time.time()))
url = base_uri + "/webwxsendmsg?lang=zh_CN&pass_ticket=" + pass_ticket
Msg = {
'Type': '1',
'Content': content,
'ClientMsgId': clientMsgId.encode('unicode_escape'),
'FromUserName': My['UserName'].encode('unicode_escape'),
'ToUserName': friend["UserName"].encode('unicode_escape'),
'LocalID': clientMsgId.encode('unicode_escape')
}
payload = {'BaseRequest': BaseRequest, 'Msg': Msg}
headers = {'ContentType': 'application/json; charset=UTF-8'}
# print str(payload).decode('string_escape')
data = json.dumps(payload, ensure_ascii=False)
# r = s.post(url, json=payload, headers=headers)
r = s.post(url, data = data, headers=headers)
# debugReq(r)
# print r.text
resp = json.loads(r.text)
if 'BaseResponse' in resp:
if 'Ret' in resp['BaseResponse']:
return int(resp['BaseResponse']['Ret'])
return -1
def webwxsync():
url = base_uri + "/webwxsync?sid=" + wxsid + "&skey=" + skey
payload = {'BaseRequest': BaseRequest, 'SyncKey': SyncKey, 'rr' : int(time.time())}
headers = {'ContentType': 'application/json; charset=UTF-8'}
data = json.dumps(payload, ensure_ascii=False)
r = s.post(url, data = data, headers=headers)
# debugReq(r)
content = r.text.encode('unicode_escape').decode('string_escape')
resp = json.loads(content)
return resp
def parseRecvMsgs(msgs):
mymsgs = []
m = {}
for msg in msgs:
user = findFriend('UserName', msg['FromUserName'])
if user:
m[u'FromUserName'] = user['NickName']
else:
m[u'FromUserName'] = msg['FromUserName']
m[u'Content'] = msg['Content']
m[u'Status'] = msg['Status']
user = findFriend('UserName', msg['ToUserName'])
if user:
m[u'ToUserName'] = user['NickName']
else:
m[u'ToUserName'] = msg['ToUserName']
m[u'MsgType'] = msg['MsgType']
mymsgs.append(m)
print json.dumps(mymsgs, ensure_ascii=False)
return mymsgs
def webwxgetcontact():
global ContactList
url = base_uri + "/webwxgetcontact?r=" + str(int(
time.time()))
r = s.post(url, json={})
# debugReq(r)
content = r.text.encode('unicode_escape').decode('string_escape')
ContactList = json.loads(content)['MemberList']
#with open('contacts.txt', 'w') as f:
# f.write(content)
def getChatroomList():
global ChatContactList
chat_list = []
for user in ContactList:
if user['UserName'].find('@@') != -1: # group chat
chat_list.append({"UserName":user['UserName'], "ChatRoomId": ""})
url = base_uri + "/webwxbatchgetcontact?type=ex&r=%d&lang=zh_CN&pass_ticket=%s"% (time.time(), pass_ticket)
payload = {
'BaseRequest': BaseRequest,
'List': chat_list,
'Count': len(chat_list)
}
r =s.post(url, json=payload)
data = r.text.encode('unicode_escape').decode('string_escape')
with open('chatroom.txt', 'w') as f:
f.write(data)
ChatContactList = json.loads(data)["ContactList"]
# def webwxbatchgetcontact():
# url = "https://wx.qq.com/cgi-bin/mmwebwx-bin/webwxbatchgetcontact?type=ex&r=1453704524520"
def findFriend(key, value):
for friend in ContactList:
if friend[key] == value:
# print friend['NickName']
return friend
return None
def getRandomMsg():
lines = open('regards.txt').read().splitlines()
myline =random.choice(lines)
return myline
def striphtml(data):
p = re.compile(r'<.*?>')
return p.sub('', data)
def main():
global real_send, http_port
parser = OptionParser(usage='%prog [options]',
description='send custom message to your friend on wechat, default dry run')
parser.add_option('-s', '--sendall',action='store_true', help='send message to your friend, please double check')
parser.add_option('-p', '--port',type='int', help='http server port listen')
(options, args) = parser.parse_args()
if options.sendall:
real_send = options.sendall
if options.port:
http_port = options.port
server = HTTPServer(('0.0.0.0', http_port), SimpleHTTPRequestHandler)
thread = threading.Thread(target = server.serve_forever)
thread.daemon = True
print "[+] Starting Http Server"
try:
thread.start()
except KeyboardInterrupt:
server.shutdown()
sys.exit(0)
if not getUUID():
print "[-] UUID get fail"
return
print "[+] Getting QR Image..."
getQRImage()
while waitForLogin() != '200':
pass
if not login():
print "[-] Login fail"
return
print "[+] Login success"
if not webwxinit():
print "[-] Wxinit fail"
webwxgetcontact()
for f in ContactList:
name = striphtml(f['RemarkName'].encode('utf-8'))
if len(name) == 0:
name = striphtml(f['NickName'].encode('utf-8'))
# content="嗨, %s 新年快乐 %s" % (name, "[拥抱]")
content="嗨, %s, %s %s" % (name, getRandomMsg(), "[拥抱]")
# UserName containing '@@' : group chat
# VerifyFlag & 8 != 0      : official/service account
if f['UserName'].find('@@') != -1 or f['VerifyFlag'] & 8 != 0:
# content = "skip " + name
# print "[-] " + content
# webwxsendmsg(My, content=content)
continue
print "[+] Prepare sending to " + name + " ..."
if webwxsendmsg(My, content=content) != 0:
print "[!]\tSent to yourself fail, please check your account."
else:
print "[*]\tSent to yourself success."
if real_send:
# Send to the friend; please double-check first
if webwxsendmsg(f, content=content) != 0:
print "[!]\tSent to " + name + " fail, please check your account."
else:
print "[*]\tSent to " + name + " success."
time.sleep(1)
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
from datetime import date
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponse, HttpResponsePermanentRedirect
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.views.generic import ListView, DetailView
from django.conf import settings
from django.utils import translation
from braces.views import LoginRequiredMixin
from .models import Event, Activity, Attendee, AttendeeReceipt, Content, Logo, Organization
from .pdf import createPDF
from .png import createPNG
class HomeView(DetailView):
model = Event
template_name = "pages/home.html"
def get_object(self, **kwargs):
try:
object = super(HomeView, self).get_object(**kwargs)
except AttributeError:
object = Event.objects.filter(status='frontpage')[0]
return object
def get_context_data(self, **kwargs):
context = super(HomeView, self).get_context_data(**kwargs)
context['home'] = True
context['logos'] = Logo.objects.filter(event=self.object)
return context
class ContentView(DetailView):
model = Content
page = None
def get_object(self):
if self.kwargs:
return get_object_or_404(self.model, event__slug=self.kwargs['slug'], page=self.page)
else:
#FIXME what if there are multiple events?
return get_object_or_404(self.model, page=self.page)
class AttendeeReceiptView(LoginRequiredMixin, DetailView):
model = AttendeeReceipt
def get_object(self):
try:
attendee = Attendee.objects.get(username=self.kwargs.get("username"))
if attendee.balance() > 0:
raise NameError('Balance > 0')
receipt = AttendeeReceipt.objects.get(attendee__username=self.kwargs.get("username"))
return receipt
except AttendeeReceipt.DoesNotExist:
attendee = Attendee.objects.get(username=self.kwargs.get("username"))
new_receipt = AttendeeReceipt(attendee=attendee, date=date.today())
new_receipt.save()
return new_receipt
class AttendeePDFView(LoginRequiredMixin, DetailView):
model = Attendee
slug_field = "username"
slug_url_kwarg = "username"
template_name = "app/attendee_badge.html"
response = HttpResponse(content_type="application/pdf")
def get(self, request, username):
self.response = HttpResponse(content_type="application/pdf")
obj = self.get_object()
self.response['Content-Disposition'] = 'inline; filename="%s-%s.pdf"' % ( obj.pk, obj )
createPDF(obj, self.response)
return self.response
class AttendeePNGView(LoginRequiredMixin, DetailView):
model = Attendee
slug_field = "username"
slug_url_kwarg = "username"
response = HttpResponse(content_type="image/png")
def get(self, request, username):
self.response = HttpResponse(content_type="image/png")
obj = self.get_object()
self.response['Content-Disposition'] = 'inline; filename="%s-%s.png"' % ( obj.pk, obj )
createPNG(obj, self.response)
return self.response
class AttendeeBadgeView(LoginRequiredMixin, DetailView):
model = Attendee
slug_field = "username"
slug_url_kwarg = "username"
template_name = "app/attendee_badge.html"
class AttendeeDetailView(LoginRequiredMixin, DetailView):
model = Attendee
slug_field = "username"
slug_url_kwarg = "username"
class SpeakersDetailView(DetailView):
model = Attendee
template_name = 'app/speaker_detail.html'
slug_field = 'username'
def get_context_data(self, **kwargs):
context = super(SpeakersDetailView, self).get_context_data(**kwargs)
context['event'] = Event.objects.get(slug=self.kwargs['eventslug'])
return context
class SpeakersView(ListView):
model = Attendee
template_name = 'app/speaker_list.html'
def get_queryset(self):
return Attendee.objects.filter(event__slug=self.kwargs['slug'], type=Attendee.SPEAKER)
def get_context_data(self, **kwargs):
context = super(SpeakersView, self).get_context_data(**kwargs)
context['event'] = Event.objects.get(slug=self.kwargs['slug'])
return context
class OrganizationsView(ListView):
model = Organization
filter = 'O'
title = 'Organizations'
def get_queryset(self):
return Organization.objects.filter(event__slug=self.kwargs['slug'], type=self.filter)
def get_context_data(self, **kwargs):
context = super(OrganizationsView, self).get_context_data(**kwargs)
context['title'] = self.title
#context['event'] = Event.objects.get(slug=self.kwargs['slug'])
return context
#class ActivitiesView(LoginRequiredMixin, ListView):
class ActivitiesView(ListView):
model = Activity
def get_queryset(self):
return Activity.objects.filter(event__slug=self.kwargs['slug'])
def get_context_data(self, **kwargs):
context = super(ActivitiesView, self).get_context_data(**kwargs)
context['event'] = Event.objects.get(slug=self.kwargs['slug'])
return context
# orphan
def event(request,slug):
#if not url.startswith('/'):
# url = '/' + url
template_name = "pages/event.html"
if slug.endswith('/'):
slug = slug[:-1]
try:
f = get_object_or_404(Event, slug=slug)
except Http404:
if not slug.endswith('/') and settings.APPEND_SLASH:
slug += '/'
f = get_object_or_404(Event, slug=slug)
return HttpResponsePermanentRedirect('%s/' % request.path)
else:
raise
return render(request, template_name, context={'object': f})
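# A minimal URLconf sketch for wiring up the views above. The capture names
# match the kwargs the views read ('slug', 'username', 'eventslug'); the
# route paths themselves are assumptions of this sketch, not taken from the
# project.
#
# from django.conf.urls import url
# from . import views
#
# urlpatterns = [
#     url(r'^$', views.HomeView.as_view(), name='home'),
#     url(r'^(?P<slug>[-\w]+)/speakers/$', views.SpeakersView.as_view(), name='speakers'),
#     url(r'^(?P<eventslug>[-\w]+)/speakers/(?P<slug>[-\w]+)/$',
#         views.SpeakersDetailView.as_view(), name='speaker-detail'),
#     url(r'^(?P<slug>[-\w]+)/activities/$', views.ActivitiesView.as_view(), name='activities'),
#     url(r'^attendees/(?P<username>[-\w]+)/badge.pdf$',
#         views.AttendeePDFView.as_view(), name='attendee-pdf'),
# ]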
|
import os.path as osp
import os
from six.moves import shlex_quote
from rlf.rl import utils
import sys
import pipes
import time
import numpy as np
import random
import datetime
import string
import copy
from rlf.exp_mgr import config_mgr
from rlf.rl.loggers.base_logger import BaseLogger
from collections import deque, defaultdict
class TbLogger(BaseLogger):
def __init__(self, tb_log_dir=None):
super().__init__()
self.tb_log_dir = tb_log_dir
def init(self, args):
super().init(args)
if self.tb_log_dir is None:
self.tb_log_dir = args.log_dir
self.writer = self._create_writer(args, self.tb_log_dir)
def _create_writer(self, args, log_dir):
from tensorboardX import SummaryWriter
rnd_id = ''.join(random.sample(string.ascii_uppercase + string.digits, k=4))
log_dir = osp.join(log_dir, args.env_name, args.prefix + '-' + rnd_id)
writer = SummaryWriter(log_dir)
return writer
def _internal_log_vals(self, key_vals, step_count):
for k, v in key_vals.items():
self.writer.add_scalar('data/' + k, v, step_count)
def close(self):
self.writer.close()
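# Minimal usage sketch (assumptions of this sketch: BaseLogger.init only
# records args, and args carries the three fields this class reads:
# env_name, prefix, log_dir).
def _example_tb_logger():
    from argparse import Namespace
    args = Namespace(env_name='CartPole-v1', prefix='demo', log_dir='/tmp/tb')
    logger = TbLogger()
    logger.init(args)  # creates the tensorboardX SummaryWriter
    logger._internal_log_vals({'reward': 1.0}, step_count=0)
    logger.close()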
|
import os
from pathlib import Path
import json
import argparse
from pprint import pprint
from tabulate import tabulate
from app.db_connector import *
at_dir = Path('frontend/src')
def language_settings():
languages = [
'en',
'ja'
]
dictionary = {k: {} for k in languages}
for p in (at_dir / 'lang').glob('*.csv'):
with p.open(mode='r') as f:
for line in f.read().split('\n')[1:]:
keys = line.split(',')
if len(keys) < 2:
continue
for i, lang in enumerate(languages):
dictionary[lang].setdefault(p.stem.capitalize(), {})
dictionary[lang][p.stem.capitalize()][keys[0]] = keys[i + 1].replace('~', ',')
with open(at_dir / 'lang/dictionary.json', 'w') as f:
f.write(json.dumps(dictionary))
pprint(dictionary)
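# For reference, the CSV layout assumed by language_settings() (a made-up
# example; '~' stands in for a literal comma in the translated text):
#
#   frontend/src/lang/home.csv
#       key,en,ja
#       greeting,Hello~ world,こんにちは~世界
#
# which yields dictionary['en']['Home']['greeting'] == 'Hello, world'.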
default_infos = {
'user': [{
'user_name': 'master',
'user_password': '000',
'email': 'hogehoge@test.com',
'phone_number': '000-0000-0000',
'nick_name': 'admin',
'real_name': 'admin',
'zipcode': ['171', '0033'],
'address': ['東京都豊島区高田', '2-5-19'],
'ocupation': [],
'companies': [],
'projects': []
}, {
'user_name': 'pysan3',
'user_password': '000',
'email': 'hogehoge@test.com',
'phone_number': '000-0000-0000',
'nick_name': 'takuto',
'real_name': 'takuto',
'zipcode': ['171', '0033'],
'address': ['東京都豊島区高田', '2-5-19'],
'ocupation': [],
'companies': [],
'projects': []
}, {
'user_name': 'teppei',
'user_password': '000',
'email': 'hogehoge@test.com',
'phone_number': '000-0000-0000',
'nick_name': 'teppei',
'real_name': 'teppei',
'zipcode': ['000', '0000'],
'address': ['hoge', 'fuga'],
'ocupation': [],
'companies': [],
'projects': []
}]
}
def db_init(auto_yes=False):
if auto_yes or input('This will delete all data in the DB. Are you sure you know what you are doing? [y/N] ') == 'y':
print('initializing DB')
import app.app as backapp
from app.db_connector import Base, engine
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
for user in default_infos['user']:
backapp.signup(user)
else:
print('Not initializing the DB.')
def show_all_data(name, columns, data):
print(name.upper())
print(tabulate([[d[col] for col in columns] for d in data], columns, tablefmt='github'))
def find_tables():
tables = []
g = globals()
names = engine.table_names()
for t in g:
if t.lower() in names:
# if input(t + ' [Y/n]: ') == 'n':
# continue
tables.append(g[t])
return tables
def db_show():
with SessionContext() as session:
for t in find_tables():
show_all_data(str(t), t.__table__.c.keys(), [DBtoDict(s) for s in session.query(t).all()])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='project management helper: DB, build, language files, git')
group = parser.add_mutually_exclusive_group()
group.add_argument('-d', '--db', type=str, help="DB command: 'init' (delete and recreate all data) or 'show'")
group.add_argument('-g', '--git', type=str, help='git push with [commit message]')
parser.add_argument('-p', '--prod', action='store_true', help='npm run prod')
parser.add_argument('-b', '--build', action='store_true', help='npm run local')
parser.add_argument('-t', '--test', action='store_true', help='npm run dev')
parser.add_argument('-r', '--run', action='store_true', help='python run.py')
parser.add_argument('-l', '--lang', action='store_true', help='language json')
parser.add_argument('-y', '--yes', action='store_true', help='pass yes to all verifications')
args = parser.parse_args()
if args.db:
if args.db == 'init':
db_init(args.yes)
elif args.db == 'show':
db_show()
else:
print('Couldn\'t find a corresponding command')
print('init\tclear all data in DB')
print('show\tshow all data in DB')
if args.lang:
language_settings()
if args.build:
os.system('cd frontend; npm run local')
if args.prod:
os.system('cd frontend; npm run prod')
if args.test:
os.system('cd frontend; npm run dev')
if args.run:
os.system('python run.py')
if args.git:
os.system('git add .')
os.system(f'git commit -m "{args.git}"')
os.system('git push origin master')
|
# coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.shared_container_info import SharedContainerInfo # noqa: E501
from openapi_client.rest import ApiException
class TestSharedContainerInfo(unittest.TestCase):
"""SharedContainerInfo unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test SharedContainerInfo
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = openapi_client.models.shared_container_info.SharedContainerInfo() # noqa: E501
if include_optional :
return SharedContainerInfo(
all_compliance = openapi_client.models.vuln/all_compliance.vuln.AllCompliance(
compliance = [
openapi_client.models.vuln/vulnerability.vuln.Vulnerability(
applicable_rules = [
''
],
binary_pkgs = [
''
],
block = True,
cause = '',
cri = True,
custom = True,
cve = '',
cvss = 1.337,
description = '',
discovered = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
exploit = '[\"\",\"exploit-db\",\"exploit-windows\"]',
fix_date = 56,
fix_link = '',
function_layer = '',
grace_period_days = 56,
id = 56,
layer_time = 56,
link = '',
package_name = '',
package_version = '',
published = 56,
risk_factors = {
'key' : ''
},
severity = '',
status = '',
templates = [
'[\"PCI\",\"HIPAA\",\"NIST SP 800-190\",\"GDPR\",\"DISA STIG\"]'
],
text = '',
title = '',
twistlock = True,
type = '[\"container\",\"image\",\"host_config\",\"daemon_config\",\"daemon_config_files\",\"security_operations\",\"k8s_master\",\"k8s_worker\",\"k8s_federation\",\"linux\",\"windows\",\"istio\",\"aws\",\"serverless\",\"custom\",\"docker_stig\"]',
vec_str = '',
vuln_tag_infos = [
openapi_client.models.vuln/tag_info.vuln.TagInfo(
comment = '',
name = '', )
], )
],
enabled = True, ),
app = '',
cloud_metadata = openapi_client.models.common/cloud_metadata.common.CloudMetadata(
account_id = '',
image = '',
labels = [
openapi_client.models.common/external_label.common.ExternalLabel(
key = '',
source_name = '',
source_type = '[\"namespace\",\"deployment\",\"aws\",\"azure\",\"gcp\"]',
timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
value = '', )
],
name = '',
provider = '[\"aws\",\"azure\",\"gcp\",\"alibaba\",\"others\"]',
region = '',
resource_id = '',
type = '', ),
cluster = '',
compliance_distribution = openapi_client.models.vuln/distribution.vuln.Distribution(
critical = 56,
high = 56,
low = 56,
medium = 56,
total = 56, ),
compliance_issues = [
openapi_client.models.vuln/vulnerability.vuln.Vulnerability(
applicable_rules = [
''
],
binary_pkgs = [
''
],
block = True,
cause = '',
cri = True,
custom = True,
cve = '',
cvss = 1.337,
description = '',
discovered = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
exploit = '[\"\",\"exploit-db\",\"exploit-windows\"]',
fix_date = 56,
fix_link = '',
function_layer = '',
grace_period_days = 56,
id = 56,
layer_time = 56,
link = '',
package_name = '',
package_version = '',
published = 56,
risk_factors = {
'key' : ''
},
severity = '',
status = '',
templates = [
'[\"PCI\",\"HIPAA\",\"NIST SP 800-190\",\"GDPR\",\"DISA STIG\"]'
],
text = '',
title = '',
twistlock = True,
type = '[\"container\",\"image\",\"host_config\",\"daemon_config\",\"daemon_config_files\",\"security_operations\",\"k8s_master\",\"k8s_worker\",\"k8s_federation\",\"linux\",\"windows\",\"istio\",\"aws\",\"serverless\",\"custom\",\"docker_stig\"]',
vec_str = '',
vuln_tag_infos = [
openapi_client.models.vuln/tag_info.vuln.TagInfo(
comment = '',
name = '', )
], )
],
compliance_issues_count = 56,
compliance_risk_score = 1.337,
external_labels = [
openapi_client.models.common/external_label.common.ExternalLabel(
key = '',
source_name = '',
source_type = '[\"namespace\",\"deployment\",\"aws\",\"azure\",\"gcp\"]',
timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
value = '', )
],
id = '',
image = '',
image_id = '',
image_name = '',
infra = True,
installed_products = openapi_client.models.shared/installed_products.shared.InstalledProducts(
apache = '',
aws_cloud = True,
crio = True,
docker = '',
docker_enterprise = True,
has_package_manager = True,
k8s_api_server = True,
k8s_controller_manager = True,
k8s_etcd = True,
k8s_federation_api_server = True,
k8s_federation_controller_manager = True,
k8s_kubelet = True,
k8s_proxy = True,
k8s_scheduler = True,
kubernetes = '',
openshift = True,
os_distro = '',
serverless = True,
swarm_manager = True,
swarm_node = True, ),
labels = [
''
],
name = '',
namespace = '',
network = openapi_client.models.shared/container_network.shared.ContainerNetwork(
ports = [
openapi_client.models.shared/container_port.shared.ContainerPort(
container = 56,
host = 56,
host_ip = '',
listening = True,
nat = True, )
], ),
network_settings = openapi_client.models.shared/docker_network_info.shared.DockerNetworkInfo(
ip_address = '',
mac_address = '',
networks = [
openapi_client.models.shared/network_info.shared.NetworkInfo(
ip_address = '',
mac_address = '',
name = '', )
],
ports = [
openapi_client.models.shared/port.shared.Port(
container_port = '',
host_ip = '',
host_port = 56, )
], ),
processes = [
openapi_client.models.shared/container_process.shared.ContainerProcess(
name = '', )
],
profile_id = '',
size_bytes = 56
)
else :
return SharedContainerInfo(
)
def testSharedContainerInfo(self):
"""Test SharedContainerInfo"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
# Generated by Django 4.0.1 on 2022-03-01 21:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0007_earned_badge_visit_remove_player_badges_and_more'),
]
operations = [
migrations.AddField(
model_name='player',
name='test',
field=models.CharField(default='test', max_length=10, verbose_name='Test'),
),
]
|
# This Task is the base task that we will be executing as a second step (see task_piping.py)
# In order to make sure this experiment is registered in the platform, you must execute it once.
from trains import Task
# Initialize the task pipe's first task used to start the task pipe
task = Task.init('examples', 'Toy Base Task')
# Create a dictionary for hyper-parameters
params = {}
# Add a parameter and value to the dictionary
params['Example_Param'] = 1
# Connect the hyper-parameter dictionary to the task
task.connect(params)
# Print the value to demonstrate that it is set by the initiating task.
print("Example_Param is", params['Example_Param'])
|
from docbarcodes.zxingjpype.zxingreader import decodeURIs
def test_jpype_qrcode():
file = "data/single/qr-code-wikipedia.png"
results = decodeURIs([file])
assert results[0][0].text=='http://en.m.wikipedia.org'
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 10 18:32:07 2018
@author: admin
"""
# import the necessary packages
from pyimagesearch.shapedetector import ShapeDetector
import argparse
import imutils
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
help="path to the input image")
args = vars(ap.parse_args())
# load the image and resize it to a smaller factor so that
# the shapes can be approximated better
image = cv2.imread(args["image"])
resized = imutils.resize(image, width=300)
ratio = image.shape[0] / float(resized.shape[0])
# convert the resized image to grayscale, blur it slightly,
# and threshold it
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
# find contours in the thresholded image and initialize the
# shape detector
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)  # handles the differing return shapes of OpenCV 2/3/4
sd = ShapeDetector()
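# The script stops after constructing the detector. A typical continuation,
# shown here only as a hedged sketch (it assumes ShapeDetector exposes a
# detect(contour) method returning a shape name, as in the pyimagesearch
# tutorial this follows):
#
# for c in cnts:
#     M = cv2.moments(c)
#     if M["m00"] == 0:
#         continue
#     cX = int((M["m10"] / M["m00"]) * ratio)
#     cY = int((M["m01"] / M["m00"]) * ratio)
#     shape = sd.detect(c)
#     c = (c.astype("float") * ratio).astype("int")
#     cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
#     cv2.putText(image, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
# cv2.imshow("Image", image)
# cv2.waitKey(0)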
|
import pytest
from aoc_cqkh42.year_2020 import day_15
@pytest.mark.parametrize(
'data, answer',
[
('1,3,2', 1),
('2,1,3', 10),
('1,2,3', 27),
('2,3,1', 78),
('3,2,1', 438),
('3,1,2', 1836),
('0,3,6', 436)
]
)
def test_part_a(data, answer):
assert day_15.part_a(data) == answer
@pytest.mark.parametrize(
'data, answer',
[
('1,3,2', 2578),
('2,1,3', 3544142),
('1,2,3', 261214),
('2,3,1', 6895259),
('3,2,1', 18),
('3,1,2', 362),
('0,3,6', 175594)
]
)
def test_part_b(data, answer):
assert day_15.part_b(data) == answer
|
from django.contrib import admin
from .models import Website, DataPoint
# Register your models here.
admin.site.register(Website)
admin.site.register(DataPoint)
|
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
class LoginSerializer(serializers.Serializer):
username = serializers.CharField()
password = serializers.CharField()
def create(self, validated_data):
username = validated_data.get('username')
password = validated_data.get('password')
request = self.context.get('request')
# Debug logging only; never print the raw password.
print(username)
try:
user = authenticate(username=username, password=password)
if user:
print("PRE-LOGIN", user.get_full_name())
login(request, user)
print("POST-LOGIN", user.get_full_name())
return user
except Exception as e:
print(e)
raise serializers.ValidationError({
'message': 'Could not log in, username or password is incorrect.'
})
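# Minimal usage sketch (hypothetical view, not part of this module): the
# serializer needs the request in its context so login() can attach the
# session.
#
# from rest_framework.views import APIView
# from rest_framework.response import Response
#
# class LoginView(APIView):
#     def post(self, request):
#         serializer = LoginSerializer(data=request.data, context={'request': request})
#         serializer.is_valid(raise_exception=True)
#         user = serializer.save()  # calls create(), which logs the user in
#         return Response({'username': user.username})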
|
from howiml.utils import utilities
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
colors = list(utilities.getColorScheme().values())
sns.set(context='paper', style='whitegrid', palette=sns.color_palette(colors))
def correlationMatrix(df):
# Calculates correlation matrix of a pandas dataframe
if 'Date' in df.columns:
df = df.drop('Date', axis=1, inplace=False)
if 'Index' in df.columns:
df = df.drop('Index', axis=1, inplace=False)
X = df.values
standardScaler = StandardScaler()
X = standardScaler.fit_transform(X)
covMat = np.cov(X.T)
return covMat
def pca(df, numberOfComponents, relevantColumns=None, columnDescriptions=None):
# Calculates Principal Component Analysis of a pandas dataframe
if 'Date' in df.columns:
df = df.drop('Date', axis=1, inplace=False)
if 'Index' in df.columns:
df = df.drop('Index', axis=1, inplace=False)
X = df.values
standardScaler = StandardScaler()
X = standardScaler.fit_transform(X)
if numberOfComponents < 1 or numberOfComponents > df.shape[1]:
numberOfComponents = df.shape[1]
pca = PCA(n_components=numberOfComponents)
pca.fit(X)
return pca
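# Illustrative usage (synthetic data; an assumption of this sketch): the
# returned sklearn PCA object exposes explained_variance_ratio_, the usual
# basis for choosing how many components to keep.
def _example_pca_variance():
    rng = np.random.RandomState(0)
    df = pd.DataFrame(rng.randn(200, 4), columns=list('abcd'))
    model = pca(df, numberOfComponents=2)
    return model.explained_variance_ratio_  # two ratios, each in [0, 1]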
def pcaPlot(df, timestamps=None, plotTitle=None):
# Calculates and plots a 2D Principal Component Analysis decomposition of a pandas dataframe
if timestamps is not None:
traintime, testtime, validtime = timestamps
df_train, df_test = utilities.getTestTrainSplit(df, traintime, testtime)
train_vals = df_train.values
else:
train_vals = df.values
sc = StandardScaler()
train_vals = sc.fit_transform(train_vals)
numberOfComponents = 2
pca = PCA(n_components=numberOfComponents)
pca.fit(train_vals)
X = df.values
X = sc.transform(X)
X = pca.transform(X)
df_pca = pd.DataFrame(data = X, index=df.index, columns=['pca1', 'pca2'])
if timestamps is not None:
df_pca_train, df_pca_test = utilities.getTestTrainSplit(df_pca, traintime, testtime)
else:
df_pca_train, df_pca_test = None, df_pca
fig = plt.figure(figsize = (8,4))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel('PCA 1', fontsize=10)
ax.set_ylabel('PCA 2', fontsize=10)
ax.set_title(((plotTitle + '\n') if plotTitle is not None else '') + 'PCA plot', fontsize=12)
cmap = sns.cubehelix_palette(as_cmap=True)
indexx = list(range(df_pca_test.shape[0]))
if df_pca_train is not None:
ax.scatter(df_pca_train['pca1'], df_pca_train['pca2'], c = 'red')
points = ax.scatter(df_pca_test['pca1'], df_pca_test['pca2'], c = indexx, cmap = cmap, alpha=0.7)
fig.colorbar(points)
plt.show()
def pcaDuoPlot(df_1_train, df_1_test, df_2_test, plotTitle=None):
# Calculates and plots a 2D Principal Component Analysis decomposition
# based on one training and two testing pandas dataframes
train_vals = df_1_train.values
sc = StandardScaler()
train_vals = sc.fit_transform(train_vals)
numberOfComponents = 2
pca = PCA(n_components=numberOfComponents)
pca.fit(train_vals)
X_1_train = df_1_train.values
X_1_train = sc.transform(X_1_train)
X_1_train = pca.transform(X_1_train)
df_train1 = pd.DataFrame(data = X_1_train, index=df_1_train.index, columns=['pca1', 'pca2'])
df_train1 = df_train1.resample("180min").mean()
X_1_test = df_1_test.values
X_1_test = sc.transform(X_1_test)
X_1_test = pca.transform(X_1_test)
df_test1 = pd.DataFrame(data = X_1_test, index=df_1_test.index, columns=['pca1', 'pca2'])
df_test1 = df_test1.resample("180min").mean()
X_2_test = df_2_test.values
X_2_test = sc.transform(X_2_test)
X_2_test = pca.transform(X_2_test)
df_test2 = pd.DataFrame(data = X_2_test, index=df_2_test.index, columns=['pca1', 'pca2'])
df_test2 = df_test2.resample("180min").mean()
fig,axs = plt.subplots(nrows=1, ncols=2, figsize=(10, 4), dpi=100)
fig.tight_layout(w_pad=3.0)
ax, ax2 = axs
ax.set_xlabel('PCA 1', fontsize=10)
ax.set_ylabel('PCA 2', fontsize=10)
ax.set_title(((plotTitle + '\n') if plotTitle is not None else '') + 'PCA plot timeseries part 1', fontsize=12)
cmap1 = sns.cubehelix_palette(reverse=False, as_cmap=True)
cmap2 = sns.cubehelix_palette(reverse=False, start=50.0, rot=0.1, as_cmap=True)
index1 = list(range(df_test1.shape[0]))
index2 = list(range(df_test2.shape[0]))
ax.scatter(df_train1['pca1'], df_train1['pca2'], c = 'red', alpha=0.3)
points1 = ax.scatter(df_test1['pca1'], df_test1['pca2'], c = index1, cmap = cmap1, alpha=1.0)
fig.colorbar(points1, ax=ax)
ax2.set_xlabel('PCA 1', fontsize=10)
ax2.set_ylabel('PCA 2', fontsize=10)
ax2.set_title(((plotTitle + '\n') if plotTitle is not None else "") + 'PCA plot timeseries part 2', fontsize=12)
ax2.scatter(df_train1['pca1'], df_train1['pca2'], c = 'red', alpha=0.3)
points2 = ax2.scatter(df_test2['pca1'], df_test2['pca2'], c = index2, cmap = cmap2, alpha=1.0)
fig.colorbar(points2, ax=ax2)
plt.show()
def pairplot(df):
# Plots 2D pair plots of all columns in a pandas dataframe
scaler = StandardScaler()
scaled = scaler.fit_transform(df.values)
scaled_df = pd.DataFrame(scaled, index=df.index, columns=df.columns)
if scaled_df.shape[0] > 1000:
scaled_df = scaled_df.resample('H').mean()
sns.pairplot(scaled_df, vars=scaled_df.columns, height=1.1)
plt.show()
def scatterplot(df):
# Plots 2D scatter plots of all columns in a pandas dataframe
pd.plotting.scatter_matrix(df, alpha=0.2, figsize=(6, 6), diagonal='kde')
plt.show()
def correlationPlot(df, title="Correlation plot"):
# Plots the correlation matrix of a pandas dataframe
scaler = StandardScaler()
scaled = scaler.fit_transform(df.values)
scaled_df = pd.DataFrame(scaled, index=df.index, columns=df.columns)
corr = scaled_df.corr()
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
f, ax = plt.subplots(figsize=(5,5), dpi=100)
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap,
square=True, linewidths=1, cbar_kws={"shrink": .6}, vmin=-1, vmax=1)
ax.set_title(title)
plt.show()
def correlationDuoPlot(df1, df2, title1="Correlation plot", title2="Correlation plot"):
# Plots the correlation matrix of two pandas dataframes side by side
scaler1 = StandardScaler()
scaled1 = scaler1.fit_transform(df1.values)
scaled_df1 = pd.DataFrame(scaled1, index=df1.index, columns=df1.columns)
scaler2 = StandardScaler()
scaled2 = scaler2.fit_transform(df2.values)
scaled_df2 = pd.DataFrame(scaled2, index=df2.index, columns=df2.columns)
corr1 = scaled_df1.corr()
corr2 = scaled_df2.corr()
mask = np.zeros_like(corr1, dtype=bool)
mask[np.triu_indices_from(mask)] = True
fig,axs = plt.subplots(nrows=1, ncols=2, figsize=(10, 5), dpi=100)
fig.tight_layout(w_pad=8.0)
ax1, ax2 = axs
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr1, ax=ax1, mask=mask, cmap=cmap,
square=True, linewidths=1, cbar_kws={"shrink": .6}, vmin=-1, vmax=1)
sns.heatmap(corr2, ax=ax2, mask=mask, cmap=cmap,
square=True, linewidths=1, cbar_kws={"shrink": .6}, vmin=-1, vmax=1)
ax1.set_title(title1)
ax2.set_title(title2)
plt.show()
def correlationDifferencePlot(df1, df2, title="Correlation difference plot"):
# Plots the correlation matrix difference between two pandas dataframes
scaler1 = StandardScaler()
scaled1 = scaler1.fit_transform(df1.values)
scaled_df1 = pd.DataFrame(scaled1, index=df1.index, columns=df1.columns)
scaler2 = StandardScaler()
scaled2 = scaler2.fit_transform(df2.values)
scaled_df2 = pd.DataFrame(scaled2, index=df2.index, columns=df2.columns)
corr1 = scaled_df1.corr()
corr2 = scaled_df2.corr()
corr_diff = corr1.sub(corr2)
mask = np.zeros_like(corr_diff, dtype=bool)
mask[np.triu_indices_from(mask)] = True
f, ax = plt.subplots(figsize=(5,5), dpi=100)
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr_diff, mask=mask, cmap=cmap,
square=True, linewidths=1, cbar_kws={"shrink": .6}, vmin=-1, vmax=1)
ax.set_title(title)
plt.show()
def valueDistributionSingle(df, traintime, testtime):
# Plots values and value distributions for a pandas dataframe
# NB: all plots are put in a single figure with n rows
scaler = StandardScaler()
scaled = scaler.fit_transform(df.values)
scaled_df = pd.DataFrame(scaled, index=df.index, columns=df.columns)
df_train, df_test = utilities.getTestTrainSplit(scaled_df, traintime, testtime)
height = df_train.shape[-1]*5
fig, axs = plt.subplots(nrows=df_train.shape[-1], ncols=2, figsize=(15,height), dpi=100)
#fig.tight_layout()
for k in range(df_train.shape[-1]):
ax1, ax2 = axs[k, 0], axs[k, 1]
trainEndStr=[item for sublist in traintime for item in sublist]
for i, trainEndString in enumerate(trainEndStr):
ax1.axvline(x=pd.to_datetime(trainEndString, dayfirst=True), color='black' if i % 2 == 0 else 'blue', label='start training' if i % 2 == 0 else 'end training')
ax1.plot(df_train.iloc[:,k], label="train",
marker="o", ms=1.5, lw=0)
ax1.plot(df_test.iloc[:,k], label="test",
marker="o", ms=1.5, lw=0)
ax1.set_xticks(ax1.get_xticks()[3::3])
ax1.set_ylabel(df_train.columns[k])
sns.distplot(df_train.iloc[:,k], ax=ax2, label="train", kde=True, kde_kws={"lw":2.5})
sns.distplot(df_test.iloc[:,k], ax=ax2, label="test", kde=True, kde_kws={"lw":2.5})
ax2.set_xlim((-3,3))
ax2.legend(loc="upper right")
plt.show()
def valueDistribution(df, traintime, testtime, columnDescriptions, columnUnits):
# Plots values and value distributions for a pandas dataframe
# NB: all columns are plotted in separate figures
scaler = StandardScaler()
scaled = scaler.fit_transform(df.values)
scaled_df = pd.DataFrame(scaled, index=df.index, columns=df.columns)
df_train, df_test = utilities.getTestTrainSplit(scaled_df, traintime, testtime)
for k, column in enumerate(df_train.columns):
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(10,3.0), dpi=100)
#fig.tight_layout()
ax1, ax2 = axs[0], axs[1]
fig.suptitle(column + " " + columnDescriptions[column])
trainEndStr=[item for sublist in traintime for item in sublist]
for i, trainEndString in enumerate(trainEndStr):
ax1.axvline(x=pd.to_datetime(trainEndString, dayfirst=True), color='black' if i % 2 == 0 else 'blue', label='start training' if i % 2 == 0 else 'end training')
ax1.plot(df_train.iloc[:,k], label="train",
marker="o", ms=1.5, lw=0)
ax1.plot(df_test.iloc[:,k], label="test",
marker="o", ms=1.5, lw=0)
ax1.set_xticks(ax1.get_xticks()[3::3])
ax1.set_ylabel(columnUnits[column] + ", standardized")
ax1.set_xlabel('Date')
sns.distplot(df_train.iloc[:,k], ax=ax2, label="train", kde=True, kde_kws={"lw":2.5})
sns.distplot(df_test.iloc[:,k], ax=ax2, label="test", kde=True, kde_kws={"lw":2.5})
ax2.set_xlim((-3,3))
ax2.legend(loc="upper right")
ax2.set_ylabel('Ratio')
ax2.set_xlabel(columnUnits[column] + ", standardized")
plt.show()
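# Minimal usage sketch (assumptions: a numeric DataFrame with a DatetimeIndex,
# plus the host project's utilities.getTestTrainSplit, traintime/testtime
# specs and column metadata dicts; none of these are defined in this file):
#   correlationPlot(df, title="Sensor correlations")
#   correlationDuoPlot(df_train, df_test, "Train", "Test")
#   valueDistribution(df, traintime, testtime, columnDescriptions, columnUnits)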
|
import tkinter as tk
win = tk.Tk()
win.title("C语言中文网")
win.geometry('400x350+200+200')
win.iconbitmap('C:/Users/Administrator/Desktop/C语言中文网logo.ico')
win.rowconfigure(1, weight=1)
win.columnconfigure(0, weight=1)
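# weight=1 lets row 1 and column 0 absorb any spare space when the window is resized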
# Left-hand frame
frame_left = tk.LabelFrame(win, bg='red')
tk.Label(frame_left, text='Left label 1', bg='green', width=10, height=5).grid(row=0, column=0)
tk.Label(frame_left, text='Left label 2', bg='blue', width=10, height=5).grid(row=1, column=1)
frame_left.grid(row=0, column=0)
# Right-hand frame
frame_right = tk.LabelFrame(win, bg='yellow')
tk.Label(frame_right, text='Right label 1', bg='gray', width=10, height=5).grid(row=0, column=1)
tk.Label(frame_right, text='Right label 2', bg='pink', width=10, height=5).grid(row=1, column=0)
tk.Label(frame_right, text='Right label 3', bg='purple', width=10, height=5).grid(row=1, column=1)
frame_right.grid(row=1, column=0)
frame_left.columnconfigure(2, weight=1)
frame_right.rowconfigure(0, weight=1)
frame_right.columnconfigure(0, weight=1)
win.mainloop()
|
#!/usr/bin/env python2.3
#
# Copyright (C) 2004 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
"""\
=====================================================================
"Adaptive Comms Components" - can add and remove inboxes and outboxes
=====================================================================
An AdaptiveCommsComponent is just like an ordinary component but with the
ability to create and destroy extra inboxes and outboxes whilst it is running.
* An AdaptiveCommsComponent is based on an Axon.Component.component
There are other variants on the basic component:
* Axon.ThreadedComponent.threadedcomponent
* Axon.ThreadedComponent.threadedadaptivecommscomponent
If your component needs to block - eg. wait on a system call; then make it a
'threaded' component. If it needs to change what inboxes or outboxes it has at
runtime, then make it an 'adaptive' component. Otherwise, simply make it an
ordinary component!
Adding and removing inboxes and outboxes
----------------------------------------
To add a new inbox or outbox call self.addInbox() or self.addOutbox() specifying
a base name for the inbox/outbox. The created inbox or outbox is immediately
ready to be used.::
actualInboxName = self.addInbox("inputData")
actualOutboxName = self.addOutbox("outputData")
You specify a name you would ideally like the inbox or outbox to be given. If
that name is already taken then a variant of it will be generated. Calls to
addInbox() and addOutbox() therefore return the actual name the inbox or outbox
was given. You should always use this returned name. It is unwise to assume your
ideal choice of name has been allocated!
To remove a box, call self.deleteInbox() or self.deleteOutbox() specifying the
name of the box to be deleted::
self.deleteInbox(actualInboxName)
self.deleteOutbox(actualOutboxName)
When deleting an inbox or outbox, try to make sure that any linkages involving
that inbox/outbox have been destroyed. This includes not only linkages created
by your component, but any created by other components too.
Tracking resources
------------------
adaptivecommscomponent also includes an ability to track associations between
resources and inboxes, outboxes and other information.
For example, you might want to associate another component (that your component
is interacting with) with the set of inboxes, outboxes and any other info that
are being used to communicate with it.
You can also associate particular inboxes or outboxes with those resources. This
therefore allows you to map both ways: "which resource relates to this inbox?"
and "which inboxes relate to this resource?"
For example, suppose a request leads to your component creating an inbox and
outbox to deal with another component. You might store these as a tracked
resource, along with other information, such as the 'other' component and any
state or linkages that were created; and associate this resource with the inbox
from which data might arrive::
def wireUpToOtherComponent(self, theComponent):
newIn = self.addInbox("commsIn")
newOut = self.addOutbox("commsOut")
newState = "WAITING"
inLinkage = self.link((theComponent,itsOutbox),(self,newIn))
outLinkage = self.link((theComponent,itsInbox), (self,newOut))
resource = theComponent
inboxes = [newIn]
outboxes = [newOut]
info = (newState, inLinkage, outLinkage)
self.trackResourceInformation(resource, inboxes, outboxes, info)
self.trackResource(resource, newIn)
If a message then arrives at that inbox, we can easily look up all the
information we might need to know where it came from and how to handle it::
def handleMessageArrived(self, inboxName):
msg = self.recv(inboxName)
        resource = self.retrieveTrackedResource(inboxName)
        inboxes, outboxes, info = self.retrieveTrackedResourceInformation(resource)
theComponent=resource
...
When you are finished with a resource and its associated information you can
clean it up with the ceaseTrackingResource() method which removes the
association between the resource and information. For example when you get rid
of a set of linkages and inboxes or outboxes associated with another component
you might want to clean up the resource you were using to track this too::
def doneWithComponent(self, theComponent):
resource=theComponent
        inboxes, outboxes, info = self.retrieveTrackedResourceInformation(resource)
for name in inboxes:
self.deleteInbox(name)
for name in outboxes:
self.deleteOutbox(name)
state,linkages = info[0], info[1:]
for linkage in linkages:
self.unlink(thelinkage=linkage)
self.ceaseTrackingResource(resource)
Implementation
--------------
AdaptiveCommsComponent's functionality above and beyond the ordinary
Axon.Component.component is implemented in a separate mixin class
_AdaptiveCommsable. This enables it to be reused for other variants on the
basic component that need to inherit this functionality - such as the
threadedadaptivecommscomponent.
When adding new inboxes or outboxes, name clashes are resolved by permuting the
box name with a suffixed unique ID number until there is no longer any clash.
"""
import sys
from Component import component
import idGen
from Box import makeInbox, makeOutbox
class _AdaptiveCommsable(object):
"""\
Mixin for making a component 'adaptable' so that it can create and destroy
extra inboxes and outboxes at runtime.
"""
#
# Public Methods
#
def __init__(self, *args, **argd):
super(_AdaptiveCommsable, self).__init__(*args, **argd)
self._resourceStore = {}
self._resourceLookup = {}
def trackResource(self, resource, inbox):
"""\
Associate the specified resource with the named inbox.
"""
self.inboxes[inbox] # Force failure if the inbox does not exist
self._resourceLookup[inbox] = resource
def retrieveTrackedResource(self, inbox):
"""\
Retrieve the resource that has been associated with the named inbox.
"""
return self._resourceLookup[inbox]
def trackResourceInformation(self, resource, inboxes, outboxes, information):
"""\
Store a list of inboxes, outboxes and other information as the specified
resource.
The inboxes and outboxes specified must exist.
"""
"Provides a lookup service associating inboxes/outboxes & user information with a resource. Uses GIGO principle."
#sys.stderr.write("OHHHH We're in HERE???!!\n"); sys.stderr.flush()
# print "TRACKING", inboxes, outboxes, information
# print "USING", repr(resource)
[ self.inboxes[x] for x in inboxes] # Force an assertion if any inbox does not exist
        [ self.outboxes[x] for x in outboxes] # Force an assertion if any outbox does not exist
self._resourceStore[resource] = (inboxes, outboxes, information)
def ceaseTrackingResource(self, resource):
"""Stop tracking a resource and release references to it"""
# print "CEASING TO TRACK RESOURCE", repr(resource)
del self._resourceStore[resource]
def retrieveTrackedResourceInformation(self, resource):
"""\
Retrieve a tuple (inboxes, outboxes, otherdata) that has been stored as
the specified resource.
"""
# print self._resourceStore
return self._resourceStore[resource]
def addInbox(self,*args):
"""
Allocates a new inbox with name *based on* the name provided. If a box
with the suggested name already exists then a variant is used instead.
Returns the name of the inbox added.
"""
name = self._newInboxName(*args)
self.inboxes[name]=makeInbox(self.unpause)
return name
def deleteInbox(self,name):
"""\
Deletes the named inbox. Any messages in it are lost.
        Try to ensure any linkages involving this inbox have been destroyed -
not just ones created by this component, but by others too! Behaviour is
undefined if this is not the case, and should be avoided.
"""
del self.inboxes[name]
def addOutbox(self,*args):
"""\
Allocates a new outbox with name *based on* the name provided. If a box
with the suggested name already exists then a variant is used instead.
Returns the name of the outbox added.
"""
name = self._newOutboxName(*args)
self.outboxes[name]=makeOutbox(self.unpause)
return name
def deleteOutbox(self,name):
"""\
Deletes the named outbox.
        Try to ensure any linkages involving this outbox have been destroyed -
not just ones created by this component, but by others too! Behaviour is
undefined if this is not the case, and should be avoided.
"""
del self.outboxes[name]
#
# Private Methods
#
def _newInboxName(self, name="inbox"):
"""\
Allocates a new inbox with name *based on* the name provided.
If this name is available it will be returned unchanged.
Otherwise the name will be returned with a number appended
"""
while name in self.inboxes:
name =name+str(idGen.idGen().next())
return name
#
def _newOutboxName(self, name="outbox"):
"""\
Allocates a new outbox name *based on* the name provided.
If this name is available it will be returned unchanged.
Otherwise the name will be returned with a number appended
"""
while name in self.outboxes:
name =name+str(idGen.idGen().next())
return name
class AdaptiveCommsComponent(component, _AdaptiveCommsable):
"""\
Base class for a component that works just like an ordinary component but can
also 'adapt' its comms by adding or removing inboxes and outboxes whilst it
is running.
Subclass to make your own.
See Axon.AdaptiveCommsComponent._AdaptiveCommsable for the extra methods that
this subclass of component has.
"""
def __init__(self,*args, **argd):
component.__init__(self,*args, **argd)
_AdaptiveCommsable.__init__(self)
if __name__=="__main__":
print "Tests are separated into test/test_AdaptiveCommsableComponent.py"
|
#!/usr/bin/env python3
from sys import stderr, exit
from TALinputs import TALinput
from multilanguage import Env, Lang, TALcolors
from hanoi_lib import ConfigGenerator, HanoiTowerProblem
from utils_lang import get_formatted_move
# METADATA OF THIS TAL_SERVICE:
args_list = [
('v',str),
('start',str),
('final',str),
('n',int),
('answ',int),
('ok_if_congruent_modulus',int),
('silent',bool),
('feedback',str),
('with_certificate',bool),
]
ENV =Env(args_list)
TAc =TALcolors(ENV)
LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
# START CODING YOUR SERVICE:
# Get configurations
gen = ConfigGenerator()
start, final, error = gen.getConfigs(ENV['start'], ENV['final'], ENV['n'])
# Check errors
if error == 'n_not_valid':
TAc.print(LANG.render_feedback("n_not_valid", f"If you use the all_* form for start and final, you must use a N >= 0."), "red", ["bold"])
exit(0)
elif error == 'different_len':
TAc.print(LANG.render_feedback("different_len", f'If you use a custom configuration for start and final, the length of start must be equal to the length of final'), "red", ["bold"])
exit(0)
# Init Hanoi Tower
hanoi = HanoiTowerProblem(ENV['v'])
# Get the correct answer
modulus = ENV['ok_if_congruent_modulus']
user_answ = ENV['answ']
opt_answ = hanoi.getMinMoves(start, final)
if modulus != 0:
overflow = (opt_answ >= modulus)
mod_answ = opt_answ % modulus
# check the user answer
if modulus == 0 or not overflow: # case: no modulus, or the modulus is irrelevant
if user_answ == opt_answ:
if not ENV['silent']:
TAc.print(LANG.render_feedback("answ-equal", f'Nice! Your answer is equal to the optimal minimum number.'), "green", ["bold"])
else:
TAc.print(LANG.render_feedback("answ-wrong", f'Oh no! Your answer is not equal to the optimal minimum number.'), "red", ["bold"])
# Provide feedback
if ENV["feedback"] == "true_val":
TAc.print(LANG.render_feedback("get-answ", f'The optimal minimum number of moves is {opt_answ}.'), "red", ["reverse"])
elif ENV["feedback"] == "smaller_or_bigger":
if opt_answ < user_answ:
TAc.print(LANG.render_feedback("answ-less", f'The optimal minimum number of moves is smaller then your answer.'), "red", ["reverse"])
else:
TAc.print(LANG.render_feedback("answ-more", f'The optimal minimum number of moves is bigger then your answer.'), "red", ["reverse"])
# Provide certificate
if ENV["with_certificate"] == 1:
if user_answ < opt_answ:
TAc.print(LANG.render_feedback("use-check_lower_bounds", f'Use check_lower_bounds service for check it.'), "red", ["reverse"])
else:
TAc.print(LANG.render_feedback("certificate", f'This is a certificate of a solution with less moves:'), "red", ["reverse"])
for e in hanoi.getNotOptimalMovesList(start, final, desired_size=(user_answ-1)):
TAc.print(LANG.render_feedback("certificate-line", f'{get_formatted_move(e, "extended", ENV["lang"])}'), "yellow", ["reverse"])
else: # case: modulus
if user_answ == mod_answ:
if not ENV['silent']:
TAc.print(LANG.render_feedback("answ-equal-mod", f'Oh no! Your answer is equal to the optimal minimum number in modulo={modulus}.'), "red", ["bold"])
else:
TAc.print(LANG.render_feedback("answ-wrong-mod", f'Oh no! Your answer is not equal to the optimal minimum number in modulo={modulus}.'), "red", ["reverse"])
# Provide feedback
if ENV["feedback"] == "true_val":
TAc.print(LANG.render_feedback("get-answ-mode", f'The optimal minimum number in modulo={modulus} of moves is {mod_answ} = {opt_answ} % {modulus}.'), "red", ["reverse"])
elif ENV["feedback"] == "smaller_or_bigger":
if mod_answ < user_answ:
TAc.print(LANG.render_feedback("answ-less-mod", f'The optimal minimum number in modulo={modulus} of moves is smaller then your answer.'), "red", ["reverse"])
else:
TAc.print(LANG.render_feedback("answ-more-mod", f'The optimal minimum number in modulo={modulus} of moves is bigger then your answer.'), "red", ["reverse"])
# Provide certificate
if ENV["with_certificate"] == 1:
if user_answ < opt_answ:
TAc.print(LANG.render_feedback("use-check_lower_bounds", f'Use check_lower_bounds service for check it.'), "red", ["reverse"])
else:
TAc.print(LANG.render_feedback("certificate", f'This is a certificate of a solution with less moves:'), "red", ["reverse"])
                for e in hanoi.getNotOptimalMovesList(start, final, desired_size=(user_answ - 1)):
TAc.print(LANG.render_feedback("certificate-line", f'{get_formatted_move(e, "extended", ENV["lang"])}'), "yellow", ["reverse"])
exit(0)
|
r1 = float(input('Enter the length of the first segment: '))
r2 = float(input('Enter the length of the second segment: '))
r3 = float(input('Enter the length of the third segment: '))
if r1 + r2 > r3 and r2 + r3 > r1 and r1 + r3 > r2:
    if r1 == r2 == r3:
        print('Your triangle is equilateral.')
    elif r1 == r2 or r2 == r3 or r1 == r3:
        print('Your triangle is isosceles.')
    else:
        print('Your triangle is scalene.')
else:
    print('It is not possible to form a triangle with these segments.')
|
from .page_data import PageData
|
# 4-4. One Million
for number in range(1, 1000001):
print(number)
|
import re
import sys
from collections import defaultdict
from itertools import chain, count
# see pyproject.toml
__version__ = "0.0.9"
__author__ = "Saito Tsutomu <tsutomu7@hotmail.co.jp>"
def addplus(s):
return s if s.startswith(("+", "-")) else "+" + s
def delplus(s):
return s[1:] if s.startswith("+") else s
def minus(s):
return s[1:] if s.startswith("-") else "-" + delplus(s)
def expr(lst):
s = re.sub("([+-])", "\\1 ", " ".join(lst)).strip("+ ")
if len(lst) == 1 and s.startswith("- "):
s = "-" + s[2:]
return "0" if s == "" else s
def trans(s):
if s in ("I", "-I"):
return s
return s[:-2] if s.endswith("^T") else s + "^T"
def dualvar(ss):
st = set(re.sub(r"[+-><=^]", " ", " ".join(ss)).split())
for v in chain(["x", "y", "z", "w"], ("v%d" % j for j in count())):
if v not in st:
yield v
def split_term(s, ismat=False):
dc = defaultdict(list)
ss = re.sub(r"^\+", "", re.sub(r"-\s*", "+-", s.strip())).split("+")
for t in ss:
tt = t.split()
if not (0 < len(tt) < 3):
raise Exception("Format error [%s]" % s)
c, v = (["I" if ismat else "e^T"] + tt)[-2:]
if v[0] == "-":
c, v = minus(c), minus(v)
if c[0] != "-":
c = "+" + c
dc[v].append(c)
return dc
def dual(mdl):
ss = [
s.split("#")[0].strip()
for s in mdl.strip().split("\n")
if s and not s.startswith("#")
]
if not ss:
raise Exception("Set mathematical optimization model")
if ss[0][:3] not in ("min", "max"):
raise Exception('Must start "min" or "max" [%s]' % ss[0])
is_min = ss[0][:3] == "min"
ds = split_term(ss[0][3:])
dc = defaultdict(lambda: "0^T")
for v, uu in ds.items():
if len(uu) != 1:
raise Exception("Format error [%s]" % ss[0])
dc[v] = uu[0]
di = defaultdict(lambda: "=")
cc = []
for s in ss[1:]:
m = re.fullmatch(r"(\S+)\s*([><])=\s*0", s)
if m:
di[m.group(1)] = "<=" if is_min == (m.group(2) == ">") else ">="
else:
cc.append(s)
db, dd, da = [], [], defaultdict(list)
for s, dv in zip(cc, dualvar(ss)):
m = re.fullmatch(r"([^<>=]+)(>|<|)=\s*(\S+)", s)
if not m:
raise Exception("Format error [%s]" % s)
t, f, b = m.groups()
if not b.startswith(("+", "-")):
b = "+" + b
tt = split_term(t, True)
if f:
if is_min != (f == ">"):
tt = {v: [minus(u) for u in uu] for v, uu in tt.items()}
b = minus(b)
dd.append("%s >= 0" % dv)
if b not in ("+0", "-0"):
db.append("%s %s" % (trans(b), dv))
for v, uu in tt.items():
da[v].append(addplus(expr(["%s %s" % (trans(u), dv) for u in uu])))
dr = [("max " if is_min else "min ") + expr(db)]
for v in sorted(da.keys()):
dr.append("%s %s %s" % (expr(da[v]), di[v], expr([trans(dc[v])])))
return "\n".join(dr + dd)
try:
import IPython.core.getipython
def dual_impl(_, s):
print(dual(s))
ip = IPython.core.getipython.get_ipython()
ip.register_magic_function(dual_impl, magic_kind="cell", magic_name="dual")
except Exception:
    # IPython is not available; the %%dual cell magic is simply not registered
    pass
def main():
s = sys.stdin.read()
print(dual(s))
|
import json
import os
from catacomb.common import constants, errors
from catacomb.utils import helpers
def create(path, contents=None):
"""Creates a new file at the given path.
Arguments:
contents (str): The file contents.
"""
if os.path.exists(path):
helpers.exit(errors.FILE_CREATE_OVERWRITE.format(path))
if not contents:
# Allow the creation of an empty file.
contents = ""
with open(path, "w") as f:
f.write(json.dumps(contents, indent=constants.INDENT_NUM_SPACES))
def update(path, contents):
"""Updates the file at the given path with the provided contents.
Arguments:
contents (str): The file contents.
"""
if not os.path.exists(path):
helpers.exit(errors.FILE_UPDATE_UNKNOWN.format(path))
with open(path, "w") as f:
f.write(json.dumps(contents, indent=constants.INDENT_NUM_SPACES))
def read(path):
"""Read the file at the given path.
Arguments:
path (str): The path of the file to read.
Returns:
A `dict`, representing the contents of the file, if the file exists.
"""
if not os.path.exists(path):
helpers.exit(errors.FILE_READ_UNKNOWN.format(path))
with open(path, "r") as f:
contents = json.load(f)
return contents
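# Minimal round-trip sketch (hypothetical path; helpers.exit() aborts when a
# create would overwrite, or an update/read targets a missing file):
#   create("/tmp/tomb.json", {"items": []})
#   data = read("/tmp/tomb.json")
#   data["items"].append("spell")
#   update("/tmp/tomb.json", data)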
|
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.addresses import IPAddr
rules = (
# ----------------------------------------
# --------DEFINE YOUR RULES BELOW---------
# -----------------------------------------
(None, None, None, 'tcp'),
(IPAddr('10.0.0.3'), None, None, None),
(None, IPAddr('10.0.0.4'), None, None),
(IPAddr('10.0.0.1'), IPAddr('10.0.0.3'), None, None),
(None, None, 3001, None),
(IPAddr('10.0.0.2'), None, 3000, None),
(IPAddr('10.0.0.1'), IPAddr('10.0.0.2'), 3000, None),
# ----------------------------------------
#
# ----------------------------------------
)
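# Matching semantics used by start_firewall() below: each rule is
# (src_ip, dst_ip, port, protocol), with None acting as a wildcard.
# A packet is dropped when every non-None field of some rule matches it;
# the port field matches either the source or the destination port, and
# non-TCP/UDP IP packets skip the port and protocol checks. For example,
# (None, None, None, 'tcp') drops all TCP traffic.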
log = core.getLogger()
class Tutorial(object):
"""
A Tutorial object is created for each switch that connects.
A Connection object for that switch is passed to the __init__ function.
"""
def __init__(self, connection):
# Keep track of the connection to the switch so that we can
# send it messages!
self.connection = connection
# This binds our PacketIn event listener
connection.addListeners(self)
# Use this table to keep track of which ethernet address is on
# which switch port (keys are MACs, values are ports).
self.mac_to_port = {}
def resend_packet(self, packet_in, out_port):
"""
Instructs the switch to resend a packet that it had sent to us.
"packet_in" is the ofp_packet_in object the switch had sent to the
controller due to a table-miss.
"""
msg = of.ofp_packet_out()
msg.data = packet_in
# Add an action to send to the specified port
action = of.ofp_action_output(port=out_port)
msg.actions.append(action)
# Send message to switch
self.connection.send(msg)
def act_like_switch(self, packet, packet_in):
"""
Implement switch-like behavior.
"""
        # Here's some pseudocode to start you off implementing a learning
        # switch. You'll need to rewrite it as real Python code.
# Learn the port for the source MAC
self.mac_to_port[packet.src] = packet_in.in_port
# IP/ARP
# print(pkt.ETHERNET.ethernet.getNameForType(packet.type))
# self.mac_to_port[of.ofp_match()]
# if the port associated with the destination MAC of the packet is known:
        if packet.dst in self.mac_to_port:
# Send packet out the associated port
# self.resend_packet(packet_in, self.mac_to_port[packet.dst])
# Once you have the above working, try pushing a flow entry
# instead of resending the packet (comment out the above and
# uncomment and complete the below.)
log.debug('Installing flow ' + str((packet.src, ((packet.dst), packet_in.in_port))))
# Maybe the log statement should have source/destination/port?
msg = of.ofp_flow_mod()
#
## Set fields to match received packet
msg.match = of.ofp_match.from_packet(packet)
#
# < Set other fields of flow_mod (timeouts? buffer_id?) >
msg.buffer_id = packet_in.buffer_id
msg.in_port = packet_in.in_port
msg.idle_timeout = 100
msg.data = packet_in
#
# < Add an output action, and send -- similar to resend_packet() >
msg.actions.append(of.ofp_action_output(port=self.mac_to_port[packet.dst]))
self.connection.send(msg)
else:
# Flood the packet out everything but the input port
# This part looks familiar, right?
self.resend_packet(packet_in, of.OFPP_ALL)
def _handle_PacketIn(self, event):
"""
Handles packet in messages from the switch.
"""
packet = event.parsed # This is the parsed packet data.
if not packet.parsed:
log.warning("Ignoring incomplete packet")
return
packet_in = event.ofp # The actual ofp_packet_in message.
# Comment out the following line and uncomment the one after
# when starting the exercise.
# self.act_like_hub(packet, packet_in)
self.act_like_switch(packet, packet_in)
def launch():
"""
Starts the component
"""
def start_switch(event):
log.debug("Controlling %s" % (event.connection,))
Tutorial(event.connection)
def start_firewall(event):
log.debug("firewall received packet")
ipp = event.parsed.find('ipv4')
if not ipp:
log.debug("not IP packet")
return
tcpp = event.parsed.find('tcp')
udpp = event.parsed.find('udp')
srcip = ipp.srcip
dstip = ipp.dstip
tp = tcpp if tcpp else udpp
prot = 'tcp' if tcpp else 'udp' if udpp else None
srcport = None
dstport = None
if prot:
srcport = tp.srcport
dstport = tp.dstport
for rule in rules:
if rule[0] and srcip != rule[0]:
continue
if rule[1] and dstip != rule[1]:
continue
if prot and rule[2] and rule[2] != srcport and rule[2] != dstport:
continue
if not prot or not rule[3] or prot == rule[3]:
print("Blocked: ", srcip, srcport, dstip, dstport, prot)
event.halt = True
return
core.openflow.addListenerByName("ConnectionUp", start_switch)
core.openflow.addListenerByName("PacketIn", start_firewall)
|
import argparse
import numpy as np
from decimal import Decimal
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--table', help='input table file')
parser.add_argument('-m', '--max_value', default=1, help='the max value in the dist matrix')
parser.add_argument('-o', '--out', help='output table file')
args = parser.parse_args()
fn_table = args.table
max_value = args.max_value
fn_out = args.out
fn = open(fn_table, 'r')
fo = open(fn_out, 'w')
for line in fn:
fields = line.split()
if len(fields) > 1:
fo.write(fields[0])
for ele in fields[1:-1]:
                number = Decimal(str(max_value)) - Decimal(ele)
fo.write('\t' + str(number))
fo.write('\n')
else:
fo.write(line)
fo.close()
fn.close()
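# Usage sketch (hypothetical filenames): turns a tab-separated distance table
# into a similarity table by subtracting entries from max_value:
#   python this_script.py -t dist.tsv -m 1 -o sim.tsv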
|
import json
from typing import Optional
from great_expectations.core import IDDict
from great_expectations.core.util import convert_to_json_serializable
from great_expectations.types.base import SerializableDotDict
class ExceptionInfo(SerializableDotDict):
def __init__(
self,
exception_traceback: str,
exception_message: str,
raised_exception: Optional[bool] = True,
):
super().__init__(
exception_traceback=exception_traceback,
exception_message=exception_message,
raised_exception=raised_exception,
)
def to_json_dict(self) -> dict:
fields_dict: dict = {
"exception_traceback": self.exception_traceback,
"exception_message": self.exception_message,
"raised_exception": self.raised_exception,
}
return convert_to_json_serializable(fields_dict)
@property
def exception_traceback(self) -> str:
return self["exception_traceback"]
@property
def exception_message(self) -> str:
return self["exception_message"]
@property
def raised_exception(self) -> bool:
return self["raised_exception"]
def __repr__(self) -> str:
fields_dict: dict = {
"exception_traceback": self.exception_traceback,
"exception_message": self.exception_message,
"raised_exception": self.raised_exception,
}
return str(fields_dict)
@property
def id(self) -> str:
return IDDict(self.to_json_dict()).to_id()
def __eq__(self, other):
if not isinstance(other, self.__class__):
# Delegate comparison to the other instance's __eq__.
return NotImplemented
return self.id == other.id
def __ne__(self, other):
return not self.__eq__(other=other)
def __str__(self):
return json.dumps(self.to_json_dict(), indent=2)
def __hash__(self) -> int:
"""Overrides the default implementation"""
_result_hash: int = hash(self.id)
return _result_hash
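# Minimal usage sketch (illustrative values only; not part of the library):
#   info = ExceptionInfo(
#       exception_traceback="Traceback (most recent call last): ...",
#       exception_message="division by zero",
#   )
#   str(info)   # pretty-printed JSON via to_json_dict()
#   info.id     # deterministic id derived from the JSON payload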
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
def find_low_val_path(matrix,row,col):
if row == len(matrix) - 1 and col == 0:
return matrix[row][col]
if row < len(matrix) - 1 and col > 0:
return matrix[row][col] + min(find_low_val_path(matrix,row + 1,col),
find_low_val_path(matrix,row,col - 1))
elif row == len(matrix) - 1:
return matrix[row][col] + find_low_val_path(matrix,row,col - 1)
elif col == 0:
return matrix[row][col] + find_low_val_path(matrix,row + 1,col)
data = [
[1,2,3],
[4,5,6],
[7,8,9]
]
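# Moving only down or left from (0, 2) to (2, 0), the cheapest path through
# this grid is 3 -> 2 -> 1 -> 4 -> 7, so the call below prints 17.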
print(find_low_val_path(data,0,2))
|
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.db.models import Prefetch
from django.http import Http404, HttpResponseRedirect
from django.utils.translation import gettext_lazy as _
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import serializers
from rest_framework.decorators import action
from rest_framework.exceptions import NotFound
from rest_framework.mixins import (CreateModelMixin, ListModelMixin,
RetrieveModelMixin, UpdateModelMixin)
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.viewsets import (GenericViewSet, ModelViewSet,
ReadOnlyModelViewSet)
from rest_framework_extensions.mixins import NestedViewSetMixin
from rdmo.conditions.models import Condition
from rdmo.core.permissions import HasModelPermission, HasObjectPermission
from rdmo.core.utils import human2bytes, return_file_response
from rdmo.options.models import OptionSet
from rdmo.questions.models import Catalog, Question, QuestionSet
from .filters import SnapshotFilterBackend, ValueFilterBackend
from .models import (Continuation, Integration, Issue, Membership, Project,
Snapshot, Value)
from .serializers.v1 import (IntegrationSerializer, IssueSerializer,
MembershipSerializer,
ProjectIntegrationSerializer,
ProjectIssueSerializer,
ProjectMembershipSerializer,
ProjectMembershipUpdateSerializer,
ProjectSerializer, ProjectSnapshotSerializer,
ProjectValueSerializer, SnapshotSerializer,
ValueSerializer)
from .serializers.v1.overview import ProjectOverviewSerializer
from .serializers.v1.questionset import QuestionSetSerializer
from .utils import check_conditions
class ProjectViewSet(ModelViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = ProjectSerializer
filter_backends = (DjangoFilterBackend,)
filterset_fields = (
'title',
'user',
'user__username',
'catalog',
'catalog__uri',
'catalog__key',
)
def get_queryset(self):
return Project.objects.filter_user(self.request.user)
@action(detail=True, permission_classes=(IsAuthenticated, ))
def overview(self, request, pk=None):
project = self.get_object()
project.catalog = Catalog.objects.prefetch_related(
'sections',
Prefetch('sections__questionsets', queryset=QuestionSet.objects.filter(questionset=None).prefetch_related(
'conditions',
'questions'
))
).get(id=project.catalog_id)
serializer = ProjectOverviewSerializer(project, context={'request': request})
return Response(serializer.data)
@action(detail=True, permission_classes=(HasModelPermission | HasObjectPermission, ))
def resolve(self, request, pk=None):
snapshot_id = request.GET.get('snapshot')
set_prefix = request.GET.get('set_prefix')
set_index = request.GET.get('set_index')
values = self.get_object().values.filter(snapshot_id=snapshot_id).select_related('attribute', 'option')
questionset_id = request.GET.get('questionset')
if questionset_id:
try:
questionset = QuestionSet.objects.get(id=questionset_id)
conditions = questionset.conditions.select_related('source', 'target_option')
if check_conditions(conditions, values, set_prefix, set_index):
return Response({'result': True})
except QuestionSet.DoesNotExist:
pass
question_id = request.GET.get('question')
if question_id:
try:
question = Question.objects.get(id=question_id)
conditions = question.conditions.select_related('source', 'target_option')
if check_conditions(conditions, values, set_prefix, set_index):
return Response({'result': True})
except Question.DoesNotExist:
pass
optionset_id = request.GET.get('optionset')
if optionset_id:
try:
optionset = OptionSet.objects.get(id=optionset_id)
conditions = optionset.conditions.select_related('source', 'target_option')
if check_conditions(conditions, values, set_prefix, set_index):
return Response({'result': True})
except OptionSet.DoesNotExist:
pass
condition_id = request.GET.get('condition')
if condition_id:
try:
condition = Condition.objects.select_related('source', 'target_option').get(id=condition_id)
if check_conditions([condition], values, set_prefix, set_index):
return Response({'result': True})
except Condition.DoesNotExist:
pass
return Response({'result': False})
@action(detail=True, permission_classes=(HasModelPermission | HasObjectPermission, ))
def options(self, request, pk=None):
project = self.get_object()
try:
try:
optionset_id = request.GET.get('optionset')
optionset = OptionSet.objects.get(pk=optionset_id)
except (ValueError, OptionSet.DoesNotExist):
raise NotFound()
# check if the optionset belongs to this catalog and if it has a provider
if Question.objects.filter_by_catalog(project.catalog).filter(optionsets=optionset) and \
optionset.provider is not None:
options = optionset.provider.get_options(project, search=request.GET.get('search'))
return Response(options)
except OptionSet.DoesNotExist:
pass
# if it didn't work return 404
raise NotFound()
@action(detail=True, permission_classes=(IsAuthenticated, ))
def progress(self, request, pk=None):
project = self.get_object()
return Response(project.progress)
def perform_create(self, serializer):
project = serializer.save(site=get_current_site(self.request))
# add current user as owner
membership = Membership(project=project, user=self.request.user, role='owner')
membership.save()
class ProjectNestedViewSetMixin(NestedViewSetMixin):
def initial(self, request, *args, **kwargs):
self.project = self.get_project_from_parent_viewset()
super().initial(request, *args, **kwargs)
def get_project_from_parent_viewset(self):
try:
return Project.objects.filter_user(self.request.user).get(pk=self.get_parents_query_dict().get('project'))
except Project.DoesNotExist:
raise Http404
def get_list_permission_object(self):
return self.project
def get_detail_permission_object(self, obj):
return self.project
def perform_create(self, serializer):
serializer.save(project=self.project)
class ProjectMembershipViewSet(ProjectNestedViewSetMixin, ModelViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
filter_backends = (DjangoFilterBackend, )
filterset_fields = (
'user',
'user__username',
'role'
)
def get_queryset(self):
try:
return Membership.objects.filter(project=self.project)
except AttributeError:
# this is needed for the swagger ui
return Membership.objects.none()
def get_serializer_class(self):
if self.action == 'update':
return ProjectMembershipUpdateSerializer
else:
return ProjectMembershipSerializer
class ProjectIntegrationViewSet(ProjectNestedViewSetMixin, ModelViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = ProjectIntegrationSerializer
filter_backends = (DjangoFilterBackend, )
filterset_fields = (
'provider_key',
)
def get_queryset(self):
try:
return Integration.objects.filter(project=self.project)
except AttributeError:
# this is needed for the swagger ui
return Integration.objects.none()
class ProjectIssueViewSet(ProjectNestedViewSetMixin, ListModelMixin, RetrieveModelMixin,
UpdateModelMixin, GenericViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = ProjectIssueSerializer
filter_backends = (DjangoFilterBackend, )
filterset_fields = (
'task',
'task__uri',
'status'
)
def get_queryset(self):
try:
return Issue.objects.filter(project=self.project).prefetch_related('resources')
except AttributeError:
# this is needed for the swagger ui
return Issue.objects.none()
class ProjectSnapshotViewSet(ProjectNestedViewSetMixin, CreateModelMixin, RetrieveModelMixin,
UpdateModelMixin, ListModelMixin, GenericViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = ProjectSnapshotSerializer
def get_queryset(self):
try:
return self.project.snapshots.all()
except AttributeError:
# this is needed for the swagger ui
return Snapshot.objects.none()
class ProjectValueViewSet(ProjectNestedViewSetMixin, ModelViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = ProjectValueSerializer
filter_backends = (ValueFilterBackend, DjangoFilterBackend)
filterset_fields = (
'attribute__path',
'option',
'option__path',
)
def get_queryset(self):
try:
return self.project.values.filter(snapshot=None)
except AttributeError:
# this is needed for the swagger ui
return Value.objects.none()
@action(detail=True, methods=['DELETE'],
permission_classes=(HasModelPermission | HasObjectPermission, ))
def set(self, request, parent_lookup_project, pk=None):
# delete all values for questions in questionset collections with the attribute
# for this value and the same set_prefix and set_index
value = self.get_object()
value.delete()
attributes = Question.objects.filter_by_catalog(self.project.catalog) \
.filter(questionset__is_collection=True, questionset__attribute=value.attribute) \
.values_list('attribute', flat=True)
values = self.get_queryset().filter(attribute__in=attributes, set_prefix=value.set_prefix, set_index=value.set_index)
values.delete()
return Response(status=204)
@action(detail=True, methods=['GET', 'POST'],
permission_classes=(HasModelPermission | HasObjectPermission, ))
def file(self, request, parent_lookup_project, pk=None):
value = self.get_object()
if request.method == 'POST':
value.file = request.FILES.get('file')
            # check whether the file quota for the project would be exceeded
if value.file and value.file.size + value.project.file_size > human2bytes(settings.PROJECT_FILE_QUOTA):
raise serializers.ValidationError({
'value': [_('You reached the file quota for this project.')]
})
value.save()
serializer = self.get_serializer(value)
return Response(serializer.data)
else:
if value.file:
return return_file_response(value.file.name, value.file_type)
# if it didn't work return 404
raise NotFound()
class ProjectQuestionSetViewSet(ProjectNestedViewSetMixin, RetrieveModelMixin, GenericViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = QuestionSetSerializer
def get_queryset(self):
try:
return QuestionSet.objects.order_by_catalog(self.project.catalog).select_related('section', 'section__catalog')
except AttributeError:
# this is needed for the swagger ui
return QuestionSet.objects.none()
def dispatch(self, *args, **kwargs):
response = super().dispatch(*args, **kwargs)
if response.status_code == 200 and kwargs.get('pk'):
try:
continuation = Continuation.objects.get(project=self.project, user=self.request.user)
except Continuation.DoesNotExist:
continuation = Continuation(project=self.project, user=self.request.user)
continuation.questionset_id = kwargs.get('pk')
continuation.save()
return response
def retrieve(self, request, *args, **kwargs):
questionset = self.get_object()
conditions = questionset.conditions.select_related('source', 'target_option')
values = self.project.values.filter(snapshot=None).select_related('attribute', 'option')
if check_conditions(conditions, values):
serializer = self.get_serializer(questionset)
return Response(serializer.data)
else:
if request.GET.get('back') == 'true' and questionset.prev is not None:
url = reverse('v1-projects:project-questionset-detail', args=[self.project.id, questionset.prev]) + '?back=true'
return HttpResponseRedirect(url, status=303)
elif questionset.next is not None:
url = reverse('v1-projects:project-questionset-detail', args=[self.project.id, questionset.next])
return HttpResponseRedirect(url, status=303)
else:
# indicate end of catalog
return Response(status=204)
@action(detail=False, url_path='continue', permission_classes=(HasModelPermission | HasObjectPermission, ))
def get_continue(self, request, pk=None, parent_lookup_project=None):
try:
continuation = Continuation.objects.get(project=self.project, user=self.request.user)
if continuation.questionset.section.catalog == self.project.catalog:
questionset = continuation.questionset
else:
questionset = self.get_queryset().first()
except Continuation.DoesNotExist:
questionset = self.get_queryset().first()
serializer = self.get_serializer(questionset)
return Response(serializer.data)
class MembershipViewSet(ReadOnlyModelViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = MembershipSerializer
filter_backends = (DjangoFilterBackend,)
filterset_fields = (
'user',
'user__username',
'role'
)
def get_queryset(self):
return Membership.objects.filter_user(self.request.user)
def get_detail_permission_object(self, obj):
return obj.project
class IntegrationViewSet(ReadOnlyModelViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = IntegrationSerializer
filter_backends = (DjangoFilterBackend, )
filterset_fields = (
'project',
'provider_key'
)
def get_queryset(self):
return Integration.objects.filter_user(self.request.user)
def get_detail_permission_object(self, obj):
return obj.project
class IssueViewSet(ReadOnlyModelViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = IssueSerializer
filter_backends = (DjangoFilterBackend, )
filterset_fields = (
'task',
'task__uri',
'status'
)
def get_queryset(self):
return Issue.objects.filter_user(self.request.user).prefetch_related('resources')
def get_detail_permission_object(self, obj):
return obj.project
class SnapshotViewSet(ReadOnlyModelViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = SnapshotSerializer
filter_backends = (DjangoFilterBackend,)
filterset_fields = (
'title',
'project'
)
def get_queryset(self):
return Snapshot.objects.filter_user(self.request.user)
def get_detail_permission_object(self, obj):
return obj.project
class ValueViewSet(ReadOnlyModelViewSet):
permission_classes = (HasModelPermission | HasObjectPermission, )
serializer_class = ValueSerializer
filter_backends = (SnapshotFilterBackend, DjangoFilterBackend)
filterset_fields = (
'project',
'attribute',
'attribute__path',
'option',
'option__path',
)
def get_queryset(self):
return Value.objects.filter_user(self.request.user)
def get_detail_permission_object(self, obj):
return obj.project
@action(detail=True, permission_classes=(HasModelPermission | HasObjectPermission, ))
def file(self, request, pk=None):
value = self.get_object()
if value.file:
return return_file_response(value.file.name, value.file_type)
# if it didn't work return 404
raise NotFound()
|
from reportlab.graphics import renderPM
from svglib.svglib import svg2rlg
from io import BytesIO
import regex
def convert_svg(file):
# Converts a SVG file to a png file
# Returns a python file object
draw = svg2rlg(file)
buff = BytesIO(draw.asString('png'))
return buff
def get_dimensions(file):
# Gets the x by y dimension size of a maze
with open(file, 'r') as f:
lines = f.readlines()
# maximum is guaranteed to be above or at 0
x_max = 0
x_count = 0
y_max = 0
y_count = 0
y_passed_max = False
for line in lines:
x = regex.search(r'x=\K"(.*?)"', line)
y = regex.search(r'y=\K"(.*?)"', line)
if y:
if int(y.group(1)) >= y_max:
y_max = int(y.group(1))
else:
y_passed_max = True
if not y_passed_max:
y_count += 1
if x:
if int(x.group(1)) > x_max:
x_max = int(x.group(1))
x_count += 1
height = y_count
# Count the last x that was missed
width = x_count + 1
return width, height
def maze_2d_converter(maze, width, height):
# Converts a 1d Maze to a 2d Maze
# Using information from the SVG
x_counter = 0
y_counter = 0
maze_2d = [[None for x in range(width)] for y in range(height)]
for cell in maze:
maze_2d[y_counter][x_counter] = cell
x_counter += 1
if x_counter >= width:
# TODO replace mutable
x_counter = 0
y_counter += 1
return maze_2d
def svg_to_array(file):
# Converts a premade Maze SVG File to
# an array
temp = []
svg_map = {'black': 'W', 'white': 'O', 'red': 'F',
'green': 'S', 'orange': 'P'}
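    # Assumed legend (the mapping is not documented in the SVG itself):
    # W=wall, O=open, F=finish, S=start, P=path; unknown fills map to None.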
with open(file, 'r') as f:
lines = f.readlines()
for line in lines:
color = regex.search(r'fill=\K"(.*?)"', line)
if color:
temp.append(svg_map.get(color.group(1), None))
w, h = get_dimensions(file)
maze = maze_2d_converter(temp, w, h)
return maze
if __name__ == '__main__':
svg_to_array('maze.svg')
|
import parsl
from parsl.dataflow.error import DependencyError
from concurrent.futures import Future
@parsl.python_app
def copy_app(v):
return v
def test_future_result_dependency():
plain_fut = Future()
parsl_fut = copy_app(plain_fut)
assert not parsl_fut.done()
message = "Test"
plain_fut.set_result(message)
assert parsl_fut.result() == message
def test_future_fail_dependency():
plain_fut = Future()
parsl_fut = copy_app(plain_fut)
assert not parsl_fut.done()
plain_fut.set_exception(ValueError("Plain failure"))
assert isinstance(parsl_fut.exception(), DependencyError)
|
from django.urls import path, include
from . import views
app_name = 'accounts'
urlpatterns = [
path('login/', views.login, name='login'),
path('logout/', views.logout, name='logout'),
path('signup/', views.signup, name='signup'),
path('skintype_test/', views.skin_type_test, name='skin_type_test'),
path('skintype_test/skintype/', views.skin_type, name='skin_type'),
path('skintype_test/skintype/<str:type>', views.skin_type_result, name='skin_type_result'),
path('profile/update/', views.update_profile, name='update_profile'),
path('profile/<str:username>/', views.profile_detail, name='profile_detail'),
path('settings/', views.settings, name='settings'),
path('settings/password/', views.password, name='password'),
]
|
"""This module contain functions for signup."""
from django.http import JsonResponse
from django.contrib.auth.models import User
from django.contrib.auth.hashers import make_password
from ..models import Profile
from .email_sender import send_message_with_url_for_registration
def signup_processing(request: object, signup_form: object) -> object:
"""Check user in database and create him after returned user."""
try:
user = User.objects.get(email=signup_form.cleaned_data["email"])
return JsonResponse({"confirmation": "user_found"})
except Exception:
pass
user = User(
username=signup_form.cleaned_data['email'],
email=signup_form.cleaned_data['email'],
password=make_password(signup_form.cleaned_data['password1'])
)
user.is_active = True
user.save()
profile = Profile(
user=user,
)
profile.save()
return send_message_with_url_for_registration(request, user)
|
from art import logo
alphabets = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
'v', 'w', 'x', 'y', 'z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p',
'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
def caesar(input_text, shift_amount, encoding_type):
output = ""
if encoding_type == "decode":
shift_amount *= -1
elif encoding_type != "decode" and encoding_type !="encode":
print("You have entered an invalid type of encoding")
return
for char in input_text:
if char in alphabets:
position = alphabets.index(char)
new_position = position + shift_amount
output += alphabets[new_position]
else:
output += char
print(f"Here's the {encoding_type}d result: {output}")
print(logo)
finished = False
while not finished:
direction = input("Type 'encode' to encrypt, type 'decode' to decrypt:\n")
text = input("Type your message:\n").lower()
shift = int(input("Type the shift number:\n"))
shift = shift % 26
caesar(input_text=text, shift_amount=shift, encoding_type=direction)
restart = input("Type 'yes' if you want to go again. Otherwise type 'no'.\n")
if restart == "no":
finished = True
print("Cya")
|
# Script Name : create_dir_if_not_there.py
# Author : Craig Richards
# Created : 09th January 2012
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Checks to see if a directory exists in the users home directory, if not then create it
import os # Import the OS module
home = os.path.expanduser("~") # Set the variable home by expanding the users set home directory
print(home)  # Print the location
if not os.path.exists(home + '/testdir'): # Check to see if the directory exists
os.makedirs(home + '/testdir') # If not create the directory, inside their home directory
|
from Tkinter import *
import Image, ImageTk, ImageDraw, sys, math
import phm, time
class disp:
"""
Class for displaying items in a canvas using a global coordinate system.
"""
border = 2
pad = 4
bgcolor = '#dbdbdb'
bordcol = '#555555'
gridcol = '#e0e0e0'
gridcol2 ='#e0e0e0'
textcolor = 'blue'
aw = 30 # width to display axis labels
traces = []
xtext = []
ytext = []
markerval = []
markertext = None
def show_xy(self,event):
"""
        Prints the XY coordinates of the current cursor position
"""
ix = self.canvas.canvasx(event.x) - self.border
iy = self.YLIM - self.canvas.canvasy(event.y) #- self.border
x = ix * self.xscale + self.xmin
y = iy * self.yscale + self.ymin
s = 'x = %5.3f\ny = %5.3f' % (x,y)
try:
self.canvas.delete(self.markertext)
except:
pass
self.markertext = self.canvas.create_text(self.border + 1,\
self.SCY-1, anchor = SW, justify = LEFT, text = s)
self.markerval = [x,y]
def __init__(self, parent, width=400., height=300.,color='ivory'):
self.parent = parent
self.SCX = width - self.aw - self.border - self.pad
self.SCY = height - self.aw - self.border - self.pad
self.XLIM = self.SCX + 2 * self.border
self.YLIM = self.SCY + 2 * self.border
f = Frame(parent, bg = self.bgcolor, borderwidth = self.pad)
f.pack()
self.yaxis = Canvas(f, width = self.aw, height = self.SCY, bg = self.bgcolor)
self.yaxis.pack(side = LEFT, anchor = N, pady = self.border)
f1 = Frame(f, bg = self.bgcolor)
f1.pack()
self.canvas = Canvas(f1, background=color, \
width = self.XLIM, height = self.YLIM, )
self.canvas.pack(side = TOP)
self.canvas.bind("<Button-1>", self.show_xy)
self.xaxis = Canvas(f1, width = self.SCX, height = self.aw, bg = self.bgcolor)
self.xaxis.pack(side = LEFT, anchor = N, padx = self.border)
b1 = (self.border - 1, self.border-1)
b2 = (self.XLIM - self.border + 1, self.YLIM - self.border + 2)
self.canvas.create_rectangle ([b1,b2], outline = self.bordcol)
self.canvas.pack()
self.setWorld(-0.5 * self.SCX, -0.5*self.SCY, 0.5 * self.SCX,\
0.5* self.SCY)
self.grid(10,100)
def mark_axes(self, xlab='milli seconds', ylab='Volts', numchans=1):
numchans = 1
for t in self.xtext: # display after dividing by scale factors
self.xaxis.delete(t)
for t in self.ytext:
self.yaxis.delete(t)
self.xtext = []
self.ytext = []
dx = float(self.SCX)/5
for x in range(0,6):
a = numchans * x *(self.xmax - self.xmin)/5 + self.xmin
s = '%4.1f'%(a)
adjust = 0
if x == 0: adjust = 6
if x == 5: adjust = -10
t = self.xaxis.create_text(int(x*dx)+adjust,1,text = s, anchor=N, \
fill = self.textcolor)
self.xtext.append(t)
self.xtext.append(self.xaxis.create_text(int(self.SCX/2) \
,self.aw,text = xlab, anchor=S, fill = self.textcolor))
dy = float(self.SCY)/5
for y in range(0,6):
a = y*(self.ymax - self.ymin)/5 # + self.ymin
if self.ymax > 99:
s = '%4.0f'%(self.ymax-a)
else:
s = '%4.1f'%(self.ymax-a)
adjust = 0
if y == 0: adjust = 6
if y == 5: adjust = -5
t = self.yaxis.create_text(self.aw, int(y*dy)+adjust, \
text = s,anchor = E,fill = self.textcolor)
self.ytext.append(t)
self.ytext.append(self.yaxis.create_text(0,self.SCY/2,\
text = ylab, anchor=W, fill = self.textcolor))
def setWorld(self, x1, y1, x2, y2):
#Calculate the scale factors to be used by functions drawPoint etc.
self.xmin = float(x1)
self.ymin = float(y1)
self.xmax = float(x2)
self.ymax = float(y2)
self.xscale = (self.xmax - self.xmin) / (self.SCX)
self.yscale = (self.ymax - self.ymin) / (self.SCY)
def w2s(self, p): # World to Screen conversion
ip = []
for xy in p:
ix = self.border + int( (float(xy[0]) - self.xmin) / self.xscale)
iy = self.border + int( (float(xy[1]) - self.ymin) / self.yscale)
iy = self.YLIM - iy
ip.append((ix,iy))
return ip
def box(self, x1, y1, x2, y2, col):
ip = self.w2s([(x1,y1),(x2,y2)])
self.canvas.create_rectangle(ip, outline=col)
def line(self, points, col, permanent = False, smooth = 1):
ip = self.w2s(points)
t = self.canvas.create_line(ip, fill=col, smooth = smooth)
if permanent == False:
self.traces.append(t)
def delete_lines(self):
for t in self.traces:
self.canvas.delete(t)
self.traces = []
def grid(self, major, minor):
dx = (self.xmax - self.xmin) / major
dy = (self.ymax - self.ymin) / major
x = self.xmin + dx
while x < self.xmax:
self.line([(x,self.ymin),(x,self.ymax)],self.gridcol, True)
x = x +dx
y = self.ymin + dy
while y < self.ymax:
self.line([(self.xmin,y),(self.xmax,y)],self.gridcol, True)
y = y +dy
dx = (self.xmax - self.xmin) / minor
dy = (self.ymax - self.ymin) / minor
x = self.xmin + dx
while x < self.xmax:
self.line([(x, 0.),(x, dy)],self.gridcol2, True)
x = x +dx
y = self.ymin + dy
while y < self.ymax:
self.line([(0., y),(dx,y)],self.gridcol2, True)
y = y +dy
|
# Pretty-printer utilities.
# Copyright (C) 2013-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gdb.printing
class BoundPrinter:
"""Adds size field to a _rawbound128 type."""
def __init__ (self, val):
self.val = val
def to_string (self):
upper = self.val["ubound"]
lower = self.val["lbound"]
        size = long(upper - lower)
if size > -1:
size = size + 1
result = '{lbound = %s, ubound = %s} : size %s' % (lower, upper, size)
return result
# Two pattern matches are used when registering: the first relates to the
# library, the second to the type. Since we are displaying a register, all
# libraries are accepted. The type to be processed is the same as the one
# present in the xml file.
def build_pretty_printer ():
pp = gdb.printing.RegexpCollectionPrettyPrinter (".*")
pp.add_printer ('bound', '^__gdb_builtin_type_bound128', BoundPrinter)
return pp
gdb.printing.register_pretty_printer (gdb.current_objfile (),
build_pretty_printer ())
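# Usage sketch (assumption: this file is loaded into a gdb session with
# `source <this-file>.py`); once registered, printing a value whose type
# matches __gdb_builtin_type_bound128 shows lbound/ubound plus the derived size.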
|
#!/usr/bin/python3
# File name: vista.py
# Version: 1.0.0
# Author: Joseph Adams
# Email: josephdadams@gmail.com
# Date created: 7/15/2020
# Date last modified: 4/19/2021
import sys
import json
import requests
try:
stdinput = sys.stdin.readline()
data = json.loads(stdinput)
ip = data['params']['ip']
midiport = data['params']['midiport']
cueList = data['params']['cuelist']
cue = data['params']['cue']
url = 'http://' + ip + ':4000/sendmidi'
jsonData = {
"midiport": midiport,
"midicommand":"msc",
"deviceid": "0",
"commandformat": "lighting.general",
"command": "go",
"cue": cue,
"cuelist": cueList,
"cuepath": ""
}
r = requests.post(url = url, json = jsonData)
print('{ "complete": 1 }')
except Exception:
print('{ "complete": 1, "code": 999, "description": "Failed to execute." }')
|
#! /usr/bin/python
import hacking
if __name__ == '__main__':
hacking.reexec_if_needed('spartan6.py')
from myhdl import Signal, SignalType, ResetSignal, instance, always_comb, intbv, always, always_seq, delay
use_xilinx = 1
_one = '1\'b1'
_zero = '1\'b0'
def make_params(**kwargs):
params = []
for k, v in kwargs.items():
if isinstance(v, basestring):
v = '"' + v + '"'
params.append('.%s(%s)' % (k.upper(), v))
params = ','.join(params)
return params
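# A minimal sketch of the expected output (parameter order depends on
# dict iteration order on this Python version, so this is illustrative):
#   >>> make_params(width=8, mode='FAST')
#   '.WIDTH(8),.MODE("FAST")'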
def make_comments(**kwargs):
params = []
for k, v in kwargs.items():
if isinstance(v, basestring):
v = '"' + v + '"'
params.append('(*%s = %s*)' % (k.upper(), v))
params = '\n'.join(params)
return params
def startup_spartan6(name, cfgclk = None, cfgmclk = None):
if use_xilinx:
if cfgclk is None:
cfgclk = ''
else:
cfgclk.driven = 'wire'
if cfgmclk is None:
            cfgmclk = ''
else:
cfgmclk.driven = 'wire'
lowTime = 100
highTime = 100
@instance
def cfgclkGen():
while True:
yield delay(lowTime)
cfgclk.next = 1
yield delay(highTime)
cfgclk.next = 0
@instance
def cfgmclkGen():
while True:
yield delay(lowTime)
cfgmclk.next = 1
yield delay(highTime)
cfgmclk.next = 0
return cfgclkGen, cfgmclkGen
if use_xilinx:
startup_spartan6.verilog_code = r'''
STARTUP_SPARTAN6 $name (
.CFGCLK($cfgclk),
.CFGMCLK($cfgmclk),
.EOS(),
.CLK(),
.GSR(),
.GTS(),
.KEYCLEARB()
);
'''.strip()
def bufg(name, i, o):
o.driven = 'wire'
@always_comb
def comb():
o.next = i
return comb
bufg.verilog_code = r'''
BUFG $name (
.I ($i),
.O ($o)
);
'''.strip()
def bufgce(name, i, o, ce):
i.read = True
ce.read = True
o.driven = 'wire'
@always_comb
def comb():
if ce:
o.next = i
else:
o.next = 0
return comb
bufgce.verilog_code = r'''
BUFGCE $name (
.I ($i),
.O ($o),
.CE ($ce)
);
'''.strip()
def ibufg(name, i, o):
o.driven = 'wire'
@always_comb
def comb():
o.next = i
return comb
if use_xilinx:
ibufg.verilog_code = r'''
IBUFG $name (
.I ($i),
.O ($o)
);
'''.strip()
def ibufds(name, i, ib, o):
print type(i), i, type(ib), ib, type(o), o
o.driven = 'wire'
@always_comb
def comb():
o.next = i
return comb
if use_xilinx:
ibufds.verilog_code = r'''
IBUFDS $name (
.I ($i),
.IB ($ib),
.O ($o)
);
'''.strip()
def ibufds_vec(name, i, ib, o):
print type(i), i, type(ib), ib, type(o), o
o.driven = 'wire'
@always_comb
def comb():
o.next = i
assert len(i) == len(ib) == len(o)
ii = name + '_ii'
iname = name + '_block'
n = len(i)
return comb
ibufds_vec.verilog_code = r'''
genvar $ii;
generate
for ($ii = 0; $ii < $n; $ii = $ii + 1) begin : $iname
IBUFDS $name (
.I ($i[$ii]),
.IB ($ib[$ii]),
.O ($o[$ii])
);
end
endgenerate
'''.strip()
def ibufgds(name, i, ib, o):
o.driven = 'wire'
@always_comb
def comb():
o.next = i
return comb
if use_xilinx:
ibufgds.verilog_code = r'''
IBUFGDS $name (
.I ($i),
.IB ($ib),
.O ($o)
);
'''.strip()
def ibufgds_diff_out(name, i, ib, o, ob):
o.driven = 'wire'
ob.driven = 'wire'
@always_comb
def comb():
o.next = i
ob.next = ib
return comb
if use_xilinx:
ibufgds_diff_out.verilog_code = r'''
IBUFGDS_DIFF_OUT $name (
.I ($i),
.IB ($ib),
.O ($o),
.OB ($ob)
);
'''.strip()
def iobuf(name, i, o, t, io):
if isinstance(i, SignalType):
assert len(i) == len(io)
i.read = True
if isinstance(o, SignalType):
assert len(o) == len(io)
o.driven = 'wire'
if isinstance(t, SignalType):
assert len(t) == len(io)
t.read = True
else:
t = intbv(~0)[len(io):]
io.read = True
io.driven = 'wire'
ii = name + '_ii'
iname = name + '_block'
n = len(io)
    @always_comb
    def i_comb():
        for ii in range(len(io)):
            o.next[ii] = io[ii]
@always_comb
def o_comb():
for ii in range(len(io)):
if t[ii]:
io.next[ii] = i[ii]
else:
io.next[ii] = 0
return i_comb, o_comb
iobuf.verilog_code = r'''
genvar $ii;
generate
for ($ii = 0; $ii < $n; $ii = $ii + 1) begin : $iname
IOBUF $name (
.I ($i[$ii]),
.O ($o[$ii]),
.T ($t[$ii]),
.IO ($io[$ii])
);
end
endgenerate
'''.strip()
def iobuf_oe(name, i, o, oe, io):
if isinstance(i, SignalType):
assert len(i) == len(io)
i.read = True
if isinstance(o, SignalType):
assert len(o) == len(io)
o.driven = 'wire'
if isinstance(oe, SignalType):
assert len(oe) == 1
oe.read = True
else:
oe = 0
io.read = True
io.driven = 'wire'
ii = name + '_ii'
iname = name + '_block'
n = len(io)
    @always_comb
    def i_comb():
        for ii in range(len(io)):
            o.next[ii] = io[ii]
    @always_comb
    def o_comb():
        for ii in range(len(io)):
            if oe:
                io.next[ii] = i[ii]
            else:
                io.next[ii] = 0
return i_comb, o_comb
iobuf_oe.verilog_code = r'''
genvar $ii;
generate
for ($ii = 0; $ii < $n; $ii = $ii + 1) begin : $iname
IOBUF $name (
.I ($i[$ii]),
.O ($o[$ii]),
.T (~$oe),
.IO ($io[$ii])
);
end
endgenerate
'''.strip()
def iddr2(name, d, q0, q1, c0, c1 = None, ce = _one, r = _zero, s = _zero,
ddr_alignment = 'NONE',
init_q0 = _zero, init_q1 = _zero,
srtype = 'SYNC'):
insts = []
print "IDDR2 c0", c0
print "IDDR2 c1", c1
if c1 is None:
c1 = Signal(False)
@always_comb
def c1_comb():
c1.next = not c0
insts.append(c1_comb)
print "IDDR2 fake c1", c1
insts.append(iddr2_int(name, d, q0, q1, c0, c1, ce, r, s,
ddr_alignment, init_q0, init_q1, srtype))
return insts
def iddr2_int(name, d, q0, q1, c0, c1, ce = _one, r = _zero, s = _zero,
ddr_alignment = 'NONE',
init_q0 = _zero, init_q1 = _zero,
srtype = 'SYNC'):
d.read = True
c0.read = True
c1.read = True
if isinstance(ce, SignalType):
ce.read = True
if isinstance(r, SignalType):
r.read = True
if isinstance(s, SignalType):
s.read = True
q0.driven = 'wire'
q1.driven = 'wire'
assert len(d) == len(q0) == len(q1)
ii = name + '_ii'
iname = name + '_block'
n = len(d)
# Silly stuff to convince MyHDL that the signal is used
@always_comb
def comb():
q0.next = d and c0 and ce and r and s
q1.next = d and c1 and ce and r and s
return comb
iddr2_int.verilog_code = r'''
genvar $ii;
generate
for ($ii = 0; $ii < $n; $ii = $ii + 1) begin : $iname
IDDR2 #(
.DDR_ALIGNMENT("$ddr_alignment"),
.INIT_Q0($init_q0),
.INIT_Q1($init_q1),
.SRTYPE("$srtype")
) $name (
.D ($d[$ii]),
.Q0 ($q0[$ii]),
.Q1 ($q1[$ii]),
.C0 ($c0),
.C1 ($c1),
.CE ($ce),
.R ($r),
.S ($s)
);
end
endgenerate
'''.strip()
def oddr2(name, d0, d1, q, c0, c1 = None, ce = _one, r = _zero, s = _zero,
ddr_alignment = 'NONE',
init = 0,
srtype = 'SYNC'):
insts = []
if c1 is None:
c1 = Signal(False)
@always_comb
def c1_comb():
c1.next = not c0
insts.append(c1_comb)
d0.read = True
d1.read = True
c0.read = True
c1.read = True
q.driven = 'wire'
insts.append(oddr2_int(name, d0, d1, q, c0, c1, ce, r, s,
ddr_alignment, init, srtype))
return insts
def oddr2_int(name, d0, d1, q, c0, c1, ce = _one, r = _zero, s = _zero,
ddr_alignment = 'NONE',
init = 0,
srtype = 'SYNC'):
d0.read = True
d1.read = True
c0.read = True
c1.read = True
if isinstance(ce, SignalType):
ce.read = True
if isinstance(r, SignalType):
r.read = True
if isinstance(s, SignalType):
s.read = True
q.driven = 'wire'
assert len(d0) == len(d1) == len(q)
ii = name + '_ii'
iname = name + '_block'
n = len(q)
@always_comb
def comb():
q.next = d0 or d1
return comb
oddr2_int.verilog_code = r'''
genvar $ii;
generate
for ($ii = 0; $ii < $n; $ii = $ii + 1) begin : $iname
ODDR2 #(
.DDR_ALIGNMENT("$ddr_alignment"),
.INIT($init),
.SRTYPE("$srtype")
) $name (
.D0 ($d0[$ii]),
.D1 ($d1[$ii]),
.Q ($q[$ii]),
.C0 ($c0),
.C1 ($c1),
.CE ($ce),
.R ($r),
.S ($s)
);
end
endgenerate
'''.strip()
def iodelay2_se(
name,
busy = '',
dataout = '',
dataout2 = '',
dout = '',
tout = '',
cal = '',
ce = '',
clk = '',
idatain = '',
inc = '',
ioclk = '',
odatain = '',
rst = '',
t = '',
counter_wraparound = 'WRAPAROUND',
data_rate = 'SDR',
delay_src = 'IDATAIN',
idelay_type = 'DEFAULT',
idelay_value = 0,
idelay2_value = 0,
odelay_value = 0,
serdes_mode = 'NONE'):
odatain.read = True
idatain.read = True
t.read = True
dataout.driven = 'wire'
dout.driven = 'wire'
tout.driven = 'wire'
ii = name + '_ii'
iname = name + '_block'
n = len(dout)
    # Silly stuff to convince MyHDL that the signals are used
    @always_comb
    def comb():
        dataout.next = idatain
        dout.next = odatain
        tout.next = t
iodelay2_se.verilog_code = r'''
genvar $ii;
generate
for ($ii = 0; $ii < $n; $ii = $ii + 1) begin : $iname
IODELAY2 #(
.COUNTER_WRAPAROUND("$counter_wraparound"),
.DATA_RATE("$data_rate"),
.DELAY_SRC("$delay_src"),
.IDELAY2_VALUE($idelay2_value),
.IDELAY_TYPE("$idelay_type"),
.IDELAY_VALUE($idelay_value),
.ODELAY_VALUE($odelay_value),
.SERDES_MODE("$serdes_mode")
)
IODELAY2_inst (
.BUSY($busy),
.DATAOUT($dataout[$ii]),
.DATAOUT2($dataout2),
.DOUT($dout[$ii]),
.TOUT($tout[$ii]),
.CAL($cal),
.CE($ce),
.CLK($clk),
.IDATAIN($idatain[$ii]),
.INC($inc),
.IOCLK0($ioclk),
.IOCLK1(~$ioclk),
.ODATAIN($odatain[$ii]),
.RST($rst),
.T($t[$ii])
);
end
endgenerate
'''.strip()
def iodelay2_fixed(
name,
busy = '',
dataout = '',
dataout2 = '',
dout = '',
tout = '',
idatain = '',
odatain = '',
rst = '',
cal = '',
t = '',
counter_wraparound = 'WRAPAROUND',
data_rate = 'SDR',
delay_src = 'IDATAIN',
idelay_value = 0,
idelay2_value = 0,
odelay_value = 0,
serdes_mode = 'NONE'):
idelay_type = 'FIXED'
odatain.read = True
idatain.read = True
t.read = True
dataout.driven = 'wire'
dout.driven = 'wire'
tout.driven = 'wire'
ii = name + '_ii'
iname = name + '_block'
n = len(dout)
    # Silly stuff to convince MyHDL that the signals are used
    @always_comb
    def comb():
        dataout.next = idatain
        dout.next = odatain
        tout.next = t
iodelay2_fixed.verilog_code = r'''
genvar $ii;
generate
for ($ii = 0; $ii < $n; $ii = $ii + 1) begin : $iname
IODELAY2 #(
.COUNTER_WRAPAROUND("$counter_wraparound"),
.DATA_RATE("$data_rate"),
.DELAY_SRC("$delay_src"),
.IDELAY2_VALUE($idelay2_value),
.IDELAY_TYPE("$idelay_type"),
.IDELAY_VALUE($idelay_value),
.ODELAY_VALUE($odelay_value),
.SERDES_MODE("$serdes_mode")
)
IODELAY2_inst (
.BUSY($busy),
.DATAOUT($dataout[$ii]),
.DATAOUT2($dataout2),
.DOUT($dout[$ii]),
.TOUT($tout[$ii]),
.IDATAIN($idatain[$ii]),
.ODATAIN($odatain[$ii]),
.RST($rst),
.CAL($cal),
.T($t[$ii])
);
end
endgenerate
'''.strip()
def iobuf_ddr2(name, i0, i1, ic0, ic1, o0, o1, oe0, oe1, oc0, oc1, io,
ddr_alignment = 'NONE',
srtype = 'SYNC'):
i = Signal(intbv(0)[len(io):])
o = Signal(intbv(0)[len(io):])
t = Signal(intbv(0)[len(io):])
t0 = Signal(intbv(0)[len(io):])
t1 = Signal(intbv(0)[len(io):])
insts = []
iobuf_inst = iobuf(name + '_iobuf', i, o, t, io)
insts.append(iobuf_inst)
iddr2_inst = iddr2(name + '_iddr2', o, i0, i1, ic0, ic1,
ddr_alignment = ddr_alignment,
srtype = srtype)
insts.append(iddr2_inst)
oddr2_inst = oddr2(name + '_oddr2', o0, o1, i, oc0, oc1,
ddr_alignment = ddr_alignment,
srtype = srtype)
insts.append(oddr2_inst)
tddr2_inst = oddr2(name + '_tddr2', t0, t1, t, oc0, oc1,
ddr_alignment = ddr_alignment,
srtype = srtype)
insts.append(tddr2_inst)
@always_comb
def comb():
if oe0:
t0.next = 0
else:
t0.next = (1<<len(t0))-1
if oe1:
t1.next = 0
else:
t1.next = (1<<len(t1))-1
insts.append(comb)
return insts
def iobuf_ddr2_se(name, i0, i1, o0, o1, oe0, oe1, io, c,
ddr_alignment = 'NONE',
srtype = 'SYNC'):
i = Signal(intbv(0)[len(io):])
o = Signal(intbv(0)[len(io):])
t = Signal(intbv(0)[len(io):])
t0 = Signal(intbv(0)[len(io):])
t1 = Signal(intbv(0)[len(io):])
insts = []
iobuf_inst = iobuf(name + '_iobuf', i, o, t, io)
insts.append(iobuf_inst)
iddr2_inst = iddr2_se(name + '_iddr2', o, i0, i1, c,
ddr_alignment = ddr_alignment,
srtype = srtype)
insts.append(iddr2_inst)
oddr2_inst = oddr2_se(name + '_oddr2', o0, o1, i, c,
ddr_alignment = ddr_alignment,
srtype = srtype)
insts.append(oddr2_inst)
tddr2_inst = oddr2_se(name + '_tddr2', t0, t1, t, c,
ddr_alignment = ddr_alignment,
srtype = srtype)
insts.append(tddr2_inst)
@always_comb
def comb():
if oe0:
t0.next = 0
else:
t0.next = (1<<len(t0))-1
if oe1:
t1.next = 0
else:
t1.next = (1<<len(t1))-1
insts.append(comb)
return insts
def iobuf_delay_ddr2_fixed(name, i0, i1, o0, o1, oe0, oe1, io, clk, clk_b = None,
ddr_alignment = 'NONE',
srtype = 'SYNC',
idelay_value = 0,
odelay_value = 0):
insts = []
i0.driven = 'wire'
i1.driven = 'wire'
o0.read = True
o1.read = True
oe0.read = True
oe1.read = True
io.driven = 'wire'
clk.read = True
if clk_b is not None:
clk_b.read = True
i = Signal(intbv(0)[len(io):])
o = Signal(intbv(0)[len(io):])
t = Signal(intbv(0)[len(io):])
iobuf_inst = iobuf(name + '_iobuf', i, o, t, io)
insts.append(iobuf_inst)
i2 = Signal(intbv(0)[len(io):])
o2 = Signal(intbv(0)[len(io):])
t2 = Signal(intbv(0)[len(io):])
iodelay_inst = iodelay2_fixed(name + '_iodelay',
dout = i, odatain = i2,
dataout = o2, idatain = o,
t = t2, tout = t,
data_rate = 'DDR',
idelay_value = idelay_value,
odelay_value = odelay_value,
delay_src = 'IO',
)
insts.append(iodelay_inst)
iddr2_inst = iddr2(name + '_iddr2', o2, i0, i1, clk, clk_b,
ddr_alignment = ddr_alignment,
srtype = srtype)
insts.append(iddr2_inst)
oddr2_inst = oddr2(name + '_oddr2', o0, o1, i2, clk, clk_b,
ddr_alignment = ddr_alignment,
srtype = srtype)
insts.append(oddr2_inst)
t0 = Signal(intbv(0)[len(io):])
t1 = Signal(intbv(0)[len(io):])
tddr2_inst = oddr2(name + '_tddr2', t0, t1, t2, clk, clk_b,
ddr_alignment = ddr_alignment,
srtype = srtype)
insts.append(tddr2_inst)
@always_comb
def comb():
if oe0:
t0.next = 0
else:
t0.next = (1<<len(t0))-1
if oe1:
t1.next = 0
else:
t1.next = (1<<len(t1))-1
insts.append(comb)
return insts
def pll_adv(
name,
rst = None,
clkinsel = 1, # default to clkin1
clkin1 = 0,
clkin2 = 0,
clkfbin = 0,
clkfbdcm = '',
clkfbout = '',
clkoutdcm0 = '',
clkoutdcm1 = '',
clkoutdcm2 = '',
clkoutdcm3 = '',
clkoutdcm4 = '',
clkoutdcm5 = '',
clkout0 = '',
clkout1 = '',
clkout2 = '',
clkout3 = '',
clkout4 = '',
clkout5 = '',
locked = '',
BANDWIDTH = "OPTIMIZED",
CLKIN1_PERIOD = 1000, # (ps)
CLKIN2_PERIOD = 1000, # (ps)
DIVCLK_DIVIDE = 1,
CLKFBOUT_MULT = 1,
CLKOUT0_DIVIDE = 1,
CLKOUT1_DIVIDE = 1,
CLKOUT2_DIVIDE = 1,
CLKOUT3_DIVIDE = 1,
CLKOUT4_DIVIDE = 1,
CLKOUT5_DIVIDE = 1,
CLKOUT0_PHASE = 0.000,
CLKOUT1_PHASE = 0.000,
CLKOUT2_PHASE = 0.000,
CLKOUT3_PHASE = 0.000,
CLKOUT4_PHASE = 0.000,
CLKOUT5_PHASE = 0.000,
CLKOUT0_DUTY_CYCLE = 0.500,
CLKOUT1_DUTY_CYCLE = 0.500,
CLKOUT2_DUTY_CYCLE = 0.500,
CLKOUT3_DUTY_CYCLE = 0.500,
CLKOUT4_DUTY_CYCLE = 0.500,
CLKOUT5_DUTY_CYCLE = 0.500,
SIM_DEVICE = "SPARTAN6",
COMPENSATION = "INTERNAL",
CLKFBOUT_PHASE = 0.000,
REF_JITTER = 0.005
):
insts = []
for s in [ rst, clkinsel, clkin1, clkin2, clkfbin ]:
if isinstance(s, SignalType):
s.read = True
for s in [ clkfbdcm, clkfbout,
clkoutdcm0, clkoutdcm1, clkoutdcm2, clkoutdcm3, clkoutdcm4, clkoutdcm5,
clkout0, clkout1, clkout2, clkout3, clkout4, clkout5,
locked ]:
if isinstance(s, SignalType):
s.driven = 'wire'
@always_comb
def inst():
s.next = clkin1 or clkfbin
insts.append(inst)
rst_inv = ''
if rst is None:
rst = 0
elif isinstance(rst, ResetSignal):
if not rst.active:
rst_inv = '!'
return insts
pll_adv.verilog_code = '''
PLL_ADV #(
.BANDWIDTH ("$BANDWIDTH"),
.CLKIN1_PERIOD ($CLKIN1_PERIOD),
.CLKIN2_PERIOD ($CLKIN2_PERIOD),
.DIVCLK_DIVIDE ($DIVCLK_DIVIDE),
.CLKFBOUT_MULT ($CLKFBOUT_MULT),
.CLKFBOUT_PHASE ($CLKFBOUT_PHASE),
.CLKOUT0_DIVIDE ($CLKOUT0_DIVIDE),
.CLKOUT1_DIVIDE ($CLKOUT1_DIVIDE),
.CLKOUT2_DIVIDE ($CLKOUT2_DIVIDE),
.CLKOUT3_DIVIDE ($CLKOUT3_DIVIDE),
.CLKOUT4_DIVIDE ($CLKOUT4_DIVIDE),
.CLKOUT5_DIVIDE ($CLKOUT5_DIVIDE),
.CLKOUT0_PHASE ($CLKOUT0_PHASE),
.CLKOUT1_PHASE ($CLKOUT1_PHASE),
.CLKOUT2_PHASE ($CLKOUT2_PHASE),
.CLKOUT3_PHASE ($CLKOUT3_PHASE),
.CLKOUT4_PHASE ($CLKOUT4_PHASE),
.CLKOUT5_PHASE ($CLKOUT5_PHASE),
.CLKOUT0_DUTY_CYCLE ($CLKOUT0_DUTY_CYCLE),
.CLKOUT1_DUTY_CYCLE ($CLKOUT1_DUTY_CYCLE),
.CLKOUT2_DUTY_CYCLE ($CLKOUT2_DUTY_CYCLE),
.CLKOUT3_DUTY_CYCLE ($CLKOUT3_DUTY_CYCLE),
.CLKOUT4_DUTY_CYCLE ($CLKOUT4_DUTY_CYCLE),
.CLKOUT5_DUTY_CYCLE ($CLKOUT5_DUTY_CYCLE),
.SIM_DEVICE ("SPARTAN6"),
.COMPENSATION ("INTERNAL"),
.REF_JITTER ($REF_JITTER)
)
$name
(
.RST ($rst_inv$rst),
.CLKFBIN ($clkfbin),
.CLKINSEL ($clkinsel),
.CLKIN1 ($clkin1),
.CLKIN2 ($clkin2),
.CLKFBDCM ($clkfbdcm),
.CLKFBOUT ($clkfbout),
.CLKOUTDCM0 ($clkoutdcm0),
.CLKOUTDCM1 ($clkoutdcm1),
.CLKOUTDCM2 ($clkoutdcm2),
.CLKOUTDCM3 ($clkoutdcm3),
.CLKOUTDCM4 ($clkoutdcm4),
.CLKOUTDCM5 ($clkoutdcm5),
.CLKOUT0 ($clkout0),
.CLKOUT1 ($clkout1),
.CLKOUT2 ($clkout2),
.CLKOUT3 ($clkout3),
.CLKOUT4 ($clkout4),
.CLKOUT5 ($clkout5),
.REL (1'b0),
.LOCKED ($locked),
.DADDR (5'b0),
.DCLK (1'b0),
.DEN (1'b0),
.DI (16'b0),
.DWE (1'b0),
.DO (),
.DRDY ()
);
'''
def bufpll_mcb(
name,
gclk,
pllin0,
pllin1,
locked,
ioclk0,
ioclk1,
serdesstrobe0,
serdesstrobe1,
lock,
):
gclk.read = True
pllin0.read = True
pllin1.read = True
lock.read = True
ioclk0.driven = 'wire'
ioclk1.driven = 'wire'
serdesstrobe0.driven = 'wire'
serdesstrobe1.driven = 'wire'
lock.driven = 'wire'
@always_comb
def comb():
serdesstrobe0.next = pllin0
serdesstrobe1.next = pllin1
return comb
bufpll_mcb.verilog_code = '''
BUFPLL_MCB $name
(
.GCLK ($gclk),
.PLLIN0 ($pllin0),
.PLLIN1 ($pllin1),
.LOCKED ($locked),
.IOCLK0 ($ioclk0),
.IOCLK1 ($ioclk1),
.SERDESSTROBE0 ($serdesstrobe0),
.SERDESSTROBE1 ($serdesstrobe1),
.LOCK ($lock)
);
'''
def mcb_ui_top(
name,
mcbx_dram_clk,
mcbx_dram_clk_n,
mcbx_dram_cke,
mcbx_dram_ras_n,
mcbx_dram_cas_n,
mcbx_dram_we_n,
mcbx_dram_ba,
mcbx_dram_addr,
mcbx_dram_dqs,
mcbx_dram_dqs_n,
mcbx_dram_udqs,
mcbx_dram_udqs_n,
mcbx_dram_ldm,
mcbx_dram_udm,
mcbx_dram_dq,
mcbx_dram_odt,
mcbx_dram_ddr3_rst,
mcbx_rzq,
mcbx_zio,
sys_rst,
ui_clk,
sysclk_2x,
sysclk_2x_180,
pll_ce_0,
pll_ce_90,
pll_lock,
sysclk_2x_bufpll_o = '',
sysclk_2x_180_bufpll_o = '',
pll_ce_0_bufpll_o = '',
pll_ce_90_bufpll_o = '',
pll_lock_bufpll_o = '',
p0_arb_en = 1,
p0_cmd_clk = 0,
p0_cmd_en = 0,
p0_cmd_instr = 0,
p0_cmd_bl = 0,
p0_cmd_byte_addr = 0,
p0_cmd_empty = '',
p0_cmd_full = '',
p0_wr_clk = 0,
p0_wr_en = 0,
p0_wr_mask = 0,
p0_wr_data = 0,
p0_wr_full = '',
p0_wr_empty = '',
p0_wr_count = '',
p0_wr_underrun = '',
p0_wr_error = '',
p0_rd_clk = 0,
p0_rd_en = 0,
p0_rd_data = '',
p0_rd_full = '',
p0_rd_empty = '',
p0_rd_count = '',
p0_rd_overflow = '',
p0_rd_error = '',
p1_arb_en = 1,
p1_cmd_clk = 0,
p1_cmd_en = 0,
p1_cmd_instr = 0,
p1_cmd_bl = 0,
p1_cmd_byte_addr = 0,
p1_cmd_empty = '',
p1_cmd_full = '',
p1_wr_clk = 0,
p1_wr_en = 0,
p1_wr_mask = 0,
p1_wr_data = 0,
p1_wr_full = '',
p1_wr_empty = '',
p1_wr_count = '',
p1_wr_underrun = '',
p1_wr_error = '',
p1_rd_clk = 0,
p1_rd_en = 0,
p1_rd_data = '',
p1_rd_full = '',
p1_rd_empty = '',
p1_rd_count = '',
p1_rd_overflow = '',
p1_rd_error = '',
p2_arb_en = 1,
p2_cmd_clk = 0,
p2_cmd_en = 0,
p2_cmd_instr = 0,
p2_cmd_bl = 0,
p2_cmd_byte_addr = 0,
p2_cmd_empty = '',
p2_cmd_full = '',
p2_wr_clk = 0,
p2_wr_en = 0,
p2_wr_mask = 0,
p2_wr_data = 0,
p2_wr_full = '',
p2_wr_empty = '',
p2_wr_count = '',
p2_wr_underrun = '',
p2_wr_error = '',
p2_rd_clk = 0,
p2_rd_en = 0,
p2_rd_data = '',
p2_rd_full = '',
p2_rd_empty = '',
p2_rd_count = '',
p2_rd_overflow = '',
p2_rd_error = '',
p3_arb_en = 1,
p3_cmd_clk = 0,
p3_cmd_en = 0,
p3_cmd_instr = 0,
p3_cmd_bl = 0,
p3_cmd_byte_addr = 0,
p3_cmd_empty = '',
p3_cmd_full = '',
p3_wr_clk = 0,
p3_wr_en = 0,
p3_wr_mask = 0,
p3_wr_data = 0,
p3_wr_full = '',
p3_wr_empty = '',
p3_wr_count = '',
p3_wr_underrun = '',
p3_wr_error = '',
p3_rd_clk = 0,
p3_rd_en = 0,
p3_rd_data = '',
p3_rd_full = '',
p3_rd_empty = '',
p3_rd_count = '',
p3_rd_overflow = '',
p3_rd_error = '',
p4_arb_en = 1,
p4_cmd_clk = 0,
p4_cmd_en = 0,
p4_cmd_instr = 0,
p4_cmd_bl = 0,
p4_cmd_byte_addr = 0,
p4_cmd_empty = '',
p4_cmd_full = '',
p4_wr_clk = 0,
p4_wr_en = 0,
p4_wr_mask = 0,
p4_wr_data = 0,
p4_wr_full = '',
p4_wr_empty = '',
p4_wr_count = '',
p4_wr_underrun = '',
p4_wr_error = '',
p4_rd_clk = 0,
p4_rd_en = 0,
p4_rd_data = '',
p4_rd_full = '',
p4_rd_empty = '',
p4_rd_count = '',
p4_rd_overflow = '',
p4_rd_error = '',
p5_arb_en = 1,
p5_cmd_clk = 0,
p5_cmd_en = 0,
p5_cmd_instr = 0,
p5_cmd_bl = 0,
p5_cmd_byte_addr = 0,
p5_cmd_empty = '',
p5_cmd_full = '',
p5_wr_clk = 0,
p5_wr_en = 0,
p5_wr_mask = 0,
p5_wr_data = 0,
p5_wr_full = '',
p5_wr_empty = '',
p5_wr_count = '',
p5_wr_underrun = '',
p5_wr_error = '',
p5_rd_clk = 0,
p5_rd_en = 0,
p5_rd_data = '',
p5_rd_full = '',
p5_rd_empty = '',
p5_rd_count = '',
p5_rd_overflow = '',
p5_rd_error = '',
status = '',
selfrefresh_enter = 0,
selfrefresh_mode = '',
uo_done_cal = '',
C_MEMCLK_PERIOD = 2500,
C_P0_MASK_SIZE = 4,
C_P0_DATA_PORT_SIZE = 32,
C_P1_MASK_SIZE = 4,
C_P1_DATA_PORT_SIZE = 32,
        C_PORT_ENABLE = 0x2f, # 6'b101111
C_PORT_CONFIG = "B128",
C_MEM_ADDR_ORDER = "BANK_ROW_COLUMN",
# The following parameter reflects the GUI selection of the
# Arbitration algorithm. Zero value corresponds to round robin
# algorithm and one to custom selection. The parameter is used to
# calculate the arbitration time slot parameters.
C_ARB_ALGORITHM = 0,
C_ARB_NUM_TIME_SLOTS = 12,
C_ARB_TIME_SLOT_0 = "18'o012345",
C_ARB_TIME_SLOT_1 = "18'o123450",
C_ARB_TIME_SLOT_2 = "18'o234501",
C_ARB_TIME_SLOT_3 = "18'o345012",
C_ARB_TIME_SLOT_4 = "18'o450123",
C_ARB_TIME_SLOT_5 = "18'o501234",
C_ARB_TIME_SLOT_6 = "18'o012345",
C_ARB_TIME_SLOT_7 = "18'o123450",
C_ARB_TIME_SLOT_8 = "18'o234501",
C_ARB_TIME_SLOT_9 = "18'o345012",
C_ARB_TIME_SLOT_10 = "18'o450123",
C_ARB_TIME_SLOT_11 = "18'o501234",
C_MEM_TRAS = 45000,
C_MEM_TRCD = 12500,
C_MEM_TREFI = 7800000,
C_MEM_TRFC = 105000, # 127500
C_MEM_TRP = 15000, # 12500
C_MEM_TWR = 15000,
C_MEM_TRTP = 7500,
C_MEM_TWTR = 7500,
C_NUM_DQ_PINS = 8,
C_MEM_TYPE = "DDR3",
C_MEM_DENSITY = "512M",
C_MEM_BURST_LEN = 8,
C_MEM_CAS_LATENCY = 4,
C_MEM_ADDR_WIDTH = 13,
C_MEM_BANKADDR_WIDTH = 3,
C_MEM_NUM_COL_BITS = 11,
C_MEM_DDR3_CAS_LATENCY = 7,
C_MEM_MOBILE_PA_SR = "FULL",
C_MEM_DDR1_2_ODS = "FULL",
C_MEM_DDR3_ODS = "DIV6",
C_MEM_DDR2_RTT = "50OHMS",
C_MEM_DDR3_RTT = "DIV2",
C_MEM_MDDR_ODS = "FULL",
C_MEM_DDR2_DIFF_DQS_EN = "YES",
C_MEM_DDR2_3_PA_SR = "OFF",
C_MEM_DDR3_CAS_WR_LATENCY = 5,
C_MEM_DDR3_AUTO_SR = "ENABLED",
C_MEM_DDR2_3_HIGH_TEMP_SR = "NORMAL",
C_MEM_DDR3_DYN_WRT_ODT = "OFF",
C_MC_CALIB_BYPASS = "NO",
C_MC_CALIBRATION_MODE = "CALIBRATION",
C_MC_CALIBRATION_DELAY = "HALF",
C_SKIP_IN_TERM_CAL = 0,
C_SKIP_DYNAMIC_CAL = 0,
LDQSP_TAP_DELAY_VAL = 0,
UDQSP_TAP_DELAY_VAL = 0,
LDQSN_TAP_DELAY_VAL = 0,
UDQSN_TAP_DELAY_VAL = 0,
DQ0_TAP_DELAY_VAL = 0,
DQ1_TAP_DELAY_VAL = 0,
DQ2_TAP_DELAY_VAL = 0,
DQ3_TAP_DELAY_VAL = 0,
DQ4_TAP_DELAY_VAL = 0,
DQ5_TAP_DELAY_VAL = 0,
DQ6_TAP_DELAY_VAL = 0,
DQ7_TAP_DELAY_VAL = 0,
DQ8_TAP_DELAY_VAL = 0,
DQ9_TAP_DELAY_VAL = 0,
DQ10_TAP_DELAY_VAL = 0,
DQ11_TAP_DELAY_VAL = 0,
DQ12_TAP_DELAY_VAL = 0,
DQ13_TAP_DELAY_VAL = 0,
DQ14_TAP_DELAY_VAL = 0,
DQ15_TAP_DELAY_VAL = 0,
C_CALIB_SOFT_IP = "TRUE",
C_SIMULATION = "FALSE",
):
mcbx_dram_clk.driven = 'wire'
mcbx_dram_clk_n.driven = 'wire'
if isinstance(mcbx_dram_cke, SignalType):
mcbx_dram_cke.driven = 'wire'
mcbx_dram_ras_n.driven = 'wire'
mcbx_dram_cas_n.driven = 'wire'
mcbx_dram_we_n.driven = 'wire'
mcbx_dram_ba.driven = 'wire'
mcbx_dram_addr.driven = 'wire'
mcbx_dram_dqs.read = True
mcbx_dram_dqs.driven = 'wire'
mcbx_dram_dqs_n.read = True
mcbx_dram_dqs_n.driven = 'wire'
mcbx_dram_udqs.read = True
mcbx_dram_udqs.driven = 'wire'
mcbx_dram_udqs_n.read = True
mcbx_dram_udqs_n.driven = 'wire'
mcbx_dram_ldm.driven = 'wire'
mcbx_dram_udm.driven = 'wire'
mcbx_dram_dq.read = True
mcbx_dram_dq.driven = 'wire'
for s in [ sys_rst, ui_clk, sysclk_2x, sysclk_2x_180,
pll_ce_0, pll_ce_90, pll_lock,
p0_cmd_clk, p0_cmd_en, p0_cmd_instr, p0_cmd_bl, p0_cmd_byte_addr,
p0_wr_en, p0_wr_mask, p0_wr_data, p0_rd_en,
p1_cmd_clk, p1_cmd_en, p1_cmd_instr, p1_cmd_bl, p1_cmd_byte_addr,
p1_wr_en, p1_wr_mask, p1_wr_data, p1_rd_en,
p2_cmd_clk, p2_cmd_en, p2_cmd_instr, p2_cmd_bl, p2_cmd_byte_addr,
p2_wr_en, p2_wr_mask, p2_wr_data, p2_rd_en,
p3_cmd_clk, p3_cmd_en, p3_cmd_instr, p3_cmd_bl, p3_cmd_byte_addr,
p3_wr_en, p3_wr_mask, p3_wr_data, p3_rd_en,
p4_cmd_clk, p4_cmd_en, p4_cmd_instr, p4_cmd_bl, p4_cmd_byte_addr,
p4_wr_en, p4_wr_mask, p4_wr_data, p4_rd_en,
p5_cmd_clk, p5_cmd_en, p5_cmd_instr, p5_cmd_bl, p5_cmd_byte_addr,
p5_wr_en, p5_wr_mask, p5_wr_data, p5_rd_en,
]:
if isinstance(s, SignalType):
s.read = True
for s in [ p0_cmd_empty, p0_cmd_full,
p0_wr_empty, p0_wr_full, p0_wr_error, p0_wr_underrun, p0_wr_count,
p0_rd_empty, p0_rd_full, p0_rd_error, p0_rd_overflow, p0_rd_count, p0_rd_data,
p1_cmd_empty, p1_cmd_full,
p1_wr_empty, p1_wr_full, p1_wr_error, p1_wr_underrun, p1_wr_count,
p1_rd_empty, p1_rd_full, p1_rd_error, p1_rd_overflow, p1_rd_count, p1_rd_data,
p2_cmd_empty, p2_cmd_full,
p2_wr_empty, p2_wr_full, p2_wr_error, p2_wr_underrun, p2_wr_count,
p2_rd_empty, p2_rd_full, p2_rd_error, p2_rd_overflow, p2_rd_count, p2_rd_data,
p3_cmd_empty, p3_cmd_full,
p3_wr_empty, p3_wr_full, p3_wr_error, p3_wr_underrun, p3_wr_count,
p3_rd_empty, p3_rd_full, p3_rd_error, p3_rd_overflow, p3_rd_count, p3_rd_data,
p4_cmd_empty, p4_cmd_full,
p4_wr_empty, p4_wr_full, p4_wr_error, p4_wr_underrun, p4_wr_count,
p4_rd_empty, p4_rd_full, p4_rd_error, p4_rd_overflow, p4_rd_count, p4_rd_data,
p5_cmd_empty, p5_cmd_full,
p5_wr_empty, p5_wr_full, p5_wr_error, p5_wr_underrun, p5_wr_count,
p5_rd_empty, p5_rd_full, p5_rd_error, p5_rd_overflow, p5_rd_count, p5_rd_data,
uo_done_cal
]:
if isinstance(s, SignalType):
s.driven = 'wire'
rst_inv = ''
if sys_rst is None:
sys_rst = 0
elif isinstance(sys_rst, ResetSignal):
if not sys_rst.active:
rst_inv = '!'
C_MC_CALIBRATION_CLK_DIV = 1
# 16 clock cycles are added to avoid trfc violations
C_MEM_TZQINIT_MAXCNT = 512 + 16
C_SKIP_DYN_IN_TERM = 1
C_MC_CALIBRATION_RA = intbv(0)[16:]
C_MC_CALIBRATION_BA = intbv(0)[3:]
C_MC_CALIBRATION_CA = intbv(0)[12:]
C_MCB_USE_EXTERNAL_BUFPLL = 1
insts = []
if isinstance(status, SignalType):
@always (sysclk_2x.posedge)
def inst():
status.next = not sys_rst
insts.append(inst)
if isinstance(uo_done_cal, SignalType):
@always (sysclk_2x_180.posedge)
def inst():
uo_done_cal.next = not sys_rst
insts.append(inst)
blah = Signal(False)
    if p2_cmd_clk is not None:
        @always_seq (p2_cmd_clk.posedge, None)
        def p2_cmd_stub():
            p2_cmd_full.next = p2_cmd_en ^ p2_cmd_instr ^ p2_cmd_bl ^ p2_cmd_byte_addr
            p2_cmd_empty.next = p2_cmd_en
        insts.append(p2_cmd_stub)
    if p2_rd_clk is not None:
        @always_seq (p2_rd_clk.posedge, None)
        def p2_rd_stub():
            p2_rd_full.next = p2_rd_en
            p2_rd_empty.next = p2_rd_en
            p2_rd_count.next = p2_rd_en
            p2_rd_overflow.next = p2_rd_en
            p2_rd_error.next = p2_rd_en
            p2_rd_data.next = p2_rd_en
        insts.append(p2_rd_stub)
    if p2_wr_clk is not None:
        @always_seq (p2_wr_clk.posedge, None)
        def p2_wr_stub():
            p2_wr_full.next = p2_wr_en
            p2_wr_empty.next = p2_wr_en
            p2_wr_count.next = p2_wr_en
            p2_wr_underrun.next = p2_wr_mask
            p2_wr_error.next = p2_wr_data
        insts.append(p2_wr_stub)
return insts
mcb_ui_top.verilog_code = '''
mcb_ui_top #(
// Raw Wrapper Parameters
.C_MEMCLK_PERIOD ($C_MEMCLK_PERIOD),
.C_P0_MASK_SIZE ($C_P0_MASK_SIZE),
.C_P0_DATA_PORT_SIZE ($C_P0_DATA_PORT_SIZE),
.C_P1_MASK_SIZE ($C_P1_MASK_SIZE),
.C_P1_DATA_PORT_SIZE ($C_P1_DATA_PORT_SIZE),
.C_PORT_ENABLE ($C_PORT_ENABLE),
.C_PORT_CONFIG ("$C_PORT_CONFIG"),
.C_MEM_ADDR_ORDER ("$C_MEM_ADDR_ORDER"),
.C_ARB_ALGORITHM ($C_ARB_ALGORITHM),
.C_ARB_NUM_TIME_SLOTS ($C_ARB_NUM_TIME_SLOTS),
.C_ARB_TIME_SLOT_0 ($C_ARB_TIME_SLOT_0),
.C_ARB_TIME_SLOT_1 ($C_ARB_TIME_SLOT_1),
.C_ARB_TIME_SLOT_2 ($C_ARB_TIME_SLOT_2),
.C_ARB_TIME_SLOT_3 ($C_ARB_TIME_SLOT_3),
.C_ARB_TIME_SLOT_4 ($C_ARB_TIME_SLOT_4),
.C_ARB_TIME_SLOT_5 ($C_ARB_TIME_SLOT_5),
.C_ARB_TIME_SLOT_6 ($C_ARB_TIME_SLOT_6),
.C_ARB_TIME_SLOT_7 ($C_ARB_TIME_SLOT_7),
.C_ARB_TIME_SLOT_8 ($C_ARB_TIME_SLOT_8),
.C_ARB_TIME_SLOT_9 ($C_ARB_TIME_SLOT_9),
.C_ARB_TIME_SLOT_10 ($C_ARB_TIME_SLOT_10),
.C_ARB_TIME_SLOT_11 ($C_ARB_TIME_SLOT_11),
.C_MEM_TRAS ($C_MEM_TRAS),
.C_MEM_TRCD ($C_MEM_TRCD),
.C_MEM_TREFI ($C_MEM_TREFI),
.C_MEM_TRFC ($C_MEM_TRFC),
.C_MEM_TRP ($C_MEM_TRP),
.C_MEM_TWR ($C_MEM_TWR),
.C_MEM_TRTP ($C_MEM_TRTP),
.C_MEM_TWTR ($C_MEM_TWTR),
.C_NUM_DQ_PINS ($C_NUM_DQ_PINS),
.C_MEM_TYPE ("$C_MEM_TYPE"),
.C_MEM_DENSITY ("$C_MEM_DENSITY"),
.C_MEM_BURST_LEN ($C_MEM_BURST_LEN),
.C_MEM_CAS_LATENCY ($C_MEM_CAS_LATENCY),
.C_MEM_ADDR_WIDTH ($C_MEM_ADDR_WIDTH),
.C_MEM_BANKADDR_WIDTH ($C_MEM_BANKADDR_WIDTH),
.C_MEM_NUM_COL_BITS ($C_MEM_NUM_COL_BITS),
.C_MEM_DDR3_CAS_LATENCY ($C_MEM_DDR3_CAS_LATENCY),
.C_MEM_MOBILE_PA_SR ("$C_MEM_MOBILE_PA_SR"),
.C_MEM_DDR1_2_ODS ("$C_MEM_DDR1_2_ODS"),
.C_MEM_DDR3_ODS ("$C_MEM_DDR3_ODS"),
.C_MEM_DDR2_RTT ("$C_MEM_DDR2_RTT"),
.C_MEM_DDR3_RTT ("$C_MEM_DDR3_RTT"),
.C_MEM_MDDR_ODS ("$C_MEM_MDDR_ODS"),
.C_MEM_DDR2_DIFF_DQS_EN ("$C_MEM_DDR2_DIFF_DQS_EN"),
.C_MEM_DDR2_3_PA_SR ("$C_MEM_DDR2_3_PA_SR"),
.C_MEM_DDR3_CAS_WR_LATENCY ($C_MEM_DDR3_CAS_WR_LATENCY),
.C_MEM_DDR3_AUTO_SR ("$C_MEM_DDR3_AUTO_SR"),
.C_MEM_DDR2_3_HIGH_TEMP_SR ("$C_MEM_DDR2_3_HIGH_TEMP_SR"),
.C_MEM_DDR3_DYN_WRT_ODT ("$C_MEM_DDR3_DYN_WRT_ODT"),
.C_MEM_TZQINIT_MAXCNT ($C_MEM_TZQINIT_MAXCNT),
.C_MC_CALIB_BYPASS ("$C_MC_CALIB_BYPASS"),
.C_MC_CALIBRATION_RA ($C_MC_CALIBRATION_RA),
.C_MC_CALIBRATION_BA ($C_MC_CALIBRATION_BA),
.C_MC_CALIBRATION_CA ($C_MC_CALIBRATION_CA),
.C_CALIB_SOFT_IP ("$C_CALIB_SOFT_IP"),
.C_SKIP_IN_TERM_CAL ($C_SKIP_IN_TERM_CAL),
.C_SKIP_DYNAMIC_CAL ($C_SKIP_DYNAMIC_CAL),
.C_SKIP_DYN_IN_TERM ($C_SKIP_DYN_IN_TERM),
.LDQSP_TAP_DELAY_VAL ($LDQSP_TAP_DELAY_VAL),
.UDQSP_TAP_DELAY_VAL ($UDQSP_TAP_DELAY_VAL),
.LDQSN_TAP_DELAY_VAL ($LDQSN_TAP_DELAY_VAL),
.UDQSN_TAP_DELAY_VAL ($UDQSN_TAP_DELAY_VAL),
.DQ0_TAP_DELAY_VAL ($DQ0_TAP_DELAY_VAL),
.DQ1_TAP_DELAY_VAL ($DQ1_TAP_DELAY_VAL),
.DQ2_TAP_DELAY_VAL ($DQ2_TAP_DELAY_VAL),
.DQ3_TAP_DELAY_VAL ($DQ3_TAP_DELAY_VAL),
.DQ4_TAP_DELAY_VAL ($DQ4_TAP_DELAY_VAL),
.DQ5_TAP_DELAY_VAL ($DQ5_TAP_DELAY_VAL),
.DQ6_TAP_DELAY_VAL ($DQ6_TAP_DELAY_VAL),
.DQ7_TAP_DELAY_VAL ($DQ7_TAP_DELAY_VAL),
.DQ8_TAP_DELAY_VAL ($DQ8_TAP_DELAY_VAL),
.DQ9_TAP_DELAY_VAL ($DQ9_TAP_DELAY_VAL),
.DQ10_TAP_DELAY_VAL ($DQ10_TAP_DELAY_VAL),
.DQ11_TAP_DELAY_VAL ($DQ11_TAP_DELAY_VAL),
.DQ12_TAP_DELAY_VAL ($DQ12_TAP_DELAY_VAL),
.DQ13_TAP_DELAY_VAL ($DQ13_TAP_DELAY_VAL),
.DQ14_TAP_DELAY_VAL ($DQ14_TAP_DELAY_VAL),
.DQ15_TAP_DELAY_VAL ($DQ15_TAP_DELAY_VAL),
.C_MC_CALIBRATION_CLK_DIV ($C_MC_CALIBRATION_CLK_DIV),
.C_MC_CALIBRATION_MODE ("$C_MC_CALIBRATION_MODE"),
.C_MC_CALIBRATION_DELAY ("$C_MC_CALIBRATION_DELAY"),
.C_SIMULATION ("$C_SIMULATION"),
.C_MCB_USE_EXTERNAL_BUFPLL ($C_MCB_USE_EXTERNAL_BUFPLL)
)
$name
(
// Raw Wrapper Signals
.sysclk_2x ($sysclk_2x),
.sysclk_2x_180 ($sysclk_2x_180),
.pll_ce_0 ($pll_ce_0),
.pll_ce_90 ($pll_ce_90),
.pll_lock ($pll_lock),
.sysclk_2x_bufpll_o ($sysclk_2x_bufpll_o),
.sysclk_2x_180_bufpll_o ($sysclk_2x_180_bufpll_o),
.pll_ce_0_bufpll_o ($pll_ce_0_bufpll_o),
.pll_ce_90_bufpll_o ($pll_ce_90_bufpll_o),
.pll_lock_bufpll_o ($pll_lock_bufpll_o),
.sys_rst ($rst_inv$sys_rst),
.p0_arb_en ($p0_arb_en),
.p0_cmd_clk ($p0_cmd_clk),
.p0_cmd_en ($p0_cmd_en),
.p0_cmd_instr ($p0_cmd_instr),
.p0_cmd_bl ($p0_cmd_bl),
.p0_cmd_byte_addr ($p0_cmd_byte_addr),
.p0_cmd_empty ($p0_cmd_empty),
.p0_cmd_full ($p0_cmd_full),
.p0_wr_clk ($p0_wr_clk),
.p0_wr_en ($p0_wr_en),
.p0_wr_mask ($p0_wr_mask),
.p0_wr_data ($p0_wr_data),
.p0_wr_full ($p0_wr_full),
.p0_wr_empty ($p0_wr_empty),
.p0_wr_count ($p0_wr_count),
.p0_wr_underrun ($p0_wr_underrun),
.p0_wr_error ($p0_wr_error),
.p0_rd_clk ($p0_rd_clk),
.p0_rd_en ($p0_rd_en),
.p0_rd_data ($p0_rd_data),
.p0_rd_full ($p0_rd_full),
.p0_rd_empty ($p0_rd_empty),
.p0_rd_count ($p0_rd_count),
.p0_rd_overflow ($p0_rd_overflow),
.p0_rd_error ($p0_rd_error),
.p1_arb_en ($p1_arb_en),
.p1_cmd_clk ($p1_cmd_clk),
.p1_cmd_en ($p1_cmd_en),
.p1_cmd_instr ($p1_cmd_instr),
.p1_cmd_bl ($p1_cmd_bl),
.p1_cmd_byte_addr ($p1_cmd_byte_addr),
.p1_cmd_empty ($p1_cmd_empty),
.p1_cmd_full ($p1_cmd_full),
.p1_wr_clk ($p1_wr_clk),
.p1_wr_en ($p1_wr_en),
.p1_wr_mask ($p1_wr_mask),
.p1_wr_data ($p1_wr_data),
.p1_wr_full ($p1_wr_full),
.p1_wr_empty ($p1_wr_empty),
.p1_wr_count ($p1_wr_count),
.p1_wr_underrun ($p1_wr_underrun),
.p1_wr_error ($p1_wr_error),
.p1_rd_clk ($p1_rd_clk),
.p1_rd_en ($p1_rd_en),
.p1_rd_data ($p1_rd_data),
.p1_rd_full ($p1_rd_full),
.p1_rd_empty ($p1_rd_empty),
.p1_rd_count ($p1_rd_count),
.p1_rd_overflow ($p1_rd_overflow),
.p1_rd_error ($p1_rd_error),
.p2_arb_en ($p2_arb_en),
.p2_cmd_clk ($p2_cmd_clk),
.p2_cmd_en ($p2_cmd_en),
.p2_cmd_instr ($p2_cmd_instr),
.p2_cmd_bl ($p2_cmd_bl),
.p2_cmd_byte_addr ($p2_cmd_byte_addr),
.p2_cmd_empty ($p2_cmd_empty),
.p2_cmd_full ($p2_cmd_full),
.p2_wr_clk ($p2_wr_clk),
.p2_wr_en ($p2_wr_en),
.p2_wr_mask ($p2_wr_mask),
.p2_wr_data ($p2_wr_data),
.p2_wr_full ($p2_wr_full),
.p2_wr_empty ($p2_wr_empty),
.p2_wr_count ($p2_wr_count),
.p2_wr_underrun ($p2_wr_underrun),
.p2_wr_error ($p2_wr_error),
.p2_rd_clk ($p2_rd_clk),
.p2_rd_en ($p2_rd_en),
.p2_rd_data ($p2_rd_data),
.p2_rd_full ($p2_rd_full),
.p2_rd_empty ($p2_rd_empty),
.p2_rd_count ($p2_rd_count),
.p2_rd_overflow ($p2_rd_overflow),
.p2_rd_error ($p2_rd_error),
.p3_arb_en ($p3_arb_en),
.p3_cmd_clk ($p3_cmd_clk),
.p3_cmd_en ($p3_cmd_en),
.p3_cmd_instr ($p3_cmd_instr),
.p3_cmd_bl ($p3_cmd_bl),
.p3_cmd_byte_addr ($p3_cmd_byte_addr),
.p3_cmd_empty ($p3_cmd_empty),
.p3_cmd_full ($p3_cmd_full),
.p3_wr_clk ($p3_wr_clk),
.p3_wr_en ($p3_wr_en),
.p3_wr_mask ($p3_wr_mask),
.p3_wr_data ($p3_wr_data),
.p3_wr_full ($p3_wr_full),
.p3_wr_empty ($p3_wr_empty),
.p3_wr_count ($p3_wr_count),
.p3_wr_underrun ($p3_wr_underrun),
.p3_wr_error ($p3_wr_error),
.p3_rd_clk ($p3_rd_clk),
.p3_rd_en ($p3_rd_en),
.p3_rd_data ($p3_rd_data),
.p3_rd_full ($p3_rd_full),
.p3_rd_empty ($p3_rd_empty),
.p3_rd_count ($p3_rd_count),
.p3_rd_overflow ($p3_rd_overflow),
.p3_rd_error ($p3_rd_error),
.p4_arb_en ($p4_arb_en),
.p4_cmd_clk ($p4_cmd_clk),
.p4_cmd_en ($p4_cmd_en),
.p4_cmd_instr ($p4_cmd_instr),
.p4_cmd_bl ($p4_cmd_bl),
.p4_cmd_byte_addr ($p4_cmd_byte_addr),
.p4_cmd_empty ($p4_cmd_empty),
.p4_cmd_full ($p4_cmd_full),
.p4_wr_clk ($p4_wr_clk),
.p4_wr_en ($p4_wr_en),
.p4_wr_mask ($p4_wr_mask),
.p4_wr_data ($p4_wr_data),
.p4_wr_full ($p4_wr_full),
.p4_wr_empty ($p4_wr_empty),
.p4_wr_count ($p4_wr_count),
.p4_wr_underrun ($p4_wr_underrun),
.p4_wr_error ($p4_wr_error),
.p4_rd_clk ($p4_rd_clk),
.p4_rd_en ($p4_rd_en),
.p4_rd_data ($p4_rd_data),
.p4_rd_full ($p4_rd_full),
.p4_rd_empty ($p4_rd_empty),
.p4_rd_count ($p4_rd_count),
.p4_rd_overflow ($p4_rd_overflow),
.p4_rd_error ($p4_rd_error),
.p5_arb_en ($p5_arb_en),
.p5_cmd_clk ($p5_cmd_clk),
.p5_cmd_en ($p5_cmd_en),
.p5_cmd_instr ($p5_cmd_instr),
.p5_cmd_bl ($p5_cmd_bl),
.p5_cmd_byte_addr ($p5_cmd_byte_addr),
.p5_cmd_empty ($p5_cmd_empty),
.p5_cmd_full ($p5_cmd_full),
.p5_wr_clk ($p5_wr_clk),
.p5_wr_en ($p5_wr_en),
.p5_wr_mask ($p5_wr_mask),
.p5_wr_data ($p5_wr_data),
.p5_wr_full ($p5_wr_full),
.p5_wr_empty ($p5_wr_empty),
.p5_wr_count ($p5_wr_count),
.p5_wr_underrun ($p5_wr_underrun),
.p5_wr_error ($p5_wr_error),
.p5_rd_clk ($p5_rd_clk),
.p5_rd_en ($p5_rd_en),
.p5_rd_data ($p5_rd_data),
.p5_rd_full ($p5_rd_full),
.p5_rd_empty ($p5_rd_empty),
.p5_rd_count ($p5_rd_count),
.p5_rd_overflow ($p5_rd_overflow),
.p5_rd_error ($p5_rd_error),
.ui_read (1'b0),
.ui_add (1'b0),
.ui_cs (1'b0),
.ui_clk ($ui_clk),
.ui_sdi (1'b0),
.ui_addr (5'b0),
.ui_broadcast (1'b0),
.ui_drp_update (1'b0),
.ui_done_cal (1'b1),
.ui_cmd (1'b0),
.ui_cmd_in (1'b0),
.ui_cmd_en (1'b0),
.ui_dqcount (4'b0),
.ui_dq_lower_dec (1'b0),
.ui_dq_lower_inc (1'b0),
.ui_dq_upper_dec (1'b0),
.ui_dq_upper_inc (1'b0),
.ui_udqs_inc (1'b0),
.ui_udqs_dec (1'b0),
.ui_ldqs_inc (1'b0),
.ui_ldqs_dec (1'b0),
.uo_data (),
.uo_data_valid (),
.uo_done_cal ($uo_done_cal),
.uo_cmd_ready_in (),
.uo_refrsh_flag (),
.uo_cal_start (),
.uo_sdo (),
.mcbx_dram_addr ($mcbx_dram_addr),
.mcbx_dram_ba ($mcbx_dram_ba),
.mcbx_dram_ras_n ($mcbx_dram_ras_n),
.mcbx_dram_cas_n ($mcbx_dram_cas_n),
.mcbx_dram_we_n ($mcbx_dram_we_n),
.mcbx_dram_cke ($mcbx_dram_cke),
.mcbx_dram_clk ($mcbx_dram_clk),
.mcbx_dram_clk_n ($mcbx_dram_clk_n),
.mcbx_dram_dq ($mcbx_dram_dq),
.mcbx_dram_dqs ($mcbx_dram_dqs),
.mcbx_dram_dqs_n ($mcbx_dram_dqs_n),
.mcbx_dram_udqs ($mcbx_dram_udqs),
.mcbx_dram_udqs_n ($mcbx_dram_udqs_n),
.mcbx_dram_udm ($mcbx_dram_udm),
.mcbx_dram_ldm ($mcbx_dram_ldm),
.mcbx_dram_odt ($mcbx_dram_odt),
.mcbx_dram_ddr3_rst ($mcbx_dram_ddr3_rst),
.rzq ($mcbx_rzq),
.zio ($mcbx_zio),
.calib_recal (1'b0),
.status ($status),
.selfrefresh_enter ($selfrefresh_enter),
.selfrefresh_mode ($selfrefresh_mode)
);
'''
def main():
from myhdl import toVerilog
if 0:
clkout0 = Signal(False)
toVerilog(pll_adv, 'pll_adv_inst', clkout0 = clkout0)
print
print open('pll_adv.v', 'r').read()
if 1:
mcbx_dram_addr = Signal(intbv(0)[12:])
mcbx_dram_ba = Signal(intbv(0)[2:])
mcbx_dram_ras_n = Signal(False)
mcbx_dram_cas_n = Signal(False)
mcbx_dram_we_n = Signal(False)
mcbx_dram_cke = Signal(False)
mcbx_dram_clk = Signal(False)
mcbx_dram_clk_n = Signal(False)
mcbx_dram_dq = Signal(intbv(0)[16:])
mcbx_dram_dqs = Signal(False)
mcbx_dram_dqs_n = Signal(False)
mcbx_dram_udqs = Signal(False)
mcbx_dram_udqs_n = Signal(False)
mcbx_dram_udm = Signal(False)
mcbx_dram_ldm = Signal(False)
mcbx_dram_odt = Signal(False)
mcbx_dram_ddr3_rst = Signal(False)
mcbx_rzq = Signal(False)
mcbx_zio = Signal(False)
sys_rst = Signal(False)
sysclk_2x = Signal(False)
sysclk_2x_180 = Signal(False)
pll_ce_0 = Signal(False)
pll_ce_90 = Signal(False)
pll_lock = Signal(False)
ui_clk = Signal(False)
uo_done_cal = Signal(False)
toVerilog(mcb_ui_top,
'mcb_ui_inst',
mcbx_dram_clk,
mcbx_dram_clk_n,
mcbx_dram_cke,
mcbx_dram_ras_n,
mcbx_dram_cas_n,
mcbx_dram_we_n,
mcbx_dram_ba,
mcbx_dram_addr,
mcbx_dram_dqs,
mcbx_dram_dqs_n,
mcbx_dram_udqs,
mcbx_dram_udqs_n,
mcbx_dram_udm,
mcbx_dram_ldm,
mcbx_dram_dq,
mcbx_dram_odt,
mcbx_dram_ddr3_rst,
mcbx_rzq,
mcbx_zio,
sys_rst,
ui_clk,
sysclk_2x,
sysclk_2x_180,
pll_ce_0,
pll_ce_90,
pll_lock,
uo_done_cal = uo_done_cal
)
print
print open('mcb_ui_top.v', 'r').read()
if __name__ == '__main__':
main()
|
from docker import Client
cli = Client(base_url='unix://var/run/docker.sock')
print (cli.info())
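# A minimal follow-up sketch using the same legacy docker-py Client API
# (Client.containers() exists on this old interface; the dict keys are
# those returned by the Docker daemon):
for container in cli.containers():
    print (container['Id'])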
|
# Generated by Django 3.1.7 on 2021-05-05 08:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app01', '0002_auto_20210426_1559'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=32, verbose_name='跟进人')),
],
),
migrations.AlterField(
model_name='customer',
name='class_list',
field=models.ManyToManyField(blank=True, to='app01.ClassList', verbose_name='已报班级'),
),
migrations.CreateModel(
name='ConsultRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('note', models.TextField(verbose_name='跟进内容...')),
('status', models.CharField(choices=[('A', '近期无报名计划'), ('B', '1个月内报名'), ('C', '2周内报名'), ('D', '1周内报名'), ('E', '定金'), ('F', '到班'), ('G', '全款'), ('H', '无效')], help_text='选择客户此时的状态', max_length=8, verbose_name='跟进状态')),
('date', models.DateTimeField(auto_now_add=True, verbose_name='跟进日期')),
('delete_status', models.BooleanField(default=False, verbose_name='删除状态')),
('consultant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='records', to='app01.userprofile', verbose_name='跟进人')),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.customer', verbose_name='所咨询客户')),
],
),
]
|
"""
Public interface to OMeta, as well as the grammars used to compile grammar
definitions.
"""
from .builder import TreeBuilder, moduleFromGrammar
from .boot import BootOMetaGrammar
from .bootbase import BootBase
class OMeta(BootBase):
"""
Base class for grammar definitions.
"""
metagrammarClass = BootOMetaGrammar
@classmethod
def makeGrammar(cls, grammar, globals, name="Grammar"):
"""
Define a new subclass with the rules in the given grammar.
@param grammar: A string containing a PyMeta grammar.
@param globals: A dict of names that should be accessible by this
grammar.
@param name: The name of the class to be generated.
"""
g = cls.metagrammarClass(grammar)
tree = g.parseGrammar(name, TreeBuilder)
return moduleFromGrammar(tree, name, cls, globals)
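# A minimal usage sketch (the rule syntax below is an assumption about
# the PyMeta grammar language, not something defined in this module):
#
#   Digits = OMeta.makeGrammar("""
#   number ::= <digit>+:ds => int(''.join(ds))
#   """, {})
#   Digits("42").apply("number")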
|
from django.apps import AppConfig
class ArgumentConfig(AppConfig):
name = 'demoslogic.arguments'
|
import onnx
from onnx import helper
from onnx import TensorProto
graph = helper.make_graph(
[ # nodes
# fusable, const_min_negative should be replaced
helper.make_node("Conv", ["X", "W"], ["conv0_out"], "Conv0"),
helper.make_node("Clip", ["conv0_out", "const_min", "const_max"], ["clip0_out"], "Clip0"),
# mutable input. no fusion.
helper.make_node("Conv", ["X", "W"], ["conv1_out"], "Conv1"),
helper.make_node("Clip", ["conv1_out", "mutable_min", "const_max"], ["clip1_out"], "Clip1"),
        # fusable; default min/max.
helper.make_node("Conv", ["X", "W"], ["conv2_out"], "Conv2"),
helper.make_node("Clip", ["conv2_out"], ["clip2_out"], "Clip2"),
],
"ConvClipFusion", #name
[ # inputs
helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 7]),
helper.make_tensor_value_info('W', TensorProto.FLOAT, [1, 1, 1]),
helper.make_tensor_value_info('mutable_min', TensorProto.FLOAT, [1]),
],
[ # outputs
helper.make_tensor_value_info('clip0_out', TensorProto.FLOAT, None),
helper.make_tensor_value_info('clip1_out', TensorProto.FLOAT, None),
helper.make_tensor_value_info('clip2_out', TensorProto.FLOAT, None),
],
[ # initializers
helper.make_tensor('const_min', TensorProto.FLOAT, [1], [-1.0]),
helper.make_tensor('const_max', TensorProto.FLOAT, [1], [10.0])
])
model = helper.make_model(graph)
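# Optional sanity check before saving: onnx.checker validates the model
# proto (this call is part of the standard onnx package).
onnx.checker.check_model(model)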
onnx.save(model, r'conv_clip11.onnx')
|
#!/usr/bin/env python
import argparse
import time
import subprocess
import json
import logging
import os
import sys
from uuid import uuid1
import imageio
from PIL import Image
from ..util import get_api
from .upload import upload_file
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def parse_args():
parser = argparse.ArgumentParser(description='Makes thumbnails for a video.')
parser.add_argument('--host', type=str, default='https://www.tatorapp.com', help='Host URL.')
parser.add_argument('--token', type=str, help='REST API token.')
parser.add_argument('--media', type=int, help='Unique integer identifying a media.')
parser.add_argument('input', type=str, help='Path to input file.')
parser.add_argument("-o", "--output", type=str, help='Path to output thumbnail.');
parser.add_argument("-g", "--gif", type=str, help='Path to output thumbnail gif.');
return parser.parse_args()
def get_metadata(path):
cmd = [
"ffprobe",
"-v","error",
"-show_entries", "stream",
"-print_format", "json",
"-select_streams", "v",
"{}".format(path)
]
output = subprocess.run(cmd, stdout=subprocess.PIPE, check=True).stdout
logger.info("Got info = {}".format(output))
video_info = json.loads(output)
stream = video_info["streams"][0]
    seconds = float(stream["duration"])
# Fill in object information based on probe
codec = stream["codec_name"]
fps_fractional = stream["avg_frame_rate"].split("/")
fps = float(fps_fractional[0]) / float(fps_fractional[1])
if "nb_frames" in stream:
num_frames = int(stream["nb_frames"])
else:
num_frames = round(fps * seconds)
width = stream["width"]
height = stream["height"]
return (codec, fps, num_frames, width, height)
def video_thumb(offset, name, new_path):
"""Creates a video thumbnail.
"""
cmd = [
"ffmpeg",
"-y",
"-ss",
time.strftime('%H:%M:%S', time.gmtime(offset)),
"-i",
"{}".format(new_path),
"-vframes",
"1",
name,
]
proc = subprocess.run(cmd, check=True)
elapsed = 0
while not os.path.exists(name):
time.sleep(0.2)
elapsed += 0.2
if elapsed > 5:
sys.exit(-1)
time.sleep(1.0)
image = Image.open(name)
image.thumbnail((256, 256), Image.ANTIALIAS)
image.save(name)
image.close()
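# Example (sketch): grab a frame 30 seconds into a local file and shrink
# it to a 256x256 thumbnail (both paths are placeholders):
#   video_thumb(30.0, '/tmp/thumb.jpg', '/tmp/input.mp4')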
def make_thumbnails(host, token, media_id, video_path, thumb_path, thumb_gif_path):
""" Makes thumbnails and gets metadata for original file.
"""
# Get the video information using ffprobe
cmd = [
"ffprobe",
"-v","error",
"-show_entries", "stream",
"-print_format", "json",
"-select_streams", "v",
"{}".format(video_path),
]
output = subprocess.run(cmd, stdout=subprocess.PIPE, check=True).stdout
logger.info("Got info = {}".format(output))
video_info = json.loads(output)
stream_idx=0
for idx, stream in enumerate(video_info["streams"]):
if stream["codec_type"] == "video":
stream_idx=idx
break
stream=video_info["streams"][stream_idx]
    seconds = float(stream["duration"])
# Compute evenly spaced intervals and filenames.
interval = float(seconds) / 12.0
offsets = [interval * k for k in range(1, 11)]
names = [os.path.join("/tmp", str(uuid1()) + '.jpg') for _ in range(9)]
names = [thumb_path,] + names
# Create thumbnail images for each offset.
for offset, name in zip(offsets, names):
video_thumb(offset, name, video_path)
images = [imageio.imread(name) for name in names]
imageio.mimsave(thumb_gif_path, images, duration=0.5)
# Get metadata for original file.
codec, fps, num_frames, width, height = get_metadata(video_path)
# Upload thumbnail and thumbnail gif.
thumbnail_url = upload_file(thumb_path, host)
thumbnail_gif_url = upload_file(thumb_gif_path, host)
# Update the media object.
api = get_api(host, token)
response = api.update_media(media_id, media_update={
'thumbnail_url': thumbnail_url,
'thumbnail_gif_url': thumbnail_gif_url,
'num_frames': num_frames,
'fps': fps,
'codec': codec,
'width': width,
'height': height,
})
logger.info(f'Thumbnail upload done! {response.message}')
if __name__ == '__main__':
args = parse_args()
make_thumbnails(args.host, args.token, args.media, args.input, args.output, args.gif)
|
SEARCH = "http://api.elsevier.com/content/search/scopus"
SEARCH_AUTHOR = "http://api.elsevier.com/content/search/author"
AUTHOR = "http://api.elsevier.com/content/author/author_id"
ABSTRACT = "http://api.elsevier.com/content/abstract/scopus_id"
CITATION = "http://api.elsevier.com/content/abstract/citations"
SERIAL_SEARCH = "https://api.elsevier.com/content/serial/title"
SERIAL_RETRIEVAL = "https://api.elsevier.com/content/serial/title/issn/"
AFFL_RETRIEVAL = "https://api.elsevier.com/content/affiliation/affiliation_id/"
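# A hypothetical usage sketch of these endpoints (the "query" and
# "apiKey" parameter names follow the public Scopus REST documentation;
# "YOUR_KEY" is a placeholder, not a working credential):
if __name__ == "__main__":
    import requests
    resp = requests.get(SEARCH, params={"query": "TITLE(graphene)", "apiKey": "YOUR_KEY"})
    print(resp.status_code)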
|
import os
import sys
this_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.realpath(os.path.join(this_path, '..', '..'))
if root_path not in sys.path:
sys.path.insert(0, root_path)
|
from django.db import models
from mptt.models import MPTTModel, TreeForeignKey
import os
from django.utils import timezone
from django.contrib.auth import get_user_model
User = get_user_model()
# Create your models here.
class SoftDeletableQuerySet(models.query.QuerySet):
def delete(self):
self.update(deletedTime=timezone.now())
class SoftDeletableManager(models.Manager):
"""
仅返回未被删除的实例
"""
_queryset_class = SoftDeletableQuerySet
def get_queryset(self):
"""
在这里处理一下QuerySet, 然后返回没被标记位is_deleted的QuerySet
"""
kwargs = {'model': self.model, 'using': self._db}
if hasattr(self, '_hints'):
kwargs['hints'] = self._hints
return self._queryset_class(**kwargs).filter(deletedTime=None)
class BaseModel(models.Model):
"""
基础模型
"""
createTime = models.DateTimeField(verbose_name="创建时间",default=timezone.now,null=True)
updateTime = models.DateTimeField(verbose_name="修改时间",default=timezone.now,null=True)
deletedTime = models.DateTimeField("删除时间",null=True, blank=True,default=None)
createdBy = models.ForeignKey(User, null=True, blank=True, default=1, on_delete=models.SET_NULL, verbose_name='创建人ID', related_name="%(class)s_created_by")
updatedBy = models.ForeignKey(User, null=True, blank=True, default=1, on_delete=models.SET_NULL, verbose_name='修改人ID', related_name="%(class)s_updated_by")
isActive = models.BooleanField(default=True, verbose_name='是否正常')
objects = SoftDeletableManager()
def delete(self, using=None, soft=True, *args, **kwargs):
if soft:
self.deletedTime = timezone.now()
self.save()
else:
            return super(BaseModel, self).delete(using=using, *args, **kwargs)
class Meta:
abstract = True
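# A minimal usage sketch (assumes a concrete subclass of BaseModel, such
# as Content below): delete() marks the row via deletedTime, so the
# default SoftDeletableManager stops returning it; delete(soft=False)
# removes the row for real.
#
#   obj = Content.objects.first()
#   obj.delete()            # soft delete: sets deletedTime
#   obj.delete(soft=False)  # hard delete: actually removes the row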
class CategoryInfo(MPTTModel,BaseModel):
    '''
    Category info table.
    '''
category_name = models.CharField(max_length=50, verbose_name='栏目名称',null=True, blank=True,)
parent = TreeForeignKey("self", on_delete=models.CASCADE, verbose_name='父栏目',blank=True, null=True, related_name="children")
def __unicode__(self):
return self.category_name
class MPTTMeta:
order_insertion_by = ['id']
class Meta:
verbose_name = '栏目管理'
verbose_name_plural = '栏目管理'
class Content(BaseModel):
    '''
    Content info table.
    '''
content_title = models.CharField(max_length=255, verbose_name='文章标题',null=True, blank=True,)
content_category = models.ForeignKey(CategoryInfo, on_delete=models.SET_NULL, verbose_name='所属栏目',null=True, blank=True,)
content_url = models.CharField(max_length=255, verbose_name='文章跳转链接地址',null=True, blank=True,)
content_details = models.TextField(verbose_name='文章内容',null=True, blank=True,)
content_keyword = models.CharField(max_length=255, verbose_name='关键字',null=True, blank=True,)
content_description = models.CharField(max_length=400, verbose_name='文章描述',null=True, blank=True,)
content_img = models.CharField(max_length=255, verbose_name='文章缩略图',null=True, blank=True,)
content_sort = models.IntegerField(verbose_name='自定义顺序',null=True, blank=True,)
content_display = models.BooleanField(default=True,verbose_name='是否显示')
content_type = models.CharField(max_length=255, verbose_name='文章类型',null=True, blank=True,)
content_pub_datetime = models.DateTimeField(verbose_name="发布时间",default=timezone.now,null=True)
content_hit = models.IntegerField(verbose_name='点击次数',null=True, blank=True,)
    def __unicode__(self):
        return self.content_title
class Meta:
verbose_name = '内容管理'
verbose_name_plural = '内容管理'
|
import json
from . import models
async def post_infraction(
client,
*,
guild_id,
target_id,
moderator_id,
reason,
case_type,
extras=None,
cross_guild=False,
pardoned=False
):
if extras is None:
extras = {}
extras = json.dumps(extras)
conn = client.database.conn
lock = client.database.lock
execute = """
INSERT INTO moderation(
guild_id, target_id,
moderator_id, reason,
case_type, extras,
cross_guild, pardoned
) VALUES (
$1, $2, $3, $4, $5,
$6, $7, $8
) RETURNING *
"""
async with lock:
data = await conn.fetch(
execute, str(guild_id),
str(target_id), str(moderator_id),
reason, case_type, extras,
cross_guild, pardoned
)
return models.Infraction(client, data[0])
async def pardon_infraction(client, *, guild_id, target_id, moderator_id, reason, case_number):
conn = client.database.conn
lock = client.database.lock
execute = """
UPDATE moderation SET
pardoned = $1
WHERE
case_number = $2
"""
extras = json.dumps({'pardoned_case': case_number})
async with lock:
await conn.execute(execute, True, case_number)
infraction = await post_infraction(
client,
guild_id=guild_id,
target_id=target_id,
moderator_id=moderator_id,
reason=reason,
case_type='pardon',
extras=extras,
cross_guild=None,
pardoned=None
)
return infraction
async def get_infraction(client, *, case_number):
execute = """
SELECT * FROM moderation WHERE
case_number = $1
"""
conn = client.database.conn
lock = client.database.lock
async with lock:
data = await conn.fetch(execute, case_number)
if data:
return models.Infraction(client, data[0])
async def get_infractions_for_target(client, *, target_id):
execute = """
SELECT * FROM moderation WHERE
target_id = $1
"""
conn = client.database.conn
lock = client.database.lock
async with lock:
data = await conn.fetch(execute, str(target_id))
return [models.Infraction(client, d) for d in data]
async def get_infractions_for_target_in_guild(client, *, guild_id, target_id):
execute = """
SELECT * FROM moderation WHERE
target_id = $1
AND
guild_id = $2
"""
conn = client.database.conn
lock = client.database.lock
async with lock:
data = await conn.fetch(execute, str(target_id), str(guild_id))
return [models.Infraction(client, d) for d in data]
async def get_infractions_by_moderator(client, *, moderator_id):
execute = """
SELECT * FROM moderation WHERE
moderator_id = $1
"""
conn = client.database.conn
lock = client.database.lock
async with lock:
data = await conn.fetch(execute, str(moderator_id))
return [models.Infraction(client, d) for d in data]
async def get_infractions_by_moderator_in_guild(client, *, guild_id, moderator_id):
execute = """
SELECT * FROM moderation WHERE
moderator_id = $1
AND
guild_id = $2
"""
conn = client.database.conn
lock = client.database.lock
async with lock:
data = await conn.fetch(execute, str(moderator_id), str(guild_id))
return [models.Infraction(client, d) for d in data]
async def get_infractions_in_guild(client, *, guild_id):
execute = """
SELECT * FROM moderation WHERE
        guild_id = $1
"""
conn = client.database.conn
lock = client.database.lock
async with lock:
data = await conn.fetch(execute, str(guild_id))
return [models.Infraction(client, d) for d in data]
async def delete_infraction(client, *, case_number):
execute = """
DELETE FROM moderation WHERE
case_number = $1
"""
conn = client.database.conn
lock = client.database.lock
async with lock:
await conn.execute(execute, case_number)
async def post_guild_setup(
client,
*,
guild_id,
log_channel_id=None,
muted_role_id=None,
disabled_commands=None,
prefixes=None,
level_roles=None,
join_roles=None
):
if log_channel_id is not None:
log_channel_id = str(log_channel_id)
if muted_role_id is not None:
muted_role_id = str(muted_role_id)
if disabled_commands is None:
disabled_commands = []
if prefixes is None:
prefixes = ['!']
if level_roles is None:
level_roles = {}
if join_roles is None:
join_roles = []
conn = client.database.conn
lock = client.database.lock
execute = """
INSERT INTO guilds (
guild_id, log_channel_id,
muted_role_id, disabled_commands,
prefixes, level_roles,
join_roles
) VALUES (
$1, $2, $3, $4, $5, $6, $7
)
"""
async with lock:
await conn.execute(
execute,
str(guild_id),
log_channel_id,
muted_role_id,
json.dumps(disabled_commands),
json.dumps(prefixes),
json.dumps(level_roles),
json.dumps(join_roles)
)
async def update_guild_setup(
client,
*,
guild_id,
log_channel_id=None,
muted_role_id=None,
disabled_commands=None,
prefixes=None,
level_roles=None,
join_roles=None
):
params = {}
if log_channel_id is not None:
params['log_channel_id'] = str(log_channel_id)
if muted_role_id is not None:
params['muted_role_id'] = str(muted_role_id)
if disabled_commands is not None:
params['disabled_commands'] = json.dumps(disabled_commands)
if prefixes is not None:
params['prefixes'] = json.dumps(prefixes)
if level_roles is not None:
params['level_roles'] = json.dumps(level_roles)
if join_roles is not None:
params['join_roles'] = json.dumps(join_roles)
    set_clause = ', '.join('%s = $%d' % (k, i) for i, k in enumerate(params, start=1))
    execute = 'UPDATE guilds SET ' + set_clause + (' WHERE guild_id = $%d' % (len(params) + 1))
conn = client.database.conn
lock = client.database.lock
async with lock:
await conn.execute(execute, *params.values(), str(guild_id))
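# Example (sketch) of the statement built above when two fields are set:
#   UPDATE guilds SET log_channel_id = $1, prefixes = $2 WHERE guild_id = $3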
async def delete_guild_setup(client, *, guild_id):
execute = """
DELETE FROM guilds WHERE
guild_id = $1
"""
conn = client.database.conn
lock = client.database.lock
async with lock:
await conn.execute(execute, str(guild_id))
async def get_guild_setup(client, *, guild_id):
execute = """
SELECT * FROM guilds WHERE
guild_id = $1
"""
conn = client.database.conn
lock = client.database.lock
async with lock:
data = await conn.fetch(execute, str(guild_id))
if data:
return dict(data[0])
|
import pypureclient
from pypureclient.flashblade import SyslogServerPostOrPatch
# NOTE: "client" below is assumed to be an existing FlashBlade client
# instance created elsewhere (e.g. via pypureclient.flashblade.Client).
# Post a syslog server using a TCP connection
attr = SyslogServerPostOrPatch(uri='tcp://my_syslog_host.domain.com:541')
res = client.post_syslog_servers(syslog_server=attr, names=["main_syslog"])
print(res)
if type(res) == pypureclient.responses.ValidResponse:
print(list(res.items))
# Post a syslog server using a UDP connection
udp_attr = SyslogServerPostOrPatch(uri='udp://my_syslog_host.domain.com:540')
res = client.post_syslog_servers(syslog_server=udp_attr, names=["my_udp_connection"])
print(res)
if type(res) == pypureclient.responses.ValidResponse:
print(list(res.items))
|
#!/usr/bin/python
#
## @file
#
# Joystick monitoring class specialized for a Gamepad 310 controller.
#
# Hazen 01/14
#
import storm_control.hal4000.joystick as joystick
import storm_control.sc_hardware.logitech.gamepad310 as gamepad310
# Debugging
import storm_control.sc_library.hdebug as hdebug
class AJoystick(joystick.JoystickObject):
@hdebug.debug
def __init__(self, hardware, parameters, parent = None):
jstick = gamepad310.Gamepad310()
joystick.JoystickObject.__init__(self, parameters, jstick, parent)
#
# The MIT License
#
# Copyright (c) 2014 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
|
C = int(input())
while(C > 0):
N = int(input())
vector = []
for i in range(N):
if i%2 == 0:
vector.append(1)
else:
vector.append(-1)
print(sum(vector))
C -= 1
|
from typing import List, Dict
from abc import ABC, abstractmethod
import numpy as np
from tdw.add_ons.model_verifier.model_tests.model_test import ModelTest
from tdw.librarian import ModelRecord
from tdw.tdw_utils import TDWUtils
from tdw.output_data import OutputData, Images
class RotateObjectTest(ModelTest, ABC):
"""
These tests add an object and then rotate it.
"""
""":class_var
The ID of the object.
"""
OBJECT_ID: int = 0
""":class_var
Rotate by this many degrees per frame.
"""
DELTA_THETA: int = 15
""":class_var
The Unity pink color.
"""
PINK: tuple = (255, 0, 255)
""":class_var
Look at this position.
"""
LOOK_AT: Dict[str, float] = {"x": 0, "y": 0.5, "z": 0}
""":class_var
The position of the avatar.
"""
AVATAR_POSITION: Dict[str, float] = {"x": 1.75, "y": 0.5, "z": 0}
def __init__(self, record: ModelRecord):
"""
:param record: The model record.
"""
super().__init__(record=record)
self._axis: str = "yaw"
self._angle: int = 0
def start(self) -> List[dict]:
"""
:return: A list of commands to start the test.
"""
scale = TDWUtils.get_unit_scale(self._record)
# Create the scene. Add the avatar. Add the object.
return [{"$type": "send_images",
"frequency": "always"},
{"$type": "add_object",
"name": self._record.name,
"url": self._record.get_url(),
"position": {"x": 0, "y": 0, "z": 0},
"scale_factor": self._record.scale_factor,
"id": RotateObjectTest.OBJECT_ID},
{"$type": "scale_object",
"id": RotateObjectTest.OBJECT_ID,
"scale_factor": {"x": scale, "y": scale, "z": scale}}]
def on_send(self, resp: List[bytes]) -> List[dict]:
"""
:param resp: The response from the build.
:return: A list of commands to continue or end the test.
"""
for i in range(len(resp) - 1):
r_id = OutputData.get_data_type_id(resp[i])
if r_id == "imag":
self._read_images(Images(resp[i]))
break
# Reading the images can cause the test to finish early.
if self.done:
return []
# Either end the test or reset the angle and start rotating around a new axis.
elif self._angle >= 360:
if self._axis == "yaw":
self._axis = "roll"
self._angle = 0
return [{"$type": "teleport_avatar_to",
"position": RotateObjectTest.AVATAR_POSITION},
{"$type": "look_at_position",
"position": RotateObjectTest.LOOK_AT},]
else:
self.done = True
return []
# Continue to rotate.
else:
self._angle += RotateObjectTest.DELTA_THETA
rad = np.radians(self._angle)
if self._axis == "yaw":
x = np.cos(rad) * RotateObjectTest.AVATAR_POSITION["x"] - np.sin(rad) * RotateObjectTest.AVATAR_POSITION["z"]
y = RotateObjectTest.AVATAR_POSITION["y"]
z = np.sin(rad) * RotateObjectTest.AVATAR_POSITION["x"] + np.cos(rad) * RotateObjectTest.AVATAR_POSITION["z"]
else:
x = np.cos(rad) * RotateObjectTest.AVATAR_POSITION["x"] - np.sin(rad) * RotateObjectTest.AVATAR_POSITION["z"]
y = (np.sin(rad) * RotateObjectTest.AVATAR_POSITION["x"] + np.cos(rad) * RotateObjectTest.AVATAR_POSITION["z"]) + RotateObjectTest.AVATAR_POSITION["y"]
z = RotateObjectTest.AVATAR_POSITION["z"]
return [{"$type": "teleport_avatar_to",
"position": {"x": x, "y": y, "z": z}},
{"$type": "look_at_position",
"position": RotateObjectTest.LOOK_AT}]
@abstractmethod
def _read_images(self, images: Images) -> None:
"""
Read image data.
:param images: The image data.
"""
        raise NotImplementedError()
@staticmethod
def _get_end_commands() -> List[dict]:
"""
        :return: A list of commands to end the test.
"""
return [{"$type": "destroy_object",
"id": RotateObjectTest.OBJECT_ID},
{"$type": "unload_asset_bundles"}]
|
from typing import Dict
import pytest
from genshin import MultiCookieClient
@pytest.mark.asyncio
async def test_multicookie(cookies: Dict[str, str], uid: int):
client = MultiCookieClient()
client.set_cookies([cookies])
assert len(client.cookies) == 1
assert client.cookies[0] == {m.key: m.value for m in client.session.cookie_jar}
await client.get_user(uid)
await client.close()
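# Hedged variant (an aside, not in the original test): MultiCookieClient
# presumably exists to rotate several accounts, so a two-account version of
# the assertions above might look like this; cookies_a and cookies_b are
# hypothetical fixtures, not defined anywhere in this file.
# client.set_cookies([cookies_a, cookies_b])
# assert len(client.cookies) == 2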
|
# auth things
import sys
import getopt
# import custom functions
from scraper import load_page, minervascrape, minervaupdate, send_email
arguments = sys.argv[1:]
short_options = "u"
long_options = ["update"]
# validating command-line flags and arguments
try:
args, values = getopt.getopt(arguments, short_options, long_options) # args = flags, values = arguments
except getopt.error as e:
print(str(e))
sys.exit(2)
# variable declaration and prep
# CLI things
terms = {
'F' : 'Fall',
'W' : 'Winter',
'S' : 'Summer'
}
term = []
year = []
if len(values) != 0:
# sort by date W < S < F for a given year
values = sorted(sorted(values, key=lambda x : x[0], reverse=True), key=lambda x : int(x[1:]))
for arg in values:
term.append(terms[arg[0].upper()])
year.append(arg[1:])
if len(args) == 0: # no flags, proceed as usual
driver, transcript_table = load_page()
if len(values) != 0: # terms specified
filename = "Scraped_Transcript_{}".format("_".join([term[i] + " " + year[i] for i in range(len(term))]))
print("Beginning scraping for {}...\n".format(", ".join([term[i] + " " + year[i] for i in range(len(term))])))
with open(filename + ".json", "w") as file:
minervascrape(values, term, year, transcript_table, terms, file)
print("Scraping complete! Please navigate to " + filename + ".json to see results.\n")
else: # no terms, scrape for all terms
print("Beginning scraping for all terms...\n")
with open("Scraped_Transcript_All_Terms.json", "w") as file:
minervascrape(values, term, year, transcript_table, terms, file)
print("Scraping complete! Please navigate to Scraped_Transcript_All_Terms.json to see results.\n")
driver.close()
else:
for a, v in args:
if a in ("-u", "--update"):
print("Starting update...\n")
driver, transcript_table = load_page()
change, changes = minervaupdate(values, term, year, transcript_table, terms)
if change:
print("Transcript updated!\n")
send_email(changes)
else:
print("No change...\n")
driver.close()
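# Example invocations (illustrative; the script's actual filename is not given
# in the source, so "scrape.py" is an assumption):
#   python scrape.py F2021 W2022   -> scrape Fall 2021 and Winter 2022 only
#   python scrape.py               -> scrape every term on the transcript
#   python scrape.py --update      -> re-scrape, diff, and email any changes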
|
# Generated by Django 3.0 on 2020-01-16 11:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0007_auto_20200116_1648'),
]
operations = [
migrations.AlterField(
model_name='dailyweight',
name='date_time',
field=models.DateTimeField(auto_now_add=True, verbose_name='Date Time'),
),
]
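# For reference (hedged): after this migration the model field presumably reads
#   date_time = models.DateTimeField(auto_now_add=True, verbose_name='Date Time')
# in dashboard/models.py; auto_now_add stamps the value once, at row creation.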
|
##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test tcpdoc
"""
from doctest import DocTestSuite
import os
import re
import unittest
from zope.testing.renormalizing import RENormalizing
from ZODB.interfaces import IDatabase
import zope.component
import zope.interface
import zope.app.testing
from zope.app.publication.requestpublicationregistry import factoryRegistry
from zope.app.publication.requestpublicationfactories import BrowserFactory
from zope.app.testing import functional
from zope.app.testing.dochttp import dochttp
import transaction
from zope.app.testing.functional import SampleFunctionalTest
from zope.app.testing.functional import BrowserTestCase, HTTPTestCase
from zope.app.testing.functional import FunctionalDocFileSuite
from zope.app.testing.functional import FunctionalTestCase
from zope.app.testing.testing import AppTestingLayer
from zope.app.testing.testing import FailingKlass
from zope.app.testing._compat import NativeStringIO
HEADERS = """\
HTTP/1.1 200 OK
Content-Type: text/plain
"""
BODY = """\
This is the response body.
"""
here = os.path.dirname(zope.app.testing.__file__)
directory = os.path.join(here, 'recorded')
expected = r'''
>>> print(http(r"""
... GET /@@contents.html HTTP/1.1
... """))
HTTP/1.1 401 Unauthorized
Content-Length: 89
Content-Type: text/html;charset=utf-8
Www-Authenticate: basic realm="Zope"
<BLANKLINE>
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en"
lang="en">
<BLANKLINE>
...
<BLANKLINE>
</html>
<BLANKLINE>
<BLANKLINE>
>>> print(http(r"""
... GET /@@contents.html HTTP/1.1
... Authorization: Basic bWdyOm1ncnB3
... """))
HTTP/1.1 200 OK
Content-Length: 89
Content-Type: text/html;charset=utf-8
<BLANKLINE>
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en"
lang="en">
<BLANKLINE>
...
<BLANKLINE>
</html>
<BLANKLINE>
<BLANKLINE>
>>> print(http(r"""
... GET /++etc++site/@@manage HTTP/1.1
... Authorization: Basic bWdyOm1ncnB3
... Referer: http://localhost:8081/
... """))
HTTP/1.1 303 See Other
Content-Length: 0
Content-Type: text/plain;charset=utf-8
Location: @@tasks.html
<BLANKLINE>
>>> print(http(r"""
... GET / HTTP/1.1
... Authorization: Basic bWdyOm1ncnB3
... """))
HTTP/1.1 200 OK
Content-Length: 89
Content-Type: text/html;charset=utf-8
<BLANKLINE>
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en"
lang="en">
<BLANKLINE>
...
<BLANKLINE>
</html>
<BLANKLINE>
<BLANKLINE>
>>> print(http(r"""
... GET /++etc++site/@@tasks.html HTTP/1.1
... Authorization: Basic bWdyOm1ncnB3
... Referer: http://localhost:8081/
... """))
HTTP/1.1 200 OK
Content-Length: 89
Content-Type: text/html;charset=utf-8
<BLANKLINE>
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en"
lang="en">
<BLANKLINE>
...
<BLANKLINE>
</html>
<BLANKLINE>
<BLANKLINE>
'''
class FunctionalHTTPDocTest(unittest.TestCase):
maxDiff = None
assertRaisesRegex = getattr(unittest.TestCase, 'assertRaisesRegex',
unittest.TestCase.assertRaisesRegexp)
def test_dochttp(self):
capture = NativeStringIO()
dochttp(['-p', 'test', directory], output_fp=capture)
got = capture.getvalue()
self.assertEqual(expected, got)
def test_no_argument(self):
import sys
old_stderr = sys.stderr
sys.stderr = NativeStringIO()
try:
with self.assertRaises(SystemExit) as exc:
dochttp(["-p", 'test'])
finally:
sys.stderr = old_stderr
e = exc.exception
self.assertEqual(e.args, (2,))
def test_bad_directory_argument(self):
import tempfile
import shutil
d = tempfile.mkdtemp('.zope.app.testing')
self.addCleanup(shutil.rmtree, d)
with open(os.path.join(d, 'test1.request'), 'wt') as f:
f.write("Fake request file")
with open(os.path.join(d, 'test1.response'), 'wt') as f:
f.write("")
with self.assertRaisesRegex(
SystemExit,
"Expected equal numbers of requests and responses in '" + d):
dochttp(["-p", 'test', d])
class AuthHeaderTestCase(unittest.TestCase):
def test_auth_encoded(self):
auth_header = functional.auth_header
header = 'Basic Z2xvYmFsbWdyOmdsb2JhbG1ncnB3'
self.assertEqual(auth_header(header), header)
def test_auth_non_encoded(self):
auth_header = functional.auth_header
header = 'Basic globalmgr:globalmgrpw'
expected = 'Basic Z2xvYmFsbWdyOmdsb2JhbG1ncnB3'
self.assertEqual(auth_header(header), expected)
def test_auth_non_encoded_empty(self):
auth_header = functional.auth_header
header = 'Basic globalmgr:'
expected = 'Basic Z2xvYmFsbWdyOg=='
self.assertEqual(auth_header(header), expected)
header = 'Basic :pass'
expected = 'Basic OnBhc3M='
self.assertEqual(auth_header(header), expected)
def test_auth_non_encoded_colon(self):
auth_header = zope.app.testing.functional.auth_header
header = 'Basic globalmgr:pass:pass'
expected = 'Basic Z2xvYmFsbWdyOnBhc3M6cGFzcw=='
self.assertEqual(auth_header(header), expected)
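    # Sanity note (added for illustration): the expected values above are plain
    # Base64 of "user:password", e.g.
    #   base64.b64encode(b'globalmgr:globalmgrpw') == b'Z2xvYmFsbWdyOmdsb2JhbG1ncnB3'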
class HTTPCallerTestCase(unittest.TestCase):
def test_chooseRequestClass(self):
from zope.publisher.interfaces import IRequest, IPublication
factoryRegistry.register('GET', '*', 'browser', 0, BrowserFactory())
caller = functional.HTTPCaller()
request_class, publication_class = caller.chooseRequestClass(
method='GET', path='/', environment={})
self.assertTrue(IRequest.implementedBy(request_class))
self.assertTrue(IPublication.implementedBy(publication_class))
class DummyCookiesResponse(object):
    # Ugh, this simulates the *internals* of an HTTPResponse object
# TODO: expand the IHTTPResponse interface to give access to all cookies
_cookies = None
def __init__(self, cookies=None):
self._cookies = cookies or {}
class CookieHandlerTestCase(unittest.TestCase):
def setUp(self):
self.handler = functional.CookieHandler()
def test_saveCookies(self):
response = DummyCookiesResponse(dict(
spam=dict(value='eggs', path='/foo', comment='rest is ignored'),
monty=dict(value='python')))
self.handler.saveCookies(response)
self.assertEqual(len(self.handler.cookies), 2)
self.assertIn(self.handler.cookies['spam'].OutputString(),
('spam=eggs; Path=/foo;', 'spam=eggs; Path=/foo'))
self.assertIn(self.handler.cookies['monty'].OutputString(),
('monty=python;', 'monty=python'))
def test_httpCookie(self):
cookies = self.handler.cookies
cookies['spam'] = 'eggs'
cookies['spam']['path'] = '/foo'
cookies['bar'] = 'baz'
cookies['bar']['path'] = '/foo/baz'
cookies['monty'] = 'python'
cookieHeader = self.handler.httpCookie('/foo/bar')
parts = sorted(cookieHeader.split('; '))
self.assertEqual(parts, ['monty=python', 'spam=eggs'])
cookieHeader = self.handler.httpCookie('/foo/baz')
parts = cookieHeader.split('; ')
parts.sort()
self.assertEqual(parts, ['bar=baz', 'monty=python', 'spam=eggs'])
# There is no test for CookieHandler.loadCookies because that method
# only passes the arguments on to Cookie.BaseCookie.load, which the
# standard library has tests for (we hope).
class HTTPFunctionalTest(HTTPTestCase):
def testNoDefaultReferer(self):
# There should be no referer set in the request by default.
r = self.makeRequest()
self.assertRaises(KeyError, r.environment.__getitem__, 'HTTP_REFERER')
class BrowserFunctionalTest(BrowserTestCase):
def testNoDefaultReferer(self):
# There should be no referer set in the request by default.
r = self.makeRequest()
self.assertRaises(KeyError, r.environment.__getitem__, 'HTTP_REFERER')
class HTTPCallerFunctionalTest(FunctionalTestCase):
def testNoDefaultReferer(self):
# There should be no referer set in the request by default.
from zope.app.testing.functional import HTTPCaller
http = HTTPCaller()
response = http("GET /++skin++Basic HTTP/1.1\n\n")
self.assertRaises(KeyError, response._request.environment.__getitem__,
'HTTP_REFERER')
def testRemoteAddr(self):
# There should be a REMOTE_ADDR in the request by default.
from zope.app.testing.functional import HTTPCaller
http = HTTPCaller()
response = http("GET / HTTP/1.1\n\n")
self.assertEqual(response._request.environment['REMOTE_ADDR'],
'127.0.0.1')
class GetCookies(object):
"""Get all cookies set."""
def __call__(self):
cookies = sorted(['%s=%s' % (k, v)
for k, v in self.request.getCookies().items()])
return ';'.join(cookies)
class SetCookies(object):
"""Set a specific cookie."""
def __call__(self):
self.request.response.setCookie('bid', 'bval')
class CookieFunctionalTest(BrowserTestCase):
"""Functional tests should handle cookies like a web browser
Multiple requests in the same test should acumulate cookies.
We also ensure that cookies with path values are only sent for
the correct URL's so we can test cookies don't 'leak'. Expiry,
secure and other cookie attributes are not being worried about
at the moment
"""
def setUp(self):
import zope.configuration.xmlconfig
super(CookieFunctionalTest, self).setUp()
self.assertEqual(
len(self.cookies.keys()), 0,
'cookies store should be empty')
zope.configuration.xmlconfig.string(r'''
<configure xmlns="http://namespaces.zope.org/browser">
<include package="zope.browserpage" file="meta.zcml" />
<page
name="getcookies"
for="*"
permission="zope.Public"
class="zope.app.testing.tests.GetCookies" />
<page
name="setcookie"
for="*"
permission="zope.Public"
class="zope.app.testing.tests.SetCookies" />
</configure>
''')
def testDefaultCookies(self):
# By default no cookies are set
response = self.publish('/')
self.assertEqual(response.getStatus(), 200)
self.assertFalse(response._request._cookies)
def testSimpleCookies(self):
self.cookies['aid'] = 'aval'
response = self.publish('/')
self.assertEqual(response.getStatus(), 200)
self.assertEqual(response._request._cookies['aid'], 'aval')
def testCookiePaths(self):
# We only send cookies if the path is correct
self.cookies['aid'] = 'aval'
self.cookies['aid']['Path'] = '/sub/folder'
self.cookies['bid'] = 'bval'
response = self.publish('/')
self.assertEqual(response.getStatus(), 200)
self.assertNotIn('aid', response._request._cookies)
self.assertEqual(response._request._cookies['bid'], 'bval')
def testHttpCookieHeader(self):
# Passing an HTTP_COOKIE header to publish adds cookies
response = self.publish('/', env={
'HTTP_COOKIE':
'$Version=1, aid=aval; $Path=/sub/folder, bid=bval'})
self.assertEqual(response.getStatus(), 200)
self.assertNotIn('aid', response._request._cookies)
self.assertEqual(response._request._cookies['bid'], 'bval')
def testStickyCookies(self):
        # Cookies should accumulate during the test
response = self.publish('/', env={'HTTP_COOKIE': 'aid=aval;'})
self.assertEqual(response.getStatus(), 200)
        # Cookies are implicitly passed to further requests in this test
response = self.publish('/getcookies')
self.assertEqual(response.getStatus(), 200)
self.assertEqual(response.getBody().strip(), 'aid=aval')
        # And cookies set in responses also accumulate
response = self.publish('/setcookie')
self.assertEqual(response.getStatus(), 200)
response = self.publish('/getcookies')
self.assertEqual(response.getStatus(), 200)
self.assertEqual(response.getBody().strip(), 'aid=aval;bid=bval')
class SkinsAndHTTPCaller(FunctionalTestCase):
def test_skins(self):
# Regression test for http://zope.org/Collectors/Zope3-dev/353
from zope.app.testing.functional import HTTPCaller
http = HTTPCaller()
response = http("GET /++skin++Basic HTTP/1.1\n\n")
self.assertIn("zopetopBasic.css", str(response))
class RetryProblemFunctional(FunctionalTestCase):
def setUp(self):
super(RetryProblemFunctional, self).setUp()
root = self.getRootFolder()
root['fail'] = FailingKlass()
transaction.commit()
def tearDown(self):
root = self.getRootFolder()
del root['fail']
super(RetryProblemFunctional, self).tearDown()
def test_retryOnConflictErrorFunctional(self):
from zope.app.testing.functional import HTTPCaller
http = HTTPCaller()
response = http(r"""
GET /@@test-conflict-raise-view.html HTTP/1.1
Authorization: Basic mgr:mgrpw
""")
self.assertNotEqual(response.getStatus(), 599)
self.assertEqual(response.getStatus(), 500)
class RetryProblemBrowser(BrowserTestCase):
def setUp(self):
super(RetryProblemBrowser, self).setUp()
root = self.getRootFolder()
root['fail'] = FailingKlass()
transaction.commit()
def tearDown(self):
root = self.getRootFolder()
del root['fail']
super(RetryProblemBrowser, self).tearDown()
def test_retryOnConflictErrorBrowser(self):
response = self.publish('/@@test-conflict-raise-view.html',
handle_errors=True)
self.assertNotEqual(response.getStatus(), 599)
self.assertEqual(response.getStatus(), 500)
ftesting_zcml = os.path.join(here, 'ftesting.zcml')
def doctest_FunctionalTestSetup_clears_global_utilities():
"""Test that FunctionalTestSetup doesn't leave global utilities.
Leaving global IDatabase utilities makes a nice juicy memory leak.
See https://bugs.launchpad.net/zope3/+bug/251273
This bug has now been fixed and this test exercises the fixed version.
>>> from zope.app.testing.functional import FunctionalTestSetup
>>> setup = FunctionalTestSetup(ftesting_zcml)
At this point, there are registrations for the base databases created by
the initialization:
>>> from zope.component import getAllUtilitiesRegisteredFor
>>> base, = getAllUtilitiesRegisteredFor(IDatabase)
Setting up for a test causes overriding registrations to be made:
>>> setup.setUp()
>>> dbs = list(getAllUtilitiesRegisteredFor(IDatabase))
>>> len(dbs)
1
>>> base in dbs
False
>>> override, = dbs
Tearing down the test context causes the overriding database to be
removed:
>>> setup.tearDown()
>>> list(getAllUtilitiesRegisteredFor(IDatabase))
[]
Tearing down completely:
>>> setup.tearDownCompletely()
"""
empty_zcml = os.path.join(here, 'empty.zcml')
def doctest_FunctionalTestSetup_supports_product_config():
"""Test that FunctionalTestSetup configures products.
We want to apply the following product configuration before opening
databases:
>>> product_config = '''
... <product-config abc>
... key1 value1
... key2 value2
... </product-config>
... '''
Since we expect the product configuration to be available when the layer
is initialized, we'll register a subscriber for the IDatabaseOpenedEvent
event, The normal CA-provided handling of the event is of no use to use,
since the functional layer controls the configuration of that, but a
low-level zoe.event subscriber will do the job:
>>> import zope.event
>>> def handle_database_open(event):
... global config
... IDbOE = zope.processlifetime.IDatabaseOpened
... if IDbOE.providedBy(event):
... config = zope.app.appsetup.product.getProductConfiguration(
... 'abc')
>>> zope.event.subscribers.append(handle_database_open)
The product configuration is passed to the layer setup and installed by
the setUp method:
>>> import pprint
>>> import zope.app.appsetup.product
>>> from zope.app.testing.functional import FunctionalTestSetup
>>> setup = FunctionalTestSetup(
... empty_zcml, product_config=product_config)
The configuration was visible to our database-opened subscriber:
>>> pprint.pprint(config, width=1)
{'key1': 'value1',
'key2': 'value2'}
>>> config = zope.app.appsetup.product.getProductConfiguration(
... 'abc')
>>> pprint.pprint(config, width=1)
{'key1': 'value1',
'key2': 'value2'}
Let's run a test that mutates the product configuration:
>>> setup.setUp()
>>> zope.app.appsetup.product.setProductConfiguration(
... 'abc', {'another': 'value'})
>>> zope.app.appsetup.product.getProductConfiguration('abc')
{'another': 'value'}
>>> setup.tearDown()
A second test run in the layer sees the original product configuration:
>>> setup.setUp()
>>> config = zope.app.appsetup.product.getProductConfiguration(
... 'abc')
>>> pprint.pprint(config, width=1)
{'key1': 'value1',
'key2': 'value2'}
>>> setup.tearDown()
After the layer is cleaned up, there's no longer any product
configuration:
>>> zope.event.subscribers.remove(handle_database_open)
>>> setup.tearDownCompletely()
>>> zope.app.appsetup.product.saveConfiguration()
{}
"""
def doctest_ZCMLLayer_carries_product_configuration():
"""Show that ``ZCMLLayer`` carries along product configuration.
    ZCML layers can be defined to work with specific product
configuration; this is useful when application code (early subscribers,
including generations) need configuration data.
Let's define a couple of separate ZCML layers, and show that the
configuration data is properly associated with each, and applied at
appropriate times.
We'll need two distinct product configurations:
>>> product_config_one = '''
... <product-config abc>
... key1 a1
... key2 a2
... </product-config>
... '''
>>> product_config_two = '''
... <product-config abc>
... key1 b1
... key2 b2
... </product-config>
... '''
We can create two distinct layers that use these configurations:
>>> LayerOne = functional.ZCMLLayer(
... empty_zcml, 'zope.app.testing.tests', 'LayerOne',
... product_config=product_config_one,
... allow_teardown=True)
>>> LayerTwo = functional.ZCMLLayer(
... empty_zcml, 'zope.app.testing.tests', 'LayerTwo',
... product_config=product_config_two,
... allow_teardown=True)
For each layer, we can see that the correct product configuration is
installed, and subsequent layer usages won't have problems because of the
previously installed layer. This checks that initialization and
deconstruction of the functional test setup is handled properly to allow
layers to be used in sequence.
Let's use a helper function to show the configuration:
>>> import pprint
>>> def show_config():
... c = zope.app.appsetup.product.getProductConfiguration('abc')
... pprint.pprint(c, width=1)
>>> LayerOne.setUp()
>>> show_config()
{'key1': 'a1',
'key2': 'a2'}
>>> LayerOne.tearDown()
>>> LayerTwo.setUp()
>>> show_config()
{'key1': 'b1',
'key2': 'b2'}
>>> LayerTwo.tearDown()
"""
class TestXMLRPCTransport(unittest.TestCase):
def _makeOne(self):
from zope.app.testing.xmlrpc import ZopeTestTransport
return ZopeTestTransport()
def test_construct(self):
self._makeOne()
class TestXMLRPCServerProxy(unittest.TestCase):
def _makeOne(self, uri, **kwargs):
from zope.app.testing.xmlrpc import ServerProxy
return ServerProxy(uri, **kwargs)
def test_construct(self):
self._makeOne("http://example.com")
class TestConflictRaisingView(unittest.TestCase):
def _makeOne(self, context=None, request=None):
from zope.app.testing.testing import ConflictRaisingView
return ConflictRaisingView(context, request)
def test_browserDefault(self):
view = self._makeOne()
self.assertEqual(view.browserDefault(), (view, ()))
def test_call(self):
from ZODB.POSException import ConflictError
view = self._makeOne()
with self.assertRaises(ConflictError):
view()
class TestPlacefulSetUp(unittest.TestCase):
def setUp(self):
from zope.app.testing.setup import placefulSetUp
self.site = placefulSetUp(True)
def tearDown(self):
from zope.app.testing.setup import placefulTearDown
placefulTearDown()
self.site = None
def testSite(self):
from zope.component.hooks import getSite
self.assertEqual(self.site, getSite())
def test_buildSampleFolderTree(self):
from zope.app.testing.setup import buildSampleFolderTree
t = buildSampleFolderTree()
self.assertTrue(t)
def test_suite():
from zope.app.testing.setup import setUpTestAsModule
from zope.app.testing.setup import tearDownTestAsModule
import doctest
from zope.testing import renormalizing
checker = RENormalizing([
(re.compile(r'^HTTP/1.1 (\d{3}) .*?\n'), 'HTTP/1.1 \\1\n')])
SampleFunctionalTest.layer = AppTestingLayer
CookieFunctionalTest.layer = AppTestingLayer
SkinsAndHTTPCaller.layer = AppTestingLayer
RetryProblemFunctional.layer = AppTestingLayer
RetryProblemBrowser.layer = AppTestingLayer
HTTPFunctionalTest.layer = AppTestingLayer
BrowserFunctionalTest.layer = AppTestingLayer
HTTPCallerFunctionalTest.layer = AppTestingLayer
doc_test = FunctionalDocFileSuite(
'doctest.rst', 'cookieTestOne.rst',
'cookieTestTwo.rst', checker=checker)
doc_test.layer = AppTestingLayer
xml_checker = RENormalizing((
(re.compile('<DateTime \''), '<DateTime u\''),
(re.compile('at [-0-9a-fA-F]+'), 'at <SOME ADDRESS>'),
(re.compile("HTTP/1.0"), "HTTP/1.1"),
))
def xmlSetUp(test):
setUpTestAsModule(test, 'zope.app.testing.xmlrpc.README')
def xmlTearDown(test):
# clean up the views we registered:
# we use the fact that registering None unregisters whatever is
# registered. We can't use an unregistration call because that
# requires the object that was registered and we don't have that handy.
# (OK, we could get it if we want. Maybe later.)
from zope.site.interfaces import IFolder
from zope.publisher.interfaces.xmlrpc import IXMLRPCRequest
        zope.component.provideAdapter(None, (
            IFolder,
            IXMLRPCRequest
        ), zope.interface.Interface, 'contents')
tearDownTestAsModule(test)
xmlrpcsuite = FunctionalDocFileSuite(
'xmlrpc.rst',
setUp=xmlSetUp,
tearDown=xmlTearDown,
checker=xml_checker,
optionflags=(doctest.ELLIPSIS
| doctest.NORMALIZE_WHITESPACE
| renormalizing.IGNORE_EXCEPTION_MODULE_IN_PYTHON2)
)
xmlrpcsuite.layer = AppTestingLayer
return unittest.TestSuite((
unittest.defaultTestLoader.loadTestsFromName(__name__),
DocTestSuite(),
doc_test,
xmlrpcsuite,
))
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|