content stringlengths 5 1.05M |
|---|
import os
import pickle
from tqdm import tqdm
from sentence_transformers import SentenceTransformer
# Project root: the working directory at import time (the 'models' output
# directory is created beneath it).
root_dir = os.getcwd()
class SentenceEncoder():
    """Wraps a SentenceTransformer model and pickles per-question embeddings."""

    def __init__(self, name):
        # name: SentenceTransformer model identifier (e.g. a hub model name).
        self.name = name

    def load_model(self):
        """Instantiate and return the SentenceTransformer model."""
        sbert_model = SentenceTransformer(self.name)
        return sbert_model

    def encode_sentences(self, sbert_model, filtered_data):
        """Encode filtered_data['Questions'] and pickle {1-based id: embedding}.

        Writes <root_dir>/models/sentence_encodings.pickle. Returns None.
        """
        self.sbert_model = sbert_model
        self.sentences = filtered_data
        sentence_embeddings = {}
        questions = filtered_data['Questions']
        for index in tqdm(range(len(questions))):
            # ids are 1-based so they can reference the questions externally
            sentence_embeddings[index + 1] = sbert_model.encode(questions[index])
        models_dir = os.path.join(root_dir, 'models')
        # exist_ok avoids the race between the exists() check and mkdir()
        os.makedirs(models_dir, exist_ok=True)
        # BUG FIX: the pickle was previously written to './models' (cwd at
        # call time) while the directory was created under root_dir (cwd at
        # import time); use the same root_dir-based path for both.
        with open(os.path.join(models_dir, 'sentence_encodings.pickle'), 'wb') as file:
            pickle.dump(sentence_embeddings, file)
|
from .base_sprite import BaseSprite
class SimpleSprite(BaseSprite):
    """A BaseSprite that immediately paints its image onto its surface."""

    def __init__(self, coordinates, speed_x, speed_y, image, transparent_color=None):
        super().__init__(coordinates, speed_x, speed_y, image, transparent_color)
        # Draw the per-pixel-alpha version of the image at the surface origin.
        converted = image.convert_alpha()
        self.surf.blit(converted, (0, 0))
|
#Copyright (c) 2016, Allgeyer Tobias, Aumann Florian, Borella Jocelyn, Karrenbauer Oliver, Marek Felix, Meissner Pascal, Stroh Daniel, Trautmann Jeremias
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
#2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other #materials provided with the distribution.
#
#3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific #prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR #PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import subprocess
import time
from string_util import *
import libtmux
# Connect to the running tmux server and keep module-level handles to the
# first session plus the window/pane that is currently attached.
server = libtmux.Server()
session = server.sessions[0]
curWindow = session.attached_window
curPane = session.attached_pane
# tmux api
# windows
def getPaneOut(pane):
    """Return the visible contents of *pane* as one newline-joined string."""
    captured = pane.cmd("capturep", "-p")
    return "\n".join(captured.stdout)
def getTmuxCurOut():
    """Return the captured contents of the currently attached pane."""
    current = session.attached_pane
    return getPaneOut(current)
def getTmuxOut(windowNameOrId, paneId):
    """Return the captured contents of pane *paneId* in the given window.

    Returns None when the window cannot be resolved; paneId wraps modulo
    the window's pane count.
    """
    window = getWindow(windowNameOrId)
    if window is None:
        return None
    panes = window.panes
    return getPaneOut(panes[paneId % len(panes)])
def getWindow(windowNameOrId):
    """Resolve a window by numeric id, '@id' string, or window name.

    Returns None when no window matches.
    """
    if type(windowNameOrId) is int:
        windowNameOrId = "@" + str(windowNameOrId)
    window = session.get_by_id(windowNameOrId)
    if window is not None:
        return window
    # Otherwise fall back to a lookup by window name.
    # FIX: the original used a tuple-unpacking lambda (Python-2-only syntax,
    # a SyntaxError on Python 3) and called len() on a filter object, which
    # also fails on Python 3.  A plain loop works on both.
    for candidate in session.windows:
        if candidate._info["window_name"] == windowNameOrId:
            return candidate
    return None
def selectWindow(windowNameOrId):
    """Focus the window matching *windowNameOrId*, if it exists."""
    window = getWindow(windowNameOrId)
    if window is None:
        print("window not found")
    else:
        window.select_window()
def getNumberOfWindows():
    """Return how many windows the session currently has."""
    windows = session.windows
    return len(windows)
def getValidWindowIds():
    """Return the ids of every window in the session as a list.

    FIX: the original returned map(...), a single-use lazy iterator on
    Python 3; a list can be iterated repeatedly and supports len().
    """
    return [window._window_id for window in session.windows]
def restartWindow(i):
    """Restart the command running in every pane of window *i*."""
    selectWindow(str(i))
    paneCount = getNumberOfPanes()
    # Renamed the loop variable: the original reused 'i', shadowing the
    # window parameter.
    for paneIndex in range(paneCount):
        selectPane(paneIndex)
        restartCurrentPane()
def terminateWindow(i):
    """Send Ctrl-C to every pane of window *i*."""
    selectWindow(str(i))
    paneCount = getNumberOfPanes()
    for paneIndex in range(paneCount):
        selectPane(paneIndex)
        terminatePane()
def restartAllWindows():
    """Restart the commands in every pane of every window."""
    for windowId in getValidWindowIds():
        restartWindow(windowId)
def terminateAllWindows():
    """Send Ctrl-C to every pane of every window."""
    for windowId in getValidWindowIds():
        terminateWindow(windowId)
# panes
def getNumberOfPanes():
    """Return the pane count of the currently attached window."""
    currentWindow = session.attached_window
    return len(currentWindow.panes)
def selectPane(paneId):
    """Focus pane *paneId* (wrapping modulo the pane count) in the attached window."""
    wrappedId = paneId % getNumberOfPanes()
    session.attached_window.panes[wrappedId].select_pane()
def terminatePane():
    """Send Ctrl-C to the currently attached pane."""
    pane = session.attached_pane
    pane.cmd("send-keys", "C-C")
def restartCurrentPane():
    """Ctrl-C the attached pane, wait for a shell prompt, then re-run the last command."""
    terminatePane()
    # Keep sending Ctrl-C until the pane's last visible line ends in a "$"
    # shell prompt, i.e. the foreground application has exited.
    while True:
        lastLine = getLastLinesFrom(getTmuxCurOut(), -1)
        lastSymbol = lastLine[-1] if len(lastLine) > 0 else ""
        if lastSymbol == "$":
            break
        terminatePane()
        # to terminate python process, we first remove all input characters before and after and then use Ctrl + d
        #session.attached_pane.cmd("send-keys", "C-k")
        #session.attached_pane.cmd("send-keys", "C-u")
        #session.attached_pane.cmd("send-keys", "C-d")
        time.sleep(1)
    # Restart the last command: recall it from shell history and run it.
    pane = session.attached_pane
    pane.cmd("send-keys", "Up")
    pane.enter()
def terminateCurrentPane():
    """Ctrl-C the attached pane and wait until a shell prompt appears."""
    terminatePane()
    # Retry Ctrl-C until the last visible line ends with a "$" prompt.
    while True:
        lastLine = getLastLinesFrom(getTmuxCurOut(), -1)
        lastSymbol = lastLine[-1] if len(lastLine) > 0 else ""
        if lastSymbol == "$":
            break
        terminatePane()
        time.sleep(1)
def restartPanes(windowAndPanes):
    """Restart the given (window, pane) targets, then restore the original focus.

    *windowAndPanes* is a (windowNameOrId, paneId) tuple or a list of them.
    """
    previousPane = session.attached_pane
    targets = windowAndPanes if type(windowAndPanes) is list else [windowAndPanes]
    for windowNameOrId, paneId in targets:
        selectWindow(windowNameOrId)
        selectPane(paneId)
        restartCurrentPane()
    # Give focus back to where the user was before.
    previousPane.window.select_window()
    previousPane.select_pane()
def terminatePanes(windowAndPanes):
    """Terminate the given (window, pane) targets, then restore the original focus.

    *windowAndPanes* is a (windowNameOrId, paneId) tuple or a list of them.
    """
    previousPane = session.attached_pane
    targets = windowAndPanes if type(windowAndPanes) is list else [windowAndPanes]
    for windowNameOrId, paneId in targets:
        selectWindow(windowNameOrId)
        selectPane(paneId)
        terminateCurrentPane()
    # Give focus back to where the user was before.
    previousPane.window.select_window()
    previousPane.select_pane()
def selectWindowAndPane(windowNameOrId, paneId):
    """Focus the given window, then the given pane inside it."""
    selectWindow(windowNameOrId)
    selectPane(paneId)
def tmuxKill():
    """Kill the whole tmux server (all sessions, windows and panes)."""
    # FIX: pass an argument list instead of shell=True — no shell parsing
    # is needed and it removes any shell-injection surface.
    subprocess.call(["tmux", "kill-server"])
|
def leapYear(year):
    """Return True for Gregorian leap years: divisible by 4, except centuries not divisible by 400."""
    if year % 400 == 0:
        return True
    return year % 4 == 0 and year % 100 != 0
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: user.py
@time: 2018-04-04 17:33
"""
from __future__ import unicode_literals
import json
from datetime import datetime
from flask import (
request,
flash,
render_template,
url_for,
redirect,
abort,
jsonify,
Blueprint,
)
from flask_babel import gettext as _
from flask_login import login_required, current_user
from app_backend import app
from app_backend import excel
from app_backend.api.buyer_order import count_buyer_order
from app_backend.api.delivery import count_delivery
from app_backend.api.enquiry import count_enquiry
from app_backend.api.purchase import count_purchase
from app_backend.api.quotation import count_quotation
from app_backend.api.sales_order import count_sales_order
from app_backend.api.user import (
get_user_rows,
get_user_pagination,
get_user_row_by_id,
add_user,
edit_user,
user_current_stats,
user_former_stats)
from app_backend.api.user_auth import (
add_user_auth,
edit_user_auth, get_user_auth_row)
from app_backend.forms.user import (
UserSearchForm,
UserAddForm,
UserEditForm,
)
from app_backend.models.model_bearing import User
from app_backend.permissions import permission_role_administrator
from app_backend.permissions.user import (
permission_user_section_add,
permission_user_section_search,
permission_user_section_stats,
permission_user_section_export,
permission_user_section_get,
permission_user_section_edit,
permission_user_section_del,
)
from app_common.maps.default import DEFAULT_SEARCH_CHOICES_INT_OPTION
from app_common.maps.operations import OPERATION_EXPORT, OPERATION_DELETE
from app_common.maps.status_delete import (
STATUS_DEL_OK,
STATUS_DEL_NO)
from app_common.maps.status_verified import STATUS_VERIFIED_OK
from app_common.maps.type_auth import TYPE_AUTH_ACCOUNT
from app_common.tools import json_default
# Blueprint for all /user backend routes.
bp_user = Blueprint('user', __name__, url_prefix='/user')
# Load configuration (with safe fallbacks).
DOCUMENT_INFO = app.config.get('DOCUMENT_INFO', {})
PER_PAGE_BACKEND = app.config.get('PER_PAGE_BACKEND', 20)
AJAX_SUCCESS_MSG = app.config.get('AJAX_SUCCESS_MSG', {'result': True})
AJAX_FAILURE_MSG = app.config.get('AJAX_FAILURE_MSG', {'result': False})
@bp_user.route('/lists.html', methods=['GET', 'POST'])
@login_required
@permission_user_section_search.require(http_exception=403)
def lists():
    """
    User list page: search, CSV export and batch soft-delete.
    :return: rendered template, or a CSV response for exports
    """
    template_name = 'user/lists.html'
    # Document info
    document_info = DOCUMENT_INFO.copy()
    document_info['TITLE'] = _('user lists')
    # Search conditions (soft-deleted users are never shown)
    form = UserSearchForm(request.form)
    search_condition = [
        User.status_delete == STATUS_DEL_NO,
    ]
    if request.method == 'POST':
        # Form validation failed
        if not form.validate_on_submit():
            flash(_('Search Failure'), 'danger')
            # Handle csrf_token errors separately.
            # BUG FIX: the original used map(lambda x: flash(...)) here —
            # map() is lazy on Python 3, so the flash() calls never ran.
            if hasattr(form, 'csrf_token') and getattr(form, 'csrf_token').errors:
                for csrf_error in form.csrf_token.errors:
                    flash(csrf_error, 'danger')
        else:
            if form.name.data:
                search_condition.append(User.name == form.name.data)
            if form.role_id.data != DEFAULT_SEARCH_CHOICES_INT_OPTION:
                search_condition.append(User.role_id == form.role_id.data)
            if form.start_create_time.data:
                search_condition.append(User.create_time >= form.start_create_time.data)
            if form.end_create_time.data:
                search_condition.append(User.create_time <= form.end_create_time.data)
            # Handle export
            if form.op.data == OPERATION_EXPORT:
                # Check export permission
                if not permission_user_section_export.can():
                    abort(403)
                column_names = User.__table__.columns.keys()
                query_sets = get_user_rows(*search_condition)
                return excel.make_response_from_query_sets(
                    query_sets=query_sets,
                    column_names=column_names,
                    file_type='csv',
                    file_name='%s.csv' % _('user lists')
                )
            # Batch delete
            if form.op.data == OPERATION_DELETE:
                # Check delete permission
                if not permission_user_section_del.can():
                    abort(403)
                user_ids = request.form.getlist('user_id')
                # A user still referenced by any business document must not be
                # deleted; checked in the original order (the six duplicated
                # if-blocks were consolidated into this table).
                in_use_counters = [
                    count_quotation,    # 1. quotations
                    count_sales_order,  # 2. sales orders
                    count_delivery,     # 3. deliveries
                    count_enquiry,      # 4. enquiries
                    count_buyer_order,  # 5. buyer orders
                    count_purchase,     # 6. purchases
                ]
                permitted = True
                for user_id in user_ids:
                    for count_in_use in in_use_counters:
                        if count_in_use(**{'uid': user_id, 'status_delete': STATUS_DEL_NO}):
                            ext_msg = _('Currently In Use')
                            flash(_('Del Failure, %(ext_msg)s', ext_msg=ext_msg), 'danger')
                            permitted = False
                            break
                    if not permitted:
                        break
                if permitted:
                    result_total = True
                    for user_id in user_ids:
                        current_time = datetime.utcnow()
                        user_data = {
                            'status_delete': STATUS_DEL_OK,
                            'delete_time': current_time,
                            'update_time': current_time,
                        }
                        result = edit_user(user_id, user_data)
                        result_total = result_total and result
                    if result_total:
                        flash(_('Del Success'), 'success')
                    else:
                        flash(_('Del Failure'), 'danger')
    # Pagination data
    pagination = get_user_pagination(form.page.data, PER_PAGE_BACKEND, *search_condition)
    # Render the template
    return render_template(
        template_name,
        form=form,
        pagination=pagination,
        **document_info
    )
# @bp_user.route('/search.html', methods=['GET', 'POST'])
# @login_required
# @permission_user_section_search.require(http_exception=403)
# def search():
# """
# 用户搜索
# :return:
# """
# template_name = 'customer/search_modal.html'
# # 文档信息
# document_info = DOCUMENT_INFO.copy()
# document_info['TITLE'] = _('Customer Search')
#
# # 搜索条件
# form = UserSearchForm(request.form)
# form.owner_uid.choices = get_sales_user_list()
# # app.logger.info('')
#
# search_condition = [
# Customer.status_delete == STATUS_DEL_NO,
# ]
# if request.method == 'POST':
# # 表单校验失败
# if not form.validate_on_submit():
# flash(_('Search Failure'), 'danger')
# # 单独处理csrf_token
# if hasattr(form, 'csrf_token') and getattr(form, 'csrf_token').errors:
# map(lambda x: flash(x, 'danger'), form.csrf_token.errors)
# else:
# if form.company_type.data != default_choice_option_int:
# search_condition.append(Customer.company_type == form.company_type.data)
# if form.company_name.data:
# search_condition.append(Customer.company_name.like('%%%s%%' % form.company_name.data))
# # 翻页数据
# pagination = get_customer_pagination(form.page.data, PER_PAGE_BACKEND_MODAL, *search_condition)
#
# # 渲染模板
# return render_template(
# template_name,
# form=form,
# pagination=pagination,
# **document_info
# )
@bp_user.route('/<int:user_id>/info.html')
@login_required
@permission_user_section_get.require(http_exception=403)
def info(user_id):
    """
    User detail page.
    :param user_id: primary key of the user to show
    :return: rendered template (404 if missing, 410 if soft-deleted)
    """
    user_info = get_user_row_by_id(user_id)
    # The resource must exist ...
    if not user_info:
        abort(404)
    # ... and must not have been soft-deleted.
    if user_info.status_delete == STATUS_DEL_OK:
        abort(410)
    # Document info
    document_info = DOCUMENT_INFO.copy()
    document_info['TITLE'] = _('user info')
    # Render the template
    return render_template('user/info.html', user_info=user_info, **document_info)
@bp_user.route('/add.html', methods=['GET', 'POST'])
@login_required
@permission_user_section_add.require(http_exception=403)
def add():
    """
    Create a user (GET shows the form, POST processes it).
    :return: rendered template, or redirect to the list on success
    """
    template_name = 'user/add.html'
    # Document info
    document_info = DOCUMENT_INFO.copy()
    document_info['TITLE'] = _('user add')
    # Load the creation form
    form = UserAddForm(request.form)
    # Enter the creation page
    if request.method == 'GET':
        # Render the page
        return render_template(
            template_name,
            form=form,
            **document_info
        )
    # Handle the creation request
    if request.method == 'POST':
        # Form validation failed
        if not form.validate_on_submit():
            flash(_('Add Failure'), 'danger')
            return render_template(
                template_name,
                form=form,
                **document_info
            )
        # Form validation succeeded
        # Create the user's basic information
        current_time = datetime.utcnow()
        user_data = {
            'name': form.name.data,
            'salutation': form.salutation.data,
            'mobile': form.mobile.data,
            'tel': form.tel.data,
            'fax': form.fax.data,
            'email': form.email.data,
            'role_id': form.role_id.data,
            'create_time': current_time,
            'update_time': current_time,
        }
        user_id = add_user(user_data)
        if not user_id:
            flash(_('Add Failure'), 'danger')
            return render_template(
                template_name,
                form=form,
                **document_info
            )
        # Create the user's authentication record (account login = user name)
        user_auth_data = {
            'user_id': user_id,
            'type_auth': TYPE_AUTH_ACCOUNT,
            'auth_key': form.name.data,
            'auth_secret': '123456',  # default password
            'status_verified': STATUS_VERIFIED_OK,
            'create_time': current_time,
            'update_time': current_time,
        }
        result = add_user_auth(user_auth_data)
        if result:
            flash(_('Add Success'), 'success')
            return redirect(request.args.get('next') or url_for('user.lists'))
        # The creation failed
        else:
            flash(_('Add Failure'), 'danger')
            return render_template(
                template_name,
                form=form,
                **document_info
            )
@bp_user.route('/<int:user_id>/edit.html', methods=['GET', 'POST'])
@login_required
@permission_user_section_edit.require(http_exception=403)
def edit(user_id):
    """
    Edit a user (GET pre-fills the form, POST saves changes).
    :param user_id: primary key of the user being edited
    :return: rendered template, or redirect to the list on success
    """
    user_info = get_user_row_by_id(user_id)
    # The resource must exist ...
    if not user_info:
        abort(404)
    # ... and must not have been soft-deleted.
    if user_info.status_delete == STATUS_DEL_OK:
        abort(410)
    template_name = 'user/edit.html'
    # Load the edit form
    form = UserEditForm(request.form)
    form.id.data = user_id  # id is only used for duplicate-edit validation
    # Document info
    document_info = DOCUMENT_INFO.copy()
    document_info['TITLE'] = _('user edit')
    # Enter the edit page
    if request.method == 'GET':
        # Pre-fill the form from the stored row
        form.id.data = user_info.id
        form.name.data = user_info.name
        form.salutation.data = user_info.salutation
        form.mobile.data = user_info.mobile
        form.tel.data = user_info.tel
        form.fax.data = user_info.fax
        form.email.data = user_info.email
        form.role_id.data = user_info.role_id
        form.create_time.data = user_info.create_time
        form.update_time.data = user_info.update_time
        # Render the page
        return render_template(
            template_name,
            user_id=user_id,
            form=form,
            **document_info
        )
    # Handle the edit request
    if request.method == 'POST':
        # Form validation failed
        if not form.validate_on_submit():
            flash(_('Edit Failure'), 'danger')
            # flash(form.errors, 'danger')
            return render_template(
                template_name,
                user_id=user_id,
                form=form,
                **document_info
            )
        # Non-administrator roles may only modify their own information
        if not permission_role_administrator.can():
            if getattr(current_user, 'id') != form.id.data:
                flash(_('Permission denied, only the user\'s own information can be modified'), 'danger')
                # flash(form.errors, 'danger')
                return render_template(
                    template_name,
                    user_id=user_id,
                    form=form,
                    **document_info
                )
        # Form validation succeeded
        # Update the user's basic information
        current_time = datetime.utcnow()
        user_data = {
            'name': form.name.data,
            'salutation': form.salutation.data,
            'mobile': form.mobile.data,
            'tel': form.tel.data,
            'fax': form.fax.data,
            'email': form.email.data,
            'role_id': form.role_id.data,
            'update_time': current_time,
        }
        result = edit_user(user_id, user_data)
        if not result:
            # The update failed
            flash(_('Edit Failure'), 'danger')
            return render_template(
                template_name,
                user_id=user_id,
                form=form,
                **document_info
            )
        user_auth_row = get_user_auth_row(user_id=user_id)
        if not user_auth_row:
            # The update failed
            flash(_('Edit Failure'), 'danger')
            return render_template(
                template_name,
                user_id=user_id,
                form=form,
                **document_info
            )
        # Update the user's authentication record (login name follows user name)
        user_auth_data = {
            'user_id': user_id,
            'type_auth': TYPE_AUTH_ACCOUNT,
            'auth_key': form.name.data,
            'update_time': current_time,
        }
        result = edit_user_auth(user_auth_row.id, user_auth_data)
        if not result:
            # The update failed
            flash(_('Edit Failure'), 'danger')
            return render_template(
                template_name,
                user_id=user_id,
                form=form,
                **document_info
            )
        # The update succeeded
        flash(_('Edit Success'), 'success')
        return redirect(request.args.get('next') or url_for('user.lists'))
@bp_user.route('/ajax/del', methods=['GET', 'POST'])
@login_required
def ajax_delete():
    """
    Soft-delete a single user via AJAX.
    :return: JSON result message
    """
    ajax_success_msg = AJAX_SUCCESS_MSG.copy()
    ajax_failure_msg = AJAX_FAILURE_MSG.copy()
    # Check delete permission
    if not permission_user_section_del.can():
        ext_msg = _('Permission Denied')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    # Check the request method: only XHR GETs are accepted.
    # NOTE(review): request.is_xhr was removed in Werkzeug 2.0 — confirm the
    # pinned Flask/Werkzeug versions before upgrading.
    if not (request.method == 'GET' and request.is_xhr):
        ext_msg = _('Method Not Allowed')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    # Check the request parameters
    user_id = request.args.get('user_id', 0, type=int)
    if not user_id:
        ext_msg = _('ID does not exist')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    user_info = get_user_row_by_id(user_id)
    # The resource must exist ...
    if not user_info:
        ext_msg = _('ID does not exist')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    # ... and must not already be soft-deleted.
    if user_info.status_delete == STATUS_DEL_OK:
        ext_msg = _('Already deleted')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    # The user must not still be referenced by quotations/orders.
    if count_quotation(**{'uid': user_id, 'status_delete': STATUS_DEL_NO}):
        ext_msg = _('Currently In Use')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    current_time = datetime.utcnow()
    user_data = {
        'status_delete': STATUS_DEL_OK,
        'delete_time': current_time,
        'update_time': current_time,
    }
    result = edit_user(user_id, user_data)
    if result:
        ajax_success_msg['msg'] = _('Del Success')
        return jsonify(ajax_success_msg)
    else:
        ajax_failure_msg['msg'] = _('Del Failure')
        return jsonify(ajax_failure_msg)
@bp_user.route('/ajax/stats', methods=['GET', 'POST'])
@login_required
def ajax_stats():
    """
    User statistics as Chart.js line-chart JSON.
    :return: JSON string with 'labels' and two 'datasets'
    """
    time_based = request.args.get('time_based', 'hour')
    current_series = user_current_stats(time_based)
    former_series = user_former_stats(time_based)

    def _dataset(label, rgb, series):
        # Build one Chart.js dataset; all colors share a single rgb triple.
        return {
            'label': label,
            'backgroundColor': 'rgba(%s,0.5)' % rgb,
            'borderColor': 'rgba(%s,1)' % rgb,
            'pointBackgroundColor': 'rgba(%s,1)' % rgb,
            'pointBorderColor': '#fff',
            'pointBorderWidth': 2,
            'data': [value for _, value in series],
        }

    line_chart_data = {
        'labels': [label for label, _ in current_series],
        'datasets': [
            _dataset('在职', '220,220,220', current_series),
            _dataset('离职', '151,187,205', former_series),
        ],
    }
    return json.dumps(line_chart_data, default=json_default)
@bp_user.route('/stats.html')
@login_required
@permission_user_section_stats.require(http_exception=403)
def stats():
    """
    User statistics page.
    :return: rendered template (404 for an unknown time_based value)
    """
    time_based = request.args.get('time_based', 'hour')
    # Only these granularities are supported.
    if time_based not in ('hour', 'date', 'month'):
        abort(404)
    # Document info
    document_info = DOCUMENT_INFO.copy()
    document_info['TITLE'] = _('user stats')
    # Render the template
    return render_template(
        'user/stats.html',
        time_based=time_based,
        **document_info
    )
@bp_user.route('/<int:user_id>/stats.html')
@login_required
@permission_user_section_stats.require(http_exception=403)
def stats_item(user_id):
    """
    Per-user statistics detail page.
    :param user_id: primary key of the user
    :return: rendered template (404 if missing, 410 if soft-deleted)
    """
    user_info = get_user_row_by_id(user_id)
    # The resource must exist ...
    if not user_info:
        abort(404)
    # ... and must not have been soft-deleted.
    if user_info.status_delete == STATUS_DEL_OK:
        abort(410)
    # Stats data — FIX: reuse the row already fetched above instead of
    # issuing the exact same get_user_row_by_id() query a second time.
    user_stats_item_info = user_info
    # Document info
    document_info = DOCUMENT_INFO.copy()
    document_info['TITLE'] = _('user stats item')
    # Render the template
    return render_template(
        'user/stats_item.html',
        user_stats_item_info=user_stats_item_info,
        **document_info
    )
|
import random
import numpy as np
import torch
def set_seed(seed: int):
    """Seed the python, numpy and pytorch (CPU + all CUDA devices) RNGs."""
    seeders = (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all)
    for seeder in seeders:
        seeder(seed)
def get_random_state():
    """Snapshot the python, numpy and pytorch (CPU + all CUDA) RNG states."""
    state = {}
    state['python'] = random.getstate()
    state['numpy'] = np.random.get_state()
    state['pytorch'] = torch.get_rng_state()
    state['pytorch_cuda'] = torch.cuda.get_rng_state_all()
    return state
def set_random_state(state):
    """Restore the RNG states previously captured by get_random_state()."""
    python_state = state['python']
    numpy_state = state['numpy']
    torch_state = state['pytorch']
    random.setstate(python_state)
    np.random.set_state(numpy_state)
    torch.set_rng_state(torch_state)
    try:
        torch.cuda.set_rng_state_all(state['pytorch_cuda'])
    except IndexError:
        # Fewer CUDA devices available now than when the snapshot was taken.
        print('cannot load the cuda random state')
|
import subprocess
import pwd
import grp
import os
from config import Config as Configuration
class UserDeleteFailed(Exception):
    """Raised when removing a local system user fails."""
    pass
class UserAddFailed(Exception):
    """Raised when creating a local system user fails."""
    pass
class GroupAddFailed(Exception):
    """Raised when creating a local system group fails.

    FIX: add_group() raised this name while it was defined nowhere in the
    module, so every groupadd failure surfaced as a NameError instead.
    """
    pass


class UserManagement():
    """Thin wrapper around useradd/userdel/groupadd plus passwd/group lookups."""

    @classmethod
    def starting_user_id(cls):
        """Lowest uid considered a managed (non-system) account."""
        return Configuration.starting_uid_number()

    @classmethod
    def user_exist(cls, login, output=False):
        """Return True when *login* exists in the local passwd database."""
        try:
            pwd.getpwnam(login)
            if output:
                print('User {} on local system'.format(login))
            return True
        except KeyError:
            return False

    def group_exist(self, github_team):
        """Return True when the group *github_team* exists locally."""
        try:
            grp.getgrnam(github_team)
            return True
        except KeyError:
            return False

    def add_user(self, login, github_team, key):
        """Create *login* in its (lowercased, underscored) team group and install its SSH key.

        Raises UserAddFailed when useradd fails.
        """
        _github_team = github_team.lower().replace(' ', '_')
        if not self.group_exist(_github_team):
            self.add_group(_github_team)
        try:
            print('adding {}'.format(login))
            subprocess.run(['useradd', '-m', '-G', _github_team, login], check=True)
            self.add_ssh_pub_key(login, key)
        except subprocess.CalledProcessError:
            raise UserAddFailed("Failed to add {} add system".format(login))

    def add_group(self, github_team):
        """Create the local group; raises GroupAddFailed on failure."""
        try:
            subprocess.run(['groupadd', github_team], check=True)
        except subprocess.CalledProcessError:
            raise GroupAddFailed("Failed to add {} to system".format(github_team))

    def purge_user(self, login):
        """Delete *login* and its home directory, tolerating known userdel errors."""
        try:
            run = subprocess.run(['userdel', '-r', login], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # userdel exit codes: 12 = cannot remove home dir, 6 = no such user.
            if run.returncode == 12:
                print("Can't remove {}, does not own home directory".format(login))
            if run.returncode == 6:
                print("User {} already deleted".format(login))
        except subprocess.CalledProcessError:
            raise UserDeleteFailed("Failed to remove {} from system".format(login))

    def add_ssh_pub_key(self, user, public_key):
        """Write *public_key* entries to the user's authorized_keys, owned by the user.

        Returns an explanatory string (and does nothing) when no key is given.
        """
        if not public_key:
            return "No public key provided"
        _dir = '/home/' + user + '/.ssh'
        _file = 'authorized_keys'
        _auth_file = _dir + '/' + _file
        # FIX: os.mkdir raised FileExistsError when ~/.ssh already existed.
        os.makedirs(_dir, mode=0o700, exist_ok=True)
        with open(_auth_file, 'w') as f:
            for _key in public_key:
                f.write(_key + "\n")
        os.chown(_auth_file, self.get_uid(user), self.get_gid(user))
        os.chown(_dir, self.get_uid(user), self.get_gid(user))

    def get_uid(self, login):
        """Numeric uid of *login*."""
        return pwd.getpwnam(login)[2]

    def get_gid(self, login):
        """Primary gid of *login*."""
        return pwd.getpwnam(login)[3]

    def get_ids(self, uid):
        """Generator over passwd entries whose pw_uid >= *uid*."""
        return (entry for entry in pwd.getpwall() if entry.pw_uid >= uid)

    def list_local_uids(self):
        """Yield managed uids (uid >= starting_user_id, excluding 'nobody').

        FIX: the original used `return` inside the loop, so it produced only
        the first uid; now a generator, symmetric with list_local_logins().
        """
        for entry in self.get_ids(self.starting_user_id()):
            if entry.pw_uid != 65534:  # 65534 is the 'nobody' account
                yield entry.pw_uid

    def list_local_logins(self):
        """Yield login names of managed users (uid >= starting_user_id, excluding 'nobody')."""
        for entry in self.get_ids(self.starting_user_id()):
            if entry.pw_uid != 65534:
                yield entry.pw_name
|
# Generated by Django 4.0 on 2021-12-14 02:23
from django.db import migrations, models
import posts_apis.helpers.src.image_file
class Migration(migrations.Migration):
    """Initial migration: creates the Posts table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Posts',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=120)),
                ('content', models.TextField()),
                # Upload path is computed by the project's ImageManage helper.
                ('image', models.ImageField(blank=True, null=True, upload_to=posts_apis.helpers.src.image_file.ImageManage.set_image_file)),
                ('published', models.BooleanField(default=True)),
                ('slug', models.SlugField(blank=True, max_length=120)),
                ('create_at', models.DateTimeField(auto_now_add=True)),
                # NOTE(review): update_at uses auto_now_add (set once at
                # creation); auto_now (refreshed on every save) looks
                # intended — confirm before changing the model/migration.
                ('update_at', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'ordering': ('-create_at',),
            },
        ),
    ]
|
def generate_tree(base, trunk, leaves):
    """Print an ASCII tree: a leaf triangle of half-width base/2 over a 3-char trunk."""
    half = int(base / 2)
    for row in range(half + 1):
        # Each row is centered: padding shrinks as the leaf run widens.
        print(" " * (half - row) + leaves * (2 * row + 1))
    print(" " * (half - 1) + trunk * 3)
def main():
    """Prompt for base width, trunk char and leaf char, then print the tree."""
    parts = input("Enter the base-width, trunk, and leaves (space-delimited): ").split()
    generate_tree(int(parts[0]), parts[1], parts[2])


if __name__ == '__main__':
    main()
|
from bs4 import BeautifulSoup as BS
import requests, json
import pandas as pd
from common import *
#Parser using an User Agent
def parser(url):
    """Fetch *url* with a desktop Chrome User-Agent and return a BeautifulSoup tree."""
    # Some sites block requests' default UA, hence the browser header.
    headers = {"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'}
    response = requests.get(url, headers=headers)
    return BS(response.text, 'html.parser')
#Initialize scrapping by getting required infi from config json file to extract data
def crawler():
    """Read config.json, let the user pick a company page, scrape it and store Excel + JSON."""
    try:
        # config.json holds the Zaubacorp search URL under data[0]["html"].
        with open("config.json") as jdata:
            data = json.load(jdata)
        search_pg = data[0]["html"]
        #outf=data[0]["Outfile_name"] #Sample data for dev
        url = user_choice(search_pg)
        # Company tables packed as a list of [sheet_name, rows] lists.
        groupedlist1 = Extract_Zauba_indi(url)
        company_slug = str(url.split("/")[-2])
        store_excel(company_slug, groupedlist1)
        store_json(company_slug, groupedlist1)
        print("Scrapping completed Succesfully!!!")
    except Exception as E:
        print('Oops scrapping unsuccessful\nError: ', E)
#Command Line Choices Display
def user_choice(search_pg):
    """Interactively choose how to locate a company page and return its URL.

    Modes: 1 = by CIN number, 2 = by top search result, 3 = exact link.
    Exits the program when the input is invalid.
    """
    print("Welcome to Speed Crawler")
    mode = 0
    url = ""
    search_key = ""
    temp = ""
    try:
        mode = int(input("Please enter required mode number from options below:\n1 - Download Single Data by CIN number \n2 - Download Single Data by Top Search\n3 - Download Single Data by Exact Zaubacorp Link\n\nChoice: "))
        if (mode == 2):
            search_key = str(input("Enter Search Keyword: "))
            url = zauba_top_search(search_pg, search_key)
            return url
        if (mode == 1):
            search_key = (input("Enter CIN number: "))
            url = str(search_pg + search_key)
            print(url)
            # Follow redirects so the canonical company URL is returned.
            temp = requests.get(url)
            url = temp.url
            return url
        if (mode == 3):
            url = str(input("Enter exact Zaubacorp link: "))
            return url
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps Ctrl-C working as expected.
        print("Exiting program as no suitable response recieved")
        exit()
#Returns the Best Search Link
def zauba_top_search(search_pg, search_key):
    """Return the first Zaubacorp search-result link for *search_key*; exit when none found."""
    #search_key="sun" #sample search for devs
    top_link = ""
    soup2 = ""
    company_link = str(search_pg + search_key)
    try:
        soup2 = parser(company_link)
        search_data = (soup2.find("div", {"class": "col-xs-12"}))
        # First anchor of the first result table = best match.
        top_link = search_data.find_all("table")[0].find("a").get("href")
        print("\nScrapping initiated . . .")
        return str(top_link)
    except Exception:
        # FIX: narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        print("No search result found for " + search_key)
        exit()
#Pack the details of a specific company in a list.
def Extract_Zauba_indi(url):
    """Scrape up to 20 detail tables from a Zaubacorp company page.

    Returns a list of [sheet_name, rows] pairs, where sheet_name is the
    table heading truncated to 29 characters and rows is a list of
    cell-text lists. Exits the program when the page cannot be parsed.
    """
    try:
        soup = parser(url)  # fetch and parse the company page
        details = (soup.find_all("div", {"class": "col-lg-12 col-md-12 col-sm-12 col-xs-12"}))
        #print(details)
        i = 0
        # Scraping all tabulated company details from Zauba Corp
        groupedlist1 = []
        for i in range(20):
            list1 = []
            list2 = []
            field1 = []
            field2 = []
            try:
                field1 = details[i].find_all("table")[0].find_all("tr")  # table rows
                field2 = details[i].find_all("h4")[0].text.strip()  # heading / sheet name
            except:
                # Section i has no table or heading — skip it.
                continue
            for element in field1:
                sub_data = []
                for sub_element in element:
                    try:
                        sub_data.append(sub_element.text.strip())
                    except:
                        # Non-tag children (whitespace nodes) have no .text.
                        continue
                list1.append(sub_data)
            # Sheet names must stay under Excel's 32-character limit.
            list2.append(field2[:29])
            list2.append(list1)  # table rows, without the table name
            groupedlist1.append(list2)  # final list of scraped data with table name
            #print(field2,"\n",list1)
        print("Succesfully Extracted Data")
        return groupedlist1
    except:
        print("Oops, No matching Results, exiting Scrapper")
        exit()
if __name__ == '__main__':
    # Run the interactive crawler when executed as a script.
    crawler()
|
# Copyright 2018-2019 Faculty Science Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from marshmallow import fields, post_load
from faculty.clients.base import BaseSchema, BaseClient
# Immutable value object describing a casebook project.
Project = namedtuple("Project", ["id", "name", "owner_id"])
class ProjectSchema(BaseSchema):
    """Marshmallow schema mapping the casebook JSON payload onto Project."""

    id = fields.UUID(data_key="projectId", required=True)
    name = fields.Str(required=True)
    owner_id = fields.UUID(data_key="ownerId", required=True)

    @post_load
    def make_project(self, data):
        # Build the Project namedtuple once the fields are validated.
        # NOTE(review): newer marshmallow 3.x passes extra keyword args
        # (many, partial) to post_load hooks; this signature assumes the
        # older convention — confirm against the pinned marshmallow version.
        return Project(**data)
class ProjectClient(BaseClient):
    """Client for the 'casebook' service's project endpoints."""

    SERVICE_NAME = "casebook"

    def create(self, owner_id, project_name):
        """Create a project owned by *owner_id* and return it."""
        payload = {"owner_id": str(owner_id), "name": project_name}
        return self._post("/project", ProjectSchema(), json=payload)

    def get(self, project_id):
        """Fetch a single project by id."""
        return self._get("/project/{}".format(project_id), ProjectSchema())

    def get_by_owner_and_name(self, owner_id, project_name):
        """Fetch a project by its owner's id and its name."""
        return self._get(
            "/project/{}/{}".format(owner_id, project_name), ProjectSchema()
        )

    def list_accessible_by_user(self, user_id):
        """List every project the given user can access."""
        return self._get("/user/{}".format(user_id), ProjectSchema(many=True))
|
import getopt
import os
import sys
from BaseHTTPServer import HTTPServer
from composite_handler import CompositeHttpRequestHandler
def usage():
    """Print command-line usage for this media server script."""
    # FIX: the original used Python-2 print statements, which are syntax
    # errors on Python 3; parenthesized print() works on both 2 and 3.
    print("")
    print("Usage: " + sys.argv[0] + " [OPTIONS]")
    print(" -h HOST")
    print(" -p Port")
    print("")
if __name__ == "__main__":
host = ""
port = 8000
try:
opts, args = getopt.getopt(sys.argv[1:], "h:p:", ["host", "port"])
except getopt.GetoptError, err:
print(err)
sys.exit(-1)
for o, arg in opts:
if o in ("-h", "--host"):
host = arg
elif o in ("-p", "--port"):
port = int(arg)
else:
print "Unknown Options"
try:
CompositeHttpRequestHandler.dir_path = os.curdir + "/media"
# CompositeHttpRequestHandler.dir_path = os.path.dirname(os.path.realpath(__file__)) + "/../media"
httpd = HTTPServer((host, port), CompositeHttpRequestHandler)
except Exception as e:
sys.stderr.write(str(e))
sys.exit(-1)
print "Serving on " + host + ":" + str(port) + " ... "
while True:
httpd.handle_request()
|
import pytest
import responses
import requests
from piperci.artman import artman_client
# Shared keyword arguments for artman_client.post_artifact in the tests below.
_post_artifact_data = {
    "artman_url": "http://artman_url",
    "uri": "https://someminio.example.com/art1",
    "sri": "sha256-sCDaaxdshXhK4sA/v4dMHiMWhtGyQwA1fP8PgrN0O5g=",
    "type": "artifact",
    "caller": "pytest",
    "task_id": "1234",
}
@responses.activate
def test_post_artifact(post_artifact_response):
    """A successful POST returns the service's JSON payload unchanged."""
    responses.add(
        responses.POST, "http://artman_url/artifact", json=post_artifact_response
    )
    assert artman_client.post_artifact(**_post_artifact_data) == post_artifact_response


@pytest.mark.parametrize("response_code", [400, 500])
@responses.activate
def test_post_artifact_bad_response_code(response_code):
    """HTTP 4xx/5xx responses surface as RequestException."""
    responses.add(responses.POST, "http://artman_url/artifact", status=response_code)
    with pytest.raises(requests.exceptions.RequestException):
        artman_client.post_artifact(**_post_artifact_data)


def test_post_artifact_request_exception(mock_post_request_exception):
    """Transport-level failures propagate as RequestException."""
    with pytest.raises(requests.exceptions.RequestException):
        artman_client.post_artifact(**_post_artifact_data)
@pytest.mark.parametrize("test", [(200, True), (404, False)])
@responses.activate
def test_check_artifact_exists_for_sri_exists(test):
    """A 200 HEAD means the SRI exists; a 404 means it does not."""
    status, expected = test
    responses.add(responses.HEAD, "http://artman_url/artifact/sri/1234", status=status)
    actual = artman_client._check_artifact_exists_for_sri(
        artman_url="http://artman_url", sri_urlsafe="1234"
    )
    assert actual == expected


@responses.activate
def test_check_artifact_exists_for_sri_invalid_response():
    """Statuses other than 200/404 raise RequestException."""
    responses.add(responses.HEAD, "http://artman_url/artifact/sri/1234", status=400)
    with pytest.raises(requests.exceptions.RequestException):
        artman_client._check_artifact_exists_for_sri(
            artman_url="http://artman_url", sri_urlsafe="1234"
        )


def test_check_artifact_exists_for_sri_exception(mock_head_request_exception):
    """Transport errors propagate as RequestException."""
    with pytest.raises(requests.exceptions.RequestException):
        artman_client._check_artifact_exists_for_sri(
            artman_url="http://artman_url", sri_urlsafe="1234"
        )
@pytest.mark.parametrize("test", [(200, True), (404, False)])
@responses.activate
def test_check_artifact_exists_for_task_id_exists(test):
    """A 200 HEAD means the task's artifact exists; a 404 means it does not."""
    status, expected = test
    responses.add(
        responses.HEAD, "http://artman_url/artifact/task/1234", status=status
    )
    actual = artman_client._check_artifact_exists_for_task_id(
        artman_url="http://artman_url", task_id="1234"
    )
    assert actual == expected


@responses.activate
def test_check_artifact_exists_for_task_id_invalid_response():
    """Statuses other than 200/404 raise RequestException."""
    responses.add(responses.HEAD, "http://artman_url/artifact/task/1234", status=400)
    with pytest.raises(requests.exceptions.RequestException):
        artman_client._check_artifact_exists_for_task_id(
            artman_url="http://artman_url", task_id="1234"
        )


def test_check_artifact_exists_for_task_id_exception(mock_head_request_exception):
    """Transport errors propagate as RequestException."""
    with pytest.raises(requests.exceptions.RequestException):
        artman_client._check_artifact_exists_for_task_id(
            artman_url="http://artman_url", task_id="1234"
        )
@pytest.mark.parametrize(
    "arg",
    [
        (
            {"artman_url": "http://artman_url", "task_id": "1234"},
            "task/1234",
            {"x-gman-artifacts": "2"},
        ),
        (
            {"artman_url": "http://artman_url", "sri_urlsafe": "1234"},
            "sri/1234",
            {"x-gman-artifacts": "2"},
        ),
    ],
)
@responses.activate
def test_check_artifact_exists(arg):
    """check_artifact_exists HEADs the right endpoint for task_id / sri lookups."""
    kwargs, endpoint, headers = arg
    url = "http://artman_url/artifact/" + endpoint
    responses.add(responses.HEAD, url, headers=headers)
    assert artman_client.check_artifact_exists(**kwargs)
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == url


@pytest.mark.parametrize(
    "arg",
    [
        {"artman_url": "http://artman_url", "task_id": "1234", "sri_urlsafe": "1234"},
        {"artman_url": "http://artman_url"},
    ],
)
@responses.activate
def test_check_artifact_exists_invalid_parameter(arg):
    """Both selectors at once, or neither, is rejected with ValueError."""
    with pytest.raises(ValueError):
        artman_client.check_artifact_exists(**arg)
@responses.activate
def test_check_artifact_status():
    """The artifact status is read from the x-gman-artifact-status header."""
    responses.add(
        responses.HEAD,
        "http://artman_url/artifact/1234",
        headers={"x-gman-artifact-status": "unknown"},
    )
    status = artman_client.check_artifact_status(
        artman_url="http://artman_url", artifact_id="1234"
    )
    assert status == "unknown"


@pytest.mark.parametrize("response_code", [400, 500])
@responses.activate
def test_check_artifact_status_bad_response_code(response_code):
    """4xx/5xx responses raise HTTPError."""
    responses.add(
        responses.HEAD, "http://artman_url/artifact/1234", status=response_code
    )
    with pytest.raises(requests.exceptions.HTTPError):
        artman_client.check_artifact_status(
            artman_url="http://artman_url", artifact_id="1234"
        )


def test_check_artifact_status_exception(mock_head_request_exception):
    """Transport errors propagate as RequestException."""
    with pytest.raises(requests.exceptions.RequestException):
        artman_client.check_artifact_status(
            artman_url="http://artman_url", artifact_id="1234"
        )
@responses.activate
def test_get_artifacts_by_sri(artifact_list):
    """All artifacts stored for an SRI are returned."""
    responses.add(
        responses.GET, "http://artman_url/artifact/sri/1234", json=artifact_list
    )
    fetched = artman_client._get_artifacts_by_sri(
        artman_url="http://artman_url", sri_urlsafe="1234"
    )
    assert len(fetched) == 2


@responses.activate
def test_get_artifacts_by_sri_query_filter(artifact_list):
    """A query_filter callable is applied to the returned artifacts."""
    responses.add(
        responses.GET, "http://artman_url/artifact/sri/1234", json=artifact_list
    )
    fetched = artman_client._get_artifacts_by_sri(
        artman_url="http://artman_url",
        sri_urlsafe="1234",
        query_filter=lambda artifact: artifact.get("status") == "unknown",
    )
    assert len(fetched) == 2


@pytest.mark.parametrize("response_code", [400, 500])
@responses.activate
def test_get_artifacts_by_sri_bad_response_code(response_code):
    """4xx/5xx responses raise HTTPError."""
    responses.add(
        responses.GET, "http://artman_url/artifact/sri/1234", status=response_code
    )
    with pytest.raises(requests.exceptions.HTTPError):
        artman_client._get_artifacts_by_sri(
            artman_url="http://artman_url", sri_urlsafe="1234"
        )


def test_get_artifacts_by_sri_request_exception(mock_get_request_exception):
    """Transport errors propagate as RequestException."""
    with pytest.raises(requests.exceptions.RequestException):
        artman_client._get_artifacts_by_sri(
            artman_url="http://artman_url", sri_urlsafe="1234"
        )
@responses.activate
def test_get_artifact_by_artifact_id(artifacts):
    """A single artifact is fetched from /artifact/<id>."""
    url = "http://artman_url/artifact/1234"
    responses.add(responses.GET, url, json=artifacts[0])
    artman_client._get_artifact_by_artifact_id(
        artman_url="http://artman_url", artifact_id="1234"
    )
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == url


@pytest.mark.parametrize("response_code", [400, 500])
@responses.activate
def test_get_artifact_by_artifact_id_bad_response_code(response_code):
    """4xx/5xx responses raise HTTPError."""
    responses.add(
        responses.GET, "http://artman_url/artifact/1234", status=response_code
    )
    with pytest.raises(requests.exceptions.HTTPError):
        artman_client._get_artifact_by_artifact_id(
            artman_url="http://artman_url", artifact_id="1234"
        )


def test_get_artifact_by_artifact_id_request_exception(mock_get_request_exception):
    """Transport errors propagate as RequestException."""
    with pytest.raises(requests.exceptions.RequestException):
        artman_client._get_artifact_by_artifact_id(
            artman_url="http://artman_url", artifact_id="1234"
        )
@responses.activate
def test_get_artifact_by_task_id(artifacts):
    """Artifacts for a task are fetched from /artifact/task/<id>."""
    url = "http://artman_url/artifact/task/1234"
    responses.add(responses.GET, url, json=artifacts)
    artman_client._get_artifacts_by_task_id(
        artman_url="http://artman_url", task_id="1234"
    )
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == url


@responses.activate
def test_get_artifacts_by_task_id_query_filter(artifact_list):
    """A query_filter callable is applied to the returned artifacts."""
    responses.add(
        responses.GET, "http://artman_url/artifact/task/1234", json=artifact_list
    )
    fetched = artman_client._get_artifacts_by_task_id(
        artman_url="http://artman_url",
        task_id="1234",
        query_filter=lambda artifact: artifact.get("status") == "unknown",
    )
    assert len(fetched) == 2


@pytest.mark.parametrize("response_code", [400, 500])
@responses.activate
def test_get_artifacts_by_task_id_bad_response_code(response_code):
    """4xx/5xx responses raise HTTPError."""
    responses.add(
        responses.GET, "http://artman_url/artifact/task/1234", status=response_code
    )
    with pytest.raises(requests.exceptions.HTTPError):
        artman_client._get_artifacts_by_task_id(
            artman_url="http://artman_url", task_id="1234"
        )


def test_get_artifacts_by_task_id_request_exception(mock_get_request_exception):
    """Transport errors propagate as RequestException."""
    with pytest.raises(requests.exceptions.RequestException):
        artman_client._get_artifacts_by_task_id(
            artman_url="http://artman_url", task_id="1234"
        )
@responses.activate
def test_get_artifact_by_thread_id(mocker, artifact_list, thread_id_tasks_fixture):
    """Thread lookups resolve tasks via gman, then fetch per-task artifacts."""
    url = "http://artman_url/artifact/task/1234"
    responses.add(responses.GET, url, json=artifact_list)
    mocker.patch(
        "piperci.gman.client.get_thread_id_tasks",
        return_value=thread_id_tasks_fixture,
    )
    artman_client._get_artifacts_by_thread_id(
        artman_url="http://artman_url", thread_id="1234"
    )
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == url


@responses.activate
def test_get_artifacts_by_thread_id_query_filter(
    mocker, artifact_list, thread_id_tasks_fixture
):
    """A query_filter callable is applied to the returned artifacts."""
    responses.add(
        responses.GET, "http://artman_url/artifact/task/1234", json=artifact_list
    )
    mocker.patch(
        "piperci.gman.client.get_thread_id_tasks",
        return_value=thread_id_tasks_fixture,
    )
    fetched = artman_client._get_artifacts_by_thread_id(
        artman_url="http://artman_url",
        thread_id="1234",
        query_filter=lambda artifact: artifact.get("status") == "unknown",
    )
    assert len(fetched) == 2


@pytest.mark.parametrize("response_code", [400, 500])
@responses.activate
def test_get_artifacts_by_thread_id_bad_response_code(
    mocker, response_code, thread_id_tasks_fixture
):
    """4xx/5xx responses raise HTTPError."""
    responses.add(
        responses.GET, "http://artman_url/artifact/task/1234", status=response_code
    )
    mocker.patch(
        "piperci.gman.client.get_thread_id_tasks",
        return_value=thread_id_tasks_fixture,
    )
    with pytest.raises(requests.exceptions.HTTPError):
        artman_client._get_artifacts_by_thread_id(
            artman_url="http://artman_url", thread_id="1234"
        )


def test_get_artifacts_by_thread_id_request_exception(mock_get_request_exception):
    """Transport errors propagate as RequestException."""
    with pytest.raises(requests.exceptions.RequestException):
        artman_client._get_artifacts_by_thread_id(
            artman_url="http://artman_url", thread_id="1234"
        )
@pytest.mark.parametrize(
    "arg",
    [
        ({"artman_url": "http://artman_url", "task_id": "1234"}, "task/1234"),
        ({"artman_url": "http://artman_url", "sri_urlsafe": "1234"}, "sri/1234"),
        ({"artman_url": "http://artman_url", "artifact_id": "1234"}, "1234"),
    ],
)
@responses.activate
def test_get_artifact(arg, artifact_list):
    """get_artifact dispatches to the endpoint matching the selector used."""
    kwargs, endpoint = arg
    url = "http://artman_url/artifact/" + endpoint
    responses.add(responses.GET, url, json=artifact_list)
    artman_client.get_artifact(**kwargs)
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == url


@responses.activate
def test_get_artifact_thread(mocker, artifact_list, thread_id_tasks_fixture):
    """Thread selectors resolve through gman to per-task artifact endpoints."""
    url = "http://artman_url/artifact/task/1234"
    responses.add(responses.GET, url, json=artifact_list)
    mocker.patch(
        "piperci.gman.client.get_thread_id_tasks",
        return_value=thread_id_tasks_fixture,
    )
    artman_client.get_artifact(artman_url="http://artman_url", thread_id="1234")
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == url


@pytest.mark.parametrize(
    "arg",
    [
        {"artman_url": "http://artman_url", "task_id": "1234", "sri_urlsafe": "1234"},
        {"artman_url": "http://artman_url", "thread_id": "1234", "sri_urlsafe": "1234"},
        {"artman_url": "http://artman_url", "thread_id": "1234", "task_id": "1234"},
        {
            "artman_url": "http://artman_url",
            "task_id": "1234",
            "sri_urlsafe": "1234",
            "artifact_id": "1234",
        },
        {
            "artman_url": "http://artman_url",
            "artifact_id": "1234",
            "sri_urlsafe": "1234",
            "thread_id": "1234"
        },
        {
            "artman_url": "http://artman_url",
            "artifact_id": "1234",
            "task_id": "1234",
            "thread_id": "1234"
        },
        {
            "artman_url": "http://artman_url",
            "sri_urlsafe": "1234",
            "task_id": "1234",
            "thread_id": "1234"
        },
        {"artman_url": "http://artman_url"},
    ],
)
@responses.activate
def test_get_artifact_invalid_parameter(arg):
    """More than one selector, or none at all, is rejected with ValueError."""
    with pytest.raises(ValueError):
        artman_client.get_artifact(**arg)
|
#!/usr/bin/env python
import sys
import math
import time
import StringIO
from PIL import Image
import scipy.constants
from mathics.world import World
from mathics.viewport import Viewport
from mathics.machines import Pendulum, Timer, Point, Vector
def write_gif(gif, frames, duration, nq):
    """Quantise all frames to palette mode in place and save them as one animated GIF.

    gif : writable binary file-like object receiving the GIF data.
    frames : list of PIL images; mutated in place to 'P' mode.
    duration : total animation length, spread evenly across frames.
        NOTE(review): PIL interprets the per-frame value in milliseconds,
        while callers appear to pass seconds -- confirm units.
    nq : accepted for interface compatibility but unused here.
    """
    for i in range(len(frames)):
        frames[i] = frames[i].convert('P', palette = Image.ADAPTIVE)
    frames[0].save(gif, format="GIF", save_all=True, append_images=frames[1:], duration=duration/len(frames), loop=0)
def serve_gif(frames, duration, nq=0):
    """Encode frames to a GIF, save a copy to ./image.gif, then serve it over HTTP forever.

    Blocks in serve_forever() until interrupted with Ctrl-C (Python 2 script).
    """
    gif = StringIO.StringIO()
    timer_start = time.time()
    write_gif(gif, frames, duration, nq)
    # Persist a copy of the in-memory GIF to disk for inspection.
    with open('image.gif', 'wb') as f:
        gif.seek(0)
        f.write(gif.read())
    timer_end = time.time()
    print "stored gif in %i seconds." % (timer_end - timer_start)
    # serve image.gif over HTTP
    from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
    PORT_NUMBER = 8000
    # This handler answers every GET request with the encoded GIF.
    class myHandler(BaseHTTPRequestHandler):
        # Handler for the GET requests
        def do_GET(self):
            self.send_response(200)
            self.send_header('Content-type','image/gif')
            self.end_headers()
            # 'gif' is closed over from serve_gif's scope; rewind before each send.
            gif.seek(0)
            self.wfile.write(gif.read())
            return
    try:
        # Create a web server bound to all interfaces on PORT_NUMBER.
        server = HTTPServer(('', PORT_NUMBER), myHandler)
        # Wait forever for incoming http requests.
        server.serve_forever()
    except KeyboardInterrupt:
        print '^C received, shutting down the web server'
        server.socket.close()
if __name__ == '__main__':
    # Render an animated GIF of two pendulums and serve it over HTTP.
    # image settings
    supersample = 2   # render at 2x resolution, downscaled later for anti-aliasing
    width = 200
    font_size = 8
    # animation settings
    step = 0.05       # simulation timestep
    duration = 4      # animation length (same unit as step)
    blur = 0          # extra motion-blur frames per step
    # calculate internal size of world
    x = int(supersample*width)
    y = (6 * x / 5)   # NOTE(review): integer division under Python 2 -- presumably intended
    # automatically scale font with supersample
    world = World(x, y, Viewport.WHITE, ("/usr/share/fonts/truetype/freefont/FreeSansBold.ttf", supersample * font_size))
    # create and add viewports
    viewport_different = Viewport(-4, 1.9, 8, -0.5, (0,200,0))
    viewport = Viewport(-3, 3, 3, -3, Viewport.BEIGE)
    world.add_viewport(viewport, 0, y/6, x, y)
    world.add_viewport(viewport_different, 0, 0, x, y/6)
    # create and add machines: pendulum length chosen from the period via
    # L = (T/2pi)^2 * g (see the (T/(2*pi))^2 * g expressions below)
    seconds_pendulum = Pendulum(Point(0,1), Vector.from_polar((2/(2*math.pi)) * (2/(2*math.pi)) * scipy.constants.g, math.radians(320)))
    world.add_machine(seconds_pendulum)
    twoseconds_pendulum = Pendulum(Point(0,2), Vector.from_polar((4/(2*math.pi)) * (4/(2*math.pi)) * scipy.constants.g, math.radians(300)))
    world.add_machine(twoseconds_pendulum)
    timer = Timer(Point(2,2))
    world.add_machine(timer)
    viewport.add_axis(0.2, 1)
    # add object visualizations to viewports
    viewport.add_visualization(seconds_pendulum.visualization_basic)
    viewport.add_visualization(twoseconds_pendulum.visualization_basic)
    viewport.add_visualization(timer.visualization_basic)
    viewport_different.add_visualization(seconds_pendulum.visualization_different)
    viewport_different.add_visualization(twoseconds_pendulum.visualization_different)
    # generate frames; round duration up to a whole number of steps
    timer_start = time.time()
    duration = step * math.ceil(duration/step)
    frames = world.get_frames(0, duration, step, blur, 1.0/supersample)
    timer_end = time.time()
    print "generated %i frames in %i seconds. %f fps" % (len(frames) * (blur+1) - blur, timer_end - timer_start, (len(frames)*(blur+1))/duration)
    serve_gif(frames, duration)
|
import os
import time
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from tensorflow.keras.preprocessing import image
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.compiler.tensorrt import trt_convert as trt
from tensorflow.keras.applications.resnet_v2 import (
preprocess_input as resnet_v2_preprocess_input,
decode_predictions as resnet_v2_decode_predictions
)
def load_image(i, target_size=(130, 130)):
    """Load the i-th file in ./test/ resized to target_size; return (img, path)."""
    image_path = './test/' + os.listdir('./test/')[i]
    loaded = image.load_img(image_path, target_size=target_size)
    return (loaded, image_path)


def get_images(number_of_images, get_one_image=load_image):
    """Collect the first number_of_images images using the supplied loader."""
    return [get_one_image(index) for index in range(number_of_images)]
def batch_input(images):
    """Preprocess (img, path) pairs into a float32 constant of shape (N, 130, 130, 3)."""
    batch_size = len(images)
    batch = np.zeros((batch_size, 130, 130, 3), dtype=np.float32)
    for idx in range(batch_size):
        img = images[idx][0]
        arr = image.img_to_array(img)
        arr = np.expand_dims(arr, axis=0)
        # ResNetV2 preprocessing scales pixels into the network's expected range.
        batch[idx, :] = resnet_v2_preprocess_input(arr)
    return tf.constant(batch)
def predict_and_benchmark_throughput(batched_input, model, N_warmup_run=50, N_run=500):
    """Benchmark model.predict on a fixed batch and return the per-run predictions.

    Runs N_warmup_run untimed warm-up predictions, then times N_run predictions,
    printing a rolling average every 50 steps and the overall throughput.
    """
    elapsed_time = np.array([])
    all_preds = []
    batch_size = batched_input.shape[0]
    for _ in range(N_warmup_run):
        model.predict(batched_input)
    for step in range(N_run):
        start_time = time.time()
        preds = model.predict(batched_input)
        end_time = time.time()
        elapsed_time = np.append(elapsed_time, end_time - start_time)
        all_preds.append(preds)
        if step % 50 == 0:
            print('Steps {}-{} average: {:4.1f}ms'.format(step, step + 50, (elapsed_time[-50:].mean()) * 1000))
    print('Throughput: {:.0f} images/s'.format(N_run * batch_size / elapsed_time.sum()))
    return all_preds
def predict_and_benchmark_throughput_from_saved(batched_input, infer, N_warmup_run=50, N_run=500, model='custom'):
    """Benchmark a SavedModel signature callable and return per-run predictions.

    model='custom' unwraps the 'dense_1' output tensor to a numpy array;
    model='quantized' keeps the raw labeling object.
    """
    elapsed_time = np.array([])
    all_preds = []
    batch_size = batched_input.shape[0]
    for _ in range(N_warmup_run):
        labeling = infer(batched_input)
        if model == "quantized":
            preds = labeling
        if model == "custom":
            preds = labeling['dense_1'].numpy()
    for step in range(N_run):
        start_time = time.time()
        labeling = infer(batched_input)
        end_time = time.time()
        elapsed_time = np.append(elapsed_time, end_time - start_time)
        if model == "quantized":
            preds = labeling
        if model == "custom":
            preds = labeling['dense_1'].numpy()
        all_preds.append(preds)
        if step % 50 == 0:
            print('Steps {}-{} average: {:4.1f}ms'.format(step, step + 50, (elapsed_time[-50:].mean()) * 1000))
    print('Throughput: {:.0f} images/s'.format(N_run * batch_size / elapsed_time.sum()))
    return all_preds
def display_prediction_info(preds, images):
    """Plot each test image titled with its predicted malaria class.

    preds : list of per-image prediction arrays (sigmoid outputs, one value
        per prediction after flattening).
    images : list of (PIL image, path) pairs, parallel to preds.
    """
    class_names = ['Parasitized', 'Uninfected']
    for i in range(len(preds)):
        img_decoded_predictions = preds[i]
        predictions = img_decoded_predictions.flatten()
        img, path = images[i]
        print(path)
        # 'idx' instead of reusing 'i': the original shadowed the outer loop
        # variable, which is confusing even though range() is unaffected.
        for idx, predicted in enumerate(predictions):
            # Threshold 0.25 maps the sigmoid output onto the two classes.
            if predicted > 0.25:
                result = class_names[1]
            else:
                result = class_names[0]
            plt.figure()
            plt.axis('off')
            plt.title(result)
            plt.imshow(img)
            plt.show()
def load_tf_saved_model(input_saved_model_dir):
    """Load and return a TF SavedModel tagged for serving."""
    print('Loading saved model {}...'.format(input_saved_model_dir))
    return tf.saved_model.load(input_saved_model_dir, tags=[tag_constants.SERVING])
def convert_to_trt_graph_and_save(precision_mode='float32', input_saved_model_dir='malaria_model', calibration_data=''):
    """Convert a SavedModel to a TF-TRT graph at the given precision and save it.

    precision_mode : 'float32', 'float16' or 'int8'. int8 conversion calibrates
        using `calibration_data`.
    input_saved_model_dir : directory containing the source SavedModel.
    Raises ValueError for an unknown precision mode (previously this crashed
    later with NameError on the unset save suffix).
    """
    mode_table = {
        'float32': (trt.TrtPrecisionMode.FP32, '_TFTRT_FP32'),
        'float16': (trt.TrtPrecisionMode.FP16, '_TFTRT_FP16'),
        'int8': (trt.TrtPrecisionMode.INT8, '_TFTRT_INT8'),
    }
    if precision_mode not in mode_table:
        raise ValueError('Unknown precision_mode: {!r}'.format(precision_mode))
    precision_mode, converted_save_suffix = mode_table[precision_mode]
    output_saved_model_dir = input_saved_model_dir + converted_save_suffix
    conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(
        precision_mode=precision_mode,
        max_workspace_size_bytes=8000000000
    )
    converter = trt.TrtGraphConverterV2(
        input_saved_model_dir=input_saved_model_dir,
        conversion_params=conversion_params
    )
    print('Converting {} to TF-TRT graph precision mode {}...'.format(input_saved_model_dir, precision_mode))
    if precision_mode == trt.TrtPrecisionMode.INT8:
        # INT8 needs representative data to calibrate activation ranges.
        def calibration_input_fn():
            yield (calibration_data, )
        converter.convert(calibration_input_fn=calibration_input_fn)
    else:
        converter.convert()
    print('Saving converted model to {}...'.format(output_saved_model_dir))
    converter.save(output_saved_model_dir=output_saved_model_dir)
    print('Complete')
# Copyright (c) OpenMMLab. All rights reserved.
# Import the module named 'r' when this config is loaded; raise (rather than
# silently continue) if the import fails.
custom_imports = dict(imports=['r'], allow_failed_imports=False)
|
from .models import UserAuth, ContactUsData
from .jwt import TokenAuth
# Module-level singletons shared by the views that import this module.
Contact_Us = ContactUsData()
Token_Auth = TokenAuth()
User_Auth = UserAuth()
|
import torch
import unittest
from models import FactorVAE
from torchsummary import summary
class TestFAE(unittest.TestCase):
    """Smoke tests for the FactorVAE model (forward pass, losses, sampling)."""

    def setUp(self) -> None:
        # 3 input channels, 10-dimensional latent space.
        self.model = FactorVAE(3, 10)

    def test_summary(self):
        """Print a layer-by-layer summary for a 3x64x64 input."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        batch = torch.randn(16, 3, 64, 64)
        second_batch = torch.randn(16, 3, 64, 64)
        result = self.model(batch)
        # Exercise both optimiser branches of the two-player FactorVAE loss.
        loss = self.model.loss_function(*result, M_N=0.005, optimizer_idx=0, secondary_input=second_batch)
        loss = self.model.loss_function(*result, M_N=0.005, optimizer_idx=1, secondary_input=second_batch)
        print(loss)

    def test_optim(self):
        # Separate optimisers for the VAE and its discriminator. NOTE: the
        # attribute spelling 'discrminator' matches the model's own attribute.
        vae_optim = torch.optim.Adam(self.model.parameters(), lr=0.001)
        disc_optim = torch.optim.Adam(self.model.discrminator.parameters(), lr=0.001)

    def test_sample(self):
        self.model.cuda()
        samples = self.model.sample(144, 0)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
import math
from random import randint
# When True, VectorX(t) with a tuple first argument unpacks it rather than
# nesting it as a single element.
special_mode = False


class VectorX(tuple):
    """An immutable n-dimensional vector with elementwise arithmetic.

    Subclasses tuple, so instances compare equal to plain tuples with the
    same components and can be used as dict keys.
    """

    def __new__(cls, *n):
        if special_mode:
            return tuple.__new__(VectorX, n[0] if isinstance(n[0], tuple) else n)
        else:
            return tuple.__new__(VectorX, n)

    def __add__(self, other):
        return VectorX(*(x + y for x, y in zip(self, other)))

    def __sub__(self, other):
        return VectorX(*(x - y for x, y in zip(self, other)))

    def __mul__(self, other):
        # Elementwise for vector * vector, scalar broadcast otherwise.
        if isinstance(other, VectorX):
            return VectorX(*(x * y for x, y in zip(self, other)))
        else:
            return VectorX(*(x * other for x in self))

    def __truediv__(self, other):
        return self * (1 / other)

    def __floordiv__(self, other):
        return VectorX(*(x // other for x in self))

    def __repr__(self):
        return "VX" + tuple.__repr__(self)

    def __neg__(self):
        return self * -1

    def __bool__(self):
        # A vector is truthy when any component is non-zero.
        return any(self)

    def __abs__(self):
        return VectorX(*(abs(x) for x in self))

    def __mod__(self, other):
        return VectorX(*(x % other for x in self))

    def unit(self):
        """Return this vector scaled to length 1 (zero vector stays zero)."""
        try:
            return self / self.rlen
        except ZeroDivisionError:
            return VectorX(*[0] * len(self))

    def iter_space(self, layer=0):
        """Yield every integer lattice point in the box [0, self) over all axes."""
        for x in range(self[layer]):
            if layer < len(self) - 1:
                for v in self.iter_space(layer + 1):
                    yield VectorX(x, *v)
            else:
                yield VectorX(x)

    def iter_space_2d(self, start=None):
        """Yield VectorX lattice points of a self-sized 2-D box anchored at start.

        start now defaults to the origin, matching iter_space_2d_tuple
        (previously the parameter had no default despite the None check).
        """
        if start is None:
            start = zero
        for y in range(start.y, start.y + self.y):
            for x in range(start.x, start.x + self.x):
                yield VectorX(x, y)

    def iter_space_2d_tuple(self, start=None):
        """Like iter_space_2d, but yields plain (x, y) tuples."""
        if start is None:
            start = zero
        for y in range(start.y, start.y + self.y):
            for x in range(start.x, start.x + self.x):
                yield x, y

    def within(self, other):
        """True when every component lies in [0, other[i])."""
        return all(0 <= self[n] < other[n] for n in range(len(self)))

    def angle_to(self, other):
        # Screen-style coordinates: y grows downward, hence the sign asymmetry.
        return math.atan2(self.y - other.y, other.x - self.x)

    def len_to(self, other):
        """Euclidean distance to other."""
        return sum((self[n] - other[n]) ** 2 for n in range(len(self))) ** 0.5

    def alt_len(self, other):
        """Chebyshev (max-axis) distance to other."""
        return max(abs(self[n] - other[n]) for n in range(len(self)))

    @property
    def rlen(self):
        """Euclidean length of the vector."""
        return sum(x ** 2 for x in self) ** 0.5

    @property
    def smil(self):
        """Largest absolute component (Chebyshev norm)."""
        return max(abs(x) for x in self)

    @property
    def int(self):
        """Componentwise truncation to int."""
        return VectorX(*(int(x) for x in self))

    @property
    def x(self):
        return self[0]

    @property
    def y(self):
        return self[1]

    @property
    def z(self):
        return self[2]

    @property
    def hoz(self):
        """Horizontal component only: (x, 0)."""
        return VectorX(self[0], 0)

    @property
    def vert(self):
        """Vertical component only: (0, y)."""
        return VectorX(0, self[1])
class Coordinate(object):
    """A position bound to the area that contains it."""

    def __init__(self, area, pos):
        self.area = area
        self.pos = pos

    def __add__(self, other):
        # Offsetting a coordinate keeps it in the same area.
        return Coordinate(self.area, self.pos + other)

    def __sub__(self, other):
        return Coordinate(self.area, self.pos - other)

    def copy(self):
        """Return an independent coordinate at the same place."""
        return Coordinate(self.area, self.pos)

    def match(self, other):
        """Mutate this coordinate to point where `other` points."""
        self.area = other.area
        self.pos = other.pos

    def get(self, layer):
        """Look up this position on the given layer of its area."""
        return self.area.get(layer, self.pos)

    def __iter__(self):
        # Supports `area, pos = coordinate` unpacking.
        yield self.area
        yield self.pos
# Unit offsets in screen coordinates (y grows downward).
up=VectorX(0,-1)
left=VectorX(-1,0)
right=VectorX(1,0)
down=VectorX(0,1)
zero=VectorX(0,0)
# Cardinal directions, clockwise from up; then the four diagonals.
vdirs = up, right, down, left
ddirs=VectorX(-1,-1),VectorX(1,-1),VectorX(-1,1),VectorX(1,1)
def iter_offsets(root, offs=vdirs):
    """Yield root shifted by each offset (defaults to the four cardinal dirs)."""
    for delta in offs:
        yield root + delta
def get_uts(pos, dic, eq):
    """Encode, per diagonal neighbourhood of pos, which neighbours satisfy eq.

    For each diagonal d: +1 when the horizontal neighbour matches, +2 when the
    vertical neighbour matches, and 4 when both sides AND the diagonal cell
    itself match. Returns a 4-tuple (one code per diagonal in ddirs order).
    """
    codes = []
    for dd in ddirs:
        code = 0
        if eq(dic[pos + dd.hoz]):
            code += 1
        if eq(dic[pos + dd.vert]):
            code += 2
        if code == 3 and eq(dic[pos + dd]):
            code = 4
        codes.append(code)
    return tuple(codes)
import logging
import re
from datetime import date

import dateutil
import dateutil.parser
from geomet import wkt
from pylons import config

import ckan.plugins as p
from ckan.common import json
log = logging.getLogger(__name__)
def extras_to_dict(pkg):
    """Flatten a package's 'extras' list of {key, value} dicts into one dict."""
    if not (pkg and 'extras' in pkg):
        return {}
    return dict((extra['key'], extra['value']) for extra in pkg['extras'])
def geojson_to_wkt(value):
    """Convert a GeoJSON string into its WKT representation."""
    geometry = json.loads(value)
    return wkt.dumps(geometry)
def latest_news(truncate=2):
    """Return the newest `truncate` public pages, most recent first."""
    pages = p.toolkit.get_action('ckanext_pages_list')(None, {'private': False})
    return pages[::-1][:truncate]
def date_to_iso(value, temp_res=None):
    """Parse a date string and return it in ISO form, truncated to temp_res.

    temp_res: None keeps the full YYYY-MM-DD; u'month' truncates to YYYY-MM;
    u'year', u'decade' and u'century' truncate to YYYY.
    """
    # The old dead `result = ''` initialisation was removed; the parse result
    # always overwrote it.
    result = dateutil.parser.parse(value).isoformat().split('T')[0]
    if temp_res == u'month':
        result = '-'.join(result.split('-')[:2])
    elif temp_res in (u'year', u'decade', u'century'):
        result = result.split('-')[0]
    return result
def get_default_slider_values():
    """Return (begin, end) ISO dates bounding all datasets, defaulting to today."""
    def _edge_date(sort, q, field):
        # Ask the search index for the single dataset at the requested extreme.
        data_dict = {
            'sort': sort,
            'rows': 1,
            'q': q,
        }
        results = p.toolkit.get_action('package_search')({}, data_dict)['results']
        if len(results) == 1:
            value = results[0].get(field)
            return dateutil.parser.parse(value).isoformat().split('T')[0]
        return date.today().isoformat()

    begin = _edge_date('start_time asc', 'start_time:[* TO *]', 'start_time')
    end = _edge_date('end_time desc', 'end_time:[* TO *]', 'end_time')
    return begin, end
def get_date_url_param():
    """Read [begin, end] date filters from the request's query parameters."""
    params = ['', '']
    for key, value in p.toolkit.request.params.items():
        if key == 'ext_begin_date':
            params[0] = value
        elif key == 'ext_end_date':
            params[1] = value
    return params
def get_field_choices(dataset_type):
    """Map each choice-bearing field of a scheming dataset schema to {value: label}.

    Labels that are translation dicts are narrowed to their zh_TW variant.
    """
    from ckanext.scheming import helpers as scheming_helpers
    schema = scheming_helpers.scheming_get_dataset_schema(dataset_type)
    fields = dict()
    for field in schema['dataset_fields']:
        choices = field.get('choices')
        if not choices:
            continue
        mapped = dict()
        for choice in choices:
            label = choice['label']
            mapped[choice['value']] = label['zh_TW'] if isinstance(label, dict) else label
        fields[field['field_name']] = mapped
    return fields
def get_time_period():
    """Return (label, start, end) tuples for the 'time_period' choices, sorted by start.

    Labels are expected to embed the range as '...(start-end)'; the dash and
    parentheses split yields the start/end tokens.
    """
    time_period_dict = get_field_choices('dataset')['time_period']
    periods = []
    for value, label in time_period_dict.iteritems():
        parts = re.split(r'[-()]', label)
        periods.append((label, parts[-3], parts[-2]))
    periods.sort(key=lambda entry: entry[1])
    return periods
def string_to_list(value):
    """Normalise a form value to a list.

    None and the empty-string singleton list become []; lists pass through
    unchanged; any scalar is wrapped in a single-element list.
    """
    # `is None` instead of `== None` (identity check is the correct idiom).
    if value is None or value == [u'']:
        return []
    if isinstance(value, list):
        return value
    return [value]
def get_gmap_config():
    '''
    Returns a dict with all configuration options related to the
    Google Maps API (ie those starting with 'ckanext.taijiang.gmap'),
    keyed by the option name with the namespace prefix stripped.
    Logs a critical message when no api_key is configured.
    '''
    namespace = 'ckanext.taijiang.gmap.'
    gmap_configs = dict([(k.replace(namespace, ''), v) for k, v in config.iteritems()
                         if k.startswith(namespace)])
    if not gmap_configs.get('api_key'):
        log.critical('''Please specify a ckanext.taijiang.gmap.api_key
        in your config for the Google Maps layer''')
    # Return the dict built above instead of rebuilding the identical
    # comprehension from config a second time.
    return gmap_configs
|
"""
Test beamshapes predictions given model data.
Author: Thejasvi Beleyur, Acoustic and Functional Ecology,
Max Planck Institute for Ornithology, Seewiesen
License : This code is released under an MIT License.
Copyright 2020, Thejasvi Beleyur
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
import pandas as pd
def calculate_model_and_data_error(real_data, predictions, **kwargs):
    """Compare beamshape model predictions against measured data.

    Parameters
    ----------
    real_data : pd.DataFrame with 2 columns
        theta: float. Angle in radians.
        obs_relative_pressure: float>0. Observed relative pressure vs on-axis.
    predictions : pd.DataFrame with 2 columns.
        theta: float. Angle in radians.
        pred_relative_pressure: float>0. Predicted relative pressure vs on-axis.

    Keyword Arguments
    -----------------
    error_function : callable computing the mis/match between data and
        predictions. Defaults to sum_absolute_error.

    Returns
    --------
    prediction_error : output format depends on the error function used.
    """
    error_function = kwargs.get('error_function', sum_absolute_error)
    return error_function(real_data, predictions)


def sum_absolute_error(real_data, predictions):
    """Return Sum(|predicted - observed|) after checking thetas line up.

    Raises ValueError when the two frames' 'theta' columns differ.
    """
    if not check_if_angles_are_same(real_data, predictions):
        raise ValueError('The theta values are not the same between data and predictions - please check.')
    residuals = predictions['pred_relative_pressure'] - real_data['obs_relative_pressure']
    return np.sum(np.abs(residuals))


def dbeam_by_dtheta_error(real_data, predictions):
    '''
    First-order derivative of the beamshape with respect to emission angle.

    A low dbeam/dtheta error would mean data and predictions agree in shape
    even when their absolute values differ.

    NOTE: not yet implemented -- calling this currently returns None.
    '''


def check_if_angles_are_same(real_data, predictions):
    '''Return True when both frames carry identical 'theta' columns.'''
    return real_data['theta'].equals(predictions['theta'])
|
# Import base tools
import os
## Note, for mac osx compatability import something from shapely.geometry before importing fiona or geopandas
## https://github.com/Toblerity/Shapely/issues/553 * Import shapely before rasterio or fioana
from shapely import geometry
import rasterio
import random
from cw_tiler import main
from cw_tiler import utils
from cw_tiler import vector_utils
from cw_nets.Ternaus_tools import tn_tools
import numpy as np
import os
import random
import torch
import json
import logging
import time
import io
from tqdm import tqdm
# Setting Certificate Location for Ubuntu/Mac OS locations (Rasterio looks for certs in centos locations)
os.environ['CURL_CA_BUNDLE']='/etc/ssl/certs/ca-certificates.crt'
logger = logging.getLogger(__name__)
def get_processing_details(rasterPath, smallExample=False,
                           dstkwargs={"nodata": 0,
                                      "interleave": "pixel",
                                      "tiled": True,
                                      "blockxsize": 512,
                                      "blockysize": 512,
                                      "compress": "LZW"}):
    """Compute the UTM working extent and output GeoTIFF profile for a raster.

    Parameters
    ----------
    rasterPath : str
        Path or URL openable by rasterio.
    smallExample : bool
        When True, shrink the extent to the bounds of a 1000 m buffer
        around the raster centroid (quick-test mode).
    dstkwargs : dict
        Creation options merged into the output profile.
        NOTE(review): mutable default argument -- not mutated here, but
        callers should not rely on sharing it.

    Returns
    -------
    (rasterBounds, dst_profile)
        shapely box of the UTM extent, and the rasterio profile dict for
        the single-band uint8 output.
    """
    with rasterio.open(rasterPath) as src:
        # Get Lat, Lon bounds of the Raster (src)
        wgs_bounds = utils.get_wgs84_bounds(src)
        # Use Lat, Lon location of Image to get UTM Zone/ UTM projection
        utm_crs = utils.calculate_UTM_crs(wgs_bounds)
        # Calculate Raster bounds in UTM coordinates
        utm_bounds = utils.get_utm_bounds(src, utm_crs)
        vrt_profile = utils.get_utm_vrt_profile(src,
                                                crs=utm_crs,
                                                )
        # dst_profile aliases vrt_profile: both names refer to one dict,
        # so the updates below mutate vrt_profile too.
        dst_profile = vrt_profile
        dst_profile.update({'count': 1,
                            'dtype': rasterio.uint8,
                            'driver': "GTiff",
                            })
        # update for CogStandard
        dst_profile.update(dstkwargs)
        # open s3 Location
        rasterBounds = geometry.box(*utm_bounds)
        if smallExample:
            rasterBounds = geometry.box(*rasterBounds.centroid.buffer(1000).bounds)
        return rasterBounds, dst_profile
def generate_cells_list_dict(rasterBounds, cell_size_meters, stride_size_meters, tile_size_pixels, quad_space=True):
    """Build the analysis-grid cell lists covering a raster extent.

    Parameters
    ----------
    rasterBounds : shapely geometry whose .bounds define the extent.
    cell_size_meters, stride_size_meters : grid geometry in meters.
    tile_size_pixels : kept for interface compatibility (not used here).
    quad_space : bool
        Forwarded to calculate_analysis_grid.

    Returns
    -------
    dict mapping grid id -> list of (ll_x, ll_y, ur_x, ur_y) cells.
    """
    # Bug fix: the quad_space argument was previously ignored and
    # hard-coded to True in the call below.
    cells_list_dict = main.calculate_analysis_grid(rasterBounds.bounds,
                                                   stride_size_meters=stride_size_meters,
                                                   cell_size_meters=cell_size_meters,
                                                   quad_space=quad_space)
    return cells_list_dict
def createRasterMask(rasterPath,
                     cells_list_dict,
                     dataLocation,
                     outputName,
                     dst_profile,
                     modelPath,
                     tile_size_pixels,
                     logger=None):
    """Run the Ternaus model over each analysis grid and write result rasters.

    For every grid in cells_list_dict, writes three GeoTIFFs next to
    dataLocation (mask, contour, count) and returns their paths.

    Parameters
    ----------
    rasterPath : source imagery openable by rasterio.
    cells_list_dict : dict of grid id -> list of (ll_x, ll_y, ur_x, ur_y).
    dataLocation : output directory.
    outputName : base filename ending in '.tif'.
    dst_profile : rasterio profile for the outputs.
    modelPath : checkpoint path for tn_tools.get_model.
    tile_size_pixels : tile edge length passed to main.tile_utm.
    logger : optional logger; defaults to this module's logger.

    Returns
    -------
    list of dicts with keys 'mask', 'contour', 'count' (one per grid).
    """
    logger = logger or logging.getLogger(__name__)
    mask_dict_list = []
    model = tn_tools.get_model(modelPath)
    # NOTE(review): the three assignments below are dead -- they are
    # unconditionally overwritten inside the loop before first use.
    outputTifMask = os.path.join(dataLocation, outputName.replace('.tif', '_mask.tif'))
    outputTifCountour = os.path.join(dataLocation, outputName.replace('.tif', '_contour.tif'))
    outputTifCount = os.path.join(dataLocation, outputName.replace('.tif', '_count.tif'))
    # define Image_transform for Tile
    img_transform = tn_tools.get_img_transform()
    # Open Raster File
    with rasterio.open(rasterPath) as src:
        for cells_list_id, cells_list in cells_list_dict.items():
            # Per-grid output paths, suffixed with the grid id.
            outputTifMask = os.path.join(dataLocation, outputName.replace('.tif', '{}_mask.tif'.format(cells_list_id)))
            outputTifCountour = os.path.join(dataLocation, outputName.replace('.tif', '{}_contour.tif'.format(cells_list_id)))
            outputTifCount = os.path.join(dataLocation, outputName.replace('.tif', '{}_count.tif'.format(cells_list_id)))
            # Open Results TIF
            with rasterio.open(outputTifMask,
                               'w',
                               **dst_profile) as dst, \
                rasterio.open(outputTifCountour,
                              'w',
                              **dst_profile) as dst_countour, \
                rasterio.open(outputTifCount,
                              'w',
                              **dst_profile) as dst_count:
                src_profile = src.profile
                # NOTE(review): assumes key 0 exists in cells_list_dict (true
                # for calculate_analysis_grid output) -- confirm; also
                # "interating" typo is a runtime string, left unchanged.
                print("start interating through {} cells".format(len(cells_list_dict[0])))
                for cell_selection in tqdm(cells_list):
                    # Break up cell into four gorners
                    ll_x, ll_y, ur_x, ur_y = cell_selection
                    # Get Tile from bounding box
                    tile, mask, window, window_transform = main.tile_utm(src, ll_x, ll_y, ur_x, ur_y, indexes=None, tilesize=tile_size_pixels, nodata=None, alpha=None,
                                                                         dst_crs=dst_profile['crs'])
                    # Reshape/pad the tile to the network's input layout,
                    # normalize to [0, 1], and add a batch dimension on GPU.
                    img = tn_tools.reform_tile(tile)
                    img, pads = tn_tools.pad(img)
                    input_img = torch.unsqueeze(img_transform(img / 255).cuda(), dim=0)
                    predictDict = tn_tools.predict(model, input_img, pads)
                    # Returns predictDict = {'mask': mask, # Polygon Results for detection of buildings
                    #                        'contour': contour, # Contour results for detecting edge of buildings
                    #                        'seed': seed, # Mix of Contour and Mask for used by watershed function
                    #                        'labels': labels # Result of watershed function
                    #}
                    try:
                        dst.write(tn_tools.unpad(predictDict['mask'], pads).astype(np.uint8), window=window, indexes=1)
                        dst_countour.write(tn_tools.unpad(predictDict['seed'], pads).astype(np.uint8), window=window, indexes=1)
                        dst_count.write(np.ones(predictDict['labels'].shape).astype(np.uint8), window=window, indexes=1)
                    except (SystemExit, KeyboardInterrupt):
                        raise
                    except Exception:
                        # Best-effort: log the failed tile and keep going.
                        logger.error("Failed To write tile:")
                        logger.error("Failed window: {}".format(window))
                        logger.error("Failed cell_section: {}".format(cell_selection))
            resultDict = {'mask': outputTifMask,
                          'contour': outputTifCountour,
                          'count': outputTifCount}
            mask_dict_list.append(resultDict)
    return mask_dict_list
def process_results_mask(mask_dict_list, outputNameTiff, delete_tmp=True):
    """Merge per-grid mask/count rasters into one binary mask GeoTIFF.

    For each output block window, sums the overlapping per-grid masks,
    normalizes by the per-pixel coverage count, and thresholds at 1.0.

    Parameters
    ----------
    mask_dict_list : list of dicts with 'mask', 'contour', 'count' paths
        (as returned by createRasterMask).
    outputNameTiff : path of the merged output GeoTIFF.
    delete_tmp : bool
        NOTE(review): currently unused -- intermediate files are never
        deleted here; confirm intended behavior.

    Returns
    -------
    dict with key 'mask' -> outputNameTiff.
    """
    src_mask_list = []
    src_countour_list = []
    src_count_list = []
    try:
        for resultDict in tqdm(mask_dict_list):
            src_mask_list.append(rasterio.open(resultDict['mask']))
            src_countour_list.append(rasterio.open(resultDict['contour']))
            src_count_list.append(rasterio.open(resultDict['count']))
        src_mask_profile = src_mask_list[0].profile
        with rasterio.open(outputNameTiff,
                           'w',
                           **src_mask_profile) as dst:
            windows = [window for ij, window in dst.block_windows()]
            for window in tqdm(windows):
                firstCell = True
                for src_mask, src_contour, src_count in zip(src_mask_list, src_countour_list, src_count_list):
                    if firstCell:
                        data_mask = src_mask.read(window=window)
                        data_count = src_count.read(window=window)
                        firstCell = False
                    else:
                        data_mask += src_mask.read(window=window)
                        data_count += src_count.read(window=window)
                # Average by coverage count, then binarize.
                data_mask = (data_mask / data_count).astype(np.uint8)
                data_mask = (data_mask >= 1.0).astype(np.uint8)
                dst.write(data_mask, window=window)
    finally:
        # Bug fix: the source datasets were previously never closed,
        # leaking one file handle per opened raster.
        for src in src_mask_list + src_countour_list + src_count_list:
            src.close()
    resultDict = {'mask': outputNameTiff}
    return resultDict
def polygonize_results_mask(maskDict):
    """Vectorize the merged mask raster into GeoJSON-style features.

    Parameters
    ----------
    maskDict : dict with key 'mask' -> path of the merged mask GeoTIFF.

    Returns
    -------
    (results, src_profile)
        List of Feature dicts reprojected to EPSG:4326, plus the source
        raster's profile.
    """
    # Bug fix: `import rasterio` does not implicitly expose the
    # `features` and `warp` submodules; import them explicitly so the
    # attribute accesses below cannot raise AttributeError.
    import rasterio.features
    import rasterio.warp
    results = []
    with rasterio.open(maskDict['mask']) as src:
        src_profile = src.profile
        image = src.read(1)
        # Only polygonize pixels where the mask is set.
        mask = image > 0
        for i, (geom, val) in tqdm(enumerate(rasterio.features.shapes(image, mask=mask, transform=src.transform))):
            # Reproject each shape from the raster CRS to WGS84.
            geom = rasterio.warp.transform_geom(src.crs, 'EPSG:4326', geom, precision=6)
            results.append({
                "type": "Feature",
                'properties': {'raster_val': val},
                'geometry': geom
            })
    return results, src_profile
def write_results_tojson(results, dst_name):
    """Serialize features to *dst_name* as a GeoJSON FeatureCollection."""
    feature_collection = {
        'type': 'FeatureCollection',
        'features': list(results)}
    with open(dst_name, 'w') as out_file:
        json.dump(feature_collection, out_file)
|
#! -*- Encoding: Latin-1 -*-
import threading
CURRENT_VERSION = 1.0
Database = None
|
# Generated by Django 3.0.2 on 2020-02-29 15:07
from django.db import migrations
import multiselectfield.db.fields
class Migration(migrations.Migration):
    """Auto-generated: widen Role.privileges to the full privilege choice list.

    NOTE(review): the 'BackOffice.viewUsers' label reads 'View Backoffice
    Userss' (typo) and both 'Clients.*' entries share the label 'Manage
    Clients' -- these are runtime strings and must match users.models, so
    they are left unchanged here; confirm against the model definition.
    """
    dependencies = [
        ('users', '0003_auto_20200201_1227'),
    ]
    operations = [
        migrations.AlterField(
            model_name='role',
            name='privileges',
            field=multiselectfield.db.fields.MultiSelectField(choices=[('BackOffice.manageUser', 'Manage Backoffice Users'), ('BackOffice.viewUsers', 'View Backoffice Userss'), ('BackOffice.manageClient', 'Manage Backoffice Clients'), ('BackOffice.viewClients', 'View Backoffice Clients'), ('Clients.manageClient', 'Manage Clients'), ('Clients.manageUsers', 'Manage Clients'), ('Payments.initiatePayment', 'Initiate Payments'), ('Payments.approvePayments', 'Approve Payments'), ('Payments.reports', 'View Reports'), ('Payments.statement', 'View Statements'), ('Payments.viewPayments', 'View Payments')], max_length=238, null=True),
        ),
    ]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import autonetkit.log as log
import autonetkit.ank as ank_utils
import itertools
from autonetkit.ank_utils import call_log
@call_log
def mpls_te(anm):
    """Build the MPLS TE overlay from routers with mpls_te_enabled set."""
    g_in = anm['input']
    g_phy = anm['phy']  # unused; retained from original overlay access
    g_l3_conn = anm['l3_conn']
    # Create the overlay unconditionally so compilers can cheaply test
    # membership via anm['mpls_te'].
    g_mpls_te = anm.add_overlay('mpls_te')
    if not any(n.mpls_te_enabled for n in g_in.routers()):
        log.debug('No nodes with mpls_te_enabled set')
        return
    # te head end set if here
    g_mpls_te.add_nodes_from(g_in.routers())
    # Multipoint links are not supported in the TE topology; report them.
    multipoint_edges = [e for e in g_l3_conn.edges() if e.multipoint]
    if multipoint_edges:
        log.info('Excluding multi-point edges from MPLS TE topology: %s'
                 % ', '.join(str(e) for e in multipoint_edges))
    g_mpls_te.add_edges_from(set(g_l3_conn.edges()) - set(multipoint_edges))
@call_log
def mpls_oam(anm):
    """Build the MPLS OAM overlay when enabled in the input data."""
    g_in = anm['input']
    # Reuse the overlay if an earlier step already created it.
    if anm.has_overlay('mpls_oam'):
        g_mpls_oam = anm['mpls_oam']
    else:
        g_mpls_oam = anm.add_overlay('mpls_oam')
    if g_in.data.use_mpls_oam:
        g_mpls_oam.add_nodes_from(g_in.routers())
@call_log
def vrf_pre_process(anm):
    """Mark nodes in g_in based on their VRF role.

    CE nodes get ibgp_role = Disabled so they are excluded from iBGP
    (which is allocated later).
    """
    log.debug("Applying VRF pre-processing")
    for ce_node in anm['vrf'].nodes(vrf_role="CE"):
        log.debug("Marking CE node %s as non-ibgp" % ce_node)
        ce_node['input'].ibgp_role = "Disabled"
@call_log
def allocate_vrf_roles(g_vrf):
    """Allocate CE/PE/P roles to nodes in the VRF overlay.

    Nodes carrying a vrf attribute become CE; non-CE nodes adjacent (in
    the physical graph) to a CE become PE; the remainder become P.
    """
    g_phy = g_vrf.anm['phy']
    #TODO: might be clearer like ibgp with is_p is_pe etc booleans? - final step to translate to role for vis
    # Pre-marked CE nodes without a VRF name fall back to the default VRF.
    for ce_node in g_vrf.nodes(vrf_role="CE"):
        if not ce_node.vrf:
            ce_node.vrf = "default_vrf"
    ce_assigned = []
    for candidate in sorted(g_vrf.nodes('vrf')):
        candidate.vrf_role = "CE"
        ce_assigned.append(candidate)
    if ce_assigned:
        g_vrf.log.info("VRF role set to CE for %s"
                       % ", ".join(str(n) for n in sorted(ce_assigned)))
    pe_assigned = []
    p_assigned = []
    for candidate in sorted(n for n in g_vrf if n.vrf_role != "CE"):
        # Neighbors come from the physical graph for connectivity.
        router_neighbors = [n for n in g_phy.node(candidate).neighbors()
                            if n.is_router()]
        if any(g_vrf.node(n).vrf_role == "CE" for n in router_neighbors):
            # A physical neighbor has a VRF set in this graph.
            candidate.vrf_role = "PE"
            pe_assigned.append(candidate)
        else:
            candidate.vrf_role = "P"  # default role
            p_assigned.append(candidate)
    if pe_assigned:
        g_vrf.log.info("VRF role set to PE for %s"
                       % ", ".join(str(n) for n in sorted(pe_assigned)))
    if p_assigned:
        g_vrf.log.info("VRF role set to P for %s"
                       % ", ".join(str(n) for n in sorted(p_assigned)))
@call_log
def add_vrf_loopbacks(g_vrf):
    """Add one loopback per connected VRF on each PE and record RD indices."""
    #autonetkit.update_http(anm)
    for pe in g_vrf.nodes(vrf_role="PE"):
        connected_vrfs = {neigh.vrf for neigh in pe.neighbors(vrf_role="CE")}
        pe.node_vrf_names = connected_vrfs
        pe.rd_indices = {}
        for rd_index, vrf_name in enumerate(connected_vrfs, 1):
            pe.rd_indices[vrf_name] = rd_index
            pe.add_loopback(vrf_name=vrf_name,
                            description="loopback for vrf %s" % vrf_name)
@call_log
def build_ibgp_vpn_v4(anm):
    """Based on the ibgp_v4 hierarchy rules.

    Exceptions:
    1. Remove links to (PE, RRC) nodes

    CE nodes are excluded from RR hierarchy ibgp creation through pre-process step
    """
    #TODO: remove the bgp layer and have just ibgp and ebgp
    # TODO: build from design rules, currently just builds from ibgp links in bgp layer
    g_bgp = anm['bgp']
    g_ibgp_v4 = anm['ibgp_v4']
    g_vrf = anm['vrf']
    g_ibgp_vpn_v4 = anm.add_overlay("ibgp_vpn_v4", directed=True)
    # IPv6 MPLS VPNs are unsupported: warn for any VRF node with IPv6.
    v6_vrf_nodes = [n for n in g_vrf
                    if n.vrf is not None and n['phy'].use_ipv6 is True]
    if len(v6_vrf_nodes):
        message = ", ".join(str(s) for s in v6_vrf_nodes)
        # NOTE(review): "topolog" typo below is a runtime string; left as-is.
        log.warning("This version of AutoNetkit does not support IPv6 MPLS VPNs. "
                    "The following nodes have IPv6 enabled but will not have an associated IPv6 MPLS VPN topolog created: %s" % message)
    ibgp_v4_nodes = list(g_ibgp_v4.nodes())
    pe_nodes = set(g_vrf.nodes(vrf_role = "PE"))
    pe_rrc_nodes = {n for n in ibgp_v4_nodes if
                    n in pe_nodes and n.ibgp_role == "RRC"}
    #TODO: warn if pe_rrc_nodes?
    ce_nodes = set(g_vrf.nodes(vrf_role = "CE"))
    if len(pe_nodes) == len(ce_nodes) == len(pe_rrc_nodes) == 0:
        # no vrf nodes to connect
        return
    #TODO: extend this to only connect nodes which are connected in VRFs, so don't set to others
    # Copy the ibgp_v4 topology into the VPN overlay, minus CE nodes.
    ibgp_vpn_v4_nodes = (n for n in ibgp_v4_nodes
                         if n not in ce_nodes)
    g_ibgp_vpn_v4.add_nodes_from(ibgp_vpn_v4_nodes, retain = ["ibgp_role"])
    g_ibgp_vpn_v4.add_edges_from(g_ibgp_v4.edges(), retain = "direction")
    # Route reflectors keep route-targets so they can re-advertise routes.
    for node in g_ibgp_vpn_v4:
        if node.ibgp_role in ("HRR", "RR"):
            node.retain_route_target = True
    ce_edges = [e for e in g_ibgp_vpn_v4.edges()
                if e.src in ce_nodes or e.dst in ce_nodes]
    # mark ibgp direction
    ce_pe_edges = []
    pe_ce_edges = []
    for edge in g_ibgp_vpn_v4.edges():
        if (edge.src.vrf_role, edge.dst.vrf_role) == ("CE", "PE"):
            edge.direction = "up"
            edge.vrf = edge.src.vrf
            ce_pe_edges.append(edge)
        elif (edge.src.vrf_role, edge.dst.vrf_role) == ("PE", "CE"):
            edge.direction = "down"
            edge.vrf = edge.dst.vrf
            pe_ce_edges.append(edge)
    #TODO: Document this
    # Replace CE-adjacent edges in the plain ibgp graphs with the
    # direction/vrf-annotated versions built above.
    g_ibgpv4 = anm['ibgp_v4']
    g_ibgpv6 = anm['ibgp_v6']
    g_ibgpv4.remove_edges_from(ce_edges)
    g_ibgpv6.remove_edges_from(ce_edges)
    g_ibgpv4.add_edges_from(ce_pe_edges, retain = ["direction", "vrf"])
    g_ibgpv4.add_edges_from(pe_ce_edges, retain = ["direction", "vrf"])
    g_ibgpv6.add_edges_from(ce_pe_edges, retain = ["direction", "vrf"])
    g_ibgpv6.add_edges_from(pe_ce_edges, retain = ["direction", "vrf"])
    for edge in pe_ce_edges:
        # mark as exclude so don't include in standard ibgp config stanzas
        if g_ibgpv4.has_edge(edge):
            edge['ibgp_v4'].exclude = True
        if g_ibgpv6.has_edge(edge):
            edge['ibgp_v6'].exclude = True
    # legacy
    g_bgp = anm['bgp']
    g_bgp.remove_edges_from(ce_edges)
    g_bgp.add_edges_from(ce_pe_edges, retain = ["direction", "vrf", "type"])
    g_bgp.add_edges_from(pe_ce_edges, retain = ["direction", "vrf", "type"])
    # also need to modify the ibgp_v4 and ibgp_v6 graphs
@call_log
def build_mpls_ldp(anm):
    """Build the MPLS LDP overlay over PE and P routers."""
    g_in = anm['input']
    g_vrf = anm['vrf']
    g_l3conn = anm['l3_conn']
    g_mpls_ldp = anm.add_overlay("mpls_ldp")
    g_mpls_ldp.add_nodes_from(
        [n for n in g_in.routers() if n['vrf'].vrf_role in ("PE", "P")],
        retain=["vrf_role", "vrf"])
    # Sets give O(1) membership tests in the edge filters below.
    pe_nodes = set(g_vrf.nodes(vrf_role="PE"))
    p_nodes = set(g_vrf.nodes(vrf_role="P"))
    # PE <-> PE links.
    g_mpls_ldp.add_edges_from(e for e in g_l3conn.edges()
                              if e.src in pe_nodes and e.dst in pe_nodes)
    # PE <-> P links (either direction).
    g_mpls_ldp.add_edges_from(e for e in g_l3conn.edges()
                              if (e.src in pe_nodes and e.dst in p_nodes)
                              or (e.src in p_nodes and e.dst in pe_nodes))
    # P <-> P links.
    g_mpls_ldp.add_edges_from(e for e in g_l3conn.edges()
                              if e.src in p_nodes and e.dst in p_nodes)
@call_log
def mark_ebgp_vrf(anm):
    """Tag PE->CE eBGP edges with their VRF and exclude them from plain eBGP."""
    g_ebgp = anm['ebgp']  # unused; retained from original overlay access
    g_vrf = anm['vrf']
    pe_nodes = set(g_vrf.nodes(vrf_role="PE"))
    ce_nodes = set(g_vrf.nodes(vrf_role="CE"))
    # Same marking applied to both address families.
    for overlay_id in ('ebgp_v4', 'ebgp_v6'):
        for edge in anm[overlay_id].edges():
            if edge.src in pe_nodes and edge.dst in ce_nodes:
                # exclude from "regular" ebgp (as put into vrf stanza)
                edge.exclude = True
                edge.vrf = edge.dst['vrf'].vrf
@call_log
def build_vrf(anm):
    """Build the VRF overlay: roles, PE/CE/P edges, loopbacks, route-targets.

    Bug fix: the final edge-VRF assignment compared strings with ``is``
    (``edge.src.vrf_role is "CE"``), which relies on CPython string
    interning; replaced with ``==``.
    """
    g_in = anm['input']
    g_l3conn = anm['l3_conn']
    g_vrf = anm.add_overlay("vrf")
    import autonetkit
    autonetkit.ank.set_node_default(g_in, vrf=None)
    if not any(True for n in g_in.routers() if n.vrf):
        log.debug("No VRFs set")
        return
    g_vrf.add_nodes_from(g_in.routers(), retain=["vrf_role", "vrf"])
    allocate_vrf_roles(g_vrf)
    vrf_pre_process(anm)

    def is_pe_ce_edge(edge):
        # PE <-> CE edges carry customer-facing VRF links.
        src_vrf_role = g_vrf.node(edge.src).vrf_role
        dst_vrf_role = g_vrf.node(edge.dst).vrf_role
        return (src_vrf_role, dst_vrf_role) in (("PE", "CE"), ("CE", "PE"))

    vrf_add_edges = (e for e in g_l3conn.edges()
                     if is_pe_ce_edge(e))
    #TODO: should mark as being towards PE or CE
    g_vrf.add_edges_from(vrf_add_edges)

    def is_pe_p_edge(edge):
        # PE <-> P edges form the provider core.
        src_vrf_role = g_vrf.node(edge.src).vrf_role
        dst_vrf_role = g_vrf.node(edge.dst).vrf_role
        return (src_vrf_role, dst_vrf_role) in (("PE", "P"), ("P", "PE"))

    vrf_add_edges = (e for e in g_l3conn.edges()
                     if is_pe_p_edge(e))
    g_vrf.add_edges_from(vrf_add_edges)
    build_mpls_ldp(anm)
    # add PE to P edges
    add_vrf_loopbacks(g_vrf)
    # Allocate route-targets per AS.
    # This could later look at connected components for each ASN.
    route_targets = {}
    for asn, devices in ank_utils.groupby("asn", g_vrf.nodes(vrf_role="PE")):
        asn_vrfs = [d.node_vrf_names for d in devices]
        # flatten list to unique set
        asn_vrfs = set(itertools.chain.from_iterable(asn_vrfs))
        route_targets[asn] = {vrf: "%s:%s" % (asn, index)
                              for index, vrf in enumerate(sorted(asn_vrfs), 1)}
    g_vrf.data.route_targets = route_targets
    # Number VRF loopback interfaces from 101 upward.
    for node in g_vrf:
        vrf_loopbacks = node.interfaces("is_loopback", "vrf_name")
        for index, interface in enumerate(vrf_loopbacks, start=101):
            interface.index = index
    for edge in g_vrf.edges():
        # Set the vrf of the edge to be that of the CE device (either src or dst)
        # Bug fix: was `is "CE"` (identity comparison on a string literal).
        edge.vrf = edge.src.vrf if edge.src.vrf_role == "CE" else edge.dst.vrf
    # map attributes to interfaces
    for edge in g_vrf.edges():
        for interface in edge.interfaces():
            interface.vrf_name = edge.vrf
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .core import UnitedStates
class Arizona(UnitedStates):
    """Arizona

    Overrides the federal holiday labels with Arizona's official names.
    """
    # Arizona's official name for the Martin Luther King Jr. holiday.
    martin_luther_king_label = "Dr. Martin Luther King Jr./Civil Rights Day"
    # Arizona combines Lincoln and Washington into one Presidents' Day label.
    presidents_day_label = "Lincoln/Washington Presidents' Day"
|
"""
Written by Kushal, modified by Robik
"""
import sys
import json
import h5py
import numpy as np
DATA_PATH = '/hdd/robik/CLEVR'
GENSEN_PATH = '/hdd/robik/projects/gensen'
sys.path.append(f'{GENSEN_PATH}')
from gensen import GenSen, GenSenSingle
gensen_1 = GenSenSingle(
model_folder=f'{GENSEN_PATH}/data/models',
filename_prefix='nli_large_bothskip',
cuda=True,
pretrained_emb=f'{GENSEN_PATH}/data/embedding/glove.840B.300d.h5'
)
for split in ['train', 'val']:
feat_h5 = h5py.File(f'{DATA_PATH}/questions_{split}_clevr.h5', 'w')
ques = json.load(open(f'{DATA_PATH}/questions/CLEVR_{split}_questions.json'))
ques = ques['questions']
questions = [q['question'] for q in ques]
qids = [q['question_index'] for q in ques]
qids = np.int64(qids)
dt = h5py.special_dtype(vlen=str)
feat_h5.create_dataset('feats', (len(qids), 2048), dtype=np.float32)
feat_h5.create_dataset('qids', (len(qids),), dtype=np.int64)
feat_h5.create_dataset('questions', (len(qids),), dtype=dt)
feat_h5['qids'][:] = qids
feat_h5['questions'][:] = questions
chunksize = 5000
question_chunks = [questions[x:x + chunksize] for x in range(0, len(questions), chunksize)]
done = 0
for qchunk in question_chunks:
print(done)
_, reps_h_t = gensen_1.get_representation(
qchunk, pool='last', return_numpy=True, tokenize=True
)
feat_h5['feats'][done:done + len(qchunk)] = reps_h_t
done += len(qchunk)
feat_h5.close()
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to handle app lifecycle events."""
import logging
import os
import flask
from multitest_transport.core import config_loader
from multitest_transport.core import cron_kicker
from multitest_transport.core import ndb_upgrader
from multitest_transport.core import service_checker
from multitest_transport.models import sql_models
from multitest_transport.test_scheduler import download_util
from multitest_transport.test_scheduler import test_scheduler
from multitest_transport.util import env
from multitest_transport.util import tfc_client
from tradefed_cluster import common
APP = flask.Flask(__name__)
@APP.route('/init')
def AppStartHandler():
    """App start event handler.

    Runs the one-time startup sequence: service checks, config load, cron
    init, schema upgrades, download-lock release, and re-queueing of
    in-flight work. Returns HTTP 200 when done.
    """
    logging.info(
        'Server is starting... (%s, cli_version=%s)',
        env.VERSION, env.CLI_VERSION)
    logging.info('os.environ=%s', os.environ)
    # Verify dependent services, then load config and start cron jobs.
    service_checker.Check()
    config_loader.Load()
    cron_kicker.Init()
    # Update datastore and database if necessary
    ndb_upgrader.UpgradeNdb()
    sql_models.db.CreateTables()
    # Release any remaining test resource download trackers.
    download_util.ReleaseDownloadLocks()
    # Requeue non-final requests, commands and command attempts for monitoring.
    tfc_client.BackfillRequestSyncs()
    tfc_client.BackfillCommands()
    tfc_client.BackfillCommandAttempts()
    # Requeue or Cancel the pending test runs.
    test_scheduler.CheckPendingTestRuns()
    return common.HTTP_OK
|
"""Admin handler library."""
|
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Utilities for reading proof files from the git repo
import os.path
import StringIO
import logging
import babygit.appengine
import babygit.babygit
import babygit.repo
s = babygit.appengine.AEStore()
repo = babygit.repo.Repo(s)
class UrlCtx:
    """Resolve proof-file URLs against a base path inside the git repo.

    Relative URLs are joined to the directory of basefn; absolute URLs
    ('/...') are taken from the repo root; '-' yields the input stream.
    """
    def __init__(self, basefn, instream = None):
        logging.debug('basefn = ' + basefn)
        base_dir = os.path.split(basefn)[0]
        # Store repo-relative paths without a leading slash.
        self.base = base_dir[1:] if base_dir.startswith('/') else base_dir
        self.instream = instream
    def resolve(self, url):
        """Return a readable stream for url, or None if not in the repo."""
        if url == '-':
            return self.instream
        if url.startswith('/'):
            fn = url[1:]
        else:
            fn = os.path.join(self.base, url)
        logging.debug('opening: ' + fn)
        obj = repo.traverse(fn)
        if obj is None:
            return None
        return StringIO.StringIO(babygit.babygit.obj_contents(obj))
# todo: proofs "upto" functionality, based on split_gh_file
|
from collections import defaultdict, namedtuple
from enum import Enum
from functools import total_ordering
from itertools import product
import random
@total_ordering
class Action(Enum):
    """Grid movement actions, totally ordered by their numeric value."""
    UP = 0
    DOWN = 1
    LEFT = 2
    RIGHT = 3

    def __lt__(self, other):
        # Only comparable with other Action members.
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.value < other.value
# Immutable grid state: lion position plus which cows have been collected.
GridState = namedtuple("GridState", ("x", "y", "cows_vec"))
class MDP:
    """Deterministic cow-collecting grid MDP.

    The lion starts at lion_pos, is rewarded for visiting each cow
    position once, and gets a terminal reward for returning home with all
    cows collected. States are GridState(x, y, cows_vec).

    Grid layout (x grows east, y grows north):
        (9, 9)
        ...
        (0,0)
    """
    width = 10
    height = 10

    @staticmethod
    def _try_move(s, a):
        """Return the state reached from *s* by *a*, ignoring grid bounds."""
        return {
            Action.UP: GridState(s.x, s.y + 1, s.cows_vec),
            Action.DOWN: GridState(s.x, s.y - 1, s.cows_vec),
            Action.LEFT: GridState(s.x - 1, s.y, s.cows_vec),
            Action.RIGHT: GridState(s.x + 1, s.y, s.cows_vec)
        }[a]

    def _move(self, cell, direction):
        """Apply *direction* from *cell*, staying in bounds and marking any
        cow at the destination as collected."""
        next_cell = self._try_move(cell, direction)
        if next_cell.x not in range(0, self.width) or next_cell.y not in range(0, self.height):
            return cell
        if (next_cell.x, next_cell.y) in self.cow_positions:
            cow_ind = self.cow_positions.index((next_cell.x, next_cell.y))
            new_cow_vec = tuple(True if i == cow_ind else e for i, e in enumerate(cell.cows_vec))
            next_cell = GridState(next_cell.x, next_cell.y, new_cow_vec)
        return next_cell

    def _get_actions(self):
        """Map each state to its in-bounds actions.

        Bug fix: the UP bound previously tested ``self.width - 1``; it must
        use ``self.height - 1`` (identical only because the grid is square).
        """
        actions = {}
        for s in self.states:
            actions[s] = []
            if s.x > 0:
                actions[s].append(Action.LEFT)
            if s.x < self.width - 1:
                actions[s].append(Action.RIGHT)
            if s.y > 0:
                actions[s].append(Action.DOWN)
            if s.y < self.height - 1:
                actions[s].append(Action.UP)
        return actions

    @staticmethod
    def reverse_action(action):
        """Return the opposite of *action*."""
        return {Action.UP: Action.DOWN,
                Action.DOWN: Action.UP,
                Action.LEFT: Action.RIGHT,
                Action.RIGHT: Action.LEFT
                }[action]

    def _get_reward(self):
        """Build the (state, action) -> reward table.

        100 for reaching the end state; 50 for first-time cow pickups;
        0 otherwise (via defaultdict).
        """
        reward_partial = {}
        for s in self.states:
            for a in self.actions[s]:
                next_state = self._transitions_dict[(s, a)]
                if next_state == self.END_STATE:
                    reward_partial[(s, a)] = 100
                if (next_state.x, next_state.y) in self.cow_positions:
                    cow_ind = self.cow_positions.index((next_state.x, next_state.y))
                    if not s.cows_vec[cow_ind]:
                        reward_partial[(s, a)] = 50
        return defaultdict(int, reward_partial)

    def _get_states(self):
        """Enumerate all reachable states.

        On a cow's cell the cow must already be marked collected, so
        cows_vec combinations with that flag False are excluded.
        """
        states = []
        for x in range(self.width):
            for y in range(self.height):
                ncows = len(self.cow_positions)
                if (x, y) not in self.cow_positions:
                    cow_vectors = tuple(product([False, True], repeat=ncows))
                else:
                    cow_ind = self.cow_positions.index((x, y))
                    cow_vectors = tuple(prod for prod in product([False, True], repeat=ncows) if prod[cow_ind])
                for cw in cow_vectors:
                    states.append(GridState(x, y, cw))
        return states

    def reward_transition(self, s, a):
        """Return (reward, next_state) for taking *a* in *s*."""
        ss = self._transitions_dict[(s, a)]
        r = self._reward_dict[(s, a)]
        return r, ss

    def __init__(self, lion_pos=(0, 0), cow_positions=([(9, 9)])):
        # NOTE(review): cow_positions default is a mutable list; it is not
        # mutated here, so behavior is unaffected.
        self.lion_x, self.lion_y = lion_pos
        self.cow_positions = cow_positions
        self.START_STATE = GridState(self.lion_x, self.lion_y, tuple(False for _ in cow_positions))
        self.END_STATE = GridState(self.lion_x, self.lion_y, tuple(True for _ in cow_positions))
        self.states = self._get_states()
        self.actions = self._get_actions()
        self._transitions_dict = {(s, a): self._move(s, a) for s in self.states for a in self.actions[s]}
        self._reward_dict = self._get_reward()
class StochasticMDP(MDP):
    """MDP variant where each action succeeds only with a given probability;
    otherwise the opposite action is executed."""

    @staticmethod
    def _opposite(action):
        """Return the reverse of *action*."""
        flipped = {Action.UP: Action.DOWN,
                   Action.DOWN: Action.UP,
                   Action.LEFT: Action.RIGHT,
                   Action.RIGHT: Action.LEFT}
        return flipped[action]

    def _get_transitions(self, success_prob):
        """Return a sampling transition function for *success_prob*."""
        def transitions_func(state, action):
            roll = random.uniform(0, 1)
            actual_action = action if roll <= success_prob else self._opposite(action)
            return self._move(state, actual_action), actual_action
        return transitions_func

    def reward_transition(self, s, a):
        """Sample the next state for (s, a); reward follows the action that
        was actually executed."""
        next_state, actual_action = self._transitions_prob(s, a)
        return self._reward_dict[(s, actual_action)], next_state

    def __init__(self, lion_pos=(0, 0), cow_positions=([(9, 9)]), success_prob=0.7):
        super().__init__(lion_pos, cow_positions)
        self._transitions_prob = self._get_transitions(success_prob)
|
from django.urls import path, include
from . import views
# URL namespace for reversing, e.g. 'wallet:transfer'.
app_name = 'wallet'
urlpatterns = [
    # Dashboard (both the bare prefix and 'wallet_home' resolve here).
    path('', views.wallet_home, name='wallet_home'),
    path('wallet_home', views.wallet_home, name='wallet_home'),
    # path('transfer_money', views.transfer_money, name='transfer_money'),
    # Money movement.
    path('transfer', views.transfer, name='transfer'),
    path('make_changes', views.make_changes, name='make_changes'),
    # Transaction listings by state.
    path('transactions_to_be_accepted', views.transactions_to_be_accepted, name='transactions_to_be_accepted'),
    path('transactions_completed', views.transactions_completed, name='transactions_completed'),
    path('transactions_pending', views.transactions_pending, name='transactions_pending'),
    # Top-up flow (form, processing, OTP confirmation).
    path('add_money', views.add_money, name='add_money'),
    # path('group', views.group, name='group'),
    path('add_money_work', views.add_money_work, name='add_money_work'),
    path('add_money_after_otp', views.add_money_after_otp, name='add_money_after_otp'),
    # Accept/decline a pending transaction.
    path('transaction_accept', views.transaction_accept, name='transaction_accept'),
    path('transaction_decline', views.transaction_decline, name='transaction_decline'),
]
"""
JobsLib exceptions.
"""
__all__ = ['JobsLibError', 'TaskError', 'Terminate']
class JobsLibError(Exception):
    """Base error; ancestor of all other JobsLib errors."""
class TaskError(JobsLibError):
    """Raised when a task fails."""
class Terminate(BaseException):
    """Indicates that a terminate signal has been received.

    Subclasses BaseException (not Exception), so generic
    ``except Exception`` handlers do not catch it.
    """
|
import random
import numpy as np
from enum import IntEnum
class Directions(IntEnum):
    """Cells of a 3x3 neighbourhood, row-major from the north-west corner
    (X = centre)."""
    NW, N, NE, W, X, E, SW, S, SE = range(9)
class ADirections(IntEnum):
    """Agent action indices: the four cardinal moves plus X (stay)."""
    E, N, W, S, X = range(5)
class Agent():
    """Grid-world agent delegating action selection to a controller.

    Attributes:
        controller: object exposing ``viewsize`` and ``controllerAction(ob)``.
        viewsize: side length of the square observation window.
        viewport: number of tiles visible to either side of the agent.
        preferences: 1-based ranking over target types.
    """
    def __init__(self, controller):
        self.controller = controller
        self.viewsize = controller.viewsize
        # Bug fix: the original computed a viewport from viewsize
        # (3 -> 1, 5 -> 2, 7 -> 3) but discarded it and hard-coded
        # self.viewport = 1. (viewsize - 1) // 2 reproduces that mapping
        # and generalizes to any odd window size.
        self.viewport = (self.viewsize - 1) // 2  # number of tiles to either side
        self.preferences = [x + 1 for x in range(3)]  # TODO: make number a parameter
        self.direction = 0
        self.pos = (0, 0)

    def action(self, ob):
        """Ask the controller for an action given observation *ob*."""
        return self.controller.controllerAction(ob)  # preferences here?

    def reset_prefs(self):
        """Randomly shuffle the target preferences."""
        random.shuffle(self.preferences)

    def hardcode_prefs(self):
        """Reset preferences to deterministic ascending order."""
        self.preferences.sort()
class AC():
    """Hand-coded controller: step toward the most-preferred visible cell,
    breaking diagonal ties randomly."""

    def __init__(self, viewsize):
        self.viewsize = viewsize
        self.trainable = False
        self.recurrent = False

    def controllerAction(self, ob, preferences=None):
        """Score each visible cell by preference and move toward the best.

        NOTE(review): assumes ob is a square grid whose positive values
        index into *preferences* (1-based) and 0 means empty -- confirm
        against the environment. Returns an action index 0-4.
        """
        assert preferences is not None
        scores = [preferences[int(cell) - 1] if cell > 0 else 0
                  for cell in ob.transpose().flatten()]
        best_score = np.max(scores)
        best_cell = np.argmax(scores)
        if best_score == 0:
            # Nothing of interest visible: move at random.
            return random.randint(0, 3)
        if best_cell == Directions.NW:
            return random.choice([1, 2])
        if best_cell == Directions.N:
            return 1
        if best_cell == Directions.NE:
            return random.choice([0, 1])
        if best_cell == Directions.W:
            return 2
        if best_cell == Directions.X:
            return 4
        if best_cell == Directions.E:
            return 0
        if best_cell == Directions.SW:
            return random.choice([2, 3])
        if best_cell == Directions.S:
            return 3
        # Directions.SE
        return random.choice([3, 0])
class AC_Deterministic():
    """Hand-coded controller that deterministically steps toward the
    highest-preference visible entity (no random tie-breaking)."""
    def __init__(self, viewsize):
        self.trainable = False
        self.viewsize = viewsize
        self.recurrent = False
    def controllerAction(self, ob, preferences, maxindex=3): #todo: 4 = numagents+numobs
        """Pick an ADirections move from a multi-channel observation.

        NOTE(review): assumes ob is (viewsize, viewsize, channels) with
        one-hot entity channels in the first maxindex slices -- confirm
        against the environment's observation format.
        """
        # Prepend an all-zero channel so argmax index 0 means "empty".
        null = np.zeros((self.viewsize,self.viewsize,1))
        ob_zeros = np.c_[null, ob[...,:maxindex]]
        # Weighted argmax over channels collapses the one-hot volume to a
        # per-cell entity index (0 = empty, 1..maxindex = entity type).
        obmax = np.argmax(np.arange(1,maxindex+2)*ob_zeros[...,:maxindex+1], axis=2)
        # Score cells by preference; empty cells score -1.
        ob2 = [preferences[int(x)-1] if (x > 0) else -1 for x in obmax.transpose().flatten()]
        bestscore = np.max(ob2)
        bestpos = np.argmax(ob2)
        ret = 0
        if bestscore == -1:
            # Nothing visible: default to moving east.
            ret = ADirections.E
        elif bestpos == Directions.NW or bestpos == Directions.N:
            ret = ADirections.N
        elif bestpos == Directions.NE or bestpos == Directions.E:
            ret = ADirections.E
        elif bestpos == Directions.W or bestpos == Directions.SW:
            ret = ADirections.W
        elif bestpos == Directions.X:
            ret = ADirections.X
        elif bestpos == Directions.S or bestpos == Directions.SE:
            ret = ADirections.S
        #print(bestpos, ret)
        return ret
class AC_Network():
    """Controller backed by a trainable, recurrent network model."""

    def __init__(self, modelClass, modelParams, view_size, device):
        # Instantiate the wrapped model from the given class and params.
        self.model = modelClass(*modelParams)
        self.trainable = True
        self.viewsize = view_size
        self.recurrent = True

    def controllerAction(self, ob):
        """Move the observation to the GPU and let the model pick an action."""
        #return self.model.predict(ob)
        return self.model.choose_action(ob.cuda())

    def set_weights(self, weights):
        """Load a state dict into the wrapped model."""
        self.model.load_state_dict(weights)
import datetime
from keras import models
import pandas as pd
from sklearn.externals import joblib
#loads .tsv file and pickles it in data folder, returns panda dataframes
def load_tsv(filename: str):
    """Load a tab-separated file into a DataFrame and cache it as a pickle.

    Bare names are resolved against the project data folders; the frame
    is pickled next to the source file (same name, .pkl extension).

    Parameters
    ----------
    filename : str
        Path or bare name; '.tsv' is appended when missing.

    Returns
    -------
    pandas.DataFrame
    """
    # Bug fix: the original substring test ('tsv' in filename) skipped the
    # extension for any name merely containing "tsv"; test the suffix.
    if not filename.endswith('.tsv'):
        filename = filename + '.tsv'
    if 'data' not in filename:
        if 'eruption' in filename:
            filename = '../../data/eruption_data/' + filename
        if 'Logger' in filename:
            filename = '../../data/logger_data/' + filename
    try:
        df = pd.read_table(filename, sep='\t')
    except Exception:
        # Bug fix: was a bare except (also caught KeyboardInterrupt);
        # retry parser errors with the more forgiving python engine.
        df = pd.read_table(filename, sep='\t', engine='python')
    # save pickle to data folder
    save_filename = filename[:-3] + 'pkl'
    df.to_pickle(save_filename)
    return df
def unix_to_datetime(unix_epoch_time):
    """Convert seconds since the Unix epoch to a local-timezone datetime."""
    return datetime.datetime.fromtimestamp(unix_epoch_time)
def datetime_to_unix(dt):
    """Convert a naive datetime to seconds since the Unix epoch.

    Bug fix: the parameter was previously named ``datetime``, shadowing
    the module, and the body called the instance as a constructor -- every
    call raised TypeError.

    Parameters
    ----------
    dt : datetime.datetime (naive)

    Returns
    -------
    float seconds since 1970-01-01.
    """
    return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
def save_keras_model(model, filename):
    """Persist a Keras model as HDF5 under the saved_models data folder."""
    if 'h5' not in filename:
        filename += '.h5'
    model.save('../../data/saved_models/' + filename)
def save_sklearn_model(model, filename):
    """Persist a scikit-learn model with joblib under saved_models."""
    if 'joblib' not in filename:
        filename += '.joblib'
    if 'data' not in filename:
        filename = '../../data/saved_models/' + filename
    joblib.dump(model, filename)
def load_model(filename: str):
    """Load a saved Keras model, resolving bare names to saved_models."""
    # assume only filename, no rel. path specified
    if 'data' not in filename:
        filename = '../../data/saved_models/' + filename
    return models.load_model(filename)
def save_np_array(filename: str, arr):
    """Save *arr* as .npy, resolving bare names to saved_predictions.

    Bug fix: this module never imported numpy, so the original raised
    NameError on every call; import locally to keep the module's import
    block unchanged.
    """
    import numpy as np
    if 'data' not in filename:
        filename = '../../data/saved_predictions/' + filename
    return np.save(filename, arr)
from typing import Dict, Tuple, Union
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAuthenticated
from cart.models import CartItem
from cart.serializers import CartItemSerializer
def _is_num(n: str) -> bool:
try:
int(n)
except ValueError:
return False
else:
return True
def _process_cart_item(
    request,
) -> Union[
    Tuple[Dict[str, Union[bool, str]], CartItem],
    Tuple[Dict[str, Union[bool, str]], None],
]:
    """Resolve the cart item referenced by ``cart_item_id`` in the POST body.

    Returns a ``(data, item)`` pair: on success ``data['error']`` is False
    and ``item`` is the CartItem; otherwise ``data`` carries a message and
    ``item`` is None.
    """
    payload: Dict[str, Union[bool, str]] = {'error': True}
    raw_id = request.POST.get('cart_item_id')
    if raw_id is None or not _is_num(raw_id):
        payload['message'] = 'Cart item id not provided.'
        return payload, None
    try:
        item = CartItem.objects.get(id=raw_id)
    except CartItem.DoesNotExist:
        payload['message'] = 'Invalid cart item id provided.'
        return payload, None
    payload['error'] = False
    return payload, item
def _mutate_cart_item(request, delta: int) -> Response:
    """Adjust the posted cart item's quantity by *delta*.

    A quantity that reaches zero deletes the row; otherwise the new
    quantity is saved.  Responds 400 when the item could not be resolved.
    """
    data: Dict[str, Union[bool, str]]
    cart_item: CartItem
    data, cart_item = _process_cart_item(request)
    if not data['error']:
        cart_item.quantity += delta
        data['error'] = False
        if cart_item.quantity == 0:
            # zero quantity means the row is no longer needed
            data['message'] = 'Cart item removed.'
            cart_item.delete()
        else:
            data['message'] = (
                'Cart item quantity incremented.'
                if delta >= 0
                else 'Cart item quantity decremented.'
            )
            cart_item.save()
    http_status = status.HTTP_400_BAD_REQUEST if data['error'] else status.HTTP_200_OK
    return Response(data, status=http_status)
class RemoveCartItemAPIView(APIView):
    """Delete a cart item outright, regardless of its quantity."""

    permission_classes = (IsAuthenticated,)

    @staticmethod
    def post(request) -> Response:
        data, cart_item = _process_cart_item(request)
        if not data['error']:
            cart_item.delete()
            data['error'] = False
            data['message'] = 'Deleted cart item.'
        http_status = (
            status.HTTP_400_BAD_REQUEST if data['error'] else status.HTTP_200_OK
        )
        return Response(data, status=http_status)
class DecrementCartItemQuantity(APIView):
    """Decrease the posted cart item's quantity by one (deletes it at zero)."""

    permission_classes = (IsAuthenticated,)

    @staticmethod
    def post(request) -> Response:
        return _mutate_cart_item(request, delta=-1)
class IncrementCartItemQuantity(APIView):
    """Increase the posted cart item's quantity by one."""

    permission_classes = (IsAuthenticated,)

    @staticmethod
    def post(request) -> Response:
        return _mutate_cart_item(request, delta=1)
class AddCartItemAPIView(APIView):
    """Add a painting to the authenticated user's cart."""

    permission_classes = (IsAuthenticated,)

    @staticmethod
    def post(request) -> Response:
        data = {'error': True}
        painting_id = request.POST.get('painting_id')
        if painting_id is None or not _is_num(painting_id):
            data['message'] = 'Painting ID not provided.'
        else:
            request.user.cart.add(int(painting_id))
            data['error'] = False
            data['message'] = f'Added {painting_id} to cart.'
        http_status = (
            status.HTTP_400_BAD_REQUEST if data['error'] else status.HTTP_201_CREATED
        )
        return Response(data, status=http_status)
class CartItemListAPIView(ListAPIView):
    """List the authenticated user's cart items."""

    permission_classes = (IsAuthenticated,)
    serializer_class = CartItemSerializer

    def get_queryset(self):
        """Return the user's cart items with their paintings prefetched.

        Bug fix: ``prefetch_related`` returns a *new* queryset; the original
        called it and discarded the result, so the prefetch never happened.
        """
        return self.request.user.cart.items.all().prefetch_related('painting')
|
from functools import reduce
from abc import ABC, abstractmethod
from dataclasses import dataclass
import numpy as np
import pandas as pd
import os
class Data(ABC):
    """Abstract interface for household-survey data sources.

    Concrete subclasses (e.g. ``ENIGH_Data``) must implement reading,
    cleaning, classification, standardization and covariance analysis of
    the survey dataset.
    """
    @abstractmethod
    def classification(self):
        """Classify households according to the configured scheme"""
    @abstractmethod
    def classification_age_generation(self):
        """Classification according to age generation of members in household"""
    @abstractmethod
    def classification_sex_age(self):
        """Classification according to sex of household referent person and age of members"""
    @abstractmethod
    def clean_data(self):
        """Clean data: drop columns with more than 10% null values"""
    @abstractmethod
    def compute_representativity(self):
        """Compute representativity of energy consumption"""
    @abstractmethod
    def covariance_matrix(self):
        """Compute covariance matrix with respect to the energy variable"""
    @abstractmethod
    def drop_nonessential_columns(self):
        """Removes non-essential columns for analysis"""
    @abstractmethod
    def give_nodes(self):
        """Gives the name of the nodes per type of classification"""
    @abstractmethod
    def read_data(self):
        """Return the merged survey dataframe"""
    @abstractmethod
    def read_tables(self):
        """Read individual tables from the survey database"""
    @abstractmethod
    def proportion_nan(self):
        """Compute proportion of missing values for variables in the dataset"""
    @abstractmethod
    def standardization(self):
        """Standardization of dataset using Z-score per node type"""
@dataclass
class ENIGH_Data(Data):
    """ENIGH (Mexican national household survey) dataset for a given year.

    Attributes:
        year: survey year; 2016, 2018 and 2020 are supported.
        clean: if True, drop columns with >=10% missing values and any
            remaining rows containing NaN (see ``clean_data``).
        type_class: classification scheme; "Sex_HHRP_Age" or
            "Age_Generation" (must match the names used by ``give_nodes``).
    """
    year: int = 2016
    clean: bool = True
    type_class: str = "Sex_HHRP_Age"

    def classification(self, keep_columns=False) -> pd.DataFrame:
        """Classify households according to ``self.type_class``.

        Bug fix: the comparisons previously used "SexHHRP_Age" (missing the
        first underscore), which can never equal the "Sex_HHRP_Age" spelling
        used by the default attribute value and by ``give_nodes``, so the
        default configuration silently returned None.

        :param keep_columns: keep the intermediate "sex_hhrp"/"age" helper
            columns used to build the "node" column
        :return: classified dataframe (None for an unknown ``type_class``)
        """
        if self.type_class == "Sex_HHRP_Age":
            dataset = self.classification_sex_age(self.read_data())
            if not keep_columns:
                # helper columns are only needed to assemble "node"
                dataset.drop(columns=["sex_hhrp", "age"], inplace=True)
            return dataset
        elif self.type_class == "Age_Generation":
            return self.classification_age_generation(self.read_data())

    def classification_age_generation(self, dataset: pd.DataFrame) -> pd.DataFrame:
        """Label each row with an age-generation node based on "edad" (age).

        The bracket edges shift with the survey year so that each bracket
        keeps tracking the same birth decade (2016: <=16, 16-26, ..., 56-66;
        2018 and 2020 shift by +2 and +4 respectively).

        Bug fix: the last bracket originally read
        ``(edad > 56) & (edad >= 66)`` (and ``>= 68``/``>= 70`` for
        2018/2020), which selects ages *above* the bracket and leaves the
        57-65 band to the default node.  The upper bound is now ``<=`` so
        the six brackets tile the age axis and only older ages fall through
        to the default "G_older_50s" node.
        """
        if self.year in (2016, 2018, 2020):
            # lowest bracket edge: 16 for 2016, 18 for 2018, 20 for 2020
            base = self.year - 2000
            edges = [base + 10 * step for step in range(6)]
            generation = [dataset.edad <= edges[0]]
            generation += [(dataset.edad > low) & (dataset.edad <= high)
                           for low, high in zip(edges[:-1], edges[1:])]
            choices = self.give_nodes()[:-1]
            dataset["node"] = np.select(generation, choices, default="G_older_50s")
        return dataset

    def classification_sex_age(self, dataset: pd.DataFrame) -> pd.DataFrame:
        """Classify by sex of the household referent person plus age mix.

        Builds "sex_hhrp" ("H"/"M"), an age-composition code "age"
        ("1"-"6") and combines them into "node" (e.g. "H3").
        """
        # sex of the household referent person: 1 -> "H", 2 -> "M"
        sexHHRP = [(dataset.sexo_jefe == 1),
                   (dataset.sexo_jefe == 2)]
        choices = ["H", "M"]
        dataset["sex_hhrp"] = np.select(sexHHRP, choices, default="empty")
        # age composition: presence of working-age members (p12_64),
        # seniors (p65mas) and minors (menores)
        hh_members = [
            (dataset.p12_64 > 0) & (dataset.p65mas == 0) & (dataset.menores == 0),
            (dataset.p12_64 > 0) & (dataset.p65mas == 0) & (dataset.menores > 0),
            (dataset.p12_64 > 0) & (dataset.p65mas > 0) & (dataset.menores == 0),
            (dataset.p12_64 == 0) & (dataset.p65mas > 0) & (dataset.menores > 0),
            (dataset.p12_64 == 0) & (dataset.p65mas > 0) & (dataset.menores == 0),
            (dataset.p12_64 > 0) & (dataset.p65mas > 0) & (dataset.menores > 0)]
        choices = ["1", "2", "3", "4", "5", "6"]
        dataset["age"] = np.select(hh_members, choices, default="empty")
        dataset["node"] = dataset.sex_hhrp + dataset.age
        return dataset

    def clean_data(self, dataset: pd.DataFrame) -> pd.DataFrame:
        """Drop columns with >=10% missing values, then drop NaN rows."""
        column_missing = [column for column in dataset.columns
                          if np.mean(dataset[column].isnull()) >= 0.1]
        dataset.drop(columns=column_missing, inplace=True)
        return dataset.dropna()

    def compute_representativity(self, covariance_matrix):
        """Cumulative share of total |covariance| per node.

        :param covariance_matrix: output of ``covariance_matrix()``
        :return: dict mapping node -> dataframe with the descending
            cumulative proportion, a 1-based "id" rank and the raw
            "covariance" values
        """
        representativity = dict()
        for node in covariance_matrix.columns.unique():
            proportion = (covariance_matrix[node] / sum(covariance_matrix[node])).sort_values(
                ascending=False).cumsum()
            frame = proportion.to_frame()
            frame["id"] = range(1, len(frame) + 1)
            frame["covariance"] = covariance_matrix[node]
            representativity[node] = frame
        return representativity

    def covariance_matrix(self) -> pd.DataFrame:
        """Per-node absolute covariances of every variable with "energia"."""
        dict_covariance = dict()
        list_dataset_nodes = list()
        dict_standardize = self.standardization()
        for node in dict_standardize.keys():
            dict_covariance[node] = abs(dict_standardize[node].cov().energia)
            # the energy variable itself and the dwelling id carry no signal
            dict_covariance[node].drop(["energia", "vivienda"], inplace=True)
            dict_covariance[node].rename(node, inplace=True)
            list_dataset_nodes.append(dict_covariance[node])
        covariance_matrix = pd.concat(list_dataset_nodes, axis=1).fillna(0)
        return covariance_matrix

    def drop_nonessential_columns(self, dataset_merged):
        """Remove duplicated merge artifacts and analysis-irrelevant columns."""
        dataset_merged.drop(columns=["foliohog_x",
                                     "foliohog_y",
                                     "ubica_geo_y",
                                     "tam_loc_y",
                                     "est_socio_y",
                                     "est_dis_x",
                                     "est_dis_y",
                                     "upm_x",
                                     "upm_y",
                                     "factor_x",
                                     "factor_y",
                                     "smg",
                                     "numren",
                                     "foliohog"], inplace=True)
        # the "ageb" columns only exist before the 2018 survey edition
        if self.year < 2018:
            dataset_merged.drop(columns=["ageb_x",
                                         "ageb_y"], inplace=True)
        dataset_merged.rename(columns={"ubica_geo_x": "ubica_geo",
                                       "tam_loc_x": "tam_loc",
                                       "est_socio_x": "est_socio"}, inplace=True)
        return dataset_merged

    def give_nodes(self) -> list:
        """Return the node names for the configured classification scheme."""
        if self.type_class == "Sex_HHRP_Age":
            name_nodes = ["H1", "H2", "H3", "H4", "H5", "H6",
                          "M1", "M2", "M3", "M4", "M5", "M6"]
        elif self.type_class == "Age_Generation":
            name_nodes = ["G_after_2000", "G_90s", "G_80s", "G_70s",
                          "G_60s", "G_50s", "G_older_50s"]
        return name_nodes

    def read_data(self) -> pd.DataFrame:
        """Read the four ENIGH tables and merge them on "folioviv"."""
        hogar = self.read_tables("hogares.csv", self.year)
        poblacion = self.read_tables("poblacion.csv", self.year)
        concentrado = self.read_tables("concentradohogar.csv", self.year)
        viviendas = self.read_tables("viviendas.csv", self.year)
        datasets_list = [hogar, poblacion, concentrado, viviendas]
        dataset_merged = reduce(lambda left, right: pd.merge(left, right, on="folioviv", how="outer"),
                                datasets_list)
        dataset_merged = self.drop_nonessential_columns(dataset_merged)
        if self.clean:
            return self.clean_data(dataset_merged)
        else:
            return dataset_merged

    def read_tables(self, table_name, year) -> pd.DataFrame:
        """Read a single ENIGH csv table for *year*, indexed by "folioviv"."""
        data_path = os.getcwd() + "/data/ENIGH" + str(year) + "/"
        dataset = pd.read_csv(data_path + table_name,
                              index_col="folioviv",
                              low_memory=False,
                              na_values=[" ", "&"])
        return dataset

    def proportion_nan(self, dataset: pd.DataFrame):
        """Proportion of missing values for each column of *dataset*."""
        proportion_list = list()
        for column in dataset.columns:
            proportion = np.mean(dataset[column].isnull())
            proportion_list = np.append(proportion_list, proportion)
        return proportion_list

    def standardization(self) -> dict:
        """Z-score standardize the classified dataset, per node.

        Constant columns are dropped per node: they carry no information
        and would divide by zero.
        NOTE(review): the zero check uses the population std (ddof=0) but
        the scaling divides by the sample std (ddof=1) — confirm this
        asymmetry is intended.
        """
        dataset = self.classification().copy()
        dict_standardize = dict()
        for node in dataset.node.unique():
            dataset_node = dataset[dataset.node == node].copy()
            # columns[:-1] skips the trailing "node" column
            for column in dataset_node.columns[:-1]:
                if dataset_node[column].std(ddof=0) == 0:
                    dataset_node.drop(columns=column, inplace=True)
                else:
                    dataset_node[column] = (dataset_node[column] - dataset_node[column].mean()) / dataset_node[column].std()
            dict_standardize[node] = dataset_node
        return dict_standardize
|
from unittest import TestCase
from contoso.app import App
class AppTests(TestCase):
    """Smoke tests for :class:`contoso.app.App`."""

    def test_create_instance(self):
        """A freshly constructed App exposes foo == 'bar'."""
        instance = App()
        self.assertEqual("bar", instance.foo)
from __future__ import absolute_import
from functools import wraps
from django_ratelimit import ALL, UNSAFE
from django_ratelimit.core import is_ratelimited
from django_ratelimit.exceptions import Ratelimited
from django_ratelimit.record_handlers.proxy import RateLimitRecordProxy
__all__ = ["ratelimit"]
def ratelimit(group=None, key=None, rate=None, method=ALL, block=False):
    """View decorator that counts requests against a rate limit.

    Sets ``request.limited`` (preserving any earlier True value), records
    every breach via :class:`RateLimitRecordProxy`, and — when ``block`` is
    True — raises :class:`Ratelimited` instead of calling the view.
    """
    def decorator(fn):
        @wraps(fn)
        def _wrapped(request, *args, **kw):
            was_limited = getattr(request, "limited", False)
            now_limited = is_ratelimited(
                request=request,
                group=group,
                fn=fn,
                key=key,
                rate=rate,
                method=method,
                increment=True,
            )
            request.limited = now_limited or was_limited
            if now_limited:
                # record the breach even when the request is not blocked
                RateLimitRecordProxy.exceeded_limit_record(request=request)
                if block:
                    raise Ratelimited()
            return fn(request, *args, **kw)
        return _wrapped
    return decorator


# Re-export the method-group constants on the decorator for convenience.
ratelimit.ALL = ALL
ratelimit.UNSAFE = UNSAFE
|
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.nn.functional as F
class Classifier(nn.Module):
    """Two-layer MLP mapping text embeddings to per-speaker logits."""

    def __init__(self, text_embedding_dim, hidden_dim, num_speakers):
        super(Classifier, self).__init__()
        self.hidden = nn.Linear(text_embedding_dim, hidden_dim)
        self.output = nn.Linear(hidden_dim, num_speakers)

    def forward(self, x):
        """Return unnormalized speaker logits for embedding batch *x*."""
        hidden_activation = F.relu(self.hidden(x))
        return self.output(hidden_activation)
class GradientReversalLayer(torch.autograd.Function):
    """Identity in the forward pass; negates the gradient in the backward pass.

    Bug fix: the original backward returned ``-grad_output * x``, i.e. it
    scaled the negated gradient by the saved *input values*.  A gradient
    reversal layer (Ganin & Lempitsky, domain-adversarial training) must be
    the identity forward and pass back exactly ``-grad_output``.
    """

    @staticmethod
    def forward(ctx, x):
        # Identity; view_as avoids returning the input tensor object itself,
        # and nothing needs to be saved for backward.
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output.neg()
def gradient_reversal_layer(x):
    """Apply gradient reversal to *x* (identity forward, negated gradient)."""
    return GradientReversalLayer.apply(x)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=wrong-import-position, too-many-instance-attributes, too-many-locals
"""
Plot marginal distributions saved to JSON files by `summarize_clsim_table.py`
for one or more tables.
"""
from __future__ import absolute_import, division, print_function
__all__ = [
'formatter',
'plot_clsim_table_summary',
'parse_args',
'main'
]
__author__ = 'J.L. Lanfranchi'
__license__ = '''Copyright 2017 Justin L. Lanfranchi
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
from argparse import ArgumentParser
from collections import Mapping, OrderedDict
from copy import deepcopy
from glob import glob
from itertools import product
from os.path import abspath, dirname, join
import sys
import matplotlib as mpl
mpl.use('agg', warn=False)
import matplotlib.pyplot as plt
import numpy as np
from pisa.utils.jsons import from_json
from pisa.utils.format import format_num
# When executed as a script (not imported as part of a package), put the
# retro repository root on sys.path before importing retro utilities.
if __name__ == '__main__' and __package__ is None:
    RETRO_DIR = dirname(dirname(abspath(__file__)))
    if RETRO_DIR not in sys.path:
        sys.path.append(RETRO_DIR)
from retro.utils.misc import expand, mkdir
from retro.utils.plot import COLOR_CYCLE_ORTHOG
def formatter(mapping, key_only=False, fname=False):
    """Formatter for labels to go in plots and filenames.

    Parameters
    ----------
    mapping : Mapping
        Summary key/value pairs to format; only keys listed in `order`
        below are rendered, in that order.
    key_only : bool
        Render just the key names (used for "differs by" labels).
    fname : bool
        Produce a filename-safe string ('_' separators, '__'-joined) and
        skip the verbose keys (n_photons, norm, underflow, overflow).
    """
    # Rendering order; keys absent from `mapping` are simply skipped.
    order = [
        'hash_val',
        'string',
        'depth_idx',
        'seed',
        'table_shape',
        'n_events',
        'ice_model',
        'tilt',
        'n_photons',
        'norm',
        'underflow',
        'overflow'
    ] # yapf: disable
    line_sep = '\n'
    if fname:
        # these are too verbose / not filename-relevant
        for key in ('n_photons', 'norm', 'underflow', 'overflow'):
            order.remove(key)
    label_strs = []
    for key in order:
        if key not in mapping:
            continue
        if key_only:
            label_strs.append(key)
            continue
        if fname:
            sep = '_'
        else:
            sep = '='
        value = mapping[key]
        if key == 'n_photons':
            label_strs.append(
                '{}{}{}'.format(
                    key, sep, format_num(value, sigfigs=3, sci_thresh=(4, -3))
                )
            )
        elif key in ('depth_idx', 'seed', 'string', 'n_events', 'ice_model',
                     'tilt'):
            label_strs.append('{}{}{}'.format(key, sep, value))
        # NOTE(review): the two refractive-index branches below are
        # unreachable — neither key appears in `order` above; confirm
        # whether they should be added to the order list.
        elif key == 'group_refractive_index':
            label_strs.append('n_grp{}{:.3f}'.format(sep, value))
        elif key == 'phase_refractive_index':
            label_strs.append('n_phs{}{:.3f}'.format(sep, value))
        elif key in ('table_shape', 'underflow', 'overflow'):
            if key == 'table_shape':
                name = 'shape'
            elif key == 'underflow':
                name = 'uflow'
            elif key == 'overflow':
                name = 'oflow'
            # render each element as an int when exact, else 2 sig figs
            str_values = []
            for v in value:
                if float(v) == int(v):
                    str_values.append(format(int(np.round(v)), 'd'))
                else:
                    str_values.append(format_num(v, sigfigs=2, sci_thresh=(4, -3)))
            if fname:
                val_str = '_'.join(str_values)
                fmt = '{}'
            else:
                val_str = ', '.join(str_values)
                fmt = '({})'
            label_strs.append(('{}{}%s' % fmt).format(name, sep, val_str))
        elif key == 'hash_val':
            label_strs.append('hash{}{}'.format(sep, value))
        elif key == 'norm':
            label_strs.append(
                '{}{}{}'.format(
                    key, sep, format_num(value, sigfigs=3, sci_thresh=(4, -3))
                )
            )
    if not label_strs:
        return ''
    if fname:
        return '__'.join(label_strs)
    # plot labels: greedily pack entries into lines of at most ~120 chars
    label_lines = [label_strs[0]]
    for label_str in label_strs[1:]:
        if len(label_lines[-1]) + len(label_str) > 120:
            label_lines.append(label_str)
        else:
            label_lines[-1] += ', ' + label_str
    return line_sep.join(label_lines)
def plot_clsim_table_summary(
    summaries, formats=None, outdir=None, no_legend=False
):
    """Plot the table summary produced by `summarize_clsim_table`.

    Plots are made of marginalized 1D distributions, where mean, median,
    and/or max are used to marginalize out the remaining dimensions (where
    those are present in the summaries).

    Parameters
    ----------
    summaries : string, summary, or iterable thereof
        If string(s) are provided, each is glob-expanded. See
        :method:`glob.glob` for valid syntax.
    formats : None, string, or iterable of strings in {'pdf', 'png'}
        If no formats are provided, the plot will not be saved.
    outdir : None or string
        If `formats` is specified and `outdir` is None, the plots are
        saved to the present working directory.
    no_legend : bool, optional
        Do not display legend on plots (default is to display a legend)

    Returns
    -------
    all_figs : list of three :class:`matplotlib.figure.Figure`
    all_axes : list of three lists of :class:`matplotlib.axes.Axes`
    summaries : list of :class:`collections.OrderedDict`
        List of all summaries loaded

    Notes
    -----
    Bug fix: the membership test on `all_items` used the Python-2-only
    ``dict.has_key``; replaced by ``key not in all_items`` which behaves
    identically on Python 2 and also works on Python 3.
    """
    orig_summaries = deepcopy(summaries)
    # NOTE(review): `basestring` is Python-2 only; the module's __future__
    # imports suggest py2 was the target — porting to py3 needs `str` here.
    if isinstance(summaries, (basestring, Mapping)):
        summaries = [summaries]
    # glob-expand any string entries into concrete file paths
    tmp_summaries = []
    for summary in summaries:
        if isinstance(summary, Mapping):
            tmp_summaries.append(summary)
        elif isinstance(summary, basestring):
            tmp_summaries.extend(glob(expand(summary)))
    summaries = tmp_summaries
    # load every path that is still a string into an actual summary mapping
    for summary_n, summary in enumerate(summaries):
        if isinstance(summary, basestring):
            summary = from_json(summary)
            summaries[summary_n] = summary
    if formats is None:
        formats = []
    elif isinstance(formats, basestring):
        formats = [formats]
    if outdir is not None:
        outdir = expand(outdir)
        mkdir(outdir)
    n_summaries = len(summaries)
    if n_summaries == 0:
        raise ValueError(
            'No summaries found based on argument `summaries`={}'
            .format(orig_summaries)
        )
    for n, fmt in enumerate(formats):
        fmt = fmt.strip().lower()
        assert fmt in ('pdf', 'png'), fmt
    # collect every (non-dimension) key's value across all summaries
        formats[n] = fmt
    all_items = OrderedDict()
    for summary in summaries:
        for key, value in summary.items():
            if key == 'dimensions':
                continue
            # Bug fix: was the Python-2-only `all_items.has_key(key)`
            if key not in all_items:
                all_items[key] = []
            all_items[key].append(value)
    # split keys into those identical across summaries and those that vary
    same_items = OrderedDict()
    different_items = OrderedDict()
    for key, values in all_items.items():
        all_same = True
        ref_value = values[0]
        for value in values[1:]:
            if np.any(value != ref_value):
                all_same = False
        if all_same:
            same_items[key] = values[0]
        else:
            different_items[key] = values
    if n_summaries > 1:
        if same_items:
            print('Same for all:\n{}'.format(same_items.keys()))
        if different_items:
            print('Different for some or all:\n{}'
                  .format(different_items.keys()))
    same_label = formatter(same_items)
    # "detail" summaries can be ordered deterministically by (string, depth, seed)
    summary_has_detail = False
    if set(['string', 'depth_idx', 'seed']).issubset(all_items.keys()):
        summary_has_detail = True
        strings = sorted(set(all_items['string']))
        depths = sorted(set(all_items['depth_idx']))
        seeds = sorted(set(all_items['seed']))
    plot_kinds = ('mean', 'median', 'max')
    plot_kinds_with_data = set()
    dim_names = summaries[0]['dimensions'].keys()
    n_dims = len(dim_names)
    # figure geometry: one row of axes per table dimension
    fig_x = 10 # inches
    fig_header_y = 0.35 # inches
    fig_one_axis_y = 5 # inches
    fig_all_axes_y = n_dims * fig_one_axis_y
    fig_y = fig_header_y + fig_all_axes_y # inches
    all_figs = []
    all_axes = []
    for plot_kind in plot_kinds:
        fig, f_axes = plt.subplots(
            nrows=n_dims, ncols=1, squeeze=False, figsize=(fig_x, fig_y)
        )
        all_figs.append(fig)
        f_axes = list(f_axes.flat)
        for ax in f_axes:
            ax.set_prop_cycle('color', COLOR_CYCLE_ORTHOG)
        all_axes.append(f_axes)
    n_lines = 0
    # one independent [min, max] accumulator per dimension
    # (fixed: `[[...]] * n_dims` would create n_dims references to ONE list)
    xlims = [[np.inf, -np.inf] for _ in range(n_dims)]
    summaries_order = []
    if summary_has_detail:
        for string, depth_idx, seed in product(strings, depths, seeds):
            for summary_n, summary in enumerate(summaries):
                if (summary['string'] != string
                        or summary['depth_idx'] != depth_idx
                        or summary['seed'] != seed):
                    continue
                summaries_order.append((summary_n, summary))
    else:
        for summary_n, summary in enumerate(summaries):
            summaries_order.append((summary_n, summary))
    # only the first occurrence of each distinct label enters the legend
    labels_assigned = set()
    for summary_n, summary in summaries_order:
        different_label = formatter({k: v[summary_n] for k, v in different_items.items()})
        if different_label:
            label = different_label
            if label in labels_assigned:
                label = None
            else:
                labels_assigned.add(label)
        else:
            label = None
        for dim_num, dim_name in enumerate(dim_names):
            dim_info = summary['dimensions'][dim_name]
            dim_axes = [f_axes[dim_num] for f_axes in all_axes]
            bin_edges = summary[dim_name + '_bin_edges']
            if dim_name == 'deltaphidir':
                # plot deltaphidir in units of pi
                bin_edges /= np.pi
            xlims[dim_num] = [
                min(xlims[dim_num][0], np.min(bin_edges)),
                max(xlims[dim_num][1], np.max(bin_edges))
            ]
            for ax, plot_kind in zip(dim_axes, plot_kinds):
                if plot_kind not in dim_info:
                    continue
                plot_kinds_with_data.add(plot_kind)
                vals = dim_info[plot_kind]
                # duplicate the first value so `step` draws the first bin
                ax.step(bin_edges, [vals[0]] + list(vals),
                        linewidth=1, clip_on=True,
                        label=label)
                n_lines += 1
    dim_labels = dict(
        r=r'$r$',
        costheta=r'$\cos\theta$',
        t=r'$t$',
        costhetadir=r'$\cos\theta_{\rm dir}$',
        deltaphidir=r'$\Delta\phi_{\rm dir}$'
    )
    units = dict(r='m', t='ns', deltaphidir=r'rad/$\pi$')
    logx_dims = []
    logy_dims = ['r', 'time', 'deltaphidir']
    # filename label: shared keys + names of the keys that differ
    flabel = ''
    same_flabel = formatter(same_items, fname=True)
    different_flabel = formatter(different_items, key_only=True, fname=True)
    if same_flabel:
        flabel += '__same__' + same_flabel
    if different_flabel:
        flabel += '__differ__' + different_flabel
    for kind_idx, (plot_kind, fig) in enumerate(zip(plot_kinds, all_figs)):
        if plot_kind not in plot_kinds_with_data:
            continue
        for dim_num, (dim_name, ax) in enumerate(zip(dim_names, all_axes[kind_idx])):
            #if dim_num == 0 and different_items:
            if different_items and not no_legend:
                ax.legend(loc='best', frameon=False,
                          prop=dict(size=7, family='monospace'))
            ax.spines['top'].set_visible(False)
            ax.spines['right'].set_visible(False)
            ax.yaxis.set_ticks_position('none')
            ax.xaxis.set_ticks_position('none')
            ax.xaxis.tick_bottom()
            ax.yaxis.tick_left()
            ax.set_xlim(xlims[dim_num])
            xlabel = dim_labels[dim_name]
            if dim_name in units:
                xlabel += ' ({})'.format(units[dim_name])
            ax.set_xlabel(xlabel)
            if dim_name in logx_dims:
                ax.set_xscale('log')
            if dim_name in logy_dims:
                ax.set_yscale('log')
        fig.tight_layout(rect=(0, 0, 1, fig_all_axes_y/fig_y))
        suptitle = (
            'Marginalized distributions (taking {} over all other axes)'
            .format(plot_kind)
        )
        if same_label:
            suptitle += '\n' + same_label
        fig.suptitle(suptitle, y=(fig_all_axes_y + fig_header_y*0.8) / fig_y,
                     fontsize=9)
        for fmt in formats:
            outfpath = ('clsim_table_summaries{}__{}.{}'
                        .format(flabel, plot_kind, fmt))
            if outdir:
                outfpath = join(outdir, outfpath)
            fig.savefig(outfpath, dpi=300)
            print('Saved image to "{}"'.format(outfpath))
    return all_figs, all_axes, summaries
def parse_args(description=__doc__):
    """Parse command line args.

    Returns
    -------
    args : Namespace
    """
    arg_parser = ArgumentParser(description=description)
    arg_parser.add_argument(
        '--formats', choices=('pdf', 'png'), nargs='+', default='pdf',
        help='''Save plots to chosen format(s). Choices are "pdf" and "png".'''
    )
    arg_parser.add_argument(
        '--outdir', default=None,
        help='''Directory to which to save the plot(s). Defaults to same
        directory as the present working directory.'''
    )
    arg_parser.add_argument(
        '--no-legend', action='store_true',
        help='''Do not display a legend on the individual plots'''
    )
    arg_parser.add_argument(
        'summaries', nargs='+',
        help='''Path(s) to summary JSON files to plot. Note that literal
        strings are glob-expanded.'''
    )
    return arg_parser.parse_args()
def main():
    """Main function for calling plot_clsim_table_summary as a script"""
    cli_args = parse_args()
    plot_clsim_table_summary(**vars(cli_args))
# Standard script entry point.
if __name__ == '__main__':
    main()
|
"""Plylist exceptions."""
class UnknownFormatException(Exception):
    """Raised if the playlist format cannot be determined."""
class UnsupportedFormatException(Exception):
    """Raised if the playlist format is not supported."""
class PlaylistReadError(Exception):
    """Raised if the playlist file cannot be read."""
|
import Adafruit_DHT
import RPi.GPIO as gpio
from modules.UUGear import *
# NOTE(review): `sys` is never imported by name in this script; it is
# presumably re-exported by the star import from modules.UUGear — confirm.
input_value_type = sys.argv[1]  # sensor kind: 'tem', 'hum' or 'flame'
input_gpio_id = sys.argv[2]  # GPIO pin number (string)
def get_dht11_value(value_type, value_id):
    """Read a DHT11 sensor on pin *value_id*.

    Returns temperature for 'tem', humidity for 'hum', and the string
    'dht11 error' on a failed read or unknown value_type.
    """
    humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT11, value_id)
    if humidity is None or temperature is None:
        return str('dht11 error')
    if value_type == 'tem':
        return str(temperature)
    if value_type == 'hum':
        return str(humidity)
    return str('dht11 error')
def get_flame_sensor_value(value_id):
    """Read the digital flame-sensor input on BCM pin *value_id* as a string."""
    pin = int(value_id)
    gpio.setmode(gpio.BCM)
    gpio.setup(pin, gpio.IN)
    return str(gpio.input(pin))
# Dispatch on the requested sensor type and print the reading to stdout;
# the script is intended to be invoked once per reading.
if input_value_type == 'tem':
    print(get_dht11_value('tem', input_gpio_id))
elif input_value_type == 'hum':
    print(get_dht11_value('hum', input_gpio_id))
elif input_value_type == 'flame':
    print(get_flame_sensor_value(input_gpio_id))
else:
    print('input error')
sys.exit(0)
|
from google.appengine.ext import ndb
class User(ndb.Model):
    """
    User model for the FILL app.

    Bug fix: the availability checks compared ``count() is 0`` — identity
    comparison against an int literal is implementation-defined (and a
    SyntaxWarning on modern CPython); replaced with ``== 0``.
    """
    # User Properties
    name = ndb.StringProperty(required=True)
    username = ndb.StringProperty(required=True)
    email = ndb.StringProperty(required=True)
    password_hash = ndb.StringProperty(required=True)
    bio = ndb.TextProperty()
    education = ndb.TextProperty()
    skills = ndb.TextProperty()
    interests = ndb.TextProperty()
    profile_pic = ndb.StringProperty(default="http://www.homepcpatrol.com/sites/default/files/imagecache/Profile_Full/alice-stilwell.jpg")

    @classmethod
    def is_username_available(cls, username):
        """Check if a username is available to claim"""
        return cls.query(cls.username == username).count() == 0

    @classmethod
    def is_email_available(cls, email):
        """Check if an email is available to claim"""
        return cls.query(cls.email == email).count() == 0

    @classmethod
    def get_user(cls, username):
        """Return the first user matching a username query, or None"""
        userlist = cls.query(cls.username == username).fetch(1)
        if len(userlist) == 0:
            return None
        else:
            return userlist[0]

    @classmethod
    def get_users(cls, username):
        """Return a list of users matching a query, or None when empty"""
        userlist = cls.query(cls.username == username).fetch()
        if len(userlist) == 0:
            return None
        else:
            return userlist

    @classmethod
    def get_user_by_id(cls, id):
        """Get a user by its key id"""
        return cls.get_by_id(int(id))
class Event(ndb.Model):
    """
    Event model for the FILL app.

    Bug fix: ``get_events_by_volunteer`` and ``get_events_by_request``
    chained their three role filters with Python's ``or`` operator, which
    simply returns the first (truthy) filter node — so only the volunteers
    filter was ever applied.  They now use ``ndb.OR`` to build the intended
    disjunction query.
    """
    # Basic Info
    name = ndb.StringProperty(required=True)
    date = ndb.DateTimeProperty(required=True)
    admin = ndb.KeyProperty(required=True)
    location = ndb.StringProperty(required=True)
    description = ndb.TextProperty(required=True)
    language = ndb.StringProperty(required=True)
    accessibility = ndb.StringProperty(required=True)
    hours = ndb.IntegerProperty(required=True)
    physical_activity = ndb.StringProperty(required=True)
    # Needed Personnel (Integers)
    volunteers_needed = ndb.IntegerProperty()
    drivers_needed = ndb.IntegerProperty()
    translators_needed = ndb.IntegerProperty()
    # Requests (List of Users)
    volunteer_requests = ndb.KeyProperty(repeated=True)
    driver_requests = ndb.KeyProperty(repeated=True)
    translator_requests = ndb.KeyProperty(repeated=True)
    # Accepted Personnel (List of Users)
    volunteers = ndb.KeyProperty(repeated=True)
    drivers = ndb.KeyProperty(repeated=True)
    translators = ndb.KeyProperty(repeated=True)

    # Instance methods for getting progress bar ratios
    # NOTE(review): these divide by *_needed without guarding against
    # None/zero — confirm callers guarantee the value is a positive int.
    def volunteer_fill_percentage(self):
        """Percent (int) of needed volunteer slots that are filled."""
        return int(float(len(self.volunteers))/self.volunteers_needed*100)

    def driver_fill_percentage(self):
        """Percent (int) of needed driver slots that are filled."""
        return int(float(len(self.drivers))/self.drivers_needed*100)

    def translator_fill_percentage(self):
        """Percent (int) of needed translator slots that are filled."""
        return int(float(len(self.translators))/self.translators_needed*100)

    def verify(self):
        # Fix some event values that are broken ... deprecated
        if self.volunteers_needed == [None]:
            self.volunteers_needed = []
        if self.drivers_needed == [None]:
            self.drivers_needed = []
        if self.translators_needed == [None]:
            self.translators_needed = []
        if self.volunteer_requests == [None]:
            self.volunteer_requests = []
        if self.driver_requests == [None]:
            self.driver_requests = []
        if self.translator_requests == [None]:
            self.translator_requests = []
        if self.volunteers == [None]:
            self.volunteers = []
        if self.drivers == [None]:
            self.drivers = []
        if self.translators == [None]:
            self.translators = []

    # Public API
    @classmethod
    def get_events_by_name(cls, name):
        """Get a list of events matching name"""
        return cls.query(cls.name == name).fetch()

    @classmethod
    def get_events_by_admin(cls, user):
        """Get a list of events administered by a user"""
        return cls.query(cls.admin == user).fetch()

    @classmethod
    def get_events_by_volunteer(cls, user):
        """Get events where the user was accepted in any role"""
        return cls.query(ndb.OR(Event.volunteers.IN([user]),
                                Event.drivers.IN([user]),
                                Event.translators.IN([user]))).fetch()

    @classmethod
    def get_events_by_request(cls, user):
        """Get events where the user has a pending request in any role"""
        return cls.query(ndb.OR(Event.volunteer_requests.IN([user]),
                                Event.driver_requests.IN([user]),
                                Event.translator_requests.IN([user]))).fetch()

    @classmethod
    def get_event_by_id(cls, id):
        """Get an event by its key id"""
        return cls.get_by_id(int(id))
class Post(ndb.Model):
    """
    Post model for the FILL app.
    """
    # Basic Info
    title = ndb.StringProperty(required=True)
    body = ndb.TextProperty(required=True)
    writer = ndb.KeyProperty(required=True)

    # Public API
    @classmethod
    def get_posts_by_writer(cls, user):
        """Get a list of posts written by a user"""
        return cls.query(cls.writer == user).fetch()

    @classmethod
    def get_post_by_id(cls, id):
        """Get a post by its key id"""
        return cls.get_by_id(int(id))
# Debug
# Intentionally a no-op: this module is meant to be imported, not executed.
if __name__ == '__main__':
    pass
from neo4j import GraphDatabase
import json
uri = "bolt://graph_db:7687"
driver = GraphDatabase.driver(uri, encrypted=False)
def get_gdb():
    """Return the module-level Neo4j driver (shared connection pool)."""
    return driver
def save_feedback(feedback):
    """Persist *feedback* (a flat dict of properties) as a new `:feedback` node."""
    with driver.session() as session:
        with session.begin_transaction() as tx:
            tx.run("""
            CREATE (a :feedback) SET a += $props
            """, {"props": feedback})
def save_contribution(contribution):
    """Persist *contribution* (a flat dict of properties) as a new `:CAREHOME` node."""
    with driver.session() as session:
        with session.begin_transaction() as tx:
            tx.run("""
            CREATE (a :CAREHOME) SET a += $props
            """, {"props": contribution})
feedback = {"ref":"uuid_0001", "author":"John Doe", "comments":"Average performance", "rating":3}
contribution = {"ref":"uuid_1001", "author":"Jane Doe", "comments":"Above Average performance, impressed by price", "rating":4}
save_feedback(feedback)
save_contribution(contribution)
def seed_loc_data():
    """Seed the graph with mock location markers from the bundled JSON file."""
    with open('app/data/mocks/loc_marker_list.json') as marker_file:
        markers = json.load(marker_file)
    for marker in markers:
        save_contribution(marker)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Module documentation goes here."""
from __future__ import print_function
__author__ = "First Last"
__copyright__ = "Copyright 2018, First Last"
__credits__ = ["C D", "A B"]
__license__ = "Apache 2.0"
__version__ = "1.0.1"
__maintainer__ = "First Last"
__email__ = "test@example.org"
__status__ = "Development"
import argparse
from functools import wraps

from logzero import logger
def log(function):
    """Handy logging decorator: logs the wrapped callable before invoking it.

    Bug fixes: the inner wrapper previously discarded the wrapped
    function's return value (always returning None) and did not use
    ``functools.wraps``, so decorated functions lost their name/docstring.
    """
    @wraps(function)
    def inner(*args, **kwargs):
        """Inner wrapper: log, call through, and propagate the result."""
        logger.debug(function)
        return function(*args, **kwargs)
    return inner
class Greeter():
    """Tiny example class holding a configurable greeting message."""

    def __init__(self):
        self.message = "Hello world!"

    def set_message(self, message: str):
        """Replace the stored greeting."""
        self.message = message

    @log
    def print_message(self):
        """Write the current greeting to stdout."""
        print(self.message)
def main(args: argparse.Namespace):
    """Main entry point: print the default greeting and log the CLI args."""
    greeter = Greeter()
    greeter.print_message()
    logger.info(args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required positional argument.
    parser.add_argument("arg", help="Required positional argument")
    # Boolean flag, defaults to off.
    parser.add_argument("-f", "--flag", action="store_true", default=False)
    # Option taking a value (e.g. -n test).
    parser.add_argument("-n", "--name", action="store", dest="name")
    # Repeatable verbosity counter (-v, -vv, ...).
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="Verbosity (-v, -vv, etc)")
    # Output of "--version".
    parser.add_argument(
        "--version",
        action="version",
        version="%(prog)s (version {version})".format(version=__version__))
    main(parser.parse_args())
|
from .constraint import Constraint
import sys
class Halfspace(Constraint):
    """Halfspace

    A halfspace is a set of the form H = {c'x <= b}, where c is a given
    vector and b is a constant scalar.
    """

    def __init__(self, normal_vector, offset):
        """Construct a new halfspace H = {c'x <= b}

        :param normal_vector: vector c (components coerced to float)
        :param offset: parameter b
        """
        self.__normal_vector = [float(x) for x in normal_vector]
        self.__offset = offset

    @property
    def normal_vector(self):
        """
        Returns vector c
        :return:
        """
        return self.__normal_vector

    @property
    def offset(self):
        """
        Returns parameter b
        :return:
        """
        return self.__offset

    def dimension(self):
        """
        Dimension of the halfspace
        :return: length of normal vector
        """
        return len(self.__normal_vector)

    def project(self, u):
        # Projection onto a halfspace is not implemented yet.
        raise NotImplementedError()

    def distance_squared(self, u):
        # Squared distance to a halfspace is not implemented yet.
        raise NotImplementedError()

    def is_convex(self):
        """
        A halfspace is a convex set
        :return:
        """
        return True

    def is_compact(self):
        """Whether the set is compact

        H is compact iff b < 0 and c = 0, in which case H is empty
        """
        eps = sys.float_info.epsilon
        # Bug fix: the original tested sum(c) < eps, which wrongly reports
        # "compact" for any vector whose components cancel (e.g. [1, -1]).
        # c = 0 requires EVERY component to be (numerically) zero.
        return self.__offset < 0 and all(
            abs(c) < eps for c in self.__normal_vector)
|
from mock import patch
from randstr import randstr
def test_default_randstr():
    """Default length is 7, drawing characters via random.choice."""
    with patch('randstr.random') as mock_random:
        mock_random.choice.side_effect = 'helloworld'
        assert randstr() == 'hellowo'
def test_length():
    """An explicit length yields exactly that many characters."""
    with patch('randstr.random') as mock_random:
        expected = 'helloworld'
        mock_random.choice.side_effect = expected
        assert randstr(len(expected)) == expected
def test_custom_chars():
    """A single-character alphabet fills the default length of 7."""
    expected = 'x' * 7
    assert randstr(chars='x') == expected
|
# Generated by Django 3.2.9 on 2021-11-15 14:41
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import rentcars.utils.paths
import rentcars.validators
class Migration(migrations.Migration):
    """Initial auto-generated schema for the rentcars app.

    Creates Car, Contract, PhotoCarContract, PhotoCar, PersonalData and Fine,
    then wires Car.owner to PersonalData. Do not edit field definitions by
    hand; generate a follow-up migration instead.
    """

    initial = True

    dependencies = [
        ('tgbot', '0002_delete_location'),
    ]

    operations = [
        migrations.CreateModel(
            name='Car',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('license_plate', models.CharField(max_length=9, validators=[rentcars.validators.license_plate_validator], verbose_name='Регистрационный знак')),
                ('vin', models.CharField(max_length=17, validators=[rentcars.validators.vin_validator], verbose_name='VIN')),
                ('model', models.CharField(max_length=50, verbose_name='Марка, модель')),
                ('type', models.CharField(max_length=50, verbose_name='Тип ТС')),
                ('category', models.CharField(help_text='Одна английская буква из ABCD', max_length=1, validators=[rentcars.validators.vehicle_category_validator], verbose_name='Категория ТС')),
                ('year_manufacture', models.CharField(max_length=4, validators=[rentcars.validators.vehicle_manufactured_year_validator], verbose_name='Год выпуска')),
                ('color', models.CharField(max_length=50, verbose_name='Цвет ТС')),
                ('power', models.IntegerField(verbose_name='Мощность двигателя, л.с.')),
                ('ecological_class', models.CharField(max_length=20, verbose_name='Экологический класс')),
                ('vehicle_passport_serial', models.CharField(max_length=4, validators=[rentcars.validators.vehicle_passport_serial_validator], verbose_name='Серия ПТС')),
                ('vehicle_passport_number', models.CharField(max_length=6, validators=[rentcars.validators.passport_number_validator], verbose_name='Номер ПТС')),
                ('max_mass', models.IntegerField(verbose_name='Разрешенная max масса, кг')),
                ('sts_serial', models.CharField(help_text='Серия свидетельства о регистрации ТС', max_length=4, validators=[rentcars.validators.sts_serial_validator], verbose_name='Серия СТС')),
                ('sts_number', models.CharField(help_text='Номер свидетельства о регистрации ТС', max_length=6, validators=[rentcars.validators.sts_number_validator], verbose_name='Номер СТС')),
            ],
            options={
                'verbose_name': 'Автомобиль',
                'verbose_name_plural': 'Автомобили',
                'ordering': ['license_plate'],
            },
        ),
        migrations.CreateModel(
            name='Contract',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file', models.FileField(upload_to='contracts/files/', verbose_name='Файл договора')),
                ('is_approved', models.BooleanField(default=False, verbose_name='Подтвержден')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Дата формирования договора')),
                ('approved_at', models.DateTimeField(blank=True, null=True, verbose_name='Дата подтверждения договора')),
                ('closed_at', models.DateTimeField(null=True, verbose_name='Дата завершения действия договора')),
                ('car', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='contracts', to='rentcars.car', verbose_name='Машина')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='contracts', to='tgbot.user')),
            ],
            options={
                'verbose_name': 'Договор',
                'verbose_name_plural': 'Договоры',
            },
        ),
        migrations.CreateModel(
            name='PhotoCarContract',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to=rentcars.utils.paths.contract_photos_path, verbose_name='Фотографии машины')),
                ('file_id', models.CharField(blank=True, max_length=250, verbose_name='ID фотографии на серверах Telegram')),
                ('contract', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='car_photos', to='rentcars.contract')),
            ],
            options={
                'verbose_name': 'Фотография машины во время заключения договора',
                'verbose_name_plural': 'Фотографии машины во время заключения договора',
            },
        ),
        migrations.CreateModel(
            name='PhotoCar',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to=rentcars.utils.paths.car_photos_path, verbose_name='Фотографии машины')),
                ('file_id', models.CharField(blank=True, max_length=250, verbose_name='ID фотографии на серверах Telegram')),
                ('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='rentcars.car')),
            ],
            options={
                'verbose_name': 'Фотография машины',
                'verbose_name_plural': 'Фотографии машины',
            },
        ),
        migrations.CreateModel(
            name='PersonalData',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('first_name', models.CharField(max_length=100, validators=[rentcars.validators.ru_eng_letters_validator], verbose_name='Имя')),
                ('middle_name', models.CharField(max_length=100, validators=[rentcars.validators.ru_eng_letters_validator], verbose_name='Отчество')),
                ('last_name', models.CharField(max_length=100, validators=[rentcars.validators.ru_eng_letters_validator], verbose_name='Фамилия')),
                ('gender', models.IntegerField(choices=[(0, 'Муж'), (1, 'Жен')], verbose_name='Пол')),
                ('birthday', models.DateField(validators=[rentcars.validators.birthday_date_validate], verbose_name='Дата рождения')),
                ('email', models.EmailField(max_length=70, validators=[django.core.validators.EmailValidator(message='Адрес электронной почты должен быть правильным. Например, rustamwho@mail.com')], verbose_name='Почта')),
                ('phone_number', models.CharField(max_length=12, validators=[rentcars.validators.phone_number_validator])),
                ('passport_serial', models.CharField(max_length=4, validators=[rentcars.validators.passport_serial_validator], verbose_name='Серия паспорта')),
                ('passport_number', models.CharField(max_length=6, validators=[rentcars.validators.passport_number_validator], verbose_name='Номер паспорта')),
                ('passport_date_of_issue', models.DateField(verbose_name='Дата выдачи паспорта')),
                ('passport_issued_by', models.CharField(max_length=255, validators=[rentcars.validators.passport_issued_by_validator], verbose_name='Кем выдан паспорт')),
                ('address_registration', models.CharField(max_length=256, validators=[rentcars.validators.address_validator], verbose_name='Адрес прописки')),
                ('address_of_residence', models.CharField(max_length=256, validators=[rentcars.validators.address_validator], verbose_name='Адрес места жительства')),
                ('close_person_name', models.CharField(blank=True, max_length=50, null=True, validators=[rentcars.validators.close_person_name_validator], verbose_name='Близкий человек')),
                ('close_person_phone', models.CharField(blank=True, max_length=12, null=True, validators=[rentcars.validators.phone_number_validator], verbose_name='Номер близкого человека')),
                ('close_person_address', models.CharField(blank=True, max_length=256, null=True, validators=[rentcars.validators.address_validator], verbose_name='Адрес места жительства')),
                ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='personal_data', to='tgbot.user')),
            ],
            options={
                'verbose_name': 'Персональные данные',
                'verbose_name_plural': 'Персональные данные',
            },
        ),
        migrations.CreateModel(
            name='Fine',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('datetime', models.DateTimeField(null=True, verbose_name='Дата и время штрафа')),
                ('amount', models.PositiveIntegerField(verbose_name='Сумма штрафа')),
                ('screenshot', models.ImageField(blank=True, null=True, upload_to=rentcars.utils.paths.fine_screens_path, verbose_name='Скриншот штрафа')),
                ('screenshot_id', models.CharField(blank=True, max_length=250, null=True, verbose_name='ID скриншота на серверах Telegram')),
                ('is_paid', models.BooleanField(default=False, verbose_name='Штраф оплачен')),
                ('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fines', to='rentcars.car', verbose_name='Машина')),
                ('contract', models.ForeignKey(blank=True, help_text='Заполнится автоматически после сохранения!', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='fines', to='rentcars.contract', verbose_name='Договор')),
                ('user', models.ForeignKey(blank=True, help_text='Заполнится автоматически после сохранения!', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='fines', to='tgbot.user', verbose_name='Водитель')),
            ],
            options={
                'verbose_name': 'Штраф',
                'verbose_name_plural': 'Штрафы',
                'ordering': ['is_paid', '-datetime'],
            },
        ),
        # Added separately because PersonalData is created after Car above.
        migrations.AddField(
            model_name='car',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='cars', to='rentcars.personaldata'),
        ),
    ]
|
import sqlite3
import pandas as pd
def connect_to_db(db_name='buddymove_holidayiq.sqlite3'):
    """Open (creating if necessary) the SQLite database at ``db_name``."""
    connection = sqlite3.connect(db_name)
    return connection
def execute_query(cursor, query):
    """Run ``query`` on ``cursor`` and return the full result set."""
    cursor.execute(query)
    rows = cursor.fetchall()
    return rows
# Source data: one row per reviewer with per-category review counts.
# NOTE(review): this read and the connection below run at import time.
df = pd.read_csv('buddymove_holidayiq.csv')
# Fetch every row of the reviews table.
GET_ALL= """
SELECT *
FROM reviews;
"""
conn = connect_to_db('buddymove_holidayiq.sqlite3')
curs = conn.cursor()
def put_reviews_in_db():
    """Replace the ``reviews`` table with the contents of the CSV DataFrame."""
    # IF EXISTS fixes a crash on a fresh database file: a plain DROP TABLE
    # raises sqlite3.OperationalError when the table is not there yet.
    curs.execute("DROP TABLE IF EXISTS reviews")
    df.to_sql('reviews', conn)

put_reviews_in_db()
# a = execute_query(curs, GET_ALL)
# 1 - Total number of rows in the reviews table.
Total_rows = """
select count(*) from reviews
"""
# 2 - Users with >= 100 Nature reviews who also have >= 100 Shopping reviews.
review100 = """
select count(*) from reviews
where Nature >=100 AND Shopping >=100
"""
# 3 - Stretch goal: average review count of each category.
avg_cat = """
select avg(Sports), avg(Religious), avg(Nature), avg(Theatre), avg(Shopping), avg(Picnic) from reviews
"""
if __name__ == '__main__':
    connection = connect_to_db()
    cursor = connection.cursor()
    # Sanity query: confirms the reviews table is readable.
    sanity_rows = execute_query(cursor, GET_ALL)
    row_count = execute_query(cursor, Total_rows)
    nature_and_shopping = execute_query(cursor, review100)
    category_averages = execute_query(cursor, avg_cat)
    print("1-How many rows?\n", row_count)
    print("\n2-users reviewed >=100 Nature also reviewed >=100 in the Shopping\n", nature_and_shopping)
    print("\n3-average per category", category_averages)
|
#!/usr/bin/python
# (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# most of it copied from AWX's scan_packages module
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: package_facts
short_description: package information as facts
description:
- Return information about installed packages as facts
options:
manager:
description:
- The package manager used by the system so we can query the package information.
- Since 2.8 this is a list and can support multiple package managers per system.
- The 'portage' and 'pkg' options were added in version 2.8.
default: ['auto']
choices: ['auto', 'rpm', 'apt', 'portage', 'pkg', 'pacman']
required: False
type: list
strategy:
description:
- This option controls how the module queries the package managers on the system.
C(first) means it will return only information for the first supported package manager available.
C(all) will return information for all supported and available package managers on the system.
choices: ['first', 'all']
default: 'first'
version_added: "2.8"
version_added: "2.5"
requirements:
- For 'portage' support it requires the C(qlist) utility, which is part of 'app-portage/portage-utils'.
- For Debian-based systems C(python-apt) package must be installed on targeted hosts.
author:
- Matthew Jones (@matburt)
- Brian Coca (@bcoca)
- Adam Miller (@maxamillion)
'''
EXAMPLES = '''
- name: Gather the package facts
package_facts:
manager: auto
- name: Print the package facts
debug:
var: ansible_facts.packages
- name: Check whether a package called foobar is installed
debug:
msg: "{{ ansible_facts.packages['foobar'] | length }} versions of foobar are installed!"
when: "'foobar' in ansible_facts.packages"
'''
RETURN = '''
ansible_facts:
description: facts to add to ansible_facts
returned: always
type: complex
contains:
packages:
description:
- Maps the package name to a non-empty list of dicts with package information.
- Every dict in the list corresponds to one installed version of the package.
- The fields described below are present for all package managers. Depending on the
package manager, there might be more fields for a package.
returned: when operating system level package manager is specified or auto detected manager
type: dict
contains:
name:
description: The package's name.
returned: always
type: str
version:
description: The package's version.
returned: always
type: str
source:
description: Where information on the package came from.
returned: always
type: str
sample: |-
{
"packages": {
"kernel": [
{
"name": "kernel",
"source": "rpm",
"version": "3.10.0",
...
},
{
"name": "kernel",
"source": "rpm",
"version": "3.10.0",
...
},
...
],
"kernel-tools": [
{
"name": "kernel-tools",
"source": "rpm",
"version": "3.10.0",
...
}
],
...
}
}
# Sample rpm
{
"packages": {
"kernel": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.26.2.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.16.1.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.10.2.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.21.1.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
"kernel-tools": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel-tools",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
"kernel-tools-libs": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel-tools-libs",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
}
}
# Sample deb
{
"packages": {
"libbz2-1.0": [
{
"version": "1.0.6-5",
"source": "apt",
"arch": "amd64",
"name": "libbz2-1.0"
}
],
"patch": [
{
"version": "2.7.1-4ubuntu1",
"source": "apt",
"arch": "amd64",
"name": "patch"
}
],
}
}
'''
import re
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.facts.packages import LibMgr, CLIMgr, get_all_pkg_managers
class RPM(LibMgr):
    """Query installed packages through the python ``rpm`` bindings."""

    LIB = 'rpm'

    def list_installed(self):
        """Iterate over every installed header in the rpm database."""
        return self._lib.TransactionSet().dbMatch()

    def get_package_details(self, package):
        """Map one rpm header to the common package-fact dict."""
        return {
            'name': package[self._lib.RPMTAG_NAME],
            'version': package[self._lib.RPMTAG_VERSION],
            'release': package[self._lib.RPMTAG_RELEASE],
            'epoch': package[self._lib.RPMTAG_EPOCH],
            'arch': package[self._lib.RPMTAG_ARCH],
        }

    def is_available(self):
        ''' we expect the python bindings installed, but this gives warning if they are missing and we have rpm cli'''
        lib_found = super(RPM, self).is_available()
        try:
            get_bin_path('rpm')
        except ValueError:
            # No rpm CLI either; nothing to warn about.
            pass
        else:
            if not lib_found:
                module.warn('Found "rpm" but %s' % (missing_required_lib('rpm')))
        return lib_found
class APT(LibMgr):
    """Query installed packages through the ``python-apt`` bindings."""

    LIB = 'apt'

    def __init__(self):
        self._cache = None  # lazily built apt cache
        super(APT, self).__init__()

    @property
    def pkg_cache(self):
        """Build the apt cache on first access and memoize it."""
        if self._cache is None:
            self._cache = self._lib.Cache()
        return self._cache

    def is_available(self):
        ''' we expect the python bindings installed, but if there is apt/apt-get give warning about missing bindings'''
        lib_found = super(APT, self).is_available()
        if lib_found:
            return lib_found
        for exe in ('apt', 'apt-get', 'aptitude'):
            try:
                get_bin_path(exe)
            except ValueError:
                continue
            module.warn('Found "%s" but %s' % (exe, missing_required_lib('apt')))
            break
        return lib_found

    def list_installed(self):
        # Bind the cache once: the property lookup is slow inside the comprehension.
        cache = self.pkg_cache
        return [name for name in cache.keys() if cache[name].is_installed]

    def get_package_details(self, package):
        installed = self.pkg_cache[package].installed
        return {
            'name': package,
            'version': installed.version,
            'arch': installed.architecture,
            'category': installed.section,
            'origin': installed.origins[0].origin,
        }
class PACMAN(CLIMgr):
    """Query installed packages via the ``pacman`` CLI."""

    CLI = 'pacman'

    def list_installed(self):
        # -Qi prints one multi-line "Key : Value" record per installed package,
        # records separated by blank lines; the trailing empty chunk is dropped.
        rc, out, err = module.run_command([self._cli, '-Qi'], environ_update=dict(LC_ALL='C'))
        if rc != 0 or err:
            raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
        return out.split("\n\n")[:-1]

    def get_package_details(self, package):
        # parse values of details that might extend over several lines
        raw_pkg_details = {}
        last_detail = None
        for line in package.splitlines():
            m = re.match(r"([\w ]*[\w]) +: (.*)", line)
            if m:
                last_detail = m.group(1)
                raw_pkg_details[last_detail] = m.group(2)
            else:
                # append value to previous detail
                # NOTE(review): assumes the first line always matches the
                # regex; a leading continuation line would KeyError on None.
                raw_pkg_details[last_detail] = raw_pkg_details[last_detail] + " " + line.lstrip()

        # 'Provides' is the literal string 'None' when the package provides nothing.
        provides = None
        if raw_pkg_details['Provides'] != 'None':
            provides = [
                p.split('=')[0]
                for p in raw_pkg_details['Provides'].split('  ')
            ]

        return {
            'name': raw_pkg_details['Name'],
            'version': raw_pkg_details['Version'],
            'arch': raw_pkg_details['Architecture'],
            'provides': provides,
        }
class PKG(CLIMgr):
    """Query installed packages via FreeBSD's ``pkg`` CLI."""

    CLI = 'pkg'
    # Output columns requested from `pkg query`, in order.
    atoms = ['name', 'version', 'origin', 'installed', 'automatic', 'arch', 'category', 'prefix', 'vital']

    def list_installed(self):
        # Tab-separated query matching the ``atoms`` column order above.
        rc, out, err = module.run_command([self._cli, 'query', "%%%s" % '\t%'.join(['n', 'v', 'R', 't', 'a', 'q', 'o', 'p', 'V'])])
        if rc != 0 or err:
            raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
        return out.splitlines()

    def get_package_details(self, package):
        # Zip one tab-separated line back into the atoms schema.
        pkg = dict(zip(self.atoms, package.split('\t')))

        if 'arch' in pkg:
            try:
                # ABI string looks like os:version:arch; keep the arch part.
                pkg['arch'] = pkg['arch'].split(':')[2]
            except IndexError:
                pass

        if 'automatic' in pkg:
            pkg['automatic'] = bool(int(pkg['automatic']))

        if 'category' in pkg:
            pkg['category'] = pkg['category'].split('/', 1)[0]

        if 'version' in pkg:
            # FreeBSD versions are version[,port_epoch][_revision].
            if ',' in pkg['version']:
                pkg['version'], pkg['port_epoch'] = pkg['version'].split(',', 1)
            else:
                pkg['port_epoch'] = 0

            if '_' in pkg['version']:
                pkg['version'], pkg['revision'] = pkg['version'].split('_', 1)
            else:
                pkg['revision'] = '0'

        if 'vital' in pkg:
            pkg['vital'] = bool(int(pkg['vital']))

        return pkg
class PORTAGE(CLIMgr):
    """Query installed packages via Gentoo's ``qlist``/``qatom`` utilities."""

    CLI = 'qlist'
    atoms = ['category', 'name', 'version', 'ebuild_revision', 'slots', 'prefixes', 'sufixes']

    def list_installed(self):
        """Pipe `qlist -Iv` through `qatom` and return one line per package."""
        cmd = ' '.join([self._cli, '-Iv', '|', 'xargs', '-n', '1024', 'qatom'])
        rc, out, err = module.run_command(cmd, use_unsafe_shell=True)
        if rc != 0:
            raise RuntimeError("Unable to list packages rc=%s : %s" % (rc, to_native(err)))
        return out.splitlines()

    def get_package_details(self, package):
        """Zip one whitespace-separated qatom line into the atoms schema."""
        return dict(zip(self.atoms, package.split()))
def main():
    """Entry point: resolve requested managers, collect facts, exit as JSON."""

    # get supported pkg managers
    PKG_MANAGERS = get_all_pkg_managers()
    PKG_MANAGER_NAMES = [x.lower() for x in PKG_MANAGERS.keys()]

    # start work
    # The manager classes above read this module-level handle for warn()
    # and run_command(); it must be bound before any manager is built.
    global module
    module = AnsibleModule(argument_spec=dict(manager={'type': 'list', 'default': ['auto']},
                                              strategy={'choices': ['first', 'all'], 'default': 'first'}),
                           supports_check_mode=True)
    packages = {}
    results = {'ansible_facts': {}}
    managers = [x.lower() for x in module.params['manager']]
    strategy = module.params['strategy']

    if 'auto' in managers:
        # keep order from user, we do dedupe below
        managers.extend(PKG_MANAGER_NAMES)
        managers.remove('auto')

    unsupported = set(managers).difference(PKG_MANAGER_NAMES)
    if unsupported:
        if 'auto' in module.params['manager']:
            msg = 'Could not auto detect a usable package manager, check warnings for details.'
        else:
            msg = 'Unsupported package managers requested: %s' % (', '.join(unsupported))
        module.fail_json(msg=msg)

    found = 0
    seen = set()
    for pkgmgr in managers:

        # 'first' strategy: stop after one manager has produced facts.
        if found and strategy == 'first':
            break

        # dedupe as per above
        if pkgmgr in seen:
            continue
        seen.add(pkgmgr)
        try:
            try:
                # manager throws exception on init (calls self.test) if not usable.
                manager = PKG_MANAGERS[pkgmgr]()
                if manager.is_available():
                    found += 1
                    packages.update(manager.get_packages())

            except Exception as e:
                # Only warn when the user explicitly asked for this manager.
                if pkgmgr in module.params['manager']:
                    module.warn('Requested package manager %s was not usable by this module: %s' % (pkgmgr, to_text(e)))
                continue

        except Exception as e:
            if pkgmgr in module.params['manager']:
                module.warn('Failed to retrieve packages with %s: %s' % (pkgmgr, to_text(e)))

    if found == 0:
        msg = ('Could not detect a supported package manager from the following list: %s, '
               'or the required Python library is not installed. Check warnings for details.' % managers)
        module.fail_json(msg=msg)

    # Set the facts, this will override the facts in ansible_facts that might exist from previous runs
    # when using operating system level or distribution package managers
    results['ansible_facts']['packages'] = packages

    module.exit_json(**results)
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
|
from utils import load_json_from_file
from path import (
FIELDS_OF_STUDY_PATH,
US_STATE_CODE_MAP,
CANADA_STATE_CODE_MAP,
SPAIN_STATE_CODE_MAP,
)
# Total number of fake users to create.
USERS_TO_GENERATE = 120
# Percentage of users to be enrolled in at least one course.
# Users will be divided evenly among the fake programs.
PCT_USERS_ENROLLED = 0.9

# Course settings
PAST_COURSE_RUNS_TO_CREATE = 3
# (start_month, end_month) pairs for the two yearly runs.
COURSE_RUN_MONTH_RANGES = [(1, 5), (8, 12)]
COURSE_DAY = 15
# timedelta kwargs relative to course start for enrollment/upgrade deadlines.
ENROLLMENT_DELTA = dict(days=14)
UPGRADE_DELTA = dict(days=7)

FIELDS_OF_STUDY = load_json_from_file(FIELDS_OF_STUDY_PATH)

# Degree levels with the typical graduation age used to back-date records.
DEGREES = {
    'MASTERS': {
        'name': 'm',
        'school_suffix': 'University',
        'grad_age': 30
    },
    'BACHELORS': {
        'name': 'b',
        'school_suffix': 'University',
        'grad_age': 22
    },
    'HIGH_SCHOOL': {
        'name': 'hs',
        'school_suffix': 'High School',
        'grad_age': 18
    }
}
GRAD_AGES = [degree_info['grad_age'] for degree_info in DEGREES.values()]
MIN_AGE = 18

# Industry -> plausible employers and job titles for fake work history.
EMPLOYMENT = {
    "Computer Software": {
        'company_name': ['Google', 'Microsoft', 'Apple'],
        'position': ['Software Engineer', 'DevOps']
    },
    "Banking": {
        'company_name': ['TD Bank', 'Chase', 'Bank of America', 'Fidelity'],
        'position': ['Branch Manager', 'Teller']
    },
    "Financial Services": {
        'company_name': ['Goldman Sachs', 'Berkshire Hathaway', 'Vanguard'],
        'position': ['Financial Analyst', 'Fund Manager']
    },
    "Automotive": {
        'company_name': ['Ford', 'Toyota', 'Hyundai', 'Audi', 'Volvo'],
        'position': ['Mechanic', 'Salesperson']
    }
}
EMPLOYMENT_YEAR_LENGTH = 2

# Country code -> state/province code lookup tables.
COUNTRY_STATE_CODE_MAP = {
    'US': load_json_from_file(US_STATE_CODE_MAP),
    'CA': load_json_from_file(CANADA_STATE_CODE_MAP),
    'ES': load_json_from_file(SPAIN_STATE_CODE_MAP)
}

# (source_field, destination_field) pairs copied verbatim onto each profile.
COPY_TO_FIELDS = [
    ('country', 'nationality'),
    ('country', 'birth_country'),
    ('first_name', 'preferred_name'),
    ('first_name', 'edx_name')
]
|
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
# NOTE(review): 17/27/22/23 look like BCM GPIO numbers, but the numbering
# mode above is BOARD (physical pin positions). Confirm which scheme the
# wiring actually uses before running this on hardware.
async_parallel_load = 17  # shift-register parallel-load control line
clock_in = 27             # shift-register clock line
data_in = 22              # serial data line
clock_enable_pin = 23     # clock-enable line
def setup():
    """Configure all four shift-register control pins as outputs."""
    # Redundant with the module-level setmode() call, but harmless.
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(async_parallel_load, GPIO.OUT)
    GPIO.setup(clock_enable_pin, GPIO.OUT)
    GPIO.setup(clock_in, GPIO.OUT)
    GPIO.setup(data_in, GPIO.OUT)

# Pins are configured as a side effect of importing this module.
setup()
import glob
import os
import sys
import pandas as pd
from murasame.divider import Divider
from murasame.formatter import Formatter
from murasame.maker import Maker
from murasame.utils import load_setting
# Read the settings file once instead of twice; assumes load_setting() is a
# pure read of the configuration (it is only ever used that way here).
_system_settings = load_setting()['system']
tmpdir = _system_settings['tmpdir']  # working directory for intermediates
resdir = _system_settings['resdir']  # output directory for results
def formatter():
    """Read the raw workbook, clean it, and save the formatted sheet to tmpdir."""
    setting = load_setting()['formatter']
    # NOTE(review): ``sheetname`` was renamed ``sheet_name`` in pandas 0.21;
    # this call only works on an older pinned pandas — confirm before upgrading.
    dfs = pd.read_excel(setting['input'], sheetname=None)
    # Concatenate every sheet of the workbook into a single frame.
    df = pd.concat(list(dfs.values()))
    fmt = Formatter(df)
    fmt.trim_space()
    if setting['region']['use']:
        base = setting['region']['base']
        file = setting['region']['file']
        fmt.trim_prefecture(base)
        fmt.append_region(base, file)
    fmt.select_column(setting['columns'])
    fmt.save(os.path.join(tmpdir, 'formatter'))
def divider():
    """Split the formatted sheet into per-group workbooks under tmpdir."""
    cfg = load_setting()['divider']
    sheet = pd.read_excel(os.path.join(tmpdir, 'formatter', 'sheet.xlsx'))
    splitter = Divider(sheet, files=cfg["files"], base=cfg["base"])
    splitter.save(os.path.join(tmpdir, 'divider'))
def maker():
    """Build a board and a seating sheet for every divided workbook."""
    setting = load_setting()['maker']
    files = glob.glob(os.path.join(tmpdir, 'divider', '*.xlsx'))
    for f in files:
        name, _ = os.path.splitext(os.path.basename(f))
        # Local renamed from ``maker``: it shadowed this very function.
        mk = Maker(file=f)
        mk.make_board(keys=setting['board']['keys'])
        mk.make_sheet(id_label=setting['sheet']['id'], seat_label='座席',
                      fill='不戦')
        boardname = '{}_board.xlsx'.format(name)
        sheetname = '{}_sheet.xlsx'.format(name)
        mk.save_board(os.path.join(resdir, boardname))
        mk.save_sheet(os.path.join(resdir, sheetname),
                      sort_by=setting['sheet']['sort_by'])
if __name__ == '__main__':
    # Run relative to the script's own directory so setting/paths resolve.
    os.chdir(os.path.dirname(sys.argv[0]))
    # Pipeline: format the raw workbook, split it, then build outputs.
    formatter()
    divider()
    maker()
|
# Read one value and report the string-classification predicates on it.
valor = input('digite algo: ')
print('o tipo primitivo desse valor é: ', type(valor))
print('Só tem espaços? ', valor.isspace())
print('É um número? ', valor.isnumeric())
print('É alfabético? ', valor.isalpha())
print('é alfanumérico? ', valor.isalnum())
print('está em maiúsculas? ', valor.isupper())
print('está em menúsculas? ', valor.islower())
|
"""
Django settings for gdshowreelvote project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from get_docker_secret import get_docker_secret
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = get_docker_secret('gdshowreel_django_secret', default=None)

# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG is True unless DJANGO_DEBUG is exactly the string
# 'False' — debug-by-default. Confirm production sets DJANGO_DEBUG=False.
DEBUG = os.environ.get('DJANGO_DEBUG', '') != 'False'

# Domain name
ALLOWED_HOSTS = os.environ.get('DJANGO_ALLOWED_HOSTS','').split(',')
# Application definition
INSTALLED_APPS = [
    'vote.apps.VoteConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'mozilla_django_oidc',  # OIDC single sign-on
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # Refreshes the OIDC session token when it expires.
    'mozilla_django_oidc.middleware.SessionRefresh',
]
ROOT_URLCONF = 'gdshowreelvote.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # Project-wide template variables.
                'vote.context_processors.common',
            ],
        },
    },
]

WSGI_APPLICATION = 'gdshowreelvote.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'gdshowreel',
        'USER': 'mysql',
        # Password is injected via a docker secret, never hard-coded.
        'PASSWORD': get_docker_secret('gdshowreel_django_db_password'),
        'HOST': 'database',
        'PORT': '',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
SERVE_STATICS = True
STATIC_ROOT = os.environ.get('DJANGO_STATIC_ROOT', "/var/www/showreel.godotengine.org/static/")
STATIC_URL = '/static/'
### Authentication ###
AUTHENTICATION_BACKENDS = [
    'vote.auth.OIDCAuthenticationBackend',
    'django.contrib.auth.backends.ModelBackend',
]
AUTH_USER_MODEL = 'vote.User'

# OICD client connection
OIDC_RP_CLIENT_ID = get_docker_secret('gdshowreel_oidc_rp_client_id')
OIDC_RP_CLIENT_SECRET = get_docker_secret('gdshowreel_oidc_rp_client_secret')
# SECURITY FIX: removed a debug ``print(OIDC_RP_CLIENT_SECRET)`` that leaked
# the OIDC client secret to stdout/container logs on every settings import.

# Signing algorithm
OIDC_RP_SIGN_ALGO = 'RS256'
# Keycloak configuration
KEYCLOAK_REALM = "master"
KEYCLOAK_HOSTNAME = "keycloak:8080"

# Keycloak roles in authentication claims
# Path into the OIDC claims dict where the role list lives.
KEYCLOAK_ROLES_PATH_IN_CLAIMS = ["realm_access", "roles"]
KEYCLOAK_STAFF_ROLE = "staff"
KEYCLOAK_SUPERUSER_ROLE = "admin"

# Keycloak OICD endpoints. You can get those at this endpoint http://{keycloakhost}:{port}/auth/realms/{realm}/.well-known/openid-configuration
OIDC_OP_AUTHORIZATION_ENDPOINT = f"http://{KEYCLOAK_HOSTNAME}/auth/realms/{KEYCLOAK_REALM}/protocol/openid-connect/auth" # URL of the OIDC OP authorization endpoint
OIDC_OP_TOKEN_ENDPOINT = f"http://{KEYCLOAK_HOSTNAME}/auth/realms/{KEYCLOAK_REALM}/protocol/openid-connect/token" # URL of the OIDC OP token endpoint
OIDC_OP_USER_ENDPOINT = f"http://{KEYCLOAK_HOSTNAME}/auth/realms/{KEYCLOAK_REALM}/protocol/openid-connect/userinfo" # URL of the OIDC OP userinfo endpoint
OIDC_OP_JWKS_ENDPOINT = f"http://{KEYCLOAK_HOSTNAME}/auth/realms/{KEYCLOAK_REALM}/protocol/openid-connect/certs"
OIDC_OP_LOGOUT_ENDPOINT = f"http://{KEYCLOAK_HOSTNAME}/auth/realms/{KEYCLOAK_REALM}/protocol/openid-connect/logout"
# URLS
LOGIN_REDIRECT_URL = '/submissions'
LOGOUT_REDIRECT_URL = '/login'
LOGIN_URL = 'login'

# Automatic Keycloak logout
OIDC_OP_LOGOUT_URL_METHOD = "vote.auth.logout"

### Security ###
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
# Trust the reverse proxy's forwarded-proto header for HTTPS detection.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

### Custom settings ###
VOTE_MAX_SUBMISSIONS_PER_SHOWREEL = 3
VOTE_ONLY_STAFF_CAN_VOTE = True
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Nov, 2019
@author: Nathan de Lara <ndelara@enst.fr>
"""
from abc import ABC
import numpy as np
from sknetwork.hierarchy.postprocess import split_dendrogram
from sknetwork.utils.base import Algorithm
class BaseHierarchy(Algorithm, ABC):
    """Common ancestor of hierarchical clustering algorithms.

    Concrete subclasses populate ``dendrogram_`` in their ``fit``.
    """

    def __init__(self):
        # Set by the concrete algorithm's ``fit``.
        self.dendrogram_ = None

    def fit_transform(self, *args, **kwargs) -> np.ndarray:
        """Fit the algorithm and return the resulting dendrogram.

        Accepts exactly the same parameters as ``fit``.

        Returns
        -------
        dendrogram : np.ndarray
            Dendrogram of the fitted data.
        """
        self.fit(*args, **kwargs)
        return self.dendrogram_
class BaseBiHierarchy(BaseHierarchy, ABC):
    """Base class for hierarchical clustering algorithms that produce both
    a row and a column dendrogram."""

    def __init__(self):
        super(BaseBiHierarchy, self).__init__()
        # Row / column dendrograms obtained by splitting the full one.
        self.dendrogram_row_ = None
        self.dendrogram_col_ = None
        self.dendrogram_full_ = None

    def _split_vars(self, shape):
        # Split the joint dendrogram, keep it in ``dendrogram_full_`` and
        # expose the row part as the default ``dendrogram_``.
        rows, cols = split_dendrogram(self.dendrogram_, shape)
        self.dendrogram_full_ = self.dendrogram_
        self.dendrogram_ = rows
        self.dendrogram_row_ = rows
        self.dendrogram_col_ = cols
        return self
|
# Generated by Django 2.2.6 on 2019-10-27 07:10
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: makes the ``action`` and ``workflow``
    # foreign keys on ``ScheduledOperation`` optional (blank=True, null=True).

    dependencies = [
        ('ontask', '0058_auto_20191027_1252'),
    ]

    operations = [
        migrations.AlterField(
            model_name='scheduledoperation',
            name='action',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='scheduled_operations', to='ontask.Action'),
        ),
        migrations.AlterField(
            model_name='scheduledoperation',
            name='workflow',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='scheduled_operations', to='ontask.Workflow'),
        ),
    ]
|
import click
import binascii
import os
import yaml
from ckan_cloud_operator import logs
from . import driver
@click.group()
def postgres():
    """Manage PostgreSQL databases unrelated to the operator or cluster"""
    # Docstring doubles as the click group help text; no body needed.
@postgres.command()
@click.option('--db-name')
@click.option('--db-password')
@click.option('--admin-connection-string')
def create_base_db(db_name, db_password, admin_connection_string):
    # Create a database (and matching role) on the admin connection,
    # generating random name/password when they are not supplied.
    with _get_admin_connection(admin_connection_string) as admin_conn:
        if not db_name:
            db_name = _generate_password(8)
        if not db_password:
            db_password = _generate_password(12)
        logs.info(f'creating base db: {db_name} / {db_password}')
        driver.create_base_db(admin_conn, db_name, db_password)
        logs.exit_great_success()
@postgres.command()
@click.option('--db-name')
@click.option('--admin-connection-string')
def db_role_info(db_name, admin_connection_string):
    # Print role information for one database as YAML.
    assert db_name
    with _get_admin_connection(admin_connection_string) as admin_conn:
        info = driver.get_db_role_info(admin_conn, db_name)
        print(yaml.dump(info, default_flow_style=False))
@postgres.command()
@click.option('--admin-connection-string')
@click.option('--full', is_flag=True)
@click.option('--validate', is_flag=True)
def db_names(admin_connection_string, full, validate):
    # List database names (optionally with full details / validation) as YAML.
    with _get_admin_connection(admin_connection_string) as admin_conn:
        names = list(driver.list_db_names(admin_conn, full, validate))
        print(yaml.dump(names, default_flow_style=False))
@postgres.command()
@click.option('--admin-connection-string')
@click.option('--full', is_flag=True)
@click.option('--validate', is_flag=True)
def roles(admin_connection_string, full, validate):
    # List database roles (optionally with full details / validation) as YAML.
    with _get_admin_connection(admin_connection_string) as admin_conn:
        role_list = list(driver.list_roles(admin_conn, full, validate))
        print(yaml.dump(role_list, default_flow_style=False))
def _get_admin_connection_string(connection_string):
if connection_string: return connection_string
from ckan_cloud_operator.providers.db.manager import get_external_admin_connection_string
return get_external_admin_connection_string()
def _get_admin_connection(connection_string):
    """Open an admin DB connection, resolving the connection string first."""
    resolved = _get_admin_connection_string(connection_string)
    return driver.connect(resolved)
def _generate_password(size):
return binascii.hexlify(os.urandom(size)).decode()
|
from datetime import date, datetime
import pandas as pd
import pytest
from pytz import timezone
from liualgotrader.common import config
from liualgotrader.common.data_loader import DataLoader # type: ignore
from liualgotrader.common.types import DataConnectorType, TimeScale
nyc = timezone("America/New_York")
@pytest.mark.devtest
def test_create_data_loader_default() -> bool:
    # Smoke test: a DataLoader can be constructed with the alpaca connector.
    return bool(DataLoader(connector=DataConnectorType.alpaca))
@pytest.mark.devtest
def test_apple_stock_latest_price() -> bool:
    # Fetch the most recent minute-bar close (and its timestamp) for AAPL.
    print("test_apple_stock_latest_price")
    dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    last_price = dl["AAPL"].close[-1]
    last_price_time = dl["AAPL"].close.index[-1]
    print(f"apple {last_price} @ {last_price_time}")
    return True
@pytest.mark.devtest
def test_apple_stock_current_price() -> bool:
    # Read the latest close and the close five bars earlier.
    print("test_apple_stock_current_price")
    dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    last_price = dl["AAPL"].close[-1]
    last_price_time = dl["AAPL"].close.index[-1]
    before_price = dl["AAPL"].close[-5]
    before_price_time = dl["AAPL"].close.index[-5]
    print(
        f"apple {last_price} @ {last_price_time}, before was {before_price}@{before_price_time}"
    )
    return True
@pytest.mark.devtest
def test_apple_stock_current_price_range_int_minute() -> bool:
    # Slice minute-bar closes with negative integer indices.
    print("test_apple_stock_current_price_range_int_minute")
    dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    last_price_range = dl["AAPL"].close[-5:-1]  # type:ignore
    print(last_price_range)
    return True
@pytest.mark.devtest
def test_apple_stock_current_price_range_int_day() -> bool:
    # Slice daily-bar closes with negative integer indices.
    print("test_apple_stock_current_price_range_int_day")
    dl = DataLoader(TimeScale.day, connector=DataConnectorType.alpaca)
    last_price_range = dl["AAPL"].close[-6:-1]  # type:ignore
    print(last_price_range)
    return True
@pytest.mark.devtest
def no_test_apple_stock_daily_price() -> bool:
    # NOTE: prefixed with "no_" so pytest does not collect it — effectively disabled.
    print("test_apple_stock_daily_price")
    dl = DataLoader(scale=TimeScale.day, connector=DataConnectorType.alpaca)
    last_price = dl["AAPL"].close[-1]
    last_price_time = dl["AAPL"].close.index[-1]
    print(last_price, last_price_time)
    before_price = dl["AAPL"].close[-5]
    print(f"before_price {before_price}, {dl['AAPL']}")
    print(f"apple {last_price} @ {last_price_time}, before was {before_price}")
    return True
@pytest.mark.devtest
def test_negative_current_price() -> bool:
    # A bogus symbol is expected to raise ValueError; only then does the test pass.
    dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    try:
        dl["DFGDFGDFG"].close[-1]
    except ValueError:
        return True
    return False
@pytest.mark.devtest
def test_apple_stock_close_price_range_str_day() -> bool:
    # Slice daily closes by ISO-date strings.
    print("test_apple_stock_close_price_range_int_day")
    dl = DataLoader(TimeScale.day, connector=DataConnectorType.alpaca)
    last_price_range = dl["AAPL"].close[
        "2021-01-01":"2021-01-05"  # type:ignore
    ]  # type:ignore
    print(last_price_range)
    return True
@pytest.mark.devtest
def test_apple_stock_close_price_range_str_minute() -> bool:
    # Slice minute closes by timestamp strings.
    print("test_apple_stock_close_price_range_str_minute")
    dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    last_price_range = dl["AAPL"].close[
        "2021-01-05 09:45:00":"2021-01-05 09:50:00"  # type:ignore
    ]
    print(last_price_range)
    return True
@pytest.mark.devtest
def test_apple_stock_close_price_range_str_minute_int() -> bool:
    # Mixed slice: string start, negative-integer end.
    print("test_apple_stock_close_price_range_str_minute")
    dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    last_price_range = dl["AAPL"].close[
        "2021-12-15 09:45:00":-1  # type:ignore
    ]  # type:ignore
    print(last_price_range)
    return True
@pytest.mark.devtest
def test_apple_stock_price_range_int_minute() -> bool:
    # Slice the whole symbol frame (not just close) with integer indices.
    print("test_apple_stock_close_price_range_str_minute")
    dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    last_price_range = dl["AAPL"][-5:-1]  # type:ignore
    print(last_price_range)
    return True
@pytest.mark.devtest
def test_apple_stock_price_range_int_day() -> bool:
    print("test_apple_stock_price_range_int_day")
    dl = DataLoader(TimeScale.day, connector=DataConnectorType.alpaca)
    last_price_range = dl["AAPL"][-5:-1]  # type:ignore
    print(last_price_range)
    return True
@pytest.mark.devtest
def test_apple_stock_price_range_date_day() -> bool:
    print("test_apple_stock_price_range_date_day")
    dl = DataLoader(TimeScale.day, connector=DataConnectorType.alpaca)
    last_price_range = dl["AAPL"]["2020-10-05":"2020-10-08"]  # type:ignore
    print(last_price_range)
    return True
@pytest.mark.devtest
def test_apple_stock_price_range_date_int_day() -> bool:
    print("test_apple_stock_price_range_date_int_day")
    dl = DataLoader(TimeScale.day, connector=DataConnectorType.alpaca)
    last_price_range = dl["AAPL"]["2020-10-05":-1]  # type:ignore
    print(last_price_range)
    return True
@pytest.mark.devtest
def test_apple_stock_price_range_date_int_min() -> bool:
    print("test_apple_stock_price_range_date_int_min")
    dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    last_price_range = dl["AAPL"]["2020-10-05":-1]  # type:ignore
    print(last_price_range)
    return True
@pytest.mark.devtest
def test_apple_stock_price_range_date_int_min_open() -> bool:
    # Open-ended slice (no stop) starting from a date string.
    print("test_apple_stock_price_range_date_int_min_open")
    dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    last_price_range = dl["AAPL"]["2020-10-05":]  # type:ignore
    print(last_price_range)
    return True
@pytest.mark.devtest
def test_apple_stock_price_open_range_date_int_min_open() -> bool:
    print("test_apple_stock_price_close_range_date_int_min_open")
    dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    last_price_range = dl["AAPL"].open["2020-10-05":]  # type:ignore
    print(last_price_range)
    return True
@pytest.mark.devtest
def test_apple_stock_price_range_date_min_open() -> bool:
    # NOTE(review): this test returns True on both the success and the
    # ValueError path — it never fails; confirm whether the final
    # ``return True`` was meant to be ``return False``.
    print("test_apple_stock_price_range_date_min_open")
    dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    try:
        last_price_range = dl["AAPL"][:]  # type:ignore
        print(last_price_range)
    except ValueError:
        return True
    return True
@pytest.mark.devtest
def test_apple_stock_price_open_range_date_min_open() -> bool:
    # NOTE(review): returns True on both paths — this test can never fail;
    # confirm whether the final ``return True`` was meant to be ``return False``.
    print("test_apple_stock_price_open_range_date_min_open")
    dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    try:
        last_price_range = dl["AAPL"].open[:]  # type:ignore
        print(last_price_range)
    except ValueError:
        return True
    return True
@pytest.mark.devtest
def test_apple_stock_price_range_date_min() -> bool:
    # Slice by datetime.date objects, then restrict to market hours.
    print("test_apple_stock_price_range_date_min")
    dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    d1 = date(year=2021, month=2, day=1)
    d2 = date(year=2021, month=2, day=2)
    last_price_range = dl["AAPL"][d1:d2].between_time(  # type:ignore
        "9:30", "16:00"
    )  # type:ignore
    print(last_price_range)
    return True
@pytest.mark.devtest
def test_apple_stock_price_range_date_min_mixed() -> bool:
    # Mixed slice: date object start, string end.
    print("test_apple_stock_price_range_date_min_mixed")
    dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    d1 = date(year=2021, month=2, day=1)
    last_price_range = dl["AAPL"][d1:"2021-02-02"].between_time(  # type:ignore
        "9:30", "16:00"
    )  # type:ignore
    print(last_price_range)
    return True
@pytest.mark.devtest
def test_apple_stock_price_range_date_day_mixed() -> bool:
    print("test_apple_stock_price_range_date_day_mixed")
    dl = DataLoader(TimeScale.day, connector=DataConnectorType.alpaca)
    d1 = date(year=2021, month=2, day=1)
    last_price_range = dl["AAPL"][d1:"2021-02-02"]  # type:ignore
    print(last_price_range)
    return True
@pytest.mark.devtest
def test_apple_stock_price_open_range_date_min_mixed() -> bool:
    # Mixed slice on the ``open`` column: naive datetime start, string end.
    print("test_apple_stock_price_range_date_min_mixed")
    dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    d1 = datetime(year=2021, month=2, day=1, hour=3, minute=0)
    last_price_range = (
        dl["AAPL"]
        .open[d1:"2021-02-01 21:00:00"]  # type:ignore
        .between_time("9:30", "16:00")  # type:ignore
    )
    print(last_price_range)
    return True
@pytest.mark.devtest
def test_apple_stock_price_open_str() -> bool:
    # Point lookup by timestamp string (d1 is unused here).
    print("test_apple_stock_price_open_str")
    dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    d1 = date(year=2021, month=2, day=1)
    last_price_range = dl["AAPL"].open["2021-02-02 09:45:00"]
    print(last_price_range)
    return True
@pytest.mark.devtest
def test_apple_stock_price_open_date() -> bool:
    # Point lookup by a timezone-aware (New York) datetime.
    print("test_apple_stock_price_open_date")
    dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    d1 = nyc.localize(datetime(year=2021, month=2, day=1, hour=9, minute=30))
    last_price_range = dl["AAPL"].open[d1]
    print(last_price_range)
    return True
@pytest.mark.devtest
def test_get_symbols_alpaca() -> bool:
    # The underlying data API exposes the tradable symbol universe.
    print("test_get_symbols_alpaca")
    dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    tickers = dl.data_api.get_symbols()
    print(len(tickers))
    return True
@pytest.mark.devtest
def test_apple_update() -> bool:
    # Write a synthetic 8-column row into the loader via .loc and read it back.
    print("test_apple_stock_price_open_str")
    dl = DataLoader(TimeScale.minute, connector=DataConnectorType.alpaca)
    d1 = date(year=2021, month=2, day=1)
    last_price_range = dl["AAPL"][-1]
    print("after this")
    dl["AAPL"].loc["2021-02-02 09:46:00"] = [
        100.0,
        100.0,
        100.0,
        100.0,
        100.0,
        100.0,
        100.0,
        100.0,
    ]
    print(dl["AAPL"].loc["2021-02-02 09:46:00"])
    return True
|
from PyQt5 import QtCore, QtGui
import cv2
import urllib
import numpy as np
from ca import caget, caput
import pv
class CameraViewer(QtGui.QWindow):
    """Poll an MJPEG camera stream (URL built from EPICS PVs) and display frames.

    Fixed in this revision: the body used Python-2-only constructs
    (``print a``, implicit str/bytes mixing, ``urllib.urlopen``) which are
    syntax/runtime errors under Python 3, plus the removed OpenCV constant
    ``cv2.CV_LOAD_IMAGE_COLOR`` and deprecated ``np.fromstring``.

    NOTE(review): the Qt usage looks inconsistent — ``QtGui.QImage`` has no
    ``setBackgroundRole``/``setScaledContents``/``setPixmap``, and
    ``QScrollArea``/``setCentralWidget`` live in QtWidgets/QMainWindow.
    Presumably adapted from a QMainWindow/QLabel example; confirm the
    intended widget classes before relying on the GUI path.
    """
    def __init__(self):
        super(CameraViewer, self).__init__()
        self.imageLabel = QtGui.QImage()
        self.imageLabel.setBackgroundRole(QtGui.QPalette.Base)
        self.imageLabel.setScaledContents(True)
        self.scrollArea = QtGui.QScrollArea()
        self.scrollArea.setWidget(self.imageLabel)
        self.setCentralWidget(self.scrollArea)
        self.setWindowTitle("Image Viewer")
        self.resize(640, 480)
        # Re-poll the camera at roughly 30 Hz.
        timer = QtCore.QTimer(self)
        timer.timeout.connect(self.open)
        timer.start(33)  # 30 Hz

    def open(self):
        """Read the MJPEG stream, decode JPEG frames and display them."""
        import urllib.request  # py3: urllib.urlopen no longer exists
        # Build the stream URL from the camera's EPICS PVs.
        camera = ('http://' + str(caget('ME14E-DI-CAM-01:MJPG:HOST_RBV'))
                  + ':' + str(caget('ME14E-DI-CAM-01:MJPG:HTTP_PORT_RBV'))
                  + '/cam' + str(caget('ME14E-DI-CAM-01:MJPG:CLIENTS_RBV'))
                  + '.mjpg.mjpg')
        stream = urllib.request.urlopen(camera)
        buf = b''  # renamed from ``bytes`` — it shadowed the builtin
        while True:
            buf += stream.read(1024)
            # JPEG frames are delimited by SOI (0xffd8) / EOI (0xffd9) markers.
            a = buf.find(b'\xff\xd8')
            b = buf.find(b'\xff\xd9')
            if a != -1 and b != -1:
                jpg = buf[a:b + 2]
                buf = buf[b + 2:]
                i = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
                cv2.imshow(camera, i)
                if cv2.waitKey(1) == 27:  # ESC quits
                    exit(0)
                self.imageLabel.setPixmap(QtGui.QPixmap.fromImage(i))
                self.imageLabel.adjustSize()
if __name__ == '__main__':
    import sys
    app = QtGui.QGuiApplication(sys.argv)
    # Bind the instance to its own name instead of shadowing the class.
    viewer = CameraViewer()
    viewer.show()
    sys.exit(app.exec_())
|
"""
Test script for BIDeN. This script runs all test cases (2^N - 1, where N is the maximum number of components).
We test the detailed case results of Task I using this script.
"""
import os
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
from util import html
import util.util as util
from itertools import combinations
if __name__ == '__main__':
    opt = TestOptions().parse()  # get test options
    # Hard-wire the options the test code supports.
    opt.num_threads = 0   # test code only supports num_threads = 1
    opt.batch_size = 1    # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True    # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1   # no visdom display; the test code saves the results to a HTML file.
    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    train_dataset = create_dataset(util.copyconf(opt, phase="train"))
    model = create_model(opt)  # create a model given opt.model and other options
    web_dir = os.path.join(opt.results_dir, opt.name, '{}_{}'.format(opt.phase, opt.epoch))  # define the website directory
    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
    opt.test_choice = 0
    # Component labels: the first ``opt.max_domain`` letters of A..H.
    dic = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H'}
    items = [dic[i] for i in range(opt.max_domain)]
    # Enumerate every non-empty subset of components (2^N - 1 cases), e.g.
    # 'A', 'B', 'AB', ... — renamed from ``all`` to stop shadowing the builtin,
    # and built with ''.join instead of char-by-char concatenation.
    combos = [''.join(p)
              for s in range(1, len(items) + 1)
              for p in combinations(items, s)]
    for j, current in enumerate(combos):
        opt.test_input = current
        if j == 0:
            model.setup(opt)  # regular setup: load and print networks; create schedulers
            model.parallelize()
        print("Current test", current)
        for i, data in enumerate(dataset):
            model.set_input(data)  # unpack data from data loader
            model.test()  # run inference
|
from unittest import TestCase
import geopandas as gpd
from xmlunittest import XmlTestMixin
import logging
from src.eq_fault_geom.geomio.cfm_faults import CfmMultiFault
from src.eq_fault_geom.geomio.cfm_faults import CfmFault
from src.eq_fault_geom.geomio.cfm_faults import required_fields
from src.eq_fault_geom.geomio.cfm_faults import expected_fields
from src.eq_fault_geom.geomio.cfm_faults import valid_dip_directions
from shapely.geometry.polygon import LineString
class test_cfm_faults(TestCase, XmlTestMixin):
    """Tests for CfmMultiFault built from the CFM v0.6 linework shapefile.

    NOTE(review): the shapefile path is relative to the directory the tests
    are run from — confirm the expected working directory.
    """
    def setUp(self):
        self.filename = "../../../data/cfm_linework/NZ_CFM_v0_6_160221.shp"
        self.fault_geodataframe = gpd.GeoDataFrame.from_file(self.filename)
        self.cmf_faults = CfmMultiFault(self.fault_geodataframe)
        self.logger = logging.getLogger('cmf_logger')
        # Sort alphabetically by name
        self.sorted_df = self.fault_geodataframe.sort_values("Name")
        # Reset index to line up with alphabetical sorting
        self.sorted_df = self.sorted_df.reset_index(drop=True)
        self.faults = []
    def test_check_input1(self):
        # Dropping the last required field must make validation fail.
        df_response = self.fault_geodataframe[required_fields[:-1]].copy()
        with self.assertRaises(ValueError):
            self.cmf_faults.check_input1(df_response)
    def test_check_input2(self):
        # A missing *expected* (non-required) field only logs a warning.
        exf = [i for i in expected_fields if i not in ['Source2', 'Source1_1', 'Name', 'Qual_Code']]
        df_reponse = self.fault_geodataframe[exf[:-1]].copy()
        with self.assertLogs(logger=self.logger, level='WARNING') as cm:
            self.cmf_faults.check_input2(df_reponse)
            print(cm.output)
        self.assertIn(
            "WARNING:cmf_logger:missing expected field", cm.output[0]
        )
    def test_add_fault(self):
        # Each added row must grow the fault list by exactly one.
        self.assertGreater(len(self.sorted_df), 0, "Not enough rows in the input data set")
        for i,fault in self.sorted_df.iterrows():
            length = len(self.cmf_faults.faults)
            self.cmf_faults.add_fault(fault)
            self.assertAlmostEqual(length+1, len(self.cmf_faults.faults))
        #assert False
    def test_fault_numbers(self):
        #self.assertIsNone(self.cmf_faults.fault_numbers)
        # NOTE(review): assertIsNotNone returns None, so this ``if`` body can
        # never execute — likely meant ``if self.cmf_faults.fault_numbers is not None:``.
        if self.assertIsNotNone(self.cmf_faults.fault_numbers):
            self.assertIsInstance(self.cmf_faults.fault_numbers, int)
            self.assertFalse(len(self.cmf_faults.fault_numbers) == 0, 'The fault number is missing')
        #assert False
    def test_from_shp(self):
        multiFault = self.cmf_faults.from_shp(self.filename)
        self.assertIsNotNone(multiFault)
        response = isinstance(multiFault, CfmMultiFault)
        self.assertTrue(response, 'supplied object is not a "src.eq_fault_geom.geomio.cfm_faults.CfmMultiFault"'
                        ', it is a "{}"'.format( type( multiFault )))
    # def test_to_opensha_xml(self):
    #     prettyXml = self.cmf_faults.to_opensha_xml()
    #     self.assertIsNotNone(prettyXml)
    #     self.assertXmlDocument(prettyXml)
    #     #assert False
class test_cfm_fault(TestCase):
    """Property-level tests for CfmFault (depths, dips, rakes, traces).

    The setter tests rely on assignment order: min/max/best are set before
    the out-of-range value whose warning is asserted.
    """
    def setUp(self):
        self.cmf_fault = CfmFault()
        self.logger = logging.getLogger('cmf_logger')
        # NOTE(review): path is relative to the tests' working directory.
        self.filename = "../../../data/cfm_linework/NZ_CFM_v0_6_160221.shp"
        self.fault_geodataframe = gpd.GeoDataFrame.from_file(self.filename)
        self.cmf_faults = CfmMultiFault(self.fault_geodataframe)
        # Sort alphabetically by name
        self.sorted_df = self.fault_geodataframe.sort_values("Name")
        # Reset index to line up with alphabetical sorting
        self.sorted_df = self.sorted_df.reset_index(drop=True)
    # def test_depth_best(self): => This gets tested by depth_max and depth_min
    #     self.cmf_fault.depth_best = 5.5
    #     self.assertAlmostEqual(self.cmf_fault.depth_best, 5.5)
    #     self.cmf_fault._depth_best = 3.3
    #     self.assertNotEqual(self.cmf_fault.depth_best, 5.5)
    #
    #     self.depth_min = 20
    #     self.depth_max = 25.6
    #     depth = 17.4
    #     with self.assertLogs(logger=self.logger, level='WARNING') as cm:
    #         self.cmf_fault.depth_best = depth
    #     self.assertIn(
    #         "WARNING:cmf_logger:depth_best lower than depth_min", cm.output
    #     )
    def test_depth_max(self):
        # Setter stores the value; writing the private attr bypasses validation.
        self.cmf_fault.depth_max = 10.5
        self.assertAlmostEqual(self.cmf_fault.depth_max, 10.5)
        self.cmf_fault._depth_max = 8.6
        self.assertNotEqual(self.cmf_fault.depth_max, 10.5)
        with self.assertRaises(Exception):
            self.cmf_fault.depth_max = "Hello"
        # depth_min = self.cmf_fault.depth_min
        # depth_best = self.cmf_fault.depth_best
        # depth = min(depth_min, depth_best) - 1
        self.cmf_fault.depth_min = 20
        self.cmf_fault.depth_best = 20.4
        depth = 19.5
        with self.assertLogs(logger=self.logger, level='WARNING') as cm:
            self.cmf_fault.depth_max = depth
        self.assertIn(
            "WARNING:cmf_logger:depth_max lower than either depth_min or depth_best", cm.output
        )
    def test_depth_min(self):
        self.cmf_fault.depth_min = 30.5
        self.assertAlmostEqual(self.cmf_fault.depth_min, 30.5)
        self.cmf_fault._depth_min = 1.5
        self.assertNotEqual(self.cmf_fault.depth_min, 10.5)
        with self.assertRaises(Exception):
            self.cmf_fault.depth_min = "Hello"
        self.cmf_fault.depth_max = 50
        self.cmf_fault.depth_best = 10
        depth = 30.5
        with self.assertLogs(logger=self.logger, level='WARNING') as cm:
            self.cmf_fault.depth_min = depth
        self.assertIn(
            "WARNING:cmf_logger:depth_min higher than either depth_max or depth_best", cm.output
        )
    # def test_validate_depth(self): #no need to test this as it's used and tested in depth_max and depth_min
    #     assert False
    # def test_dip_best(self): => get tested by dip max and dip_min
    #     assert False
    def test_dip_max(self):
        self.cmf_fault.dip_max = 10.5
        self.assertAlmostEqual(self.cmf_fault.dip_max, 10.5)
        self.cmf_fault._dip_max = 8.6
        self.assertNotEqual(self.cmf_fault.dip_max, 10.5)
        #
        with self.assertRaises(Exception):
            self.cmf_fault.dip_max = "Hello"
        self.cmf_fault.dip_min = 20.6
        self.cmf_fault.dip_best = 40.1
        dip = 19.5
        with self.assertLogs(logger=self.logger, level='WARNING') as cm:
            self.cmf_fault.dip_max = dip
        self.assertIn(
            "WARNING:cmf_logger:dip_max is lower than dip min or dip best", cm.output
        )
    def test_dip_min(self):
        self.cmf_fault.dip_min = 10.5
        self.assertAlmostEqual(self.cmf_fault.dip_min, 10.5)
        self.cmf_fault._dip_min = 8.6
        self.assertNotEqual(self.cmf_fault.dip_min, 10.5)
        #
        with self.assertRaises(Exception):
            self.cmf_fault.dip_min = "Hello"
        self.cmf_fault.dip_max = 45.3
        self.cmf_fault.dip_best = 40.1
        dip = 50.6
        with self.assertLogs(logger=self.logger, level='WARNING') as cm:
            self.cmf_fault.dip_min = dip
        self.assertIn(
            "WARNING:cmf_logger:dip_min is higher than dip max or dip best", cm.output
        )
    # not sure if the test below is correct;
    def test_dip_dir_str(self):
        dip_dir = 'NE'
        self.cmf_fault.dip_dir_str = dip_dir
        self.assertIsInstance(dip_dir, str)
        series = self.sorted_df.iloc[0]
        self.cmf_fault.nztm_trace = series['geometry']
        with self.assertLogs(logger=self.logger, level='WARNING') as cm:
            self.cmf_fault.validate_dip_direction()
        self.assertIn(
            "WARNING:cmf_logger:Supplied trace and dip direction are inconsistent", cm.output
        )
        dip_dir = None
        self.cmf_fault.dip_dir_str = dip_dir
        # NOTE(review): expected value is tied to the first (alphabetical)
        # fault in the v0.6 shapefile — changes with the data file.
        self.assertAlmostEqual(self.cmf_fault.dip_dir, 330.15406806735234)
    # still working on this
    def test_dip_sigma(self):
        self.cmf_fault._dip_sigma = 8.6
        self.assertAlmostEqual(self.cmf_fault.dip_sigma, 8.6)
        self.cmf_fault._dip_sigma = None
        self.cmf_fault.dip_min = 16
        self.cmf_fault.dip_max = 25
        self.assertAlmostEqual(self.cmf_fault.dip_sigma, 4.5)
    # I think no need to test the Valuerror here as it should satisfy the validate_dip() function
    # self.cmf_fault.dip_min = None
    # self.cmf_fault.dip_max = 25
    # with self.assertRaises(ValueError):
    #     self.cmf_fault.dip_sigma
    #
    # def test_dip_dir(self):
    #     assert False
    #
    def test_validate_dip_direction(self):
        series = self.sorted_df.iloc[0]
        self.cmf_fault.nztm_trace = series['geometry']
        dip_dir = 'SE'
        self.cmf_fault.dip_dir_str = dip_dir
        self.cmf_fault.validate_dip_direction()
        self.assertAlmostEqual(self.cmf_fault.dip_dir, 150.15406806735643)
        dip_dir = None
        self.cmf_fault.dip_dir_str = dip_dir
        with self.assertLogs(logger=self.logger, level='WARNING') as cm:
            self.cmf_fault.validate_dip_direction()
        self.assertIn(
            "WARNING:cmf_logger:Insufficient information to validate dip direction", cm.output
        )
        dip_dir = 'NE'
        self.cmf_fault.dip_dir_str = dip_dir
        with self.assertLogs(logger=self.logger, level='WARNING') as cm:
            self.cmf_fault.validate_dip_direction()
        self.assertIn(
            "WARNING:cmf_logger:Supplied trace and dip direction are inconsistent", cm.output
        )
    def test_validate_dip(self):
        dip = 15.6
        self.assertIsInstance(self.cmf_fault.validate_dip(dip), float)
        dip = "Hello"
        with self.assertRaises(Exception):
            self.cmf_fault.validate_dip(dip)
        dip = -20.6 #should be between 0 - 90 otherwise assert error
        with self.assertRaises(Exception):
            self.cmf_fault.validate_dip(dip)
    def test_nztm_trace(self):
        # Assigning a real shapefile geometry must be accepted by the setter.
        series = self.sorted_df.iloc[0]
        trace = series['geometry']
        #trace = 0.124
        self.cmf_fault.nztm_trace = trace
    def test_wgs_trace(self):
        series = self.sorted_df.iloc[0]
        trace = series['geometry']
        self.cmf_fault.nztm_trace = trace
        reponseX, reponseY = self.cmf_fault.wgs_trace.coords.xy
        response = reponseX.tolist()
        actual = [172.81975618060406, 172.78381840673984, 172.7622924223485]
        # NOTE(review): assertAlmostEqual on two lists only passes via exact
        # equality (the == shortcut); consider element-wise comparison.
        self.assertAlmostEqual(response, actual)
    def test_rake_max(self):
        self.cmf_fault.rake_max = 40.5
        self.assertAlmostEqual(self.cmf_fault.rake_max, 40.5)
        self.cmf_fault._rake_max = 20.1
        self.assertNotEqual(self.cmf_fault.rake_max, 40.5)
        # #
        with self.assertRaises(Exception):
            self.cmf_fault.rake_max = "rake"
        rake = 5.21
        self.cmf_fault.rake_min = 19.00
        self.cmf_fault.rake_best = 40.10
        with self.assertLogs(logger=self.logger, level='WARNING') as cm:
            self.cmf_fault.rake_max = rake
        self.assertIn(
            "WARNING:cmf_logger:rake_max is lower than rake min or rake best", cm.output
        )
    def test_rake_min(self):
        self.cmf_fault.rake_min = 10.5
        self.assertAlmostEqual(self.cmf_fault.rake_min, 10.5)
        self.cmf_fault._rake_min = 8.6
        self.assertNotEqual(self.cmf_fault.rake_min, 10.5)
        #
        with self.assertRaises(Exception):
            self.cmf_fault.rake_min = "Hello"
        self.cmf_fault.rake_max = 45.3
        self.cmf_fault.rake_best = 40
        rake = 50
        with self.assertLogs(logger=self.logger, level='WARNING') as cm:
            self.cmf_fault.rake_min = rake
        self.assertIn(
            "WARNING:cmf_logger:rake_min is higher than rake max or rake best", cm.output
        )
    #
    def test_rake_best(self):
        self.cmf_fault.rake_min = 30.4
        rake = 20
        with self.assertLogs(logger=self.logger, level='WARNING') as cm:
            self.cmf_fault.rake_best = rake
        self.assertIn(
            "WARNING:cmf_logger:rake_best is lower than rake_min", cm.output
        )
        self.cmf_fault.rake_max = 15.7
        with self.assertLogs(logger=self.logger, level='WARNING') as cm1:
            self.cmf_fault.rake_best = rake
        self.assertIn(
            "WARNING:cmf_logger:rake_best is greater than rake_max", cm1.output
        )
        self.cmf_fault.sense_dom = 'dextral'
        self.cmf_fault.sense_sec = 'sinistral'
        with self.assertLogs(logger=self.logger, level='WARNING') as cm2:
            self.cmf_fault.rake_best = rake
        self.assertIn(
            "WARNING:cmf_logger:Supplied rake differs from dominant slip sense", cm2.output
        )
    #
    # def test_sense_dom(self):
    #     assert False
    #
    #
    # def test_sense_sec(self):
    #     assert False
    #
    #
    # def test_rake_to_opensha(self):
    #     assert False
    #
    # def test_validate_rake(self):
    #     assert False
    #
    # def test_validate_rake_sense(self):
    #     assert False
    #
    # def test_sr_best(self):
    #     assert False
    #
    #
    # def test_sr_min(self):
    #     assert False
    #
    # def test_sr_max(self):
    #     assert False
    #
    #
    # def test_validate_sr(self):
    #     assert False
    #
    # def test_sr_sigma(self):
    #     assert False
    #
    #
    # def test_name(self):
    #     assert False
    #
    # def test_number(self):
    #     assert False
    #
    # def test_parent(self):
    #     assert False
    #
    # def test_from_series(self):
    #     series = self.sorted_df.iloc[0]
    #     # length = series.
    #     response = self.cmf_fault.from_series(series)
    #
    # def test_to_xml(self):
    #     assert False
|
from django import forms
from apps.usuario.models import Perfil
from django.contrib.auth import authenticate
from django.contrib.auth.forms import PasswordResetForm
from apps.usuario.templatetags.utils import ROLES
"""
Funcion
"""
def must_be_gt(value_password):
    """Password validator: require at least 8 characters.

    Fix: the original check was ``len(value_password) < 7`` (accepting
    7-character passwords) while the error message demanded more than 8
    characters; the check is aligned with the 8-character minimum used in
    ``PasswordUsuarioPerfilForm.clean_repeat_password`` and the message
    reworded to match.
    """
    if len(value_password) < 8:
        raise forms.ValidationError('El Password debe tener al menos 8 Caracteres')
"""
Constantes
"""
ERROR_MESSAGE_USUARIO = {'required':'El usuario es requerido','unique':'El usuario ya se encuentra registrado','invalid': 'Ingrese el usuario valido'}
ERROR_MESSAGE_PASSWORD = {'required':'El password es requerido'}
ERROR_MESSAGE_EMAIL = {'required':'el email es requerido','invalid':'Ingrese un correo valido'}
#Usuario Perfil Login
class LoginUsuarioPerfilForm(forms.ModelForm):
    """Login form that authenticates a Perfil by usuario/password."""
    usuario = forms.CharField(max_length= 15)
    password = forms.CharField(max_length= 15,widget=forms.PasswordInput)
    class Meta:
        model = Perfil
        fields = ['usuario', 'password']
    def __init__(self, *args, **kwargs):
        # The view may pass the current request; it is stored for later use.
        self.request = kwargs.pop("request", None)
        super(LoginUsuarioPerfilForm, self).__init__(*args, **kwargs)
    def clean_password(self):
        password = self.cleaned_data.get('password')
        if password:
            # NOTE(review): the message claims an 8-character minimum but the
            # check only enforces 4 — confirm the intended policy.
            if len(password) < 4:
                raise forms.ValidationError("¡La contraseña debe tener al menos 8 caracteres!")
        return password
    def clean(self):
        usuario = self.cleaned_data.get("usuario")
        password = self.cleaned_data.get("password")
        if usuario and password:
            self.perfil = authenticate(usuario=usuario,password=password)
            if self.perfil:
                # NOTE(review): both branches below are no-ops — inactive
                # profiles and failed authentication are silently accepted
                # by this form (the rejection is commented out).
                if not self.perfil.estado:
                    pass
            else:
                pass
                #raise form.ValidationError("Usuario y Contraseña no válidos")
        return self.cleaned_data
#Usuario Perfil Password
class PasswordUsuarioPerfilForm(forms.Form):
    """Change-password form: current password, new password and confirmation.

    Fixes in this revision:
    - ``clean_repeat_password`` now returns the cleaned value — Django
      requires ``clean_<field>`` methods to return it; the original returned
      ``None``, wiping ``repeat_password`` from ``cleaned_data``.
    - Field access uses ``.get`` so a ``new_password`` that failed its own
      validation (absent from cleaned data) no longer raises ``KeyError``.
    """
    password = forms.CharField( max_length= 20,label='Contraseña Actual', widget= forms.PasswordInput(attrs={
        'class': 'form-control',
        'type': 'password',
        'placeholder': 'Ingresa la Contraseña',
        'autocomplete': 'off'
    }) )
    new_password = forms.CharField(max_length=20,label='Nueva Contraseña', widget=forms.PasswordInput(attrs={
        'class': 'form-control',
        'type': 'password',
        'placeholder': 'Ingresa la Contraseña',
        'autocomplete': 'off'
    }),validators = [must_be_gt] )
    repeat_password = forms.CharField( max_length= 20,label='Confirmar Nueva Contraseña', widget= forms.PasswordInput(attrs={
        'class': 'form-control',
        'type':'password',
        'placeholder': 'Ingresa Verificar Nueva Contraseña',
        'autocomplete': 'off'
    }),validators = [must_be_gt] )
    def clean_repeat_password(self):
        clean_data = super(PasswordUsuarioPerfilForm,self).clean()
        password1 = clean_data.get('new_password')
        password2 = clean_data.get('repeat_password')
        if not password1 or len(password1) < 8:
            raise forms.ValidationError(
                'La Contraseña debe tener al menos 8 Caracteres!')
        if password1 != password2:
            raise forms.ValidationError('La Confirmar Contraseña no coincide con la Nueva Contraseña')
        return password2
#Usuario Perfil
class EditarUsuarioPerfilForm(forms.ModelForm):
    """Profile-editing form (usuario, email, nombre, roles, imagen).

    Fix: ``__init__`` was defined twice; the first definition (which only
    called ``super().__init__``) was dead code silently shadowed by the
    second. The two are merged into a single ``__init__``.
    """
    usuario = forms.CharField(max_length=20,label='Usuario',widget=forms.TextInput(
        attrs={'class': 'form-control',}),error_messages=ERROR_MESSAGE_USUARIO)
    email = forms.EmailField(label='Correo Electronico',
        help_text='Ingresa Email',widget=forms.TextInput(attrs={
            'class': 'form-control',}),error_messages=ERROR_MESSAGE_EMAIL)
    nombre = forms.CharField(label='Nombre',max_length=50,
        help_text='Ingresa Nombre',widget=forms.TextInput(attrs={'class': 'form-control',}))
    apellido = forms.CharField(label='Apellidos', max_length=100,widget=forms.TextInput(attrs={
        'class': 'form-control',}),help_text='Ingresa Apellido',)
    class Meta:
        model = Perfil
        fields = ['usuario','email','nombre','roles','perfil_img']
        widgets = {'roles': forms.Select(choices=ROLES)}
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apply the Bootstrap form-control class to every widget.
        for field in iter(self.fields):
            self.fields[field].widget.attrs.update({
                'class': 'form-control'
            })
#Password-Reset
class PasswordResetEmailForm(PasswordResetForm):
    """Password-reset form that only accepts emails of active profiles."""
    def clean_email(self):
        email = self.cleaned_data.get('email')
        active_matches = Perfil.objects.filter(email__iexact=email, estado=True)
        if not active_matches.exists():
            raise forms.ValidationError("El Usuario no existe con este Correo Electrónico")
        return email
class PerfilFrom(forms.ModelForm):
    """ModelForm exposing every Perfil field, styled with Bootstrap classes."""
    class Meta:
        model = Perfil
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Tag every widget with the Bootstrap form-control class.
        for name in self.fields:
            self.fields[name].widget.attrs.update({'class': 'form-control'})
"""
class PerfilFrom1(form.ModelForm):
usuario = form.ModelChoiceField(queryset=Usuario.objects.filter(is_active=True).order_by('nombre'),
empty_label="Selecccione Usuario")
class Meta:
model = Perfil
fields = ['usuario',
'phone',
'observaciones',
'perfil_img',
'estado']
labels = {'usuario': "Seleccione Usuario",
'phone': "Numero de telefono movil",
'observaciones': "Ingrese Alguna Observacion",
'perfil_img': "Ingrese Imagen de Perfil",
'Estado': "Estado"}
widgets = {
#'Usuario': form.Select(),
'phone': form.TextInput(),
'observaciones': form.TextInput(),
#'perfil_img': form.ImageField()
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field in iter(self.fields):
self.fields[field].widget.attrs.update({
'class': 'form-control'
})
"""
|
"""
Module for a R2T pulse shape:
W.T. Franks FMP Berlin
"""
import de.bruker.nmr.mfw.root as root
#import os
import sys
from sys import argv
import TopCmds
import math
def dialog():
    """Open the R2T parameter input dialog in TopSpin and return the entries.

    Reads the MAS rate from acquisition parameter CNST 31 and pre-fills the
    dialog with it and the derived rotor period (us).  Returns the list of
    strings the user entered (or the dialog's cancel result).
    """
    MAS=float(TopCmds.GETPAR("CNST 31"))
    # Rotor period in microseconds (MAS is in Hz).
    TauR= float(1000000/MAS)
    input = TopCmds.INPUT_DIALOG("R2T input", "", \
    ["Nominal RF ampl","MAS","R2T duration","RI,RO duration","steps","Ramp","I offset","S offset"],\
    ["85",str('%.0f' % MAS),"1.5",str( '%3.2f' % (TauR)),"1000","20","8.000","6.000"],\
    ["kHz"," Hz","ms","us","","+/- %","kHz","kHz"],\
    ["1","1","1","1","1","1","1","1"],\
    ["Accept","Close"], ['a','c'], 10)
    return input
def name_no_confirm(input,Name,SP):
    """Fill in default wave-file name / SPNAM parameter without a dialog.

    Empty Name or SP arguments are replaced by defaults derived from the
    dialog input, then the wave name is stored under the SP parameter.
    Returns (Name, SP).
    """
    if Name=="":
        Name = str("R2T_" + input[1] + "MAS_" + input[6] + "I_" + input[7] + "S.wave")
    if SP=="":
        SP = str("SPNAM18")
    # Bug fix: the original wrote str(File), but no variable named File exists
    # in this function (NameError at runtime); the wave name is Name.
    TopCmds.PUTPAR(str(SP),str(Name))
    return Name, SP
def name_confirm(input):
    """Ask the user to confirm the wave-file name and SPNAM parameter.

    Presents the defaults in a TopSpin dialog, parses the dialog's Java-style
    string representation back into the two values, stores the file name
    under the chosen shaped-pulse parameter, and returns (File, SP).
    """
    Name = str("R2T_" + input[1] + "MAS_" + input[6] + "I_" + input[7] + "S.wave")
    SP = str("SPNAM18")
    Wave = str(TopCmds.INPUT_DIALOG("R2T Files", "", ["C File = ","C Wave =",],\
    [Name,SP],["",""],["1","1"],["Accept","Close"], ['a','c'], 30))
    Files = Wave[8:len(Wave)-21] #get rid of Java formatting
    # Split the "File, SP" pair out of the Java list string.
    i = Files.find(",")
    File = Files[0:i-1]
    SP = Files[i+3:]
    # NOTE(review): the slice offsets (8, -21, i-1, i+3) assume the exact Java
    # formatting of INPUT_DIALOG's return value — verify against TopSpin.
    TopCmds.PUTPAR(str(SP),str(File))
    return File, SP
def find_match(input):
    """Find the R2T matching RF amplitude and choose an amplitude scale factor.

    Scans RF amplitudes n (Hz) until the match condition
    m*MAS = sqrt(Ioffs^2 + n^2) + sqrt(Soffs^2 + n^2) changes sign, then asks
    the user how to scale the shape relative to the nominal RF amplitude.
    Returns the chosen scale factor (float).
    """
    import math
    NomRF = 1000*float(input[0])   # nominal RF amplitude in Hz (input is kHz)
    MAS = float(input[1])          # spinning rate in Hz
    Ioffs = 1000*float(input[6])   # I-channel offset in Hz
    Soffs = 1000*float(input[7])   # S-channel offset in Hz
    m=1
    Match=m*MAS-Ioffs-Soffs
    if Match<0:
        # First sideband cannot match: try the second rotational sideband.
        m=2
        Match=m*MAS-Ioffs-Soffs
        if Match<0:
            TopCmds.MSG("Cannot find match conditions, spinning too slowly")
            TopCmds.EXIT()
    found=0
    for n in range(10000):
        WIeff = math.sqrt(Ioffs*Ioffs+float(n*n))   # effective I-spin field
        WSeff = math.sqrt(Soffs*Soffs+float(n*n))   # effective S-spin field
        Match=(m*MAS)-WIeff-WSeff
        #mm is the amplitude in Hz
        if Match > 0.0: mm=n
        if Match < 0.0: found=1
    if found==0:
        TopCmds.MSG("Match condition not found within 10kHz, aborting")
        TopCmds.EXIT()
    YorN=TopCmds.SELECT("Scaling", "The match condition is around %i Hz \n\n Which scaling should be used?" % mm , ["Calibration", "0.5" , "None(1.0)"])
    # CONFIRM("Scale Shape Pulse for Match","The amplitude should be approximately %i Hz \n \nUse scaling?" % Match)
    # NOTE(review): at this point Match holds the value from the LAST loop
    # iteration (n=9999), not the amplitude mm at the match point, so the
    # "Calibration" scale below uses that residual — confirm this is intended.
    if YorN < 0: Scale=1.0
    if YorN == 0: Scale=float(Match)/NomRF
    if YorN == 1: Scale=0.5
    if YorN == 2: Scale=1.0
    return Scale
def make(Scale,input,name):
    """Compute the R2T amplitude ramp and save it as a TopSpin shape file.

    The shape ramps up over the ramp-in period, sweeps linearly from
    (100 - Delta)% to (100 + Delta)% over the bulk, and ramps back down over
    the ramp-out period.  Every point has phase 0; amplitudes are multiplied
    by *Scale* before saving under name[0].
    """
    import math
    ampl = [] # normalized to 0...100
    ph = [] # normalized to 0...100
    pi = 3.14159265
    durat = 1000*float(input[2])   # total duration in us (input is ms)
    RIRO = float(input[3])         # ramp-in/ramp-out duration in us
    steps = int(input[4])          # number of shape points
    Delta = float(input[5])        # ramp amplitude in +/- percent
    RIOsteps = int(steps*RIRO/durat)   # points in each ramp section
    Start=100.0-(Delta)
    End=100.0+(Delta)
    for i in range(steps):
        if i < RIOsteps:
            # Ramp in: 0 up to Start.
            RF=1.0*i*Start/RIOsteps
        if i >= RIOsteps:
            # Bulk: linear sweep from Start to End.
            RF=1.0*Start+(i-RIOsteps)*(End-Start)/(steps-(2*RIOsteps))
        if i > (steps-RIOsteps):
            # Ramp out: deliberately overrides the sweep for the final points.
            RF=1.0*End-(i-steps+RIOsteps)*End/RIOsteps
        ampl.append(Scale*RF)
        ph.append(0.0)
    TopCmds.SAVE_SHAPE(name[0], "NoRotation", ampl,ph)
|
from hackerrank.ConsoleReader import *
from hackerrank.SortingList import *
from hackerrank.LambdaCollection import *
from hackerrank.NumpyCollection import *
# Enter your code here. Read input from STDIN. Print output to STDOUT
# Build an array from the literal list via the project helper, transpose it,
# and print the flattened result.
print(createnumpyarray([1, 2, 3, 4]).T.flatten())
|
# -*- coding: utf-8 -*-
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from datetime import datetime
from openerp import SUPERUSER_ID, api
from openerp.exceptions import except_orm, Warning, RedirectWarning
class account_invoice(osv.osv):
    """Extends account.invoice with links to the main and labor subcontracts."""
    _inherit = "account.invoice"
    _columns = {
        # Main (general) contract and labor subcontract this invoice belongs to.
        'contract_id':fields.many2one('market.contract',string='总包合同'),
        'labor_contract_id':fields.many2one('labor.contract',string='分包合同'),
    }
class account_payment(osv.osv):
    """Extends account.payment with per-contract detail lines and a posting guard."""
    _inherit = 'account.payment'
    _columns = {
        # Detail lines for labor subcontracts / main (market) contracts.
        'actual_line':fields.one2many('payment.actual.line','account_payment_id',string=u'分包合同明细',),
        'market_actual_line':fields.one2many('market.payment.actual.line','account_payment_id',string=u'总包合同明细',),
    }

    @api.multi
    def post(self):
        """Block posting when the detail-line total exceeds the payment amount.

        Supplier payments are validated against the labor-contract lines,
        customer payments against the market-contract lines.
        """
        for obj in self:
            all_amount = 0
            if obj.partner_type == 'supplier':
                for line in obj.actual_line:
                    all_amount += line.payment_total
            if obj.partner_type == 'customer':
                for line in obj.market_actual_line:
                    all_amount += line.payment_total
            if all_amount > obj.amount:
                # Bug fix: except_osv requires (title, message); the original
                # passed a single positional argument, which raises TypeError
                # instead of showing the warning.
                raise osv.except_osv(u'警告', u'明细行付款金额总和比付款总额大!')
        return super(account_payment,self).post()
|
"""
Unit tests for `execute_task`, `handle_failure`,
and `run_shell` from `ScriptRunner.pipeline`.
"""
from unittest.mock import MagicMock
import pytest
from exceptions import StopKeywordFailures, UnknownKeywordFailures
from pipeline import (
SKIP,
STOP,
UNKNOWN_KEYWORD_MESSAGE,
execute_task,
handle_failure,
run_shell,
)
class TestHandleFailure:
    """Wraps tests for `handle_failure`."""

    @staticmethod
    def test_stop(capsys):
        """
        Passes test if `handle_failure(STOP)` raises
        `StopKeywordFailures` and prints to stdout `STOP`.
        """
        with pytest.raises(StopKeywordFailures):
            handle_failure(STOP)
        captured = capsys.readouterr()
        assert captured.out.rstrip("\n") == STOP

    @staticmethod
    def test_skip(capsys):
        """
        Passes test if `handle_failure(SKIP)` prints to stdout `SKIP`.
        """
        handle_failure(SKIP)
        captured = capsys.readouterr()
        assert captured.out.rstrip("\n") == SKIP

    @staticmethod
    def test_unknown_keyword():
        """
        Passes test if `handle_failure` with unknown keyword
        raises `UnknownKeywordFailures(UNKNOWN_KEYWORD_MESSAGE)`.
        """
        with pytest.raises(UnknownKeywordFailures, match=UNKNOWN_KEYWORD_MESSAGE):
            handle_failure("something_unknown")
def test_execute_task(monkeypatch, capsys, test_file):
    """
    Generates python script, executes with mocked task
    and checks that:
    + strerr is empty;
    + stdout contains printed task;
    + stdout contains printed script message;
    + stdout contains script exception;
    + stdout contains `SKIP` keyword.
    """
    # NOTE(review): the monkeypatch fixture is requested but never used here.
    # Generates the python script with print and exception.
    message = "pyscript message"
    exception = "pyscript exception"
    filename = "pyscript.py"
    content = f"print('{message}')\nraise Exception('{exception}')"
    pyscript = test_file(filename, content)
    # Mocks `Task` instance.
    mocked_task = MagicMock()
    mocked_task.command = ["python", pyscript]
    mocked_task.failures = SKIP
    mocked_task.next_task = False
    # Executes.
    execute_task(mocked_task)
    # Catches stdout, stderr.
    out, err = capsys.readouterr()
    assert err == ""
    assert str(mocked_task) in out
    assert message in out
    assert exception in out
    assert SKIP in out
def test_run_shell():
    """
    Passes test if `run_shell` executes `echo` command.
    """
    expected = "test message"
    stdout, stderr = run_shell(["echo", expected])
    assert stdout.rstrip("\n") == expected
    assert stderr == ""
|
import string
import base64
class VegaRenderer:
    """Base renderer that embeds Vega charts into a yattag-style document.

    Subclasses implement _make_vega_spec to turn a match payload into a Vega
    spec (JSON string).  write_match reserves a <div> per match; write_script
    later emits one bootstrap script per div that parses the base64-encoded
    spec and renders it, notifying the parent iframe when done.
    """
    script_code = string.Template('''
    (function(spec) {
        var view = new vega.View(vega.parse(spec), {
        renderer: '${renderer}',
        container: '#${div_id}',
        hover: true
        });
        view.runAsync().then(function() {
            parent.document.getElementById('${iframe_id}').onload();
        });
    }) (JSON.parse(atob('${vega_spec}')));
    ''')

    def __init__(self):
        # Maps div id -> match payload; populated by write_match.
        self._data = {}

    def _make_vega_spec(self, match):
        """Subclasses must return a Vega spec (JSON string) for *match*."""
        raise NotImplementedError()

    def write_head(self, doc):
        """Emit the <script> tag that loads the Vega runtime."""
        doc, tag, text = doc.tagtext()
        with tag('script', src='https://vega.github.io/vega/vega.min.js'):
            pass

    def write_match(self, doc, match, fetch_id):
        """Reserve a container <div> for *match* and remember it for write_script."""
        doc, tag, text = doc.tagtext()
        div_id = fetch_id()
        with tag('div', id=div_id):
            self._data[div_id] = match

    def write_script(self, doc, iframe_id):
        """Emit one rendering script per registered div."""
        doc, tag, text = doc.tagtext()
        with tag('script'):
            for div_id, match in self._data.items():
                spec_json = self._make_vega_spec(match)
                encoded_spec = base64.b64encode(spec_json.encode('utf8')).decode('utf8')
                text(VegaRenderer.script_code.safe_substitute(
                    renderer='canvas',  # canvas or svg
                    div_id=div_id,
                    iframe_id=iframe_id,
                    vega_spec=encoded_spec,
                ))
|
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from tensorflow.keras.layers import Input, Lambda, Embedding, Dense, Concatenate, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, History, TensorBoard
class FullyConnectedNetwork:
    """Feed-forward Keras regressor with learned embeddings for categorical columns.

    The input is a flat vector whose first len(categorical_sizes) entries are
    integer-encoded categorical columns (in sorted column-name order) and whose
    remaining entries are numeric features.  Each categorical column gets its
    own Embedding layer; embeddings and numeric features are concatenated and
    passed through configurable dense/dropout blocks to a linear output.
    """

    def __init__(self, input_size, hyperparameters, categorical_sizes):
        """Build and compile the model.

        Args:
            input_size: total number of input columns (categorical + numeric).
            hyperparameters: dict with keys 'embedding_factor', 'dense_blocks',
                'lr_plateau_factor', 'lr_plateau_patience',
                'early_stopping_min_delta', 'early_stopping_patience',
                'learning_rate', 'validation_percentage', 'n_epochs',
                'batch_size'.
            categorical_sizes: mapping of column name -> cardinality.
        """
        self.hyperparameters = hyperparameters
        self.categorical_sizes = categorical_sizes
        self.history = History()
        inputs = Input(shape=(input_size,))
        # One embedding per categorical column, in sorted column-name order —
        # the same order preproc_train/preproc_inference arrange the columns.
        embedding_layers = list()
        for i, col_name in enumerate(sorted(list(categorical_sizes.keys()))):
            categorical_size = categorical_sizes[col_name]
            embedding_size = int(categorical_size ** (hyperparameters['embedding_factor']))
            # Bug fix: bind the loop index as a default argument.  A plain
            # closure (lambda x: x[:, i]) is late-binding, so when the Lambda
            # layers are (re)traced they would all slice the LAST categorical
            # column instead of their own.
            ith_input_slice = Lambda(lambda x, idx=i: x[:, idx])(inputs)
            embedding = Embedding(categorical_size, embedding_size, input_length=1)(ith_input_slice)
            embedding_layers.append(embedding)
        # The tail of the input vector holds the numeric features.
        numeric_inputs_slice = Lambda(lambda x: x[:, len(categorical_sizes):])(inputs)
        to_concat = embedding_layers + [numeric_inputs_slice]
        all_inputs = Concatenate(axis=1)(to_concat)
        hidden_input = all_inputs
        for block_params in self.hyperparameters['dense_blocks']:
            hidden_output = Dense(block_params['size'], activation='relu')(hidden_input)
            hidden_output = Dropout(block_params['dropout_rate'])(hidden_output)
            hidden_input = hidden_output
        outputs = Dense(1, activation='linear')(hidden_output)
        self.model = Model(inputs, outputs)
        # define optimization procedure: anneal the learning rate on
        # validation-MSE plateaus and stop early when improvement stalls.
        self.lr_annealer = ReduceLROnPlateau(monitor='val_mean_squared_error', factor=hyperparameters['lr_plateau_factor'],
                                             patience=hyperparameters['lr_plateau_patience'], verbose=1)
        self.early_stopper = EarlyStopping(monitor='val_mean_squared_error', min_delta=hyperparameters['early_stopping_min_delta'],
                                           patience=hyperparameters['early_stopping_patience'], verbose=1)
        self.tensorboard = TensorBoard(log_dir='train_logs', histogram_freq=1)
        self.model.compile(optimizer=Adam(lr=hyperparameters['learning_rate']),
                           loss='mean_squared_error',
                           metrics=['mean_squared_error'])

    def preproc_train(self, train_df):
        """Normalize/order the training frame and split train/validation arrays.

        Records the column order and the numeric columns' mean/std so that
        preproc_inference can apply the identical transform later.
        Returns (x_train, y_train, x_validation, y_validation).
        """
        train_inputs = train_df.drop('target', axis=1)
        all_cols_set = set(train_inputs.columns)
        categorical_cols_set = set(list(self.categorical_sizes.keys()))
        self.non_categorical_cols = list(all_cols_set - categorical_cols_set)
        # Categorical columns first (sorted), numeric after — must match the
        # slicing done by the Lambda layers in __init__.
        self.column_order = sorted(list(categorical_cols_set)) + sorted(self.non_categorical_cols)
        # normalize non-categorical columns
        self.non_categorical_train_mean = train_inputs[self.non_categorical_cols].mean(axis=0)
        self.non_categorical_train_std = train_inputs[self.non_categorical_cols].std(axis=0)
        train_inputs[self.non_categorical_cols] -= self.non_categorical_train_mean
        train_inputs[self.non_categorical_cols] /= self.non_categorical_train_std
        # ensure that inputs are presented in the right order
        train_inputs = train_inputs[self.column_order]
        x_train = train_inputs.values
        y_train = train_df['target'].values
        # split training and validation
        x_train, x_validation, y_train, y_validation = train_test_split(x_train, y_train,
                                                                        test_size=self.hyperparameters[
                                                                            'validation_percentage'])
        return x_train, y_train, x_validation, y_validation

    def train(self, train_df):
        """Fit the model on train_df (must contain a 'target' column)."""
        x_train, y_train, x_validation, y_validation = self.preproc_train(train_df)
        return self.model.fit(x_train, y_train, epochs=self.hyperparameters['n_epochs'],
                              batch_size=self.hyperparameters['batch_size'],
                              validation_data=(x_validation, y_validation),
                              callbacks=[self.lr_annealer, self.early_stopper, self.history, self.tensorboard],
                              verbose=1)

    def preproc_inference(self, test_df):
        """Apply the training normalization and column order to test_df.

        Must be called after preproc_train has recorded the statistics.
        Returns (x_test, y_test).
        """
        test_inputs = test_df.drop('target', axis=1)
        # normalize non-categorical columns with the TRAINING statistics
        test_inputs[self.non_categorical_cols] -= self.non_categorical_train_mean
        test_inputs[self.non_categorical_cols] /= self.non_categorical_train_std
        # ensure that inputs are presented in the right order
        test_inputs = test_inputs[self.column_order]
        x_test = test_inputs.values
        y_test = test_df['target'].values
        return x_test, y_test

    def predict(self, x_test):
        """Return 1-D predictions for an already-preprocessed input array."""
        return self.model.predict(x_test).flatten()

    def evaluate(self, test_df):
        """Return the mean squared error of the model on test_df."""
        x_test, y_test = self.preproc_inference(test_df)
        preds = self.predict(x_test)
        return mean_squared_error(y_test, preds)
|
#Snake Tutorial Python
import dwave
from pyqubo import Binary, solve_qubo
from dwave.system import DWaveSampler, EmbeddingComposite
from dimod import ExactSolver
from pyqubo import Binary
import networkx as nx
#import dwave_networkx as dnx
import math
import random
import pygame
import tkinter as tk
import dimod
from tkinter import messagebox
import pickle
import json
from dimod.serialization.json import DimodEncoder, DimodDecoder
# Board dimension: the playing field is GRID_SIZE x GRID_SIZE cells.
GRID_SIZE = 3
# Graph edge weights: plain board edges are cheap; edges along the snake's
# own body are expensive so the planned path avoids them.
NON_SNAKE_WEIGHTS=1
SNAKE_WEIGHTS=100
# QUBO penalty coefficients used by PathSolver's Hamiltonian terms.
CHI = 3
LAMBDA = 10
MU = 2
GAMMA = 10
class cube(object):
    """A single grid cell (snake segment or snack) that can move and draw itself.

    Class attributes `rows` and `w` define the board resolution and window
    width shared by all cubes.
    """
    rows = 20
    w = 500

    def __init__(self, start, dirnx=1, dirny=0, color=(0, 255, 0)):
        """Create a cube at grid position *start* heading in (dirnx, dirny).

        Bug fix: the original accepted dirnx/dirny but ignored them and
        hard-coded (1, 0); they are honoured now (defaults unchanged, and no
        caller in this module passes non-default values).
        """
        self.pos = start
        self.dirnx = dirnx
        self.dirny = dirny
        self.color = color

    def move(self, dirnx, dirny):
        """Set the direction and advance one cell in that direction."""
        self.dirnx = dirnx
        self.dirny = dirny
        self.pos = (self.pos[0] + self.dirnx, self.pos[1] + self.dirny)

    def draw(self, surface, eyes=False):
        """Render the cube on *surface*; draw eyes when this is the head."""
        dis = self.w // self.rows  # pixel size of one grid cell
        i = self.pos[0]
        j = self.pos[1]
        pygame.draw.rect(surface, self.color, (i*dis+1, j*dis+1, dis-2, dis-2))
        if eyes:
            centre = dis//2
            radius = 3
            circleMiddle = (i*dis+centre-radius, j*dis+8)
            circleMiddle2 = (i*dis + dis - radius*2, j*dis+8)
            pygame.draw.circle(surface, (0,0,0), circleMiddle, radius)
            pygame.draw.circle(surface, (0,0,0), circleMiddle2, radius)
class snake(object):
    """The player snake: an ordered list of cube segments plus turn bookkeeping.

    NOTE(review): `body` and `turns` are class-level mutable attributes and
    are therefore shared between instances until reset() rebinds them; that
    is harmless for the single snake created in main(), but worth confirming.
    """
    body = []
    turns = {}

    def __init__(self, color, pos):
        self.color = color
        self.head = cube(pos)
        self.body.append(self.head)
        self.dirnx = 0
        self.dirny = 1

    def move(self):
        """Advance every segment one step, turning at recorded turn points.

        Segments wrap around the board edges (toroidal board).  The original
        tutorial's keyboard handling was commented out; the direction is set
        externally (see main()).
        """
        # Remember the current direction at the head's cell so trailing
        # segments turn at the same spot when they reach it.
        self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]
        for i, c in enumerate(self.body):
            p = c.pos[:]
            if p in self.turns:
                turn = self.turns[p]
                c.move(turn[0], turn[1])
                # Once the tail passes through a turn point, drop it.
                if i == len(self.body)-1:
                    self.turns.pop(p)
            else:
                # No pending turn: continue straight, wrapping at the borders.
                if c.dirnx == -1 and c.pos[0] <= 0: c.pos = (c.rows-1, c.pos[1])
                elif c.dirnx == 1 and c.pos[0] >= c.rows-1: c.pos = (0,c.pos[1])
                elif c.dirny == 1 and c.pos[1] >= c.rows-1: c.pos = (c.pos[0], 0)
                elif c.dirny == -1 and c.pos[1] <= 0: c.pos = (c.pos[0],c.rows-1)
                else: c.move(c.dirnx,c.dirny)

    def move_via_dwave(self):
        """Planned pipeline: embed graph -> build QUBO -> solve on D-Wave ->
        unpack result into moves.  Not implemented yet.

        Bug fix: the original signature omitted `self`, so calling this as a
        method would raise TypeError; nothing calls it yet.
        """
        pass

    def get_snake_unconnected_graph(self):
        """Return the bare GRID_SIZE x GRID_SIZE board graph with uniform
        NON_SNAKE_WEIGHTS edge weights."""
        G = nx.grid_2d_graph(GRID_SIZE, GRID_SIZE, periodic=False)
        nx.set_edge_attributes(G, NON_SNAKE_WEIGHTS, "weight")
        return G

    def snake_to_graph(self):
        """Return the board graph with the snake's body edges weighted
        SNAKE_WEIGHTS so the path solver avoids them."""
        # Bug fix: the original called s.get_snake_unconnected_graph(),
        # depending on the module-level global `s` instead of this instance.
        G = self.get_snake_unconnected_graph()
        prev_cube_pos = False
        for segment in self.body:
            print(f'snake body cube: {segment.pos}')
            if prev_cube_pos:
                G.add_edge(segment.pos, prev_cube_pos, weight=SNAKE_WEIGHTS)
            prev_cube_pos = segment.pos
        return G

    def graph_to_moves(self, path_graph):
        """Walk *path_graph* from the head and print the (dx, dy) move list.

        NOTE(review): the computed moves are only printed, never returned or
        stored — callers currently rely on PathSolver.moves instead.
        """
        moves = []
        head_pos = self.body[0].pos
        prev_head_poses = [head_pos]
        print(f'all edges: {path_graph.edges()}')
        for _ in range(len(path_graph.edges)):
            for edges in path_graph.edges(head_pos):
                print(f'head_pos {head_pos}')
                print(f'edges: {edges}')
                for edge in edges:
                    print(f'loop edge: {edge}')
                    if edge not in prev_head_poses:
                        print("AS")
                        prev_head_poses.append(head_pos)
                        moves.append((head_pos[0]-edge[0], head_pos[1]-edge[1]))
                        head_pos = edge
                        break
        print(moves)

    def reset(self, pos):
        """Shrink back to a single head cube at *pos*, heading down."""
        self.head = cube(pos)
        self.body = []
        self.body.append(self.head)
        self.turns = {}
        self.dirnx = 0
        self.dirny = 1

    def addCube(self):
        """Grow by one segment, placed behind the tail opposite its direction."""
        tail = self.body[-1]
        dx, dy = tail.dirnx, tail.dirny
        if dx == 1 and dy == 0:
            self.body.append(cube((tail.pos[0]-1,tail.pos[1])))
        elif dx == -1 and dy == 0:
            self.body.append(cube((tail.pos[0]+1,tail.pos[1])))
        elif dx == 0 and dy == 1:
            self.body.append(cube((tail.pos[0],tail.pos[1]-1)))
        elif dx == 0 and dy == -1:
            self.body.append(cube((tail.pos[0],tail.pos[1]+1)))
        # The new tail inherits the old tail's direction.
        self.body[-1].dirnx = dx
        self.body[-1].dirny = dy

    def draw(self, surface):
        """Draw all segments; the head (index 0) gets eyes."""
        for i, c in enumerate(self.body):
            if i == 0:
                c.draw(surface, True)
            else:
                c.draw(surface)
# NOTE(review): shadowed — an identical drawGrid is redefined later in this
# module; the later definition is the one actually used at runtime.
def drawGrid(w, rows, surface):
    """Step through grid-line coordinates; the pygame.draw.line calls are
    commented out, so this is currently a no-op."""
    sizeBtwn = w // rows
    x = 0
    y = 0
    for l in range(rows):
        x = x + sizeBtwn
        y = y + sizeBtwn
        # pygame.draw.line(surface, (000,000,000), (x,0),(x,w))
        # pygame.draw.line(surface, (255,000), (0,y),(w,y))
# NOTE(review): shadowed — an identical redrawWindow is redefined later in
# this module; the later definition is the effective one.
def redrawWindow(surface):
    """Clear the window, draw the snake, the snack and the grid, then update.

    Relies on the module globals rows, width, s and snack set up by main().
    """
    global rows, width, s, snack
    surface.fill((255,255,255))
    s.draw(surface)
    snack.draw(surface)
    drawGrid(width,rows, surface)
    pygame.display.update()
# NOTE(review): shadowed — an identical randomSnack is redefined later in
# this module; the later definition is the effective one.
def randomSnack(rows, item):
    """Return a random (x, y) grid cell not occupied by item's body.

    Warning: never terminates if the body covers the entire grid.
    """
    positions = item.body
    while True:
        x = random.randrange(rows)
        y = random.randrange(rows)
        if len(list(filter(lambda z:z.pos == (x,y), positions))) > 0:
            continue
        else:
            break
    return (x,y)
# NOTE(review): dead code — module-level duplicate of snake.reset.  It takes
# `self` but is never attached to a class, and nothing in this file calls it.
def reset(self, pos):
    self.head = cube(pos)
    self.body = []
    self.body.append(self.head)
    self.turns = {}
    self.dirnx = 0
    self.dirny = 1
# NOTE(review): dead code — module-level duplicate of snake.addCube.  It
# takes `self` but is never attached to a class; nothing in this file calls it.
def addCube(self):
    tail = self.body[-1]
    dx, dy = tail.dirnx, tail.dirny
    # Place the new segment one cell behind the tail, opposite its direction.
    if dx == 1 and dy == 0:
        self.body.append(cube((tail.pos[0]-1,tail.pos[1])))
    elif dx == -1 and dy == 0:
        self.body.append(cube((tail.pos[0]+1,tail.pos[1])))
    elif dx == 0 and dy == 1:
        self.body.append(cube((tail.pos[0],tail.pos[1]-1)))
    elif dx == 0 and dy == -1:
        self.body.append(cube((tail.pos[0],tail.pos[1]+1)))
    self.body[-1].dirnx = dx
    self.body[-1].dirny = dy
# NOTE(review): dead code — module-level duplicate of snake.draw with a bare
# `self` parameter; nothing in this file calls it.
def draw(self, surface):
    for i, c in enumerate(self.body):
        if i ==0:
            c.draw(surface, True)
        else:
            c.draw(surface)
def drawGrid(w, rows, surface):
    """Advance grid-line coordinates across the window; the actual
    pygame.draw.line calls stay disabled, so nothing is drawn."""
    cell = w // rows
    x = y = 0
    for _ in range(rows):
        x += cell
        y += cell
        # pygame.draw.line(surface, (000,000,000), (x,0),(x,w))
        # pygame.draw.line(surface, (255,000), (0,y),(w,y))
def redrawWindow(surface):
    """Clear the window, draw the snake, the snack and the grid, then update
    the display.

    Relies on the module globals rows, width, s and snack set up by main().
    """
    global rows, width, s, snack
    surface.fill((255,255,255))
    s.draw(surface)
    snack.draw(surface)
    drawGrid(width,rows, surface)
    pygame.display.update()
def randomSnack(rows, item):
    """Return a random (x, y) cell inside a rows x rows grid that is not
    occupied by any segment of *item*'s body.

    Warning: never terminates if the body covers the entire grid.
    """
    positions = item.body
    while True:
        x = random.randrange(rows)
        y = random.randrange(rows)
        # any() short-circuits instead of materializing a filtered list
        # as the original len(list(filter(...))) > 0 did.
        if not any(segment.pos == (x, y) for segment in positions):
            return (x, y)
def message_box(subject, content):
    """Show a topmost info dialog without leaving a stray Tk root window."""
    root = tk.Tk()
    root.attributes("-topmost", True)
    root.withdraw()
    messagebox.showinfo(subject, content)
    try:
        root.destroy()
    except tk.TclError:
        # The root may already have been destroyed by the window manager;
        # the original bare `except: pass` also swallowed real bugs.
        pass
class PathSolver():
    """Builds and solves a QUBO whose ground state encodes a path from the
    snake's head to the apple on the board graph.

    One pyqubo Binary variable is created per board edge.  The Hamiltonian is
    assembled from one-, two- and three-body terms using the module-level
    penalty constants CHI, LAMBDA, MU and GAMMA, compiled to QUBO form, and
    solved by brute force with dimod's ExactSolver (D-Wave sampler calls are
    kept commented for reference).  The decoded path is exposed as
    self.path_edges and the per-step moves as self.moves.
    """

    def __init__(self, graph, snake, apple_pos):
        # NOTE(review): one_body_terms reads snake.body[1], so the snake must
        # already have at least two segments when a PathSolver is built.
        self.head = snake.body[0].pos
        self.snake = snake
        self.apple = apple_pos
        self.graph = graph.copy()
        self.vars = self.create_vars()
        # Build, solve, and decode in sequence.
        self.get_qubo()
        self.run_dwave()
        self.get_shortest_path_graph()
        self.get_moves()

    def get_shortest_path_graph(self):
        """Collect into self.path_edges the edges whose variables are 1 in
        the lowest-energy sample."""
        graph = nx.grid_2d_graph(GRID_SIZE, GRID_SIZE, periodic =False)
        self.path_edges = []
        for edge in graph.edges():
            try:
                if self.sampleset.first.sample[f'{edge}']:
                    print(f'{edge} MADE IT')
                    self.path_edges.append(edge)
            except KeyError:
                # The variable was labelled under the reversed edge orientation.
                print(edge)

    def get_moves(self):
        """Translate self.path_edges into (dx, dy) steps starting at the head.

        NOTE(review): if no path edge leaves temp_head the inner loop makes
        no progress and the while never terminates — confirm the solver
        always yields a connected head-to-apple path.
        """
        temp_head = self.head
        i = 0
        self.moves = []
        while(i< len(self.path_edges)):
            for path_edge in self.path_edges:
                if path_edge[0] == temp_head:
                    self.moves.append((path_edge[1][0] - temp_head[0], path_edge[1][1] - temp_head[1]))
                    temp_head = path_edge[1]
                    i+=1
        print(self.moves)

    def run_dwave(self):
        """Solve the QUBO exactly (brute force) and keep the sampleset.

        The commented lines show how to submit to real D-Wave hardware instead.
        """
        print(len(self.qubo))
        # Dwavesolver = EmbeddingComposite(DWaveSampler())
        # sampleset = Dwavesolver.sample_qubo(self.qubo, num_reads=1000)
        self.sampleset = ExactSolver().sample_qubo(self.qubo)
        for k,v in self.sampleset.first.sample.items():
            print(f'Key: {k}\t\t\t\t Val: {v}')

    def get_qubo(self):
        """Compile the full Hamiltonian into QUBO form (self.qubo, self.offset)."""
        H= self.one_body_terms() + self.two_body_terms() + self.get_justin_trubo()
        model = H.compile()
        self.qubo, self.offset = model.to_qubo()

    def create_vars(self):
        """Create one pyqubo Binary per board edge, keyed by the edge tuple."""
        vars ={}
        print('===========MAPPINGS==============')
        for i, edge in enumerate(self.graph.edges.data()):
            e=(edge[0],edge[1])
            print(f'edge: {e} \t data: {edge[2]}')
            label = f'({edge[0]}, {edge[1]})'
            vars[e] = Binary(label)
        return vars

    def two_body_terms(self):
        """Quadratic terms: head/apple degree penalties, connectivity reward,
        and the quadratic part of the bulk (path-length) term."""
        # Head pair penalty: discourage selecting more than one edge at the
        # head.  (The original's if/else added the identical term in both
        # branches, so it is collapsed here — behavior unchanged.)
        H1=0
        for head_edge_1 in self.head_edges:
            for head_edge_2 in self.head_edges:
                H1 += LAMBDA*self.vars[head_edge_1]*self.vars[head_edge_2]
        # Apple pair penalty, same structure with GAMMA.
        H2=0
        for apple_edge_1 in self.apple_edges:
            for apple_edge_2 in self.apple_edges:
                H2 += GAMMA*self.vars[apple_edge_1]*self.vars[apple_edge_2]
        # Connectivity reward: pairs of selected edges sharing a node lower
        # the energy.
        H3=0
        for edge in self.graph.edges():
            e=self.get_valid_key(edge)
            for nodal_edge in self.graph.edges(e[0]):
                n = self.get_valid_key(nodal_edge)
                if e == n:
                    pass
                else:
                    H3 -= MU*self.vars[n]*self.vars[e]
            for nodal_edge in self.graph.edges(e[1]):
                n = self.get_valid_key(nodal_edge)
                if e == n:
                    pass
                else:
                    H3 -= MU*self.vars[n]*self.vars[e]
        # Quadratic part of the bulk term feeding the cubic expansion in
        # get_justin_trubo.
        H4=0
        for edge_1 in self.graph.edges():
            e1 = self.get_valid_key(edge_1)
            if e1 in self.apple_edges or e1 in self.head_edges:
                pass
            else:
                for edge_2 in self.graph.edges():
                    e2 = self.get_valid_key(edge_2)
                    # NOTE(review): `e2 in self.head_neck_edge` tests membership
                    # in the edge tuple (i.e. e2 equals one of its endpoints),
                    # unlike the `== self.head_neck_edge` comparison used in
                    # get_justin_trubo — confirm which is intended.
                    if e2 in self.apple_edges or e2 in self.head_edges or e2 in self.head_neck_edge:
                        pass
                    else:
                        H4 += -2*CHI*self.vars[e1]*self.vars[e2]
        H = H1+H2+H3+H4
        return H

    def get_justin_trubo(self):
        """Cubic bulk term: CHI * x_a * x_b * x_c over all triples of plain
        edges (edges not touching the head, the apple, or the head-neck edge)."""
        H=0
        for edge_1 in self.graph.edges():
            e1 = self.get_valid_key(edge_1)
            if e1 in self.apple_edges or e1 in self.head_edges or e1 == self.head_neck_edge:
                pass
            else:
                for edge_2 in self.graph.edges():
                    e2 = self.get_valid_key(edge_2)
                    if e2 in self.apple_edges or e2 in self.head_edges or e2 == self.head_neck_edge:
                        pass
                    else:
                        for edge_3 in self.graph.edges():
                            e3 = self.get_valid_key(edge_3)
                            if e3 in self.apple_edges or e3 in self.head_edges or e3 == self.head_neck_edge:
                                pass
                            else:
                                H += CHI*self.vars[e1]*self.vars[e2]*self.vars[e3]
        return H

    def get_node(self, tup):
        """Return the node shared by the two edges in *tup*, or None when
        the edges do not touch."""
        if tup[0][0] == tup[1][0]:
            return tup[0][0]
        if tup[0][0] == tup[1][1]:
            return tup[0][0]
        if tup[0][1] == tup[1][0]:
            return tup[0][1]
        if tup[0][1] == tup[1][1]:
            return tup[0][1]

    def one_body_terms(self):
        """Linear terms: edge weights, head/apple selection rewards, and the
        linear part of the bulk (path-length) term.

        Also populates self.head_edges, self.apple_edges and
        self.head_neck_edge used by the quadratic/cubic terms.
        """
        # Distance travelled: each selected edge contributes its weight.
        H1 = 0
        for edge in self.graph.edges.data():
            dict_key = self.get_valid_key((edge[0],edge[1]))
            H1 += edge[2]["weight"]*self.vars[dict_key]
        # Reward selecting an edge at the head — except the head-neck edge,
        # which would send the snake back into itself.
        H2=0
        self.head_edges = []
        head_neck_edge = self.get_valid_key((self.head, self.snake.body[1].pos))
        self.head_neck_edge = head_neck_edge
        for edge in self.graph.edges(self.head):
            head_edge = self.get_valid_key(edge)
            if not self.get_valid_key(head_edge) == self.get_valid_key(head_neck_edge):
                H2 -= 2*LAMBDA*self.vars[head_edge]
                self.head_edges.append(head_edge)
        # Reward selecting an edge at the apple.
        H3=0
        self.apple_edges=[]
        for edge in self.graph.edges(self.apple):
            apple_edge = self.get_valid_key(edge)
            H3 -= 2*GAMMA*self.vars[apple_edge]
            self.apple_edges.append(apple_edge)
        # Linear part of the three-body bulk term.
        H4=0
        for edge in self.graph.edges():
            e = self.get_valid_key(edge)
            # NOTE(review): the middle test uses the raw `edge`, not the
            # normalized `e`, unlike the neighbouring conditions — confirm
            # whether `e not in self.apple_edges` was intended.
            if e not in self.head_edges and edge not in self.apple_edges and not e==head_neck_edge:
                H4 += 4*CHI*self.vars[self.get_valid_key(edge)]
        H= H1 + H2 + H3 + H4
        return H

    def get_valid_key(self, key):
        """Return the orientation of *key* under which it appears in self.vars."""
        if key in self.vars:
            return key
        elif (key[1], key[0]) in self.vars:
            return (key[1], key[0])
        else:
            # Bug fix: the original did `raise "Awer"`, which in Python 3
            # raises TypeError (exceptions must derive from BaseException)
            # instead of a meaningful error.
            raise KeyError(key)
def main():
    """Set up the demo: build a 3x3 board, a two-segment snake and a snack,
    solve for a path with PathSolver, then animate the resulting moves.

    Exposes width, rows, s (the snake) and snack as module globals because
    redrawWindow reads them.
    """
    global width, rows, s, snack, sampleset
    width = 100
    rows = 3
    win = pygame.display.set_mode((width, width))
    s = snake((0,255,0), (1,1)) #snake starts at rtupple
    #snack = cube(randomSnack(rows, s), color=(255,0,0))
    snack = cube(randomSnack(3, s), color=(255,0,0))
    flag = True
    s.addCube()
    s.snake_to_graph()
    # Demonstration of graph construction on a throwaway copy; H is unused.
    H=s.get_snake_unconnected_graph()
    H.add_node((10,10))
    H.add_node((11,10))
    H.add_node((11,11))
    H.add_edge((10,10),(11,10))
    H.add_edge((11,10),(11,11))
    print(f'HYPERS \t LAMBDA: {LAMBDA}\t CHI: {CHI}\t MU: {MU}\t GAMMA: {GAMMA}')
    # Building the PathSolver also solves the QUBO and computes ps.moves.
    ps=PathSolver(s.snake_to_graph(), s, snack.pos)
    redrawWindow(win)
    pygame.time.delay(10)
    clock = pygame.time.Clock()
    # Replay the solved moves with a short delay between frames.
    for move in ps.moves:
        pygame.time.delay(10)
        print(move)
        s.dirnx=move[0]
        s.dirny=move[1]
        s.move()
        redrawWindow(win)
    # Original interactive tutorial game loop, kept for reference:
    # while flag:
    #     pygame.time.delay(50)
    #     clock.tick(10)
    #     s.move()
    #     if s.body[0].pos == snack.pos:
    #         s.addCube()
    #         snack = cube(randomSnack(rows, s), color=(255,0,0))
    #         for block in s.body:
    #             print(block.pos)
    #     for x in range(len(s.body)):
    #         if s.body[x].pos in list(map(lambda z:z.pos,s.body[x+1:])):
    #             print('Score: ', len(s.body))
    #             message_box('You Lost!', 'Play again...')
    #             s.reset((10,10))
    #             break
    #     redrawWindow(win)
    #     pass
# Entry point: runs the demo immediately (no __main__ guard, so this also
# executes on import).
main()
|
import unittest
from escherauth.escherauth import EscherRequest
class EscherRequestTest(unittest.TestCase):
    """Unit tests for EscherRequest's parsing of request dictionaries."""

    def test_object_basic(self):
        """A GET with query string and headers exposes each part via accessors."""
        request = EscherRequest({
            'method': 'GET',
            'host': 'host.foo.com',
            'uri': '/?foo=bar',
            'headers': [
                ('Date', 'Mon, 09 Sep 2011 23:36:00 GMT'),
                ('Host', 'host.foo.com'),
            ],
        })
        self.assertEqual(request.method(), 'GET')
        self.assertEqual(request.host(), 'host.foo.com')
        self.assertEqual(request.path(), '/')
        self.assertListEqual(request.query_parts(), [
            ('foo', 'bar'),
        ])
        self.assertListEqual(request.headers(), [
            ('Date', 'Mon, 09 Sep 2011 23:36:00 GMT'),
            ('Host', 'host.foo.com'),
        ])
        self.assertEqual(request.body(), '')  # there was no body specified

    def test_object_complex(self):
        """A POST with multiple query parameters and a body is parsed fully."""
        request = EscherRequest({
            'method': 'POST',
            'host': 'host.foo.com',
            'uri': '/example/path/?foo=bar&abc=cba',
            'headers': [],
            'body': 'HELLO WORLD!',
        })
        self.assertEqual(request.method(), 'POST')
        self.assertEqual(request.host(), 'host.foo.com')
        self.assertEqual(request.path(), '/example/path/')
        self.assertListEqual(request.query_parts(), [
            ('foo', 'bar'),
            ('abc', 'cba'),
        ])
        self.assertListEqual(request.headers(), [])
        self.assertEqual(request.body(), 'HELLO WORLD!')

    def test_object_add_header(self):
        """add_header appends to the (initially empty) header list."""
        request = EscherRequest({
            'method': 'POST',
            'host': 'host.foo.com',
            'uri': '/example/path/?foo=bar&abc=cba',
            'headers': [],
            'body': 'HELLO WORLD!',
        })
        request.add_header('Foo', 'Bar')
        self.assertListEqual(request.headers(), [('Foo', 'Bar')])
|
from os.path import dirname, abspath, join
# Absolute path to the project root (one level above this file's directory).
PROJECT_PATH = join(dirname(abspath(__file__)), '..')
# Bundled resource files live under sinling/resources in the project root.
RESOURCE_PATH = join(PROJECT_PATH, 'sinling', 'resources')
|
# Copyright (c) 2020 fortiss GmbH
#
# Authors: Patrick Hart, Julian Bernhard, Klemens Esterle, and
# Tobias Kessler
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import unittest
import numpy as np
import time
from bark.runtime.commons.parameters import ParameterServer
from bark_ml.environments.blueprints import ContinuousHighwayBlueprint, \
ContinuousSingleLaneBlueprint
from bark_ml.environments.single_agent_runtime import SingleAgentRuntime
from bark_ml.evaluators.evaluator_configs import GoalReached
from bark_ml.evaluators.general_evaluator import GeneralEvaluator
from bark_ml.core.evaluators import GoalReachedEvaluator
class PyEvaluatorTests(unittest.TestCase):
    """Smoke tests timing the Python and C++ evaluators on a fresh scenario."""

    def test_goal_reached_evaluator(self):
        """Runs the Python GoalReached evaluator once and prints its timing."""
        params = ParameterServer()
        bp = ContinuousHighwayBlueprint(params)
        env = SingleAgentRuntime(blueprint=bp, render=True)
        env.reset()
        world = env._world
        eval_id = env._scenario._eval_agent_ids[0]
        observed_world = world.Observe([eval_id])[0]
        evaluator = GoalReached(params)
        action = np.array([0., 0.], dtype=np.float32)
        start_time = time.time()
        print(evaluator.Evaluate(observed_world, action))
        end_time = time.time()
        print(f"It took {end_time-start_time} seconds.")

    def test_goal_reached_cpp_evaluator(self):
        """Runs the C++ GoalReachedEvaluator once and prints its timing."""
        params = ParameterServer()
        bp = ContinuousHighwayBlueprint(params)
        env = SingleAgentRuntime(blueprint=bp, render=True)
        env.reset()
        world = env._world
        eval_id = env._scenario._eval_agent_ids[0]
        observed_world = world.Observe([eval_id])[0]
        evaluator = GoalReachedEvaluator(params)
        action = np.array([0., 0.], dtype=np.float32)
        start_time = time.time()
        print(evaluator.Evaluate(observed_world, action))
        end_time = time.time()
        print(f"The goal reached took {end_time-start_time} seconds.")

    def test_reward_shaping_evaluator(self):
        """Timing test labelled 'reward shaping'.

        NOTE(review): this instantiates GoalReachedEvaluator, not a
        reward-shaping evaluator — looks like a copy-paste of the previous
        test; confirm which evaluator was intended.
        """
        params = ParameterServer()
        bp = ContinuousHighwayBlueprint(params)
        env = SingleAgentRuntime(blueprint=bp, render=True)
        env.reset()
        world = env._world
        eval_id = env._scenario._eval_agent_ids[0]
        observed_world = world.Observe([eval_id])[0]
        evaluator = GoalReachedEvaluator(params)
        action = np.array([0., 0.], dtype=np.float32)
        start_time = time.time()
        print(evaluator.Evaluate(observed_world, action))
        end_time = time.time()
        print(f"The reward shaping evaluator took {end_time-start_time} seconds.")

    def test_general_evaluator(self):
        """Steps a single-lane environment with the GeneralEvaluator attached."""
        params = ParameterServer()
        bp = ContinuousSingleLaneBlueprint(params)
        env = SingleAgentRuntime(blueprint=bp, render=True)
        evaluator = GeneralEvaluator(params)
        env._evaluator = evaluator
        env.reset()
        for _ in range(0, 4):
            state, terminal, reward, info = env.step(np.array([0., 0.]))
            print(terminal, reward)
if __name__ == '__main__':
unittest.main() |
def inputli():
    """Read one line of whitespace-separated integers from stdin."""
    return [int(tok) for tok in input().split()]
def inputls():
    """Read one line from stdin and split it on whitespace."""
    tokens = input().split()
    return tokens
def inputlf():
    """Read one line of whitespace-separated floats from stdin."""
    return [float(tok) for tok in input().split()]
def lcs(a, b):
    """Return the length of the longest common subsequence of *a* and *b*.

    Standard O(len(a) * len(b)) dynamic programming; dp[i][j] is the LCS
    length of a[:i] and b[:j].
    """
    dp = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            if a[i - 1] == b[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    # dp[len(a)][len(b)] is already the global maximum for the LCS table;
    # the original tracked a separate `maxi` redundantly.
    return dp[len(a)][len(b)]
def dist(a, b):
    """Return the Levenshtein (edit) distance between *a* and *b*.

    Unit costs for insertion, deletion and substitution; O(len(a) * len(b))
    dynamic programming where dp[i][j] is the distance between a[:i] and b[:j].
    """
    dp = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    # Transforming a prefix into the empty string costs its length.
    for i in range(1, len(a) + 1):
        dp[i][0] = i
    for j in range(1, len(b) + 1):
        dp[0][j] = j
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            cost = 0 if a[i - 1] == b[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1,        # deletion
                           dp[i][j - 1] + 1,        # insertion
                           dp[i - 1][j - 1] + cost) # substitution/match
    # Removed the dead `mini` accumulator from the original; the answer is
    # the bottom-right cell.
    return dp[len(a)][len(b)]
# Read the two strings from stdin and print their edit distance.
a = input()
b = input()
#cnt = lcs(a,b)
#print(cnt,len(a)-cnt)
#print(len(a)-cnt)
ans = dist(a,b)
print(ans)
|
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
def get_estimator():
    """Build the Titanic estimator: one-hot encode the categorical columns,
    constant-impute the numerical ones, then fit a logistic regression."""
    preprocessor = make_column_transformer(
        (OneHotEncoder(handle_unknown='ignore'),
         ['Sex', 'Pclass', 'Embarked']),
        (SimpleImputer(strategy='constant', fill_value=-1),
         ['Age', 'SibSp', 'Parch', 'Fare']),
    )
    return Pipeline([
        ('transformer', preprocessor),
        ('classifier', LogisticRegression()),
    ])
|
import time, random, sys
import urllib, urllib2
import mechanize
from bs4 import BeautifulSoup
"""TODO: do method for getting neopoints
do auto food club betting/collecting
refactor/clean up code
"""
b = mechanize.Browser()
def init():
    """Set a desktop-Chrome User-Agent so requests look like a real browser."""
    b.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36')]
def login():
    """Log in using sys.argv[1]/sys.argv[2] as username/password.

    Verifies the session by opening the inventory page, which only shows
    its normal title when authenticated.
    """
    print "Logging in"
    b.open("http://www.neopets.com/login/")
    username=sys.argv[1]
    password=sys.argv[2]
    b.select_form(nr=0)  # the login form is the first form on the page
    b.form["username"] = username
    b.form["password"] = password
    b.submit()
    b.open('http://www.neopets.com/inventory.phtml')
    # Fails loudly when the login did not take effect.
    assert b.title() == 'Neopets - Inventory'
def randomize():
print "Calling randomize to avoid detection"
list = ['http://www.neopets.com/explore.phtml',
'http://www.neopets.com/island/tradingpost.phtml',
'http://www.neopets.com/games/',
'http://www.neopets.com/objects.phtml',
'http://www.neopets.com/market_bazaar.phtml',
'http://www.neopets.com/market.phtml?type=wizard',
'http://www.neopets.com/battledome/battledome.phtml',
'http://www.neopets.com/market.phtml?type=your']
time.sleep(random.uniform(0,3))
b.open(list[random.randint(0,len(list)-1)])
time.sleep(random.uniform(0,1))
def buyStock():
    """Buy 1000 shares of the first bargain stock priced at 15 NP or more.

    Scrapes the bargain-stock listing, takes the first qualifying row, then
    submits the buy form with that row's ticker symbol.
    """
    print "Buying stocks"
    priceIndex = 5   # table column holding the share price
    tickerIndex = 1  # table column holding the ticker symbol
    numToBuy = 1000
    b.open("http://www.neopets.com/stockmarket.phtml?type=list&bargain=true")
    soup = BeautifulSoup(b.response().read())
    table = soup.find('table',align='center')
    rows = table.find_all('tr')
    for row in rows:
        price = row.find_all('td')[priceIndex].get_text()
        ticker = row.find_all('td')[tickerIndex].get_text()
        #print ticker, 'is trading at ', price
        # Skip header rows (non-numeric price) and anything under 15 NP;
        # `ticker`/`price` from the first match are used after the loop.
        if not price.isdigit() or int(price) < 15:
            continue
        else:
            break
    b.open("http://www.neopets.com/stockmarket.phtml?type=buy")
    b.select_form(nr=1)
    b.form["ticker_symbol"] = ticker
    b.form["amount_shares"] = str(numToBuy)
    respose = b.submit()  # NOTE(review): typo of 'response'; the result is never inspected
    print "Bought",numToBuy,"shares of",ticker,"for",price,"each,total cost:",int(price)*numToBuy
    #currently does no verification, maybe will do soon
def oneclickdailies():
    """Run every free daily, interleaved with randomize() calls to look human."""
    print "Running dailies"
    snowager()
    #adventcalendar()
    buriedtreasure()
    randomize()
    tombola()
    randomize()
    shrine()
    randomize()
    fruitmachine()
    randomize()
    fishingvortex()
    randomize()
    omelette()
    randomize()
    jelly()
    randomize()
    bankinterest()
    foodclubodds()
    monthlyfreebies()
def buriedtreasure():
    """Play Buried Treasure once at a random spot on the 475x475 grid."""
    print "Running buried treasure"
    x = random.randint(1,475)
    y = random.randint(1,475)
    # Coordinates are passed in the query string, not via a form.
    url = "http://www.neopets.com/pirates/buriedtreasure/buriedtreasure.phtml?"+str(x)+","+str(y)
    print "Coordinates are",x, "and",y
    b.open(url)
    if "won" in b.response().read():
        print "WINNER IN BURIED TREASURE"
    else:
        print "Not a winner in buried treasure"
def tombola():
    """Draw the free daily Tombola ticket and report the outcome."""
    print "Running Tombola"
    b.open("http://www.neopets.com/island/tombola.phtml")
    b.select_form(nr=1)  # the ticket-draw form (second form on the page)
    b.submit()
    if "winner!" in b.response().read():
        print "WINNING TICKET IN TOMBOLA"
    else:
        print "Not a winning ticket in Tombola"
def shrine():
print "Running Shrine"
b.open("http://www.neopets.com/desert/shrine.phtml")
b.select_form(nr=1)
b.submit()
if ("Nothing" or "nothing") in b.response().read():
print "Nothing happens for Shrine"
else:
print "WINNER IN SHRINE"
def fruitmachine():
    """Spin the Fruit Machine daily."""
    print "Running Fruitmachine"
    #not sure if this works, since it's flash based
    b.open("http://www.neopets.com/desert/fruit/index.phtml")
    b.select_form(nr=1)
    b.submit()
def fishingvortex():
    """Cast a line at the Underwater Fishing vortex."""
    print "Running fishing vortex"
    b.open("http://www.neopets.com/water/fishing.phtml")
    b.select_form(nr=1)
    b.submit()
def omelette():
    """Grab the free daily omelette slice; tolerates a missing form."""
    print "Running Omelette"
    #if the omelette or jelly doesn't exist, then it just goes back to the map
    #because that is the second form
    b.open("http://www.neopets.com/prehistoric/omelette.phtml")
    try:
        b.select_form(nr=1)
        b.submit()
    except Exception:
        print "Some error has occurred!"
def jelly():
    """Grab the free daily jelly; tolerates a missing form."""
    print "Running jelly"
    b.open("http://www.neopets.com/jelly/jelly.phtml")
    try:
        b.select_form(nr=1)
        b.submit()
    except Exception:
        print "Some error has occurred!"
def monthlyfreebies():
    """Open the freebies page (only pays out once per month)."""
    #mothly only
    print "Running Monthly freebies"
    b.open("http://www.neopets.com/freebies/index.phtml")
def bankinterest():
    """Collect daily bank interest by submitting the bank page form."""
    print "Running Bank interest"
    b.open("http://www.neopets.com/bank.phtml")
    try:
        b.select_form(nr=3)  # assumed to be the interest-collection form — TODO confirm
        b.submit()
    except Exception:
        print "Some error has occurred!"
def snowager():
    """Sneak into the Snowager's cave for loot."""
    print "Running Snowager"
    b.open("http://www.neopets.com/winter/snowager.phtml")
    try:
        b.select_form(nr=0)
        b.submit()
    except Exception:
        print "Some error has occurred!"
def adventcalendar():
    """Open today's Advent Calendar door (December only; disabled in dailies)."""
    print "Running AdventCalendar"
    b.open("http://www.neopets.com/winter/adventcalendar.phtml")
    try:
        b.select_form(nr=1)
        b.submit()
    except Exception:
        print "Some error has occurred!"
def getpetstats(name):
    """Scrape the training-school status page for the stats of pet *name*.

    Returns a dict with string values under keys 'Lvl', 'Str', 'Def',
    'Mov', 'Hp'. The stats row is assumed to immediately follow the row
    containing the pet's name.
    """
    #pretty bad implementation, but it does the job, may cause issues later on
    b.open("http://www.neopets.com/island/training.phtml?type=status")
    soup = BeautifulSoup(b.response().read())
    table = soup.find('table',align='center')
    rows = table.find_all('tr')
    petexists = False
    stats = {}
    for row in rows:
        if petexists:
            # NOTE(review): `list` shadows the builtin of the same name.
            list = row.find('td').get_text().split(":")
            for i in list:
                # Assumes each stat fragment has a digit at index 1 and the
                # value occupies all but the last 4 characters — TODO confirm
                # against the live markup.
                if i[1].isdigit():
                    if len(stats) == 0:
                        stats['Lvl'] = i[:-4].strip()
                    elif len(stats) == 1:
                        stats['Str'] = i[:-4].strip()
                    elif len(stats) == 2:
                        stats['Def'] = i[:-4].strip()
                    elif len(stats) == 3:
                        stats['Mov'] = i[:-4].strip()
                    elif len(stats) == 4:
                        # Hp is displayed as current/max; keep the max.
                        stats['Hp'] = i.split("/")[1].strip()
            break
        if name in row.get_text():
            petexists = True
    return stats
def enrolltrainingschool(petname, coursetype):
    """Enroll *petname* in a course of *coursetype* at the training school."""
    b.open("http://www.neopets.com/island/training.phtml?type=courses")
    b.select_form(nr=1)
    # Both controls are HTML <select> fields, hence the single-item lists.
    b.form['course_type'] = [coursetype]
    b.form['pet_name'] = [petname]
    b.submit()
def paytrainingschool(petname):
    """Pay for *petname*'s pending course via the direct process URL."""
    print "Paying training school"
    #assumes we have the req'd codestones
    #does nothing if in training
    url = "http://www.neopets.com/island/process_training.phtml?type=pay&pet_name=" + petname
    b.open(url)
def completetraining():
    """Complete the currently finished course, if one is waiting."""
    print "Completing training course"
    b.open("http://www.neopets.com/island/training.phtml?type=status")
    try:
        b.select_form(nr=1)
        b.submit()
    except Exception:
        print "Some error has occurred!"
def trainpet(petname):
    """Finish any completed course, then pick and start the next one.

    Course choice: train Level when half of any stat has reached the
    level; otherwise train whichever of Str/Def/Hp is currently lowest.
    """
    #currently we are indifferent to which stat we train, as long as it's str, def, or hp, lvl has lowest priority
    #we need to first determine the stat to train
    #currently, we'll just pick any that is less than half our level
    #complete our training first, this may fail if we train more than one pet at a time
    stats = getpetstats(petname)
    print "Before completing training", petname, "has the following stats:"
    print stats
    completetraining()
    stats = getpetstats(petname)
    print "After completing training", petname, "has the following stats:"
    print stats
    level = int(stats['Lvl'])
    strength = int(stats['Str'])
    defense = int(stats['Def'])
    movement = int(stats['Mov'])
    hp = int(stats['Hp'])
    if strength / 2 >= level or defense / 2 >= level or hp / 2 >= level or movement / 2 >= level:
##        if (strength / 2 or defense / 2 or hp /2 or movement /2) >= level:
        coursetype = "Level"
    elif strength == min(strength,defense,hp):
        coursetype = "Strength"
    elif defense == min(strength,defense,hp):
        coursetype = "Defence"
    elif hp == min(strength,defense,hp):
        coursetype = "Endurance"
    print 'Enrolling', petname, 'in', coursetype
    enrolltrainingschool(petname, coursetype)
    paytrainingschool(petname)
    #wait, then complete course
def zappet(petname):
    """Zap *petname* with the lab ray and print before/after stats."""
    print "Zapping pet:", petname
    stats = getpetstats(petname)
    print "Before zapping pet", petname, "has the following stats:"
    print stats
    b.open("http://www.neopets.com/lab2.phtml")
    try:
        b.select_form(nr=1)
        # 'chosen' is the radio group selecting which pet gets zapped.
        b.form.set_value([petname], name='chosen')
        b.submit()
        stats = getpetstats(petname)
        print "After zapping pet", petname, "has the following stats:"
        print stats
        print "Done zapping pet:", petname
    except Exception:
        print "Some error has occurred!"
def doubleornothing():
    """Open the Double or Nothing page (no bet is actually placed)."""
    b.open('http://www.neopets.com/medieval/doubleornothing.phtml')
def foodclubodds():
    """Scrape today's Food Club odds and print cumulative odds per list.

    The odds are embedded in <script> tags inside the bet form as
    'key=value' lines; they are collected into `dict` keyed by
    '<list><contestant>' and then summed as 1/odds per list.
    """
    print "Calculating today's food club odds"
    #calculate the food club oddsd to see if there are any arbitrage situations
    b.open("http://www.neopets.com/pirates/foodclub.phtml?type=bet")
    soup = BeautifulSoup(b.response().read())
    betform = soup.find('form', action='process_foodclub.phtml')
    scripttags = betform.find_all("script")
    # NOTE(review): `dict` shadows the builtin of the same name.
    dict = {}
    listcounter = 0
    paircounter = 0
    #list counter is the index of the list, there are 5 of them
    #pair counter is the index of the contestants, there are 4 of them per list
    #as it curerntly stands, 50 is the max bet amount
    for tag in scripttags:
        for pair in tag.get_text().strip().split('\n'):
            cell = pair.split('=')
            index = str(listcounter) + str(paircounter)
            # The final (6th) block has no trailing character to strip.
            if listcounter == 5:
                dict[index] = (cell[1]).strip()
            else:
                dict[index] = (cell[1])[:-1].strip()
            paircounter+=1
        listcounter+=1
        paircounter=0
    oddsum = 0.0
    for i in range(0,6):
        if i == 5:
            # Key '50' carries the maximum bet amount, not odds.
            print 'Max bet amount is:', dict['50']
            break
        for j in range(0,4):
            oddsum+= 1.0/int(dict[str(i)+str(j)])
        print 'List %s has cumulative odds: %s' %(i,oddsum)
        oddsum=0.0
    print 'Done processing food club odds'
#should save the previous url as this will go to the inventory
def getNPs():
    """Return the current Neopoint balance as an int (commas stripped)."""
    b.open('http://www.neopets.com/inventory.phtml')
    soup = BeautifulSoup(b.response().read())
    # print soup.find(id="npanchor").string
    return int(soup.find(id="npanchor").string.replace(",",""))
def main():
print 'Welcome to the Neopets Runner!'
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'Argument List:', str(sys.argv)
if len(sys.argv) == 4 or len(sys.argv) == 5:
init()
login()
print "You have", getNPs(), "NPs"
if len(sys.argv) == 4:
#a for all, d for dailies only, b for buy stock
#format python neopetrunner.py [a|d|b] or any combo
if 'a' in sys.argv[3]:
oneclickdailies()
buyStock()
foodclubodds()
else:
if 'd' in sys.argv[3]:
oneclickdailies()
if 'b' in sys.argv[3]:
buyStock()
if 'fc' in sys.argv[3]:
foodclubodds()
else:
print "Please use proper parameters!"
#options at the moment, only trainpet, format: python neopetrunner.py train <petName>
elif len(sys.argv) == 5:
assert sys.argv[3] == 'train' or 'all' or 'lab'
# assert sys.argv[4] == 'X_Avon_X' #let's stick with this for now
if sys.argv[3].lower() == 'train':
trainpet(sys.argv[4])
elif sys.argv[3].lower() == 'lab':
zappet(sys.argv[4])
elif sys.argv[3].lower() == 'all':
oneclickdailies()
buyStock()
zappet(sys.argv[4])
trainpet(sys.argv[4])
print "You have", getNPs(), "NPs"
else:
print "Please use proper parameters!"
print 'Done'
def test():
    """Manual smoke test: log in and print the current NP balance."""
    init()
    login()
    b.open('http://www.neopets.com/inventory.phtml')
    # print b.response().read()
    print "you have", getNPs(), "NPs"
if __name__ == '__main__':
    main()
    # test()
|
"""
This file contains a class that represents a game item.
Author: Alejandro Mujica (aledrums@gmail.com)
Date: 07/22/20
"""
from src.game_object import GameObject
class GameItem(GameObject):
    """A game object that can optionally be collided with and/or consumed.

    Optional callbacks receive the item as their first argument:
        on_collide(item, another) -- fired when a collidable item is hit.
        on_consume(item, consumer) -- fired when a consumable item is eaten.
    """

    def __init__(self, collidable, consumable,
                 on_collide=None, on_consume=None,
                 *args, **kwargs):
        super(GameItem, self).__init__(*args, **kwargs)
        self.collidable = collidable
        self.consumable = consumable
        self._on_collide = on_collide
        self._on_consume = on_consume
        # A consumed item leaves play until respawn() is called.
        self.in_play = True

    def respawn(self, x=None, y=None):
        """Put the item back in play, optionally repositioning it."""
        if x is not None:
            self.x = x
        if y is not None:
            self.y = y
        self.in_play = True

    def on_collide(self, another):
        """Run the collision callback; None when not collidable or unset."""
        if self.collidable and self._on_collide is not None:
            return self._on_collide(self, another)
        return None

    def on_consume(self, consumer):
        """Consume the item: remove it from play and run its callback."""
        if not self.consumable:
            return None
        self.in_play = False
        if self._on_consume is None:
            return None
        return self._on_consume(self, consumer)
|
#-----------------------------------------------------------------------------
#
# Paper: When Crowdsourcing Fails: A Study of Expertise on Crowdsourced
# Design Evaluation
# Author: Alex Burnap - aburnap@umich.edu
# Date: October 10, 2014
# License: Apache v2
# Description: Class definitions for crowdsourced evaluation process
#
#-----------------------------------------------------------------------------
import numpy as np
import scipy.stats as stats
from sklearn.mixture import GMM
#---------------------------------------------------------------------------------------------
class Participant(object):
    """A crowd member characterised (for now) solely by true ability.

    A bias attribute is planned but not yet implemented.
    """

    def __init__(self, true_ability=None):
        # Scalar ability; None when not yet assigned.
        self.true_ability = true_ability
#---------------------------------------------------------------------------------------------
class Design(object):
    """A design with a true criteria score and a true evaluation difficulty."""

    def __init__(self, true_criteria_score, true_evaluation_difficulty):
        self.true_criteria_score = true_criteria_score
        self.true_evaluation_difficulty = true_evaluation_difficulty
#---------------------------------------------------------------------------------------------
class Environment(object):
    """
    This Environment contains participants, designs, and evaluations.
    Takes arguments num_participants, num_designs, crowd_composition.
    NOTE(review): Python 2 code (xrange) written against the deprecated
    sklearn.mixture.GMM API.
    """
    def __init__(self, cp, dp, ep):
        # cp, dp, ep: crowd, design, and evaluation parameter dicts.
        self.cp = cp
        self.dp = dp
        self.ep = ep
        self.num_participants = cp['num_participants']
        self.num_designs = dp['num_designs']
        self.num_subcriteria = dp['num_subcriteria']
        self.participants = self.create_participants()
        self.designs = self.create_designs()
        # evaluations_matrix[p, d]: participant p's averaged evaluation of design d.
        self.evaluations_matrix = np.zeros([cp['num_participants'], dp['num_designs']])
        # temp_3D_array[q, p, d]: evaluation from query q (0 where not queried).
        self.temp_3D_array = np.zeros([self.ep['num_queries_per_participant'],self.num_participants, self.num_designs])
    def create_participants(self):
        """Sample true abilities and wrap each in a Participant."""
        ability_vector = self.create_true_participant_abilities()
        return [Participant(ability_vector[i]) for i in xrange(self.num_participants)]
    def create_designs(self):
        """Sample true scores/difficulties and wrap each pair in a Design."""
        criteria_score_vector = self.create_true_design_criteria_scores()
        evaluation_difficulty_vector = self.create_true_design_evaluation_difficulties()
        return [Design(criteria_score_vector[i], evaluation_difficulty_vector[i]) for i in xrange(self.num_designs)]
    #---------------------------------------------------------------------------------------------
    def create_true_participant_abilities(self):
        """
        This function returns a vector of (currently scalar) abilities given 1 of 3 types of crowd makeup.
        Arguments are a string crowd_makeup = 'mixture' or 'homogeneous' or 'random'.
        If 'homogeneous', then a 2-tuple of the mean ability and its variance.
        If 'random', no additional tuple is required.
        If 'mixture', then a 6-tuple of the low and high ability value means, their variances, their mixing coefficients is required.
        NOTE(review): implicitly returns None for any other crowd_makeup value.
        """
        if self.cp['crowd_makeup'] == 'homogeneous':
            # Truncated normal keeps abilities in [0, 1] without piling mass on the bounds.
#            return np.random.normal(self.cp['homo_mean'], self.cp['homo_std_dev'], self.cp['num_participants']).clip(0,1)
            return stats.truncnorm(-self.cp['homo_mean']/self.cp['homo_std_dev'], (1-self.cp['homo_mean'])/self.cp['homo_std_dev'], self.cp['homo_mean'], self.cp['homo_std_dev']).rvs(size=self.cp['num_participants'])
        elif self.cp['crowd_makeup'] == 'random':
            return np.random.uniform(0,1,self.cp['num_participants'])
        elif self.cp['crowd_makeup'] == 'mixture':
            # Two-component Gaussian mixture; samples are clipped to [0, 1].
            gmm = GMM(2, n_iter=1)
            gmm.means_ = np.array([ [self.cp['mixture_means'][0]], [self.cp['mixture_means'][1]]])
            gmm.covars_ = np.array([ [self.cp['mixture_std_dev'][0]], [self.cp['mixture_std_dev'][1]] ]) ** 2
            gmm.weights_ = np.array([self.cp['mixture_coefficients'][0], self.cp['mixture_coefficients'][1]])
            packed = gmm.sample(self.cp['num_participants']).clip(0,1)
            return [packed[i][0] for i in xrange(len(packed))]
    #---------------------------------------------------------------------------------------------
    def create_true_design_criteria_scores(self):
        """Return true criteria scores; uniform in [0, 1) for 'random' makeup, else zeros."""
        true_criteria_scores=np.zeros(self.num_designs)
        if self.dp['true_design_criteria_score_makeup'] == 'random':
            for i in xrange(self.num_designs):
                true_criteria_scores[i] = np.random.random()
        return true_criteria_scores
    def create_true_design_evaluation_difficulties(self):
        """
        This function returns a vector of (currently scalar) design evaluation
        difficulties; constant for 'same' makeup, zeros otherwise.
        """
        true_evaluation_difficulties=np.zeros(self.num_designs)
        if self.dp['true_design_evaluation_difficulty_makeup'] == 'same':
            for i in xrange(self.num_designs):
                true_evaluation_difficulties[i] = self.dp['true_design_evaluation_difficulty_score']
        return true_evaluation_difficulties
    #---------------------------------------------------------------------------------------------
    def run_evaluations(self):
        """
        This function runs the number of queries per participant on each participant in entire crowd.
        Each query has num_designs_per_query shown to the participant.
        We do not model the effect of information overload with too many designs, or too few either.
        It is an incredibly inefficient function right now, optimize in future if it slows things down too much.
        """
        temp_3D_array = np.zeros([self.ep['num_queries_per_participant'],self.num_participants, self.num_designs])
        for p_ind, participant in enumerate(self.participants):
            for q_ind in xrange(self.ep['num_queries_per_participant']):
                for d_ind in self.random_indices():
                    temp_3D_array[q_ind,p_ind,d_ind] = self.evaluate(participant, self.designs[d_ind])
        for i in xrange(self.num_participants):
            for j in xrange(self.num_designs):
                # Average over the queries that actually scored design j;
                # max(1, ...) guards against division by zero when none did.
                self.evaluations_matrix[i,j] = temp_3D_array[:,i,j].sum()/max(1,len(*np.nonzero(temp_3D_array[:,i,j])))
        self.temp_3D_array = temp_3D_array
        """
        [a[:,i].sum() for i in range(10)]
        for i in range(2):
        for j in range(3):
        print a[:,i,j].sum()
        """
    def evaluate(self, participant, design):
        """
        Function returns a single evaluation (on the 1-5 scale) given a
        participant and a design.
        """
        t = design.true_criteria_score
        a = participant.true_ability
        d = design.true_evaluation_difficulty
        b = self.ep['interface_difficulty']
        s = self.ep['logistic_scale']
        # Evaluation noise grows as difficulty exceeds ability (logistic link).
        error_sigma = 1.0 - stats.logistic.cdf(a-d, b, s)
        # Truncated-normal sample around the true score on [0, 1], mapped to 1-5.
        evaluation_0_1 = stats.truncnorm(-t/error_sigma, (1-t)/error_sigma, t, error_sigma).rvs(size=1)[0]
        evaluation_1_5 = evaluation_0_1 * 4 + 1
        return evaluation_1_5
    def make_error_data(self, participant):
        """
        This function is only for diagnostics purposes: fixed true score .7
        and difficulty .5, returning (error_sigma, 1000 samples).
        """
        t = .7
        a = participant.true_ability
        d = .5
        b = self.ep['interface_difficulty']
        s = self.ep['logistic_scale']
        error_sigma = 1.0 - stats.logistic.cdf(a-.5, b, s)
        return error_sigma, stats.truncnorm(-t/error_sigma, (1-t)/error_sigma, t, error_sigma).rvs(size=1000)
    def random_indices(self):
        """
        Returns a vector of random design indices without replacement.
        """
        random_indices = []
        while len(random_indices) < self.ep['num_designs_per_query']:
            random_index = np.random.random_integers(0, self.num_designs - 1)
            if random_index not in random_indices:
                random_indices.append(random_index)
        return random_indices
|
#Assignment 14
#
#Total Sales
#
#Design a program that asks the user to enter a store's sales for each
#day of the week and displays the total sales for the week.
#Use a list to store the daily sales values.
|
'''
Function:
快递查询系统
Author:
Car
微信公众号:
Car的皮皮
'''
import os
import time
import pickle
import random
import requests
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5 import QtWidgets, QtGui
'''快递查询系统'''
class InquiryExpress(QWidget):
    """Package-tracking window: looks up a tracking number via kuaidi100.

    Bug fixes:
    - getExpressInfo referenced the undefined name `company_name`
      (NameError whenever the detected company had no 'name' field); it
      now translates the detected pinyin company code instead.
    - removed a duplicate `infos = []` initialisation.
    """
    tool_name = '快递查询系统'

    def __init__(self, parent=None, title='快递查询系统 —— Car的皮皮', **kwargs):
        super(InquiryExpress, self).__init__(parent)
        rootdir = os.path.split(os.path.abspath(__file__))[0]
        # Mapping: pinyin company code -> Chinese company name.
        self.companies = pickle.load(
            open(os.path.join(rootdir, 'resources/companies.pkl'), 'rb')
        )
        self.setWindowTitle(title)
        self.setWindowIcon(QIcon(os.path.join(rootdir, 'resources/icon.jpg')))
        # Widgets: tracking-number input, query button, result display.
        self.label1 = QLabel('快递单号:')
        self.line_edit = QLineEdit()
        self.label2 = QLabel('查询结果:')
        self.text = QTextEdit()
        self.button = QPushButton()
        self.button.setText('查询')
        self.grid = QGridLayout()
        self.grid.addWidget(self.label1, 1, 0)
        self.grid.addWidget(self.line_edit, 1, 1, 1, 39)
        self.grid.addWidget(self.button, 1, 40)
        self.grid.addWidget(self.label2, 2, 0)
        self.grid.addWidget(self.text, 2, 1, 1, 40)
        self.setLayout(self.grid)
        self.setFixedSize(600, 400)
        self.button.clicked.connect(self.inquiry)

    def inquiry(self):
        """Query the entered tracking number and show the result text."""
        number = self.line_edit.text()
        try:
            infos = self.getExpressInfo(number)
            if not infos:
                infos = ['-' * 40 + '\n' + '单号不存在或已过期\n' + '-' * 40 + '\n']
        except:
            # Deliberate catch-all: any lookup failure is reported to the
            # user as an invalid tracking number.
            infos = ['-' * 40 + '\n' + '快递单号有误, 请重新输入.\n' + '-' * 40 + '\n']
        self.text.setText('\n\n\n'.join(infos)[:-1])

    def getExpressInfo(self, number):
        """Query kuaidi100 for *number*; return a list of formatted reports."""
        session = requests.Session()
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        }
        # Step 1: auto-detect the courier company from the tracking number.
        express_info = session.get(
            f'http://www.kuaidi100.com/autonumber/autoComNum?resultv2=1&text={number}',
            headers=headers,
        ).json()['auto'][0]
        # Step 2: query the tracking progress with the detected company code.
        url = 'https://m.kuaidi100.com/query'
        data = {
            'postid': number,
            'id': '1',
            'valicode': '',
            'temp': str(random.random()),
            'type': express_info['comCode'],
            'phone': '',
            'token': '',
            'platform': 'MWWW',
        }
        # Fetch the result page first to obtain the CSRF/session cookies.
        response = session.get('https://m.kuaidi100.com/result.jsp')
        cookie = requests.utils.dict_from_cookiejar(response.cookies)
        headers['Cookie'] = f"csrftoken={cookie['csrftoken']}; WWWID={cookie['WWWID']}"
        response_json = session.post(url, headers=headers, data=data).json()
        express_data, infos = response_json['data'], []
        if ('name' in express_info) and express_info['name']:
            info = '公司: %s\n' % express_info['name']
        else:
            # Fall back to translating the detected pinyin company code.
            info = '公司: %s\n' % self.py2hz(express_info['comCode'])
        for idx, item in enumerate(express_data):
            if idx == 0:
                info += (
                    '-' * 40
                    + '\n时间:\n'
                    + item['time']
                    + '\n进度:\n'
                    + item['context']
                    + '\n'
                    + '-' * 40
                    + '\n'
                )
            else:
                info += (
                    '时间:\n'
                    + item['time']
                    + '\n进度:\n'
                    + item['context']
                    + '\n'
                    + '-' * 40
                    + '\n'
                )
        if not express_data:
            info += '-' * 40 + '\n' + '单号不存在或已过期\n' + '-' * 40 + '\n'
        infos.append(info)
        return infos

    def py2hz(self, py):
        """Translate a pinyin company code to its Chinese name (None if unknown)."""
        return self.companies.get(py)
|
import unittest
import numpy as np
from plate_analysis import is_connected
from plate_analysis import connectivity_hinges
from plate_analysis import dijkstra
from plate_analysis import connectivity_penalization
class TestConnectivityChecker(unittest.TestCase):
    """Exercise the connectivity helpers from plate_analysis."""

    # Disconnected plate reused across the tests below.
    t2 = np.array([[0, 0, 0, 1, 1, 0],
                   [1, 1, 1, 1, 0, 0],
                   [0, 0, 0, 0, 1, 0],
                   [1, 1, 1, 1, 1, 1],
                   [0, 0, 0, 0, 1, 0]])

    def test_all(self):
        # Same as t2 but with a bridging cell at (2, 2), making it connected.
        connected = np.array([[0, 0, 0, 1, 1, 0],
                              [1, 1, 1, 1, 0, 0],
                              [0, 0, 1, 0, 1, 0],
                              [1, 1, 1, 1, 1, 1],
                              [0, 0, 0, 0, 1, 0]])
        self.assertEqual(is_connected(connected), True)
        self.assertEqual(is_connected(self.t2), False)
        self.assertEqual(connectivity_hinges(connected), 6)

    def test_dijkstra(self):
        expected = np.array([[0, 1, 2, 1, 1, 2],
                             [1, 1, 1, 1, 2, 3],
                             [2, 2, 2, 2, 3, 4],
                             [3, 3, 3, 3, 3, 3],
                             [4, 4, 4, 4, 3, 4]])
        self.assertTrue(np.array_equal(dijkstra(self.t2, (0, 0)), expected))
        n_islands, max_distance = connectivity_penalization(self.t2)
        self.assertEqual(n_islands, 1)
        self.assertEqual(max_distance, 2)
if __name__ == '__main__':
    # Discover and run all unittest cases defined in this module.
    unittest.main()
|
from .l import l
def func(name, body, kernel=None):
    """Compile *body* into an l-function and optionally register it.

    Bug fix: the original unconditionally called kernel.define(), so the
    documented default kernel=None always raised AttributeError; the
    registration is now skipped when no kernel is supplied.

    Returns the compiled function either way.
    """
    f = l(body, kernel=kernel)
    if kernel is not None:
        kernel.define(name, f)
    return f
|
import time
import random
import discord
from discord.ext import commands
import subsys.R5MemCalls as mem
import subsys.R5SoundCalls as snd
from utility.R5config import DIS_KEY
print("Vwooooo!")
R5 = commands.Bot(command_prefix='R5:')
#player resources
rules = "Edge of the Empire sourcebook: https://drive.google.com/file/d/1etmm_GumqWdRdmlRZBA2hEGKR2sadT-G/view?usp=sharing"
diceApp = "Android App for game dice: https://play.google.com/store/apps/details?id=com.visttux.empireedgediceroller&hl=en_US&gl=US"
videoGuide = "Quick Video Tutorial :https://www.youtube.com/watch?v=Ht6x47NhgG8"
fullResource = rules + "\n~~~~~~~~~~~~~~~~~~~~\n" + diceApp + "\n~~~~~~~~~~~~~~~~~~~~\n" + videoGuide
@R5.command('planets', brief="List planets/locations that have been visited")
async def planets(ctx):
    """Post the list of visited planets from the memory store."""
    await ctx.send(mem.getPlanetList())
@R5.command('planet', brief="List current planet (under construction)")
async def planet(ctx):
    """Post the party's current planet."""
    await ctx.send(mem.getCurrentPlanet())
@R5.command('speak', brief="*R5 plays a random sound")
async def speak(ctx):
    """Play a random droid sound on the first active voice connection."""
    R5.voice_clients[0].play(discord.FFmpegPCMAudio(snd.returnRandomSound()))
@R5.command('obligation', brief="R5 rolls a d100 to determine the obligation.")
async def obligation(ctx):
    """Roll 1d100 for obligation; plays a random sound first when voice is idle."""
    try:
        if not R5.voice_clients[0].is_playing():
            R5.voice_clients[0].play(discord.FFmpegPCMAudio(snd.returnRandomSound()))
            await ctx.send(random.randint(1, 100))
        else:
            await ctx.send(random.randint(1, 100))
    # Any voice-related failure still produces a roll in the text channel.
    except discord.ext.commands.CommandInvokeError:
        await ctx.send(random.randint(1, 100))
    except AttributeError:
        await ctx.send(random.randint(1, 100))
    except IndexError:
        await ctx.send(random.randint(1, 100))
@R5.command('join', brief="Joins R5 to the voice channel. * means voice only!")
async def join(ctx):
    """Connect R5 to the caller's voice channel and play a greeting sound.

    Bug fix: the ClientException handler referenced the undefined names
    RES_DIR and err, so it always raised NameError; it now reuses the
    channel-sound helper like the success path.
    """
    vc = ctx.message.author.voice.channel
    try:
        await vc.connect()
        voice = discord.utils.get(R5.voice_clients, guild=ctx.guild)
        voice.play(discord.FFmpegPCMAudio(snd.returnChannelSound()))
    except discord.ClientException:
        # Already connected (or similar client-side failure): beep and
        # still play the greeting on the existing connection.
        voice = discord.utils.get(R5.voice_clients, guild=ctx.guild)
        await ctx.send("BEEEEP!")
        voice.play(discord.FFmpegPCMAudio(snd.returnChannelSound()))
@R5.command('leave', brief="*Kicks R5 from the voice channel.")
async def leave(ctx):
    """Play a goodbye sound, wait for it to finish, then disconnect."""
    try:
        R5.voice_clients[0].stop()
        R5.voice_clients[0].play(discord.FFmpegPCMAudio(snd.returnChannelSound()))
        print("snooze")
        # Give the goodbye clip time to finish before dropping the connection.
        time.sleep(4)
        await R5.voice_clients[0].disconnect()
    except discord.ClientException:
        await ctx.send("BEEEEP!")
    except IndexError:
        # Not connected to any voice channel.
        await ctx.send("BEEEEP!")
    finally:
        print("Sound played")
@R5.command('play', brief="*Plays a song that R5 has in his memory banks")
async def play(ctx, song: str):
    """Play *song* (a source understood by FFmpeg) on this guild's voice client."""
    voice = discord.utils.get(R5.voice_clients, guild=ctx.guild)
    voice.play(discord.FFmpegPCMAudio(song))
@R5.command('stop', brief="*Stops R5 from playing any music/sounds.")
async def stop(ctx):
    """Stop any audio currently playing on this guild's voice client."""
    voice = discord.utils.get(R5.voice_clients, guild=ctx.guild)
    voice.stop()
@R5.command('query', brief='Search databank for information about a subject. Encase in ""')
async def query(ctx, arg: str):
    """DM the caller a Wookieepedia link for the quoted subject.

    Bug fix: `is_playing` was referenced without calling it; the bare
    bound method is always truthy, so `not ...is_playing` was always
    False and the sound branch never ran. Now calls is_playing(),
    matching the other commands.
    """
    userquery = arg.replace('"', '').replace(" ", "_")
    try:
        if not R5.voice_clients[0].is_playing():
            R5.voice_clients[0].play(discord.FFmpegPCMAudio(snd.returnRandomSound()))
            await ctx.message.author.send("https://starwars.fandom.com/wiki/" + userquery)
        else:
            await ctx.message.author.send("https://starwars.fandom.com/wiki/" + userquery)
    # Voice problems must never block delivery of the link.
    except discord.ext.commands.CommandInvokeError:
        await ctx.message.author.send("https://starwars.fandom.com/wiki/" + userquery)
    except AttributeError:
        await ctx.message.author.send("https://starwars.fandom.com/wiki/" + userquery)
    except IndexError:
        await ctx.message.author.send("https://starwars.fandom.com/wiki/" + userquery)
@R5.command('atmos', brief='*Plays atmospheric sounds. R5:help atmos to get list of sounds',
            description="List of sound options will be here")
async def atmos(ctx, arg: str):
    """Play the named atmospheric sound on the first voice connection."""
    try:
        R5.voice_clients[0].play(discord.FFmpegPCMAudio(snd.returnAtmosphericSound(arg)))
    except IndexError:
        # No active voice connection.
        await ctx.send('BEEEEEP!')
@R5.command('resources', brief='Provides a list of player resources')
async def resources(ctx):
    """DM the caller the combined rules/dice-app/tutorial resource list."""
    try:
        if not R5.voice_clients[0].is_playing():
            R5.voice_clients[0].play(discord.FFmpegPCMAudio(snd.returnRandomSound()))
            await ctx.message.author.send(fullResource)
        else:
            await ctx.message.author.send(fullResource)
    # Voice problems must never block delivery of the resource list.
    except discord.ext.commands.CommandInvokeError:
        await ctx.message.author.send(fullResource)
    except AttributeError:
        await ctx.message.author.send(fullResource)
    except IndexError:
        await ctx.message.author.send(fullResource)
# Start the bot; blocks until shutdown.
R5.run(DIS_KEY)
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""LinkTree class for setting up trees of symbolic links."""
from __future__ import print_function
import filecmp
import os
import shutil
from collections import OrderedDict
import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp, touch, traverse_tree
from llnl.util.symlink import islink, symlink
__all__ = ['LinkTree']
# Marker file placed in otherwise-empty directories.
empty_file_name = '.spack-empty'
def remove_link(src, dest):
    """Remove *dest* when it is a hard/symlink copy of *src*.

    Raises ValueError if *dest* is not a link at all.
    """
    if not islink(dest):
        raise ValueError("%s is not a link tree!" % dest)
    if not filecmp.cmp(src, dest, shallow=True):
        # dest diverged from src -- this happens when two packages were
        # merged into one prefix with a conflicting file; leave it alone.
        return
    os.remove(dest)
class MergeConflict:
    """Record that two source paths project onto the same destination.

    Invariant: project(src_a) == project(src_b) == dst.
    """

    def __init__(self, dst, src_a=None, src_b=None):
        self.dst, self.src_a, self.src_b = dst, src_a, src_b
class SourceMergeVisitor(object):
"""
Visitor that produces actions:
- An ordered list of directories to create in dst
- A list of files to link in dst
- A list of merge conflicts in dst/
"""
def __init__(self, ignore=None):
self.ignore = ignore if ignore is not None else lambda f: False
# When mapping <src root> to <dst root>/<projection>, we need
# to prepend the <projection> bit to the relative path in the
# destination dir.
self.projection = ''
# When a file blocks another file, the conflict can sometimes
# be resolved / ignored (e.g. <prefix>/LICENSE or
# or <site-packages>/<namespace>/__init__.py conflicts can be
# ignored).
self.file_conflicts = []
# When we have to create a dir where a file is, or a file
# where a dir is, we have fatal errors, listed here.
self.fatal_conflicts = []
# What directories we have to make; this is an ordered set,
# so that we have a fast lookup and can run mkdir in order.
self.directories = OrderedDict()
# Files to link. Maps dst_rel to (src_rel, src_root)
self.files = OrderedDict()
def before_visit_dir(self, root, rel_path, depth):
"""
Register a directory if dst / rel_path is not blocked by a file or ignored.
"""
proj_rel_path = os.path.join(self.projection, rel_path)
if self.ignore(rel_path):
# Don't recurse when dir is ignored.
return False
elif proj_rel_path in self.files:
# Can't create a dir where a file is.
src_a_root, src_a_relpath = self.files[proj_rel_path]
self.fatal_conflicts.append(MergeConflict(
dst=proj_rel_path,
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join(root, rel_path)))
return False
elif proj_rel_path in self.directories:
# No new directory, carry on.
return True
else:
# Register new directory.
self.directories[proj_rel_path] = (root, rel_path)
return True
def after_visit_dir(self, root, rel_path, depth):
pass
def before_visit_symlinked_dir(self, root, rel_path, depth):
"""
Replace symlinked dirs with actual directories when possible in low depths,
otherwise handle it as a file (i.e. we link to the symlink).
Transforming symlinks into dirs makes it more likely we can merge directories,
e.g. when <prefix>/lib -> <prefix>/subdir/lib.
We only do this when the symlink is pointing into a subdirectory from the
symlink's directory, to avoid potential infinite recursion; and only at a
constant level of nesting, to avoid potential exponential blowups in file
duplication.
"""
if self.ignore(rel_path):
return False
# Only follow symlinked dirs in <prefix>/**/**/*
if depth > 1:
handle_as_dir = False
else:
# Only follow symlinked dirs when pointing deeper
src = os.path.join(root, rel_path)
real_parent = os.path.realpath(os.path.dirname(src))
real_child = os.path.realpath(src)
handle_as_dir = real_child.startswith(real_parent)
if handle_as_dir:
return self.before_visit_dir(root, rel_path, depth)
self.visit_file(root, rel_path, depth)
return False
def after_visit_symlinked_dir(self, root, rel_path, depth):
pass
def visit_file(self, root, rel_path, depth):
proj_rel_path = os.path.join(self.projection, rel_path)
if self.ignore(rel_path):
pass
elif proj_rel_path in self.directories:
# Can't create a file where a dir is; fatal error
src_a_root, src_a_relpath = self.directories[proj_rel_path]
self.fatal_conflicts.append(MergeConflict(
dst=proj_rel_path,
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join(root, rel_path)))
elif proj_rel_path in self.files:
# In some cases we can resolve file-file conflicts
src_a_root, src_a_relpath = self.files[proj_rel_path]
self.file_conflicts.append(MergeConflict(
dst=proj_rel_path,
src_a=os.path.join(src_a_root, src_a_relpath),
src_b=os.path.join(root, rel_path)))
else:
# Otherwise register this file to be linked.
self.files[proj_rel_path] = (root, rel_path)
def set_projection(self, projection):
    """Set the projection subdirectory and pre-register every directory
    it consists of, recording conflicts with already-seen files."""
    self.projection = os.path.normpath(projection)

    # An empty projection normalizes to '.'; store it as ''.
    if self.projection == '.':
        self.projection = ''
        return

    # If there is a projection, we'll also create the directories
    # it consists of, and check whether that's causing conflicts.
    path = ''
    for component in self.projection.split(os.sep):
        path = os.path.join(path, component)
        if path in self.files:
            # A directory can never be created where a file is: fatal.
            file_root, file_rel = self.files[path]
            self.fatal_conflicts.append(MergeConflict(
                dst=path,
                src_a=os.path.join(file_root, file_rel),
                src_b=os.path.join('<projection>', path)))
        else:
            self.directories[path] = ('<projection>', path)
class DestinationMergeVisitor(object):
    """DestinationMergeVisitor wraps a SourceMergeVisitor and:

    a. registers additional conflicts that arise when merging into the
       destination prefix;
    b. drops redundant mkdir operations for directories that already
       exist in the destination prefix.

    It also guarantees that symlinked directories in the destination
    prefix are never merged with directories from the sources.
    """
    def __init__(self, source_merge_visitor):
        self.src = source_merge_visitor

    def _record_fatal(self, rel_path, entry, root):
        # Append a fatal conflict between a registered source entry and
        # an existing destination path.
        src_a_root, src_a_relpath = entry
        self.src.fatal_conflicts.append(MergeConflict(
            rel_path,
            os.path.join(src_a_root, src_a_relpath),
            os.path.join(root, rel_path)))

    def before_visit_dir(self, root, rel_path, depth):
        # A destination dir where a source wants a file: fatal, and there
        # is no point in descending further.
        if rel_path in self.src.files:
            self._record_fatal(rel_path, self.src.files[rel_path], root)
            return False

        # The dir already exists in the destination: drop the pending
        # mkdir and keep traversing to compare contents.
        if rel_path in self.src.directories:
            del self.src.directories[rel_path]
            return True

        # Unknown to the sources: nothing to compare, don't descend.
        return False

    def after_visit_dir(self, root, rel_path, depth):
        # No post-processing needed for destination directories.
        pass

    def before_visit_symlinked_dir(self, root, rel_path, depth):
        """Treat symlinked dirs in the destination as files: merging a
        source dir (or file) into a symlinked destination dir is always
        a conflict."""
        if rel_path in self.src.directories:
            self._record_fatal(rel_path, self.src.directories[rel_path], root)

        if rel_path in self.src.files:
            self._record_fatal(rel_path, self.src.files[rel_path], root)

        # Never descend into symlinked destination dirs.
        return False

    def after_visit_symlinked_dir(self, root, rel_path, depth):
        # No post-processing needed for symlinked destination dirs.
        pass

    def visit_file(self, root, rel_path, depth):
        # A file already present in the destination conflicts with any
        # source dir or source file projecting to the same path.
        if rel_path in self.src.directories:
            self._record_fatal(rel_path, self.src.directories[rel_path], root)
        elif rel_path in self.src.files:
            self._record_fatal(rel_path, self.src.files[rel_path], root)
class LinkTree(object):
    """Class to create trees of symbolic links from a source directory.

    LinkTree objects are constructed with a source root.  Their methods
    allow you to create and delete trees of symbolic links back to the
    source tree in specific destination directories.  Trees comprise
    symlinks only to files; directories are never symlinked to, to
    prevent the source directory from ever being modified.
    """
    def __init__(self, source_root):
        """Create a LinkTree rooted at ``source_root``.

        Raises:
            IOError: if ``source_root`` does not exist.
        """
        if not os.path.exists(source_root):
            # Bug fix: the path was previously passed as a second argument
            # to IOError instead of %-formatted into the message, so the
            # '%s' placeholder was never filled in.
            raise IOError("No such file or directory: '%s'" % source_root)
        self._root = source_root

    def find_conflict(self, dest_root, ignore=None,
                      ignore_file_conflicts=False):
        """Returns the first file in dest that conflicts with src"""
        ignore = ignore or (lambda x: False)
        conflicts = self.find_dir_conflicts(dest_root, ignore)

        if not ignore_file_conflicts:
            conflicts.extend(
                dst for src, dst
                in self.get_file_map(dest_root, ignore).items()
                if os.path.exists(dst))

        if conflicts:
            return conflicts[0]

    def find_dir_conflicts(self, dest_root, ignore):
        """Return a message for every source path whose destination is
        blocked by an entry of the wrong kind."""
        conflicts = []
        kwargs = {'follow_nonexisting': False, 'ignore': ignore}
        for src, dest in traverse_tree(self._root, dest_root, **kwargs):
            if os.path.isdir(src):
                if os.path.exists(dest) and not os.path.isdir(dest):
                    conflicts.append("File blocks directory: %s" % dest)
            elif os.path.exists(dest) and os.path.isdir(dest):
                conflicts.append("Directory blocks directory: %s" % dest)
        return conflicts

    def get_file_map(self, dest_root, ignore):
        """Map each source file to the destination path it links to."""
        merge_map = {}
        kwargs = {'follow_nonexisting': True, 'ignore': ignore}
        for src, dest in traverse_tree(self._root, dest_root, **kwargs):
            if not os.path.isdir(src):
                merge_map[src] = dest
        return merge_map

    def merge_directories(self, dest_root, ignore):
        """Create all source directories under dest_root, marking those
        that were already empty so unmerge can leave them alone."""
        for src, dest in traverse_tree(self._root, dest_root, ignore=ignore):
            if os.path.isdir(src):
                if not os.path.exists(dest):
                    mkdirp(dest)
                    continue

                if not os.path.isdir(dest):
                    raise ValueError("File blocks directory: %s" % dest)

                # mark empty directories so they aren't removed on unmerge.
                if not os.listdir(dest):
                    marker = os.path.join(dest, empty_file_name)
                    touch(marker)

    def unmerge_directories(self, dest_root, ignore):
        """Remove now-empty destination directories, traversing bottom-up
        ('post' order) so children are handled before their parents."""
        for src, dest in traverse_tree(
                self._root, dest_root, ignore=ignore, order='post'):
            if os.path.isdir(src):
                if not os.path.exists(dest):
                    continue
                elif not os.path.isdir(dest):
                    raise ValueError("File blocks directory: %s" % dest)

                # remove directory if it is empty.
                if not os.listdir(dest):
                    shutil.rmtree(dest, ignore_errors=True)

                # remove empty dir marker if present.
                marker = os.path.join(dest, empty_file_name)
                if os.path.exists(marker):
                    os.remove(marker)

    def merge(self, dest_root, ignore_conflicts=False, ignore=None,
              link=symlink, relative=False):
        """Link all files in src into dest, creating directories
        if necessary.

        Keyword Args:
            ignore_conflicts (bool): if True, do not break when the target
                exists; return a list of files that could not be linked
            ignore (callable): callable that returns True if a file is to be
                ignored in the merge (by default ignore nothing)
            link (callable): function to create links with
                (defaults to llnl.util.symlink)
            relative (bool): create all symlinks relative to the target
                (default False)
        """
        if ignore is None:
            ignore = lambda x: False

        conflict = self.find_conflict(
            dest_root, ignore=ignore, ignore_file_conflicts=ignore_conflicts)
        if conflict:
            raise SingleMergeConflictError(conflict)

        self.merge_directories(dest_root, ignore)
        existing = []
        for src, dst in self.get_file_map(dest_root, ignore).items():
            if os.path.exists(dst):
                # Already present: collect and warn instead of clobbering.
                existing.append(dst)
            elif relative:
                abs_src = os.path.abspath(src)
                dst_dir = os.path.dirname(os.path.abspath(dst))
                rel = os.path.relpath(abs_src, dst_dir)
                link(rel, dst)
            else:
                link(src, dst)

        for c in existing:
            tty.warn("Could not merge: %s" % c)

    def unmerge(self, dest_root, ignore=None, remove_file=remove_link):
        """Unlink all files in dest that exist in src.

        Unlinks directories in dest if they are empty.
        """
        if ignore is None:
            ignore = lambda x: False

        for src, dst in self.get_file_map(dest_root, ignore).items():
            remove_file(src, dst)
        self.unmerge_directories(dest_root, ignore)
class MergeConflictError(Exception):
    """Base class for errors raised while merging file trees."""
    pass
class SingleMergeConflictError(MergeConflictError):
    """Raised when a merge is blocked by a single conflicting file."""
    def __init__(self, path):
        # Bug fix: super() must be anchored at this class, not at the
        # parent class; anchoring at MergeConflictError skipped a level
        # of the MRO.
        super(SingleMergeConflictError, self).__init__(
            "Package merge blocked by file: %s" % path)
class MergeConflictSummary(MergeConflictError):
    """A human-readable summary of file system view merge conflicts
    (showing only the first 3 issues)."""
    def __init__(self, conflicts):
        summary = "{0} fatal error(s) when merging prefixes:\n".format(
            len(conflicts))
        # show the first 3 merge conflicts.
        summary += "".join(
            " `{0}` and `{1}` both project to `{2}`".format(
                conflict.src_a, conflict.src_b, conflict.dst)
            for conflict in conflicts[:3])
        super(MergeConflictSummary, self).__init__(summary)
|
# Mapping from 16-point compass names to their center bearing in degrees
# (each sector spans 22.5 degrees).
compass_dirs = {
    "N": 0.0,
    "NNE": 22.5,
    "NE": 45.0,
    "ENE": 67.5,
    "E": 90.0,
    "ESE": 112.5,
    "SE": 135.0,
    "SSE": 157.5,
    "S": 180.0,
    "SSW": 202.5,
    "SW": 225.0,
    "WSW": 247.5,
    "W": 270.0,
    "WNW": 292.5,
    "NW": 315.0,
    "NNW": 337.5,
}


def direction(num):
    """Return the 16-point compass name closest to bearing ``num`` (degrees).

    The bearing is normalized into [0, 360) first, so inputs outside that
    range (e.g. 375 or -45) now resolve to the correct sector instead of
    rounding to a value absent from compass_dirs and returning None.
    """
    degrees = 22.5 * round(float(num) % 360.0 / 22.5)
    # Rounding the top of the NNW sector lands on 360, which is north.
    if degrees == 360.0:
        return "N"
    for name, d in compass_dirs.items():
        if d == degrees:
            return name
|
# Builds a Keras model for sentence-level check-worthiness detection:
# tokenize the debate sentences, embed them with pretrained GloVe vectors,
# then encode them with an LSTM.
import sys
sys.path.append('/usr/users/oliverren/meng/check-worthy')

from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Embedding, LSTM, Bidirectional, Dropout, Dense, Input, Merge
from keras.layers import Conv1D, GlobalAveragePooling1D, MaxPooling1D, Flatten
from keras.models import Model
from keras.optimizers import Adam
from keras import Sequential

from src.data import debates
from src.models import models

import numpy as np

# NOTE(review): only the first cross-validation fold (data_sets[0]) is used
# here -- confirm whether the remaining folds should be iterated as well.
data_sets = debates.get_for_crossvalidation()
texts = [sentence.text for sentence in data_sets[0][1]]
tokenizer, word_index = models.create_tokenizer(texts)
# Longest sentence length in whitespace-separated tokens.
MAX_SEQUENCE_LENGTH = max([len(sentence.split()) for sentence in texts])

inputs = Input(shape=(MAX_SEQUENCE_LENGTH, ))
# Frozen (trainable=False) embedding layer built from 50-dim GloVe vectors.
encoder1 = models.create_embedding('/usr/users/oliverren/meng/check-worthy/data/glove/glove.6B.50d.txt', word_index, trainable=False, INPUT_LENGTH = MAX_SEQUENCE_LENGTH)(inputs)
encoder2 = LSTM(128)(encoder1)
#!/usr/bin/env python3
import argparse
import functools
import logging
import os
import re
import socket
import sys
import threading
import time
from http.server import BaseHTTPRequestHandler, HTTPServer
# Domains that are not intended to be blocked
# and which will be filtered out from block lists.
# Some of these are contained in the original OS hosts list,
# which is prepended to the generated block list.
# Hostnames that must never end up in a generated block list
# (filtered out by load_hosts_domains).
NO_BLOCK_DOMAINS = [
    'localhost',
    'localhost.localdomain',
    'local',
    '0.0.0.0'
]

# Number of concurrent connections we can handle
# (one handler thread per connection; see start_http_server)
HTTP_SERVER_THREADS = 10

# Number of connections to additionally keep
# in the queue without disconnecting clients
# (passed to socket.listen() as the backlog)
HTTP_SERVER_QUEUE = 5
class HostsRequestHandler(BaseHTTPRequestHandler):
    """Serves combined hosts block lists over HTTP.

    The request path encodes which lists to combine: e.g. ``/1-2-7``
    combines the loaded lists with ids 1, 2 and 7.  The default hosts
    content is streamed first, followed by the generated block list.
    """
    def __init__(self, default_hosts, hosts_lists, *args, **kwargs):
        # default_hosts: text prepended to every successful response.
        # hosts_lists: dict mapping list id (int) -> set of blocked domains.
        # These attributes must be assigned before calling super(),
        # because BaseHTTPRequestHandler handles the request inside
        # its __init__.
        self.default_hosts = default_hosts
        self.hosts_lists = hosts_lists
        super().__init__(*args, **kwargs)

    def do_GET(self):
        """Handle GET: validate the list-id path, then stream the default
        hosts followed by the combined block list."""
        try:
            # Check if the endpoint looks like a valid list combination
            if not re.fullmatch(r'/[0-9]+(?:-[0-9]+)*', self.path):
                logging.warning(f'Invalid request to endpoint: {self.path}')
                self.send_error(404, 'invalid identifier')
                return

            # Get the host list ids
            list_ids = set([int(id) for id in self.path[1:].split('-')])

            # Make sure we have all those lists loaded
            for id in list_ids:
                if id not in self.hosts_lists:
                    logging.warning(f'Invalid list id {id} in request: {self.path}')
                    self.send_error(404, f'invalid list id: {id}')
                    return

            # We're confident we can handle this request,
            # so let's send the default hosts list to give
            # the client some first breadcrumbs
            logging.info(f'Handling request: {self.path}')
            self.send_response(200)
            self.send_header('Content-type', 'text/plain; charset=utf-8')
            self.end_headers()
            self.wfile.write(self.default_hosts.encode())

            # Generate the block list and send it
            # (generation is timed and logged for tuning purposes)
            time_start = time.time()
            block_list = combine_lists(self.hosts_lists, list_ids)
            time_end = time.time()
            time_diff = int((time_end - time_start) * 1000)
            logging.debug(f'Blocklist generation took {time_diff} ms')
            self.wfile.write(block_list.encode())
        except (ConnectionResetError, BrokenPipeError) as err:
            # Client disconnected mid-response; log without a traceback.
            logging.info(str(err))

    def log_message(self, format, *args):
        # Route BaseHTTPRequestHandler's access logging through logging.
        logging.info(self.address_string() + ' - ' + format % args)
def http_server_thread(thread_id, addr, sock, default_hosts, hosts_lists):
    """Serve HTTP requests forever on the shared, pre-bound socket."""
    logging.info(f'Starting http server thread {thread_id+1}...')
    handler_factory = functools.partial(
        HostsRequestHandler, default_hosts, hosts_lists)
    server = HTTPServer(addr, handler_factory, False)

    # Prevent the HTTP server from re-binding every handler.
    # https://stackoverflow.com/questions/46210672/
    server.socket = sock
    server.server_bind = lambda self: None
    server.serve_forever()
def start_http_server(addr, port, default_hosts, hosts_lists):
    """Bind one shared listening socket and serve it from
    HTTP_SERVER_THREADS handler threads (this call never returns)."""
    logging.info(f'Starting http server on {addr}:{port}...')

    # Create a single socket shared by every handler thread.
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind((addr, port))
    listener.listen(HTTP_SERVER_QUEUE)

    # Spawn n-1 daemon handler threads...
    worker_args = [addr, listener, default_hosts, hosts_lists]
    for idx in range(HTTP_SERVER_THREADS - 1):
        threading.Thread(
            target=http_server_thread,
            args=tuple([idx] + worker_args),
            daemon=True).start()

    # ...and run the last handler in the calling thread (blocks forever).
    http_server_thread(HTTP_SERVER_THREADS - 1, *worker_args)
def load_hosts_domains(file_path):
    """Read all blocked domains from a hosts-format file.

    Only lines that resolve a domain to 127.0.0.1 or 0.0.0.0 count as
    rules; an optional trailing '#' comment is stripped, and domains in
    NO_BLOCK_DOMAINS (localhost & friends) are skipped.

    :param file_path: path to the hosts file
    :return: set of blocked domain names (str)
    """
    block_domains = set()
    with open(file_path, 'r') as f:
        # Stream the file line by line instead of materializing the whole
        # file with readlines().
        for line in f:
            # Rules start with one of the two sinkhole IPs.
            if not line.startswith(('127.0.0.1 ', '0.0.0.0 ')):
                continue
            # The domain follows the first space and ends before an
            # optional '#' comment or at end of line.
            domain = line.split(' ', 1)[1].split('#', 1)[0].strip()
            # Some block lists include rules for localhost, etc.
            # We don't need those.
            if domain in NO_BLOCK_DOMAINS:
                continue
            block_domains.add(domain)
    return block_domains
def combine_lists(hosts_lists, list_ids):
    """Build one plain-text hosts block list from the chosen list ids."""
    # Union of the domains from all selected block lists.
    all_domains = set()
    for chosen in list_ids:
        all_domains |= hosts_lists[chosen]

    # Render as sorted "0.0.0.0 <domain>" lines under an identifying header.
    combination_id = '-'.join(sorted(map(str, list_ids)))
    prefix = f'# uAdBlock generated block list ({combination_id})\n0.0.0.0 '
    return prefix + '\n0.0.0.0 '.join(sorted(all_domains)) + '\n'
def main():
    """Parse CLI arguments, load all hosts block lists from the lists
    directory and serve them over HTTP (never returns)."""
    # Parse arguments
    arg_parser = argparse.ArgumentParser(description='Serves hosts files for uAdBlock')
    arg_parser.add_argument('-a', '--addr', action='store', default='0.0.0.0', help='Address to bind to')
    arg_parser.add_argument('-p', '--port', action='store', default=8080, type=int, help='Port to bind to')
    arg_parser.add_argument('-d', '--hosts-dir', action='store', default='lists', help='Directory containing the hosts lists')
    args = arg_parser.parse_args()

    # Configure logging
    logging.basicConfig(format='%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s', level=logging.DEBUG)
    logging.info('Starting...')

    # Load default hosts file (this will be prepended to generated lists)
    logging.info('Loading default hosts...')
    with open('hosts.01-ubuntu-default', 'r') as f:
        ubuntu_default_hosts = f.read()

    # Block files should be placed in the lists directory
    if not os.path.isdir(args.hosts_dir):
        logging.error(f'Lists directory "{args.hosts_dir}" doesn\'t exist. Path correct? Maybe run update_lists.sh first.')
        sys.exit(1)

    # Load all domains from the individual hosts files
    hosts_lists = {}
    for hosts_file in sorted(os.listdir(args.hosts_dir)):
        hosts_file_path = os.path.join(args.hosts_dir, hosts_file)
        # Each file name should start with <list_id>_
        file_match = re.match(r'([0-9]+)_', hosts_file)
        if os.path.isfile(hosts_file_path) and file_match:
            logging.info(f'Loading block list: {hosts_file_path}')
            list_id = int(file_match.group(1))
            list_domains = load_hosts_domains(hosts_file_path)
            hosts_lists[list_id] = list_domains

    # Start the http server (blocks forever)
    start_http_server(args.addr, args.port, ubuntu_default_hosts, hosts_lists)


if __name__ == '__main__':
    main()
# Plot salary deciles for men and women from works.csv.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# NOTE(review): assumes works.csv has 'gender' and 'salary' columns with
# Russian gender labels -- confirm against the data source.
works = pd.read_csv("./works.csv")
m = works[works['gender'] == 'Мужской']['salary']
fm = works[works['gender'] == 'Женский']['salary']

# Quantile levels 0.1, 0.2, ..., 1.0
pr = [i / 10 for i in range(1, 11)]
ax = plt.subplots()[1]

print("Мужчины:\n")
ax.plot(m.quantile(pr))
plt.show()

print("Женщины:\n")
# NOTE(review): this reuses the axes of the figure already shown above;
# in non-interactive mode the second show() may display nothing -- verify.
ax.plot(fm.quantile(pr))
plt.show()
|
'''
Testing the get_style_sheets function.
'''
import os
from qtstyles import get_style_sheets, Sheet
class TestGetStyleSheets(object):
    """get_style_sheets() returns a mapping from style-sheet names to
    their associated Sheet objects; these tests check that contract."""

    def setup_method(self, method):
        """Run the get_style_sheets function and store the results."""
        self.results_dict = get_style_sheets()

    def teardown_method(self, method):
        """Clear attributes."""
        del self.results_dict

    def test_all_keys(self):
        """Every key (style sheet name) must be a str."""
        assert all(isinstance(name, str) for name in self.results_dict)

    def test_all_values(self):
        """Every value must be a Sheet instance."""
        assert all(
            isinstance(sheet, Sheet)
            for sheet in self.results_dict.values()
        )

    def test_sheet_files_exist(self):
        """Every Sheet must point to a '.qss' file that exists."""
        assert all(
            os.path.isfile(sheet.path)
            for sheet in self.results_dict.values()
        )
|
def calculate_1D_consolidation(y_coord, H, T_v):
    """
    Calculates the analytical solution for 1d consolidation on linear elastic soil [@@ref@@]

    :param y_coord: vertical coordinate
    :param H: sample height
    :param T_v: dimensionless time factor
    :return: relative excess pore pressure
    """
    from math import fabs, cos, pi, exp

    tolerance = 1e-10
    max_terms = 1001
    min_terms = 20

    term_idx = 1
    previous_sum = 1
    series_sum = 0
    enough_terms = False
    # Accumulate the Fourier series until successive partial sums agree to
    # within the tolerance (always taking at least min_terms terms).
    while fabs(previous_sum - series_sum) > tolerance and term_idx < max_terms or not enough_terms:
        previous_sum = series_sum
        series_sum = (-1) ** (term_idx - 1) / (2 * term_idx - 1) * cos((2 * term_idx - 1) * pi / 2 * y_coord / H) * exp(
            -1 * (2 * term_idx - 1) ** 2 * pi ** 2 / 4 * T_v) + previous_sum
        term_idx += 1
        if (term_idx > min_terms):
            enough_terms = True
    return 4.0 / pi * series_sum
def rigid_footing(x, B, delta, G, nu, settlement):
    """
    Calculates analytical solution for reaction pressure of settlement controlled rigid footing on linear elastic soil
    [@@ref@@]

    :param x: x-coordinate
    :param B: width footing
    :param delta: geometry dependent factor
    :param G: shear modulus
    :param nu: poison ratio
    :param settlement: settlement
    :return: vertical reaction pressure
    """
    from math import pi, sqrt

    # Total reaction force carried by the footing.
    total_force = settlement * 2.0 * (1.0 + nu) * G / delta
    # Pressure distribution, singular towards the footing edges (|x| -> B).
    return -2.0 / pi * total_force / 2.0 / (B * sqrt(1.0 - (x / B) ** 2.0))
def calculate_max_deflections_ring(force, r, young, m_inertia):
    """
    todo Extend description
    ref: http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.912.904&rep=rep1&type=pdf

    :param force: Point load
    :param r: radius
    :param young: Young's modulus
    :param m_inertia: area moment of inertia
    :return: relative increase in horizontal and vertical diameter
    """
    from math import pi

    # Diameter changes scale with F*r^3 / (E*I); only the leading
    # coefficient differs between the two directions.
    eps_horizontal = (1/2 - 2/pi) * force * r ** 3 / (young*m_inertia)
    eps_vertical = (pi/4 - 2/pi) * force * r ** 3 / (young*m_inertia)
    return eps_horizontal, eps_vertical
def calculate_bending_moments_ring(force, r, theta):
    """
    todo extend description
    ref http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.912.904&rep=rep1&type=pdf

    :param force: point load
    :param r: radius
    :param theta: angle
    :return: bending moment
    """
    from math import cos, pi

    # M(theta) = F * r * (1/pi - cos(theta)/2)
    return force * r * (1/pi - cos(theta)/2)
|
import subprocess
import sys
def npm(*npm_args):
    """Run the platform-appropriate npm executable with the given args;
    raises CalledProcessError on a non-zero exit status."""
    command = [__get_executable_name(), *npm_args]
    subprocess.check_call(command)
def __get_executable_name():
    """Return the npm executable name ('npm.cmd' on Windows, else 'npm')."""
    return 'npm.cmd' if sys.platform == 'win32' else 'npm'
# Allow invoking this wrapper directly: forward CLI arguments to npm.
if __name__ == '__main__':
    npm(*sys.argv[1:])
|
# =============================================================================
# SaltStack utils File
#
# NAME: _utils/convert_to_epoch_time.py
# WRITTEN BY: Alek Tant of SmartAlek Solutions
# DATE : 2016.07.11
#
# PURPOSE: Convert Salt Event Times into Epoch Times
#
from __future__ import absolute_import
from calendar import timegm
from time import strptime
def __virtual__():
    # Salt loader hook: expose this utils module under the name
    # 'convert_to_epoch_time'.
    return "convert_to_epoch_time"
def convert_to_epoch_time(salt_event_time):
    '''
    Convert Salt Event Times into Epoch times.

    The input looks like '2016-07-11T12:00:00.123456'; the fractional
    seconds are discarded and the timestamp is interpreted as UTC
    (timegm treats the parsed struct_time as UTC).
    '''
    timestamp, _, _fraction = salt_event_time.partition('.')
    return int(timegm(strptime(timestamp, '%Y-%m-%dT%H:%M:%S')))
|
#
# @lc app=leetcode id=83 lang=python3
#
# [83] Remove Duplicates from Sorted List
#
# https://leetcode.com/problems/remove-duplicates-from-sorted-list/description/
#
# algorithms
# Easy (41.95%)
# Likes: 774
# Dislikes: 82
# Total Accepted: 333.1K
# Total Submissions: 779.9K
# Testcase Example: '[1,1,2]'
#
# Given a sorted linked list, delete all duplicates such that each element
# appear only once.
#
# Example 1:
#
#
# Input: 1->1->2
# Output: 1->2
#
#
# Example 2:
#
#
# Input: 1->1->2->3->3
# Output: 1->2->3
#
#
#
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def deleteDuplicates(self, head: ListNode) -> ListNode:
        """Collapse runs of equal values in a sorted linked list so each
        value appears exactly once; returns the (unchanged) head node."""
        if not head:
            return head
        node = head
        while node.next:
            if node.next.val == node.val:
                # Duplicate: splice the next node out of the list.
                node.next = node.next.next
            else:
                node = node.next
        return head
|
# Walk every dialog folder under ./dialogs and delete any .tsv transcript
# with fewer than three distinct speakers (speaker name in column 1).
import csv
import os

all_dialogs = os.getcwd() + '/dialogs'
os.chdir(all_dialogs)
list_dialogs = os.listdir(all_dialogs)

for i_dialogs in list_dialogs:
    ith_dialogs = os.getcwd() + "/" + i_dialogs
    os.chdir(ith_dialogs)
    ith_list = os.listdir(ith_dialogs)
    print("Dialogs Folder {} # of tsv files: {}".format(i_dialogs, len(ith_list)))
    for m_tsv in ith_list:
        with open(m_tsv) as tsvfile:
            reader = csv.reader(tsvfile, delimiter='\t')
            # Collect the speaker column (index 1) from every row.
            # NOTE(review): a row with fewer than two columns would raise
            # IndexError here -- confirm all tsv files are well-formed.
            name_col = []
            for row in reader:
                name_col.append(row[1])
            # Two or fewer distinct speakers -> remove the transcript.
            if len(set(name_col)) <= 2:
                print("Invalid tsv. Speakers < 3. Removing " + m_tsv)
                os.remove(m_tsv)
    # Return to the parent folder before the next iteration.
    os.chdir(all_dialogs)
|
#!/usr/bin/env python3
from fpylll import BKZ, IntegerMatrix
def inv_mod(a, m):
    """Modular inverse of ``a`` modulo a prime ``m``.

    Uses Fermat's little theorem (a**(m-2) mod m), so ``m`` must be prime
    and ``a`` must not be a multiple of ``m``.  Written as a named def
    instead of a lambda bound to a name (PEP 8 E731), which also gives
    better tracebacks.
    """
    return pow(a, m - 2, m)
class Curve():
    """Short Weierstrass curve y**2 = x**3 + a*x + b over GF(p), with a
    base point (x0, y0) of prime order q.

    ``single_mul`` uses an x-only ladder (XZ coordinates) with final
    y-recovery; ``double_mul`` computes n*P + m*Q in Jacobian coordinates
    with a joint double-and-add over both scalars (Shamir's trick).
    """
    def __init__(self, p, a, b, q, x0, y0):
        self.p = p
        self.a = a
        self.b = b
        # 2b and 4b are cached because the XZ ladder formulas use them.
        self.b2 = 2*b % self.p
        self.b4 = 2*self.b2 % self.p
        self.q = q
        self.base = (x0, y0)

    def is_on_curve(self, point):
        """Return True iff point=(x, y) satisfies the curve equation mod p."""
        xx, yy = point
        tmp = xx**2 % self.p
        tmp = xx*tmp % self.p
        tmp = (tmp + xx*self.a + self.b) % self.p
        return (yy**2 - tmp) % self.p == 0

    def single_mul(self, n, P):
        """Return n*P (affine) for 0 < n < q, with P given in affine form."""
        def dbl_xz(self, point):
            # 2P in XZ-only coordinates
            x1, z1 = point
            A, B = x1**2 % self.p, z1**2 % self.p
            C = 2*((x1 + z1)**2 - (A + B)) % self.p
            D = self.a*B % self.p
            return ((A - D)**2 - self.b2*B*C) % self.p, ((A + D)*C + self.b4*B**2) % self.p

        def add_xz(self, R0, R1, x0):
            # P + Q (differential addition; x0 is the x of the difference)
            x1, z1 = R0
            x2, z2 = R1
            A, B = x1*x2 % self.p, z1*z2 % self.p
            C, D = x1*z2 % self.p, x2*z1 % self.p
            A = 2*(C + D)*(A + self.a*B) % self.p
            B = 4*self.b*B**2 % self.p
            C = (C - D)**2 % self.p
            return (A + B - x0*C) % self.p, C

        def y_recovery(self, P, R0, R1):
            # Recover the projective (x, y, z) of R0 from the affine P and
            # the two final ladder points R0, R1.
            x0, y0 = P[:2]
            x1, z1 = R0
            x2, z2 = R1
            A = x0*z1 % self.p
            B = (A - x1)**2 % self.p
            C = x0*x1 % self.p
            D = self.a*z1 % self.p
            A = (A + x1)*(C + D) % self.p
            C = z1*z2 % self.p
            D = 2*y0*C % self.p
            C = self.b2*C % self.p
            return D*x1 % self.p, (C*z1 + A*z2 - x2*B) % self.p, D*z1 % self.p

        # single scalar mult: n*P
        assert (n > 0 and n < self.q)
        # special case: (q-1)*P == -P
        if n == self.q - 1: return P[0], self.p - P[1]

        l = n.bit_length()
        x1, z1 = P[0], 1
        x2, z2 = dbl_xz(self, [x1, z1])
        R = [(x1, z1), (x2, z2)]
        # Ladder over the remaining bits, most significant first.
        for i in range(2, l+1):
            bit = (n >> (l-i)) & 1
            R[1-bit] = add_xz(self, R[0], R[1], P[0])
            R[bit] = dbl_xz(self, R[bit])

        x3, y3, z3 = y_recovery(self, P, R[0], R[1])
        t = inv_mod(z3, self.p)
        return x3*t % self.p, y3*t % self.p

    def double_mul(self, n, P, m, Q):
        """Return n*P + m*Q (affine), with P and Q given in affine form."""
        def dbl_jac(point):
            # Point doubling in Jacobian coordinates.
            x1, y1, z1 = point
            A, B = x1**2 % self.p, y1**2 % self.p
            C, D = B**2 % self.p, z1**2 % self.p
            E = 2*((x1 + B)**2 - A - C) % self.p
            A = (3*A + self.a*D**2) % self.p
            F = (A**2 - 2*E) % self.p
            return F, (A*(E - F) - 8*C) % self.p, ((y1 + z1)**2 - B - D) % self.p

        def add_jac(point1, point2):
            # Point addition in Jacobian coordinates.
            x1, y1, z1 = point1
            x2, y2, z2 = point2
            # infinity cases
            if z1 == 0: return point2
            if z2 == 0: return point1
            A, B = z1**2 % self.p, z2**2 % self.p
            C, D = x1*B % self.p, z2*B % self.p
            D = y1*D % self.p
            E = z1*A % self.p
            E = y2*E % self.p
            F = (x2*A - C) % self.p
            G = 4*F**2 % self.p
            H = F*G % self.p
            # Parses as E - (D % p); D is already reduced, so this is E - D.
            E = E-D % self.p
            # doubling case
            # Bug fix: this used to call dbl_jac(P1) -- an undefined name
            # that raised NameError whenever the two points coincided.
            if F == 0 and E == 0: return dbl_jac(point1)
            E = 2*E % self.p
            C = C*G % self.p
            x3 = (E**2 - H - 2*C) % self.p
            return x3, (E*(C - x3) - 2*D*H) % self.p, ((z1+z2)**2 - A - B)*F % self.p

        # double scalar mult: n*P + m*Q
        l = max(n.bit_length(), m.bit_length())
        PP = [P[0], P[1], 1]
        QQ = [Q[0], Q[1], 1]
        PQ = add_jac(PP, QQ)
        L = [PP, QQ, PQ]
        T = [1,1,0]  # point at infinity (z == 0) in Jacobian coordinates
        for i in range(l):
            T = dbl_jac(T)
            # Combine bit i of both scalars into an index into L.
            val = ((m >> (l-1-i)) & 1) << 1 | ((n >> (l-1-i)) & 1)
            if val != 0:
                T = add_jac(T, L[val-1])
        t = inv_mod(T[2], self.p)
        tsqr = t**2 % self.p
        tcub = tsqr*t % self.p
        return T[0]*tsqr % self.p, T[1]*tcub % self.p
####################
## predefined curves
####################
# NIST/SECG prime curves (a = -3 for all of them).  For each curve:
# pNNN is the field prime, qNNN the group order, bNNN the curve
# coefficient b, and (x0NNN, y0NNN) the standard base point.

## P-192
p192 = 0xfffffffffffffffffffffffffffffffeffffffffffffffff
q192 = 0xffffffffffffffffffffffff99def836146bc9b1b4d22831
b192 = 0x64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1
x0192 = 0x188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012
y0192 = 0x7192b95ffc8da78631011ed6b24cdd573f977a11e794811
secp192r1 = Curve(p192, -3, b192, q192, x0192, y0192)

## P-224
p224 = 0xffffffffffffffffffffffffffffffff000000000000000000000001
q224 = 0xffffffffffffffffffffffffffff16a2e0b8f03e13dd29455c5c2a3d
b224 = 0xb4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4
x0224 = 0xb70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21
y0224 = 0xbd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34
secp224r1 = Curve(p224, -3, b224, q224, x0224, y0224)

## P-256
p256 = 0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff
q256 = 0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551
b256 = 0x5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b
x0256 = 0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296
y0256 = 0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5
secp256r1 = Curve(p256, -3, b256, q256, x0256, y0256)

## P-384
p384 = 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff
q384 = 0xffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf581a0db248b0a77aecec196accc52973
b384 = 0xb3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef
x0384 = 0xaa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7
y0384 = 0x3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f
secp384r1 = Curve(p384, -3, b384, q384, x0384, y0384)

## P-521
p521 = 0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
q521 = 0x1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e91386409
b521 = 0x51953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00
x0521 = 0xc6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66
y0521 = 0x11839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650
secp521r1 = Curve(p521, -3, b521, q521, x0521, y0521)
'''
inputs: q: curve parameter (cardinality)
signatures: list of signatures (m,r,s) given as integers
m: hash of signed message
(r,s): signature
l: number of msb or lsb set to 0 of the nonces
msb: True for msb, False for lsb
output: M: basis matrix of a lattice
'''
def genMatrix_HNP(q, signatures, l, msb):
    """Build the hidden-number-problem lattice basis used to recover an
    ECDSA key from signatures whose nonces have l known-zero bits.

    :param q: curve group order
    :param signatures: list of (m, r, s) integer triples
    :param l: number of nonce bits known to be zero
    :param msb: True if the zero bits are the most significant ones
    :return: fpylll IntegerMatrix of dimension (n+2) x (n+2)
    """
    n = len(signatures)
    scale = 2**(l+1)
    # Offset added to the last row: centered for msb-zero nonces,
    # plain q for lsb-zero nonces.
    offset = 2**q.bit_length() if msb else q

    T, U = [], []
    for m, r, s in signatures:
        denom = s if msb else s * 2**l
        s_inv = inv_mod(denom, q)
        T.append(r * s_inv % q)
        U.append(-s_inv * m % q)

    M = IntegerMatrix(n+2, n+2)
    M[n, n] = 1
    M[n+1, n+1] = q
    for i in range(n):
        M[i, i] = scale * q
        M[n, i] = scale * T[i]
        M[n+1, i] = scale * U[i] + offset
    return M
'''
inputs: M: basis matrix of a lattice
curve: instance of the class Curve
pubkey_point: public point of the signer
output: private key of the signer or -1
'''
def findPrivateKey_HNP(M, curve, pubkey_point, block_size=25):
    """BKZ-reduce the HNP lattice and scan its short vectors for the
    private key.

    :param M: basis matrix built by genMatrix_HNP
    :param curve: instance of the class Curve
    :param pubkey_point: public point of the signer
    :param block_size: BKZ block size (larger = stronger reduction, slower)
    :return: private key of the signer, or -1 if no row matches
    """
    Mred = BKZ.reduction(M, BKZ.Param(block_size=block_size))
    for i in range(Mred.nrows):
        row = Mred[i]
        # The key candidate sits in the second-to-last coordinate of a
        # short vector (matching the lattice layout in genMatrix_HNP).
        guess = row[-2] % curve.q
        if guess == 0:
            continue
        Q = curve.single_mul(guess, curve.base)
        if Q[0] == pubkey_point[0]:
            # Same x-coordinate: the guess is the key up to point sign.
            if Q[1] == pubkey_point[1]:
                return guess
            else:
                return curve.q - guess
    return -1
'''
check the validity of the signature
'''
def check_signature(curve, pubkey_point, signature):
    """Verify an ECDSA signature (m, r, s) against pubkey_point: valid
    iff the x-coordinate of u*G + v*Pub equals r modulo q."""
    m, r, s = signature
    w = inv_mod(s, curve.q)
    u = m * w % curve.q
    v = r * w % curve.q
    x_coord, _ = curve.double_mul(u, curve.base, v, pubkey_point)
    return x_coord % curve.q == r
'''
Given a curve, a public point and signatures where the l most
(or least) significant bits of the nonces are set to 0,
returns the private key or -1 if not found
'''
def findkey(curve, pubkey_point, signatures, msb, l, block_size=25):
    """Recover the signer's private key from signatures whose nonces have
    l zero most- (msb=True) or least-significant bits; -1 if not found."""
    lattice = genMatrix_HNP(curve.q, signatures, l, msb)
    return findPrivateKey_HNP(lattice, curve, pubkey_point, block_size)
|
import os
import sys
import tempfile
import unittest
from cssdeadwood.dom_match import match_selectors_against_html_string
class CssMatchTest(unittest.TestCase):
    """Tests for match_selectors_against_html_string()."""

    def _matches(self, html, *selectors):
        """Helper: return the subset of the given selectors matching html."""
        return match_selectors_against_html_string(set(selectors), html)

    def testSimple(self):
        html = '<html><head></head><body><p>hello world</p></body></html>'
        self.assertEqual(self._matches(html, 'p'), set(['p']))
        self.assertEqual(self._matches(html, 'div'), set([]))
        self.assertEqual(self._matches(html, 'p', 'div'), set(['p']))

    def testPseudoClasses(self):
        html = '<html><head></head><body><p>hello <a href="/world">world</a></p></body></html>'
        self.assertEqual(
            self._matches(html, 'a', 'p:hover', 'h4:hover'),
            set(['a', 'p:hover']))
        self.assertEqual(
            self._matches(html, 'a:focus', 'a:visited'),
            set(['a:visited', 'a:focus']))

    def testPseudoChildSelectors(self):
        html = '<html><head></head><body><ol><li>one</li></ol><ul></ul></body></html>'
        self.assertEqual(
            self._matches(html, 'ol li:first-child'),
            set(['ol li:first-child']))
        self.assertEqual(self._matches(html, 'ul li:first-child'), set())

    def testDirectChilds(self):
        html = '<html><head></head><body><p>hello <a href="/world">world</a></p></body></html>'
        self.assertEqual(
            self._matches(html, 'p > a', 'p a'), set(['p > a', 'p a']))
        self.assertEqual(
            self._matches(html, 'p>a', 'p a'), set(['p>a', 'p a']))

    def testPseudoElements(self):
        html = '<html><head></head><body><p>hello <a href="/world">world</a></p></body></html>'
        for pseudo in ('p:before', 'p:after', 'p:first-line', 'p:first-letter'):
            self.assertEqual(self._matches(html, pseudo), set([pseudo]))
|
class Solution(object):
    def XXX(self, nums):
        """Return every subset of nums.

        Subsets are enumerated by counting an inclusion bitmask from 0 to
        2**n - 1 with the FIRST element as the most significant bit, which
        reproduces the original DFS (0-branch before 1-branch) visiting
        order exactly.
        """
        n = len(nums)
        self.ret = [
            [nums[i] for i in range(n) if (mask >> (n - 1 - i)) & 1]
            for mask in range(2 ** n)
        ]
        return self.ret
|
# coding=utf-8
from pyecharts.chart import Chart
from pyecharts.option import get_all_options
from pyecharts.constants import (CITY_GEO_COORDS, SYMBOL)
class GeoLines(Chart):
    """
    <<< Geo lines chart >>>

    Draws line data that carries origin and destination information;
    mainly used to visualize flight paths and routes on a map.
    """
    def __init__(self, title="", subtitle="", **kwargs):
        super(GeoLines, self).__init__(title, subtitle, **kwargs)
        # zlevel counter: each added series gets its own zlevel (see
        # __add), so series are rendered on separate canvas layers.
        self._zlevel = 1

    def add(self, *args, **kwargs):
        # Public entry point; all real work happens in the
        # name-mangled __add below.
        self.__add(*args, **kwargs)

    def __add(self, name, data,
              maptype='china',
              symbol=None,
              symbol_size=12,
              border_color="#111",
              geo_normal_color="#323c48",
              geo_emphasis_color="#2a333d",
              geo_cities_coords=None,
              geo_effect_period=6,
              geo_effect_traillength=0,
              geo_effect_color='#fff',
              geo_effect_symbol='circle',
              geo_effect_symbolsize=5,
              is_geo_effect_show=True,
              is_roam=True,
              **kwargs):
        """
        :param name:
            Series name, used for tooltip display and legend filtering.
        :param data:
            Data items; each row is one item containing two entries,
            e.g. ["广州", "北京"] draws a line from Guangzhou to Beijing.
        :param maptype:
            Map type.  Supports 'china', 'world', the Chinese provinces
            and major cities, plus the 363 second-tier city maps from
            https://github.com/chfw/echarts-china-cities-js .
            Note: for city-level maps, drop the trailing '市' from the
            city name, e.g. use '石家庄' rather than '石家庄市'.
        :param symbol:
            Marker type at the two ends of the lines; either a pair
            (list) specifying each end, or a single value for both.
        :param symbol_size:
            Marker size at the two ends; either a pair or a single value.
        :param border_color:
            Color of the map area borders.
        :param geo_normal_color:
            Color of a map area in its normal state.
        :param geo_emphasis_color:
            Color of a map area when highlighted.
        :param geo_cities_coords:
            User-defined region coordinates, a dict such as
            {'阿城': [126.58, 45.32]}.  When provided, it replaces the
            built-in coordinate table for lookups.
        :param geo_effect_period:
            Duration of the effect animation, in seconds.
        :param geo_effect_traillength:
            Length of the effect trail, a value from 0 to 1; the larger
            the value, the longer the trail.
        :param geo_effect_color:
            Color of the effect marker.
        :param geo_effect_symbol:
            Symbol of the effect marker; one of 'circle', 'rect',
            'roundRect', 'triangle', 'diamond', 'pin', 'arrow', 'plane'.
        :param geo_effect_symbolsize:
            Size of the effect marker; a single number such as 10, or a
            [width, height] pair such as [20, 10].
        :param is_geo_effect_show:
            Whether to show the effect.
        :param is_roam:
            Whether mouse zoom and pan roaming are enabled.  Defaults to
            True (both enabled); set to 'scale' or 'move' to enable only
            zooming or only panning.
        :param kwargs:
        """
        chart = get_all_options(**kwargs)
        # Bump the layer so this series (lines + scatter) renders above
        # previously added ones.
        self._zlevel += 1
        if geo_cities_coords:
            _geo_cities_coords = geo_cities_coords
        else:
            _geo_cities_coords = CITY_GEO_COORDS
        if geo_effect_symbol == "plane":
            # 'plane' is not a built-in ECharts symbol; substitute the
            # SVG path shipped in the constants table.
            geo_effect_symbol = SYMBOL['plane']
        _data_lines, _data_scatter = [], []
        for d in data:
            _from_name, _to_name = d
            # One "lines"-series entry per (from, to) pair; unknown city
            # names fall back to an empty coordinate list.
            _data_lines.append({
                "fromName": _from_name,
                "toName": _to_name,
                "coords": [
                    _geo_cities_coords.get(_from_name, []),
                    _geo_cities_coords.get(_to_name, [])
                ]
            })
            # Also add both endpoints as scatter points so the city
            # labels can be rendered; the trailing 0 is the scatter
            # "value" dimension.
            _from_v = _geo_cities_coords.get(_from_name, [])
            _data_scatter.append({
                "name": _from_name,
                "value": _from_v + [0]
            })
            _to_v = _geo_cities_coords.get(_to_name, [])
            _data_scatter.append({
                "name": _to_name,
                "value": _to_v + [0]
            })
        # Base geographic coordinate system shared by both series.
        self._option.update(
            geo={
                "map": maptype,
                "roam": is_roam,
                "label": {
                    "emphasis": {
                        "show": True,
                        "textStyle": {
                            "color": "#eee"
                        }
                    }},
                "itemStyle": {
                    "normal": {
                        "areaColor": geo_normal_color,
                        "borderColor": border_color
                    },
                    "emphasis": {
                        "areaColor": geo_emphasis_color
                    }}
            })
        self._option.get('legend')[0].get('data').append(name)
        # The animated lines series.
        self._option.get('series').append({
            "type": "lines",
            "name": name,
            "zlevel": self._zlevel,
            "effect": {
                "show": is_geo_effect_show,
                "period": geo_effect_period,
                "trailLength": geo_effect_traillength,
                "color": geo_effect_color,
                "symbol": geo_effect_symbol,
                "symbolSize": geo_effect_symbolsize
            },
            "symbol": symbol or ["none", "arrow"],
            "symbolSize": symbol_size,
            "data": _data_lines,
            "lineStyle": chart['line_style']
        })
        # The companion scatter series marking the endpoints.
        self._option.get('series').append({
            "type": "scatter",
            "name": name,
            "zlevel": self._zlevel,
            "coordinateSystem": 'geo',
            "symbolSize": 10,
            "data": _data_scatter,
            "label": chart['label'],
        })
        self._add_chinese_map(maptype)
        self._config_components(**kwargs)
|
import json
import os
import sys

import requests

# Smoke test: log in to a local GitLab instance as root via the OAuth2
# "resource owner password credentials" grant and verify that an access
# token is returned.
user = "root"
pwd = os.environ['GITLAB_ROOT_PASSWORD']
gitlab_external_url = "http://localhost:" + os.environ['GITLAB_PORT']
login_url = gitlab_external_url + "/oauth/token"

login_request = {
    "grant_type": "password",
    "username": user,
    "password": pwd,
}

# timeout keeps the check from hanging forever if GitLab is unreachable.
raw_response = requests.post(login_url, data=login_request, timeout=30)
if raw_response.status_code != 200:
    print("Login to " + gitlab_external_url + " failed with status code:" + str(raw_response.status_code))
    print(raw_response.text)
    sys.exit(1)  # sys.exit instead of the interactive-only exit() builtin

# expecting {
#   "access_token" : "",
#   "refresh_token" : "",
#   "token_type" : "",
#   "created_at" : "",
# }
response = raw_response.json()  # decodes with the response's declared charset
assert response.get("access_token") is not None
print("Login test successful")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.