content stringlengths 5 1.05M |
|---|
from django.shortcuts import render, get_object_or_404
from django.http import JsonResponse
from .models import Post
def post_list(request):
    """Return the first 20 posts as a JSON payload.

    NOTE(review): the queryset has no explicit ordering, so which 20 rows
    come back depends on the database — confirm intent with the model Meta.
    """
    queryset = Post.objects.all()[:20]
    rows = queryset.values('title', 'author__username', 'created_at')
    return JsonResponse({'results': list(rows)})
def post_detail(request, pk):
    """Return a single post as JSON, or a 404 response when *pk* is unknown."""
    post = get_object_or_404(Post, pk=pk)
    payload = {
        'title': post.title,
        'author': post.author.username,
        'created_at': post.created_at,
    }
    return JsonResponse({'results': payload})
from typing import Iterable
from google.protobuf import field_mask_pb2
class FieldMask:
    """A local wrapper for the official protobuf FieldMask message."""

    def __init__(self, paths: Iterable[str]) -> None:
        # Kept as a set so membership tests and removals are O(1).
        self.paths = set(paths)

    @classmethod
    def from_pb(cls, pb_obj: field_mask_pb2.FieldMask):
        """Alternate constructor: wrap an existing protobuf FieldMask."""
        return cls(pb_obj.paths)

    def has(self, field: str) -> bool:
        """Return True when *field* is covered by this mask."""
        return field in self.paths

    def discard(self, field: str) -> None:
        """Drop *field* from the mask; silently ignore an absent field."""
        self.paths.discard(field)
|
"""
Airline settings & info views.
"""
from aiohttp import web_exceptions
import simplejson as json
from sunhead.rest.views import JSONView
from aeroport.management.utils import get_airlines_list, get_airline
class BaseAirlineView(JSONView):
    """Shared helpers for airline views.

    Reads the airline/origin names from the URL match info and stamps them
    onto every JSON response.
    """

    @property
    def requested_airline(self):
        # Airline name from the route, or None if the route has no such part.
        return self.request.match_info.get("airline", None)

    @property
    def requested_origin(self):
        return self.request.match_info.get("origin", None)

    def json_response(self, context_data=None):
        # Bug fix: the previous version raised AttributeError when called
        # with the default context_data=None (None.update), and mutated the
        # caller's dict in place. Build a fresh payload instead.
        payload = dict(context_data or {})
        payload.update(
            {
                "requested_airline": self.requested_airline,
                "requested_origin": self.requested_origin,
            }
        )
        return super().json_response(payload)
class AirlinesListView(BaseAirlineView):
    """Lists every registered airline with its module path."""

    async def get(self):
        airlines = []
        for airline in get_airlines_list():
            airlines.append({
                "name": airline.name,
                "module_path": airline.module_path,
            })
        return self.json_response({"airlines": airlines})
class AirlineView(BaseAirlineView):
    """Detail view for one airline; PUT updates its settings."""

    async def get(self):
        airline = get_airline(self.requested_airline)
        origins = [
            {"name": origin.name, "module_path": origin.module_path}
            for origin in airline.get_origin_list()
        ]
        ctx = {
            "airline": {
                "name": airline.name,
                "title": airline.title,
            },
            "origins": origins,
            "enabled": await airline.is_enabled(),
            "schedule": await airline.get_schedule(),
            "targets": {},
        }
        return self.json_response(ctx)

    async def put(self):
        airline = get_airline(self.requested_airline)
        form = await self.request.post()

        raw_schedule = form.get("schedule", None)
        if raw_schedule is not None:
            await airline.set_schedule(json.loads(raw_schedule))

        raw_enabled = form.get("enabled", None)
        if raw_enabled is not None:
            # Only "true"/"1" (case-insensitive) count as enabled.
            await airline.set_is_enabled(str(raw_enabled).lower() in {"true", "1"})

        # Settings stored; nothing to return.
        raise web_exceptions.HTTPNoContent
class OriginView(BaseAirlineView):
    """Detail view for one origin of an airline."""

    async def get(self):
        airline = get_airline(self.requested_airline)
        # Looked up for validation; the response name comes from the URL.
        airline.get_origin(self.requested_origin)
        ctx = {
            "airline": {
                "name": airline.name,
                "title": airline.title,
            },
            "origin": {
                # Fixme: Add property name in Origin object
                "name": self.requested_origin,
            },
            "schedule": await airline.get_schedule(self.requested_origin),
        }
        return self.json_response(ctx)
|
import copy
from numpy import array
from utils import NgramScore, beam_search, load_chain, sample_next
def z_read(line: str, chain: dict, num_solutions: int) -> None:
    """Attempt to read a columnar cipher line and print candidate plaintexts.

    The line is split into whitespace-separated columns; each output
    character position i is chosen from the characters of column i. The
    first three columns are scored with 1/2/3-gram statistics and searched
    with beam search; the remaining columns are extended greedily with the
    Markov ``chain`` via ``sample_next``. Results are printed, not returned.
    """
    columns = line.lower().split()
    loader = NgramScore("../data/statistic/english_1grams.txt")
    # Monogram probabilities for each character of the first column.
    first_layer = [loader.score(i) for i in columns[0]]
    # One copy of the first layer per first-column character; each copy is
    # extended below with bigram scores (first + second column).
    second_layer = [copy.deepcopy([first_layer]) for _ in range(len(columns[0]))]
    loader = NgramScore("../data/statistic/english_2grams.txt")
    for i, j in enumerate(second_layer, start=0):
        # "Activation": down-weight the already-chosen first-column slot.
        second_layer[i][0][i] *= 0.1
        j.append(copy.deepcopy([loader.score(columns[0][i] + sym) for sym in columns[1]]))
    # Trigram extension: one table per (first-char, second-char) pair.
    third_layer = list()
    for i in range(len(columns[0])):
        for _ in range(len(columns[1])):
            third_layer.append(copy.deepcopy(second_layer[i]))
    loader = NgramScore("../data/statistic/english_3grams.txt")
    for i, j in enumerate(third_layer, start=0):
        # k indexes the chosen second-column character for this table.
        k = i % len(columns[1])
        # t tracks the first-column character; see the wrap-around below.
        # NOTE(review): t resets to 0 on every iteration, so the wrap logic
        # at the bottom never accumulates across tables — confirm intent.
        t = 0
        third_layer[i][1][k] *= 0.1  # activation of the second layer
        probabilities = list()
        for l in columns[2]:
            probabilities.append(loader.score(columns[0][t] + columns[1][k] + l))
        j.append(copy.deepcopy(probabilities))
        if k % len(columns[0]) == 0 and k != 0:
            t += 1
    # Beam-search each trigram table; collect (positions, score) pairs.
    best_decision = list()
    for table in third_layer:
        table = array(table, dtype=object)
        best_decision.append(beam_search(table, 2))
    best_decision = array(best_decision, dtype=object)
    best_decision = best_decision.reshape(-1, 2)
    # Sort by score and keep only the requested number of candidates.
    best_decision = sorted(best_decision, key=lambda list_: list_[1])
    best_decision = best_decision[:num_solutions]
    # Materialise each candidate's 3-character sentence prefix.
    sent_starts = list()
    for i in range(len(best_decision)):
        pos = best_decision[i][0]
        sent = str()
        for i, j in enumerate(pos, start=0):
            sent += columns[i][j]
        sent_starts.append(sent)
    # Greedily extend each prefix one column at a time: pick the character
    # of the next column with the highest chain probability.
    gen_sent = list()
    for i in sent_starts:
        for line in columns[len(i):]:
            probabilities = sample_next(i, chain, 3)
            prob = {}
            for sym in line:
                if sym in probabilities.keys():
                    prob[sym] = probabilities[sym]
            new_sym = max(prob, key=prob.get)
            i += new_sym
        gen_sent.append(i)
    print(f"{num_solutions} best solutions:")
    for i in gen_sent:
        print(i)
def main() -> None:
    """Read cipher lines from ``ztext.txt`` and print candidate readings.

    Raises FileNotFoundError when ``ztext.txt`` is absent.
    """
    # NOTE(review): the previous version carried two large hard-coded
    # sample strings and `data = [str_]`, all dead code — `data` was
    # unconditionally overwritten by the file read below. Removed.
    with open("ztext.txt", mode="r") as file:
        data = file.readlines()
    chain = load_chain("../data/statistic/model_3.pkl")
    num_solutions = 5
    for line in data:
        z_read(line, chain, num_solutions)


if __name__ == '__main__':
    main()
|
import frappe
@frappe.whitelist()
def update_serial_no():
    """One-off fix-up: stamp purchase details from Stock Entry
    MAT-STE-2022-00140 onto a fixed batch of serial numbers."""
    serial_no_list = [
        '0201200V000DH0200211',
        '0201200V000DH0200212',
        '0201200V000DH0200213',
        '0201200V000DH0200214',
        '0201200V000DH0200215',
        '0201200V000DH0200216',
        '0201200V000DH0200217',
        '0201200V000DH0200218',
        '0201200V000DH0200219',
        '0201200V000DI1501543',
        '0201200V000DI1501544',
        '0201200V000DI1501545',
        '0201200V000DI1501546',
        '0201200V000DI1501547',
        '0201200V000DI1501548',
        '0201200V000DI1501549',
        '0201200V000DI1501550',
        '0201200V000DI1501551',
        '0201200V000DI1501552',
        '0201200V000DI1501553',
        '0201200V000DI1501554',
        '0201200V000DI1501555',
        '0201200V000DI1501556'
    ]
    query = """UPDATE `tabSerial No`
        SET purchase_document_type='Stock Entry',
            purchase_document_no='MAT-STE-2022-00140',
            purchase_date='2022-10-01',
            purchase_time='16:13:06',
            purchase_rate='227.55',
            supplier='',
            supplier_name=''
        WHERE name=%s"""
    for serial in serial_no_list:
        frappe.db.sql(query, str(serial))
def update_serial_no_document_wise(doctype, name, item_code):
    """Back-fill purchase details onto the serial numbers of one item row.

    Loads the document (*doctype*, *name*) — a Purchase Receipt or Stock
    Entry — and for each item row matching *item_code* updates every listed
    Serial No that has no purchase_document_no yet.
    """
    document_details = frappe.get_doc(doctype, name)
    for row in document_details.items:
        if row.item_code == item_code:
            # serial_no is a newline-separated list of serial numbers.
            for s_row in row.serial_no.split("\n"):
                print(s_row)
                if frappe.db.exists("Serial No", s_row):
                    s_doc = frappe.get_doc("Serial No", s_row)
                    # Only touch serials never stamped with a purchase doc.
                    if not s_doc.purchase_document_no:
                        print('updated Serial No')
                        print(s_row)
                        if doctype == "Stock Entry":
                            # Stock Entry rows carry the rate in basic_rate.
                            row.rate = row.basic_rate
                        frappe.db.sql("""UPDATE `tabSerial No`
                            SET purchase_document_type=%s,
                                purchase_document_no=%s,
                                purchase_date=%s,
                                purchase_time=%s,
                                purchase_rate=%s
                            WHERE name=%s""", (
                            document_details.doctype, document_details.name, document_details.posting_date, document_details.posting_time, row.rate, s_row))
                else:
                    print(f"Serial No not exists {s_row}")
@frappe.whitelist()
def update_sn():
    """Run the complete serial-number back-fill batch.

    First applies the fixed-list fix-up, then replays every historical
    (doctype, document, item) combination through
    ``update_serial_no_document_wise``, emitting the same console markers
    (including the duplicated "10"/"15" labels) as the original script.
    """
    jobs = [
        ("first", "Purchase Receipt", "MAT-PRE-2022-00043", "AW-CSD381"),
        ("second", "Purchase Receipt", "MAT-PRE-2022-00044", "AW-CFP2166-04C"),
        ("third", "Purchase Receipt", "MAT-PRE-2022-00004", "AW-CSS2166-2"),
        ("fourth", "Stock Entry", "MAT-STE-2022-00140", "AW-CTD382"),
        ("fifth", "Stock Entry", "MAT-STE-2022-00153", "AW-D135C"),
        ("six", "Purchase Receipt", "MAT-PRE-2022-00019", "AW-CSD311"),
        ("7", "Stock Entry", "MAT-STE-2022-00022", "AW-CMC2166-06"),
        ("8", "Stock Entry", "MAT-STE-2022-00155", "AW-D135C"),
        ("9", "Stock Entry", "MAT-STE-2022-00140", "AW-CTD321"),
        ("10", "Stock Entry", "MAT-STE-2022-00140", "AW-CTD382"),
        ("10", "Stock Entry", "MAT-STE-2022-00155", "AW-D135C"),
        ("11", "Stock Entry", "MAT-STE-2022-00140", "AW-D135C"),
        ("12", "Stock Entry", "MAT-STE-2022-00140", "AW-D101"),
        ("13", "Purchase Receipt", "MAT-PRE-2022-00044", "AW-CFP2166-02C"),
        ("14", "Stock Entry", "MAT-STE-2022-00140", "AW-SSD606D"),
        ("15", "Stock Entry", "MAT-STE-2022-00022", "AW-CMC2166-06"),
        ("15", "Purchase Receipt", "MAT-PRE-2022-00022", "AW-CSD381"),
        ("16", "Purchase Receipt", "MAT-PRE-2022-00022", "AW-CSD311"),
        ("17", "Purchase Receipt", "MAT-PRE-2022-00022", "AW-CSD311"),
        ("18", "Purchase Receipt", "MAT-PRE-2022-00043", "AW-CRP2166-GSM"),
        ("19", "Stock Entry", "MAT-STE-2022-00140", "AW-D106"),
        ("20", "Stock Entry", "MAT-STE-2022-00140", "AW-D105"),
        ("21", "Stock Entry", "MAT-STE-2022-00140", "AW-CTD382"),
        ("22", "Purchase Receipt", "MAT-PRE-2022-00043", "AW-D111"),
        ("23", "Stock Entry", "MAT-STE-2022-00140", "AW-CBL2166-08"),
        ("24", "Stock Entry", "MAT-STE-2022-00140", "AW-D102"),
        ("25", "Purchase Receipt", "MAT-PRE-2022-00022", "AW-CBL2166-06"),
        ("26", "Purchase Receipt", "MAT-PRE-2022-00013", "AW-CSD311"),
        ("27", "Stock Entry", "MAT-STE-2022-00153", "AW-D135C"),
        ("28", "Purchase Receipt", "MAT-PRE-2022-00043", "AW-CSD381"),
        ("29", "Stock Entry", "MAT-STE-2022-00140", "AW-CTD321"),
        ("30", "Stock Entry", "MAT-STE-2022-00140", "AW-CTD321"),
        ("31", "Stock Entry", "MAT-STE-2022-00153", "AW-D135C"),
        ("32", "Purchase Receipt", "MAT-PRE-2022-00052-1", "AW-CSS2166-2"),
        ("33", "Purchase Receipt", "MAT-PRE-2022-00050", "AW-CBL2166-06"),
        ("34", "Purchase Receipt", "MAT-PRE-2022-00052-1", "AW-D117"),
        ("35", "Stock Entry", "MAT-STE-2022-00038", "AW-D110"),
        ("36", "Purchase Receipt", "MAT-PRE-2022-00050", "AW-CSD311"),
        ("37", "Purchase Receipt", "MAT-PRE-2022-00050", "AW-CBL2166-06"),
        ("38", "Purchase Receipt", "MAT-PRE-2022-00052-1", "AW-CSD381"),
        ("39", "Purchase Receipt", "MAT-PRE-2022-00052-1", "AW-CSS2166-2"),
        ("40", "Stock Entry", "MAT-STE-2022-00044", "AW-D117"),
        ("41", "Purchase Receipt", "MAT-PRE-2022-00052-1", "AW-CSD311"),
        ("42", "Purchase Receipt", "MAT-PRE-2022-00052-1", "AW-CFP2166-04C"),
        ("43", "Purchase Receipt", "MAT-PRE-2022-00052-1", "AW-CSD311"),
        ("44", "Purchase Receipt", "MAT-PRE-2022-00052-1", "AW-CFP2166-06C"),
        ("45", "Purchase Receipt", "MAT-PRE-2022-00052-1", "AW-CFP2166-06C"),
    ]
    update_serial_no()
    for index, (label, doctype, docname, item_code) in enumerate(jobs):
        # The very first marker was historically lowercase; keep it so.
        prefix = "starting " if index == 0 else "Starting "
        print(prefix + label)
        update_serial_no_document_wise(doctype, docname, item_code)
        print("ending " + label)
@frappe.whitelist()
def update_sn_all():
    """Back-fill purchase info on every Serial No lacking one, using its
    earliest matching incoming Stock Ledger Entry."""
    missing = frappe.get_all(
        "Serial No",
        filters=[["purchase_document_no", "is", "not set"]],
        fields=["name"],
    )
    update_query = """UPDATE `tabSerial No`
        SET purchase_document_type=%s,
            purchase_document_no=%s,
            purchase_date=%s,
            purchase_time=%s,
            purchase_rate=%s
        WHERE name=%s"""
    for row in missing:
        entries = frappe.get_all(
            "Stock Ledger Entry",
            filters=[
                ["serial_no", "like", f"%{row.name}%"],
                ["voucher_type", "in", ["Purchase Receipt", "Stock Entry"]],
            ],
            fields=["*"],
            order_by="posting_date asc",
        )
        if len(entries) >= 1:
            # Earliest entry wins (ascending posting_date).
            first = entries[0]
            print(first.name)
            frappe.db.sql(update_query, (first.voucher_type, first.voucher_no, first.posting_date, first.posting_time, first.incoming_rate, row.name))
#
# author: Jungtaek Kim (jtkim@postech.ac.kr)
# last updated: February 8, 2021
#
import numpy as np
from bayeso_benchmarks.benchmark_base import Function
def fun_target(bx, dim_bx):
    """Evaluate the Beale function at one point.

    *bx* must be a 1-D array of length *dim_bx* (2 for Beale).
    """
    assert len(bx.shape) == 1
    assert bx.shape[0] == dim_bx
    x0, x1 = bx[0], bx[1]
    term_a = (1.5 - x0 + x0 * x1) ** 2
    term_b = (2.25 - x0 + x0 * x1 ** 2) ** 2
    term_c = (2.625 - x0 + x0 * x1 ** 3) ** 2
    return term_a + term_b + term_c
class Beale(Function):
    """Beale 2-D benchmark: global minimum 0.0 at (3.0, 0.5)."""

    def __init__(self, seed=None):
        assert isinstance(seed, (type(None), int))

        dim_bx = 2
        # Both coordinates share the same search interval.
        bounds = np.array([[-4.5, 4.5]] * dim_bx)
        global_minimizers = np.array([
            [3.0, 0.5],
        ])
        global_minimum = 0.0
        function = lambda bx: fun_target(bx, dim_bx)

        Function.__init__(self, dim_bx, bounds, global_minimizers, global_minimum, function, seed=seed)
|
# Generated by Django 2.2.8 on 2020-02-08 14:46
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: relaxes Cart.products and introduces the
    # Payment and Order models.

    dependencies = [
        ('products', '0003_auto_20200202_1625'),
        ('accounts', '0003_buyer_cart'),
        ('purchase', '0002_auto_20200204_2221'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cart',
            name='products',
            # NOTE(review): null=True has no effect on ManyToManyField
            # (Django ignores it and warns); kept to match the recorded
            # migration state.
            field=models.ManyToManyField(blank=True, null=True, to='products.Product', verbose_name='محصول'),
        ),
        migrations.CreateModel(
            name='Payment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('total_price', models.IntegerField(verbose_name='مبلغ')),
                # Default is the callable datetime.now, evaluated per row.
                ('payment_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='زمان پرداخت')),
                ('buyer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Buyer', verbose_name='خریدار')),
                # NOTE(review): the field is named 'order' but targets
                # products.Product — looks like a generation artifact;
                # confirm against the current models.
                ('order', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='products.Product', verbose_name='محصول')),
            ],
            options={
                'verbose_name': 'پرداخت',
                'verbose_name_plural': 'پرداخت\u200cها',
            },
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('address', models.CharField(max_length=2000, verbose_name='آدرس')),
                ('phone_number', models.CharField(max_length=11, verbose_name='شماره تلفن')),
                ('delivery_date', models.DateField(verbose_name='زمان تحویل')),
                ('status', models.IntegerField(choices=[(1, 'در انتظار'), (2, 'پرداخت شده')], default=1, verbose_name='وضعیت')),
                ('buyer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Buyer', verbose_name='خریدار')),
                ('products', models.ManyToManyField(to='products.Product', verbose_name='محصول')),
            ],
            options={
                'verbose_name': 'سفارش',
                'verbose_name_plural': 'سفارش\u200cها',
            },
        ),
    ]
|
animals_orig = [['dogs', 'puppies'], ['cats', 'kittens']]

# Shallow Copy using slice operator -- can get confusing with mutation
animals_copy = animals_orig[:]

# Deep Copy using accumulation and nested iteration
animals_deep_copy = []
for inner in animals_orig:
    animals_deep_copy.append(list(inner))


def _show(header, shallow_value, deep_value):
    # Print one comparison section in the demo's fixed format.
    print(header)
    print('###shallow###')
    print(shallow_value)
    print('###deep###')
    print(deep_value)


_show('---- prints same items as orig ----', animals_copy, animals_deep_copy)
_show('---- they are not the same object ----',
      animals_copy is animals_orig, animals_deep_copy is animals_orig)
_show('---- they do have the same values ----',
      animals_copy == animals_orig, animals_deep_copy == animals_orig)

# Mutate the original: note this appends a *list*, not a bare string.
animals_orig[0].append(['canines'])
print('---- print mutated orig ----')
print(animals_orig)
print('---- print mutated copy due to shallow copy ---')
print(animals_copy)
print('---- unaltered deep copy ----')
print(animals_deep_copy)
#!/bin/env python3
# -*- coding: utf-8 -*
'''
项目名称: JD-Script / jd_getFollowGift
Author: Curtin
功能:
Date: 2021/6/6 上午7:57
建议cron: 0 9 * * * python3 jd_getFollowGift.py
new Env('关注有礼');
'''
##################################
#cookie填写,注意:#ck 优先读取【JDCookies.txt】 文件内的ck 再到 ENV的 变量 JD_COOKIE='ck1&ck2' 最后才到脚本内 cookies=ck
cookies = ''
#TG 推送
# tg机器人token
TG_BOT_TOKEN = ''
#tg用户id
TG_USER_ID = ''
TG_PROXY_IP = ''
TG_PROXY_PORT = ''
TG_API_HOST = ''
#微信推送加
PUSH_PLUS_TOKEN = ''
#Bark 推送
BARK = ''
#企业微信推送
QYWX_AM = ''
#######################################
# Script version string, interpolated into the generated Readme and banner.
version = 'v1.0.0 Beta'
# Full markdown body written to Readme.md at startup (runtime data — do not
# translate or edit the literal content).
readmes = """
# JD 关注有礼
## 目录结构
JD-Script/ #主仓库
|-- getFollowGifts # 主目录
| |-- jd_getFollowGift.py # 主代码 (必要)
| |-- JDCookies.txt # 存放JD cookie,一行一个ck
| |-- Readme.md # 说明书
| `-- start.sh # shell脚本(非必要)
`-- README.md
### `【兼容环境】`
1.Python3.6+ 环境
2.兼容ios设备软件:Pythonista 3、Pyto(已测试正常跑,其他软件自行测试)
3.Windows exe
安装依赖模块 :
pip3 install requests
执行:
python3 jd_getFollowGift.py
## `【更新记录】`
2021.6.6:(v1.0.0 Beta)
* Test
###### [GitHub仓库 https://github.com/curtinlv/JD-Script](https://github.com/curtinlv/JD-Script)
###### [TG频道 https://t.me/TopStyle2021](https://t.me/TopStyle2021)
###### [TG群 https://t.me/topStyle996](https://t.me/topStyle996)
###### 关注公众号【TopStyle】

#
@Last Version: %s
@Last Time: 2021-06-06 07:57
@Author: Curtin
#### **仅以学习交流为主,请勿商业用途、禁止违反国家法律 ,转载请留个名字,谢谢!**
# End.
[回到顶部](#readme)
""" % version
################################ 【Main】################################
import time, os, sys, datetime
import requests
import re, json, base64
from urllib.parse import unquote, quote_plus
# Directory containing this script (with trailing separator).
pwd = os.path.dirname(os.path.abspath(__file__)) + os.sep
# Silence the TLS-verification warnings triggered by verify=False requests.
requests.packages.urllib3.disable_warnings()
# Console banner printed at startup (runtime data — keep verbatim).
scriptHeader = """
════════════════════════════════════════
║ ║
║ JD 关 注 有 礼 ║
║ ║
════════════════════════════════════════
@Version: {}""".format(version)
remarks = '\n\n\tTG交流 : https://t.me/topstyle996\n\n\tTG频道 : https://t.me/TopStyle2021\n\n\t公众号 : TopStyle\n\n\t\t\t--By Curtin\n'
###### JD cookies (multiple accounts separated by '&')
#######
# Push channels enabled at runtime; filled in below from the env config.
notify_mode = []
# Accumulated log text forwarded to the push channels.
message_info = ''''''
# Per-account tally of beans won, keyed by pt_pin.
usergetGiftinfo = {}
class getJDCookie(object):
    """Locates and validates JD cookies across the supported runtimes
    (plain file, Qinglong panel, V4 container, environment variable)."""

    # Pick the cookie file for the current platform/environment.
    def getckfile(self):
        if os.path.exists(pwd + 'JDCookies.txt'):
            return pwd + 'JDCookies.txt'
        elif os.path.exists('/ql/config/env.sh'):
            print("当前环境青龙面板新版")
            return '/ql/config/env.sh'
        elif os.path.exists('/ql/config/cookie.sh'):
            print("当前环境青龙面板旧版")
            # NOTE(review): returns env.sh even though cookie.sh was what
            # matched — looks like a copy-paste slip; confirm before fixing.
            return '/ql/config/env.sh'
        elif os.path.exists('/jd/config/config.sh'):
            print("当前环境V4")
            return '/jd/config/config.sh'
        elif os.path.exists(pwd + 'JDCookies.txt'):
            return pwd + 'JDCookies.txt'
        return pwd + 'JDCookies.txt'

    # Load cookies into the module-level `cookies` global.
    def getCookie(self):
        global cookies
        ckfile = self.getckfile()
        try:
            if os.path.exists(ckfile):
                with open(ckfile, "r", encoding="utf-8") as f:
                    cks = f.read()
                    f.close()
                if 'pt_key=' in cks and 'pt_pin=' in cks:
                    # Extract every "pt_key=...pt_pin=...;" pair.
                    r = re.compile(r"pt_key=.*?pt_pin=.*?;", re.M | re.S | re.I)
                    cks = r.findall(cks)
                    if len(cks) > 0:
                        if 'JDCookies.txt' in ckfile:
                            print("当前获取使用 JDCookies.txt 的cookie")
                            cookies = ''
                            for i in cks:
                                cookies += i
                            return
                else:
                    # No usable cookie found: write a template file the
                    # user can fill in.
                    with open(pwd + 'JDCookies.txt', "w", encoding="utf-8") as f:
                        cks = "#多账号换行,以下示例:(通过正则获取此文件的ck,理论上可以自定义名字标记ck,也可以随意摆放ck)\n账号1【Curtinlv】cookie1;\n账号2【TopStyle】cookie2;"
                        f.write(cks)
                        f.close()
            # Fall back to the JD_COOKIE environment variable.
            if "JD_COOKIE" in os.environ:
                if len(os.environ["JD_COOKIE"]) > 10:
                    cookies = os.environ["JD_COOKIE"]
                    print("已获取并使用Env环境 Cookie")
        except Exception as e:
            print(f"【getCookie Error】{e}")

    # Validate one cookie by fetching the account profile; returns
    # (ck, nickname) or (ck, False) when the cookie is expired.
    def getUserInfo(self, ck, pinName, userNum):
        url = 'https://me-api.jd.com/user_new/info/GetJDUserInfoUnion?orgFlag=JD_PinGou_New&callSource=mainorder&channel=4&isHomewhite=0&sceneval=2&sceneval=2&callback=GetJDUserInfoUnion'
        headers = {
            'Cookie': ck,
            'Accept': '*/*',
            'Connection': 'close',
            'Referer': 'https://home.m.jd.com/myJd/home.action',
            'Accept-Encoding': 'gzip, deflate, br',
            'Host': 'me-api.jd.com',
            'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.2 Mobile/15E148 Safari/604.1',
            'Accept-Language': 'zh-cn'
        }
        try:
            resp = requests.get(url=url, verify=False, headers=headers, timeout=60).text
            # The endpoint answers JSONP: unwrap GetJDUserInfoUnion(...).
            r = re.compile(r'GetJDUserInfoUnion.*?\((.*?)\)')
            result = r.findall(resp)
            userInfo = json.loads(result[0])
            nickname = userInfo['data']['userInfo']['baseInfo']['nickname']
            return ck, nickname
        except Exception:
            context = f"账号{userNum}【{pinName}】Cookie 已失效!请重新获取。"
            print(context)
            return ck, False

    def iscookie(self):
        """
        :return: cookiesList,userNameList,pinNameList
        """
        cookiesList = []
        userNameList = []
        pinNameList = []
        if 'pt_key=' in cookies and 'pt_pin=' in cookies:
            r = re.compile(r"pt_key=.*?pt_pin=.*?;", re.M | re.S | re.I)
            result = r.findall(cookies)
            if len(result) >= 1:
                print("您已配置{}个账号".format(len(result)))
                u = 1
                for i in result:
                    r = re.compile(r"pt_pin=(.*?);")
                    pinName = r.findall(i)
                    pinName = unquote(pinName[0])
                    # Resolve the account nickname (doubles as validation).
                    ck, nickname = self.getUserInfo(i, pinName, u)
                    if nickname != False:
                        cookiesList.append(ck)
                        userNameList.append(nickname)
                        pinNameList.append(pinName)
                    else:
                        u += 1
                        continue
                    u += 1
                if len(cookiesList) > 0 and len(userNameList) > 0:
                    return cookiesList, userNameList, pinNameList
                else:
                    print("没有可用Cookie,已退出")
                    exit(3)
            else:
                print("cookie 格式错误!...本次操作已退出")
                exit(4)
        else:
            print("cookie 格式错误!...本次操作已退出")
            exit(4)
getCk = getJDCookie()
getCk.getCookie()
# Environment variables override the in-script defaults for every push
# setting below; a value is only taken when it is non-trivially long.
# Telegram bot token
if "TG_BOT_TOKEN" in os.environ:
    if len(os.environ["TG_BOT_TOKEN"]) > 1:
        TG_BOT_TOKEN = os.environ["TG_BOT_TOKEN"]
        print("已获取并使用Env环境 TG_BOT_TOKEN")
# Telegram user id
if "TG_USER_ID" in os.environ:
    if len(os.environ["TG_USER_ID"]) > 1:
        TG_USER_ID = os.environ["TG_USER_ID"]
        print("已获取并使用Env环境 TG_USER_ID")
# Telegram proxy IP
if "TG_PROXY_IP" in os.environ:
    if len(os.environ["TG_PROXY_IP"]) > 1:
        TG_PROXY_IP = os.environ["TG_PROXY_IP"]
        print("已获取并使用Env环境 TG_PROXY_IP")
# Telegram proxy port
if "TG_PROXY_PORT" in os.environ:
    if len(os.environ["TG_PROXY_PORT"]) > 1:
        TG_PROXY_PORT = os.environ["TG_PROXY_PORT"]
        print("已获取并使用Env环境 TG_PROXY_PORT")
elif not TG_PROXY_PORT:
    TG_PROXY_PORT = ''
# Alternative Telegram API host
if "TG_API_HOST" in os.environ:
    if len(os.environ["TG_API_HOST"]) > 1:
        TG_API_HOST = os.environ["TG_API_HOST"]
        print("已获取并使用Env环境 TG_API_HOST")
# pushplus token
if "PUSH_PLUS_TOKEN" in os.environ:
    if len(os.environ["PUSH_PLUS_TOKEN"]) > 1:
        PUSH_PLUS_TOKEN = os.environ["PUSH_PLUS_TOKEN"]
        print("已获取并使用Env环境 PUSH_PLUS_TOKEN")
# WeCom app push config
if "QYWX_AM" in os.environ:
    if len(os.environ["QYWX_AM"]) > 1:
        QYWX_AM = os.environ["QYWX_AM"]
        print("已获取并使用Env环境 QYWX_AM")
# Bark token
if "BARK" in os.environ:
    if len(os.environ["BARK"]) > 1:
        BARK = os.environ["BARK"]
        print("已获取并使用Env环境 BARK")
def message(str_msg):
    """Print *str_msg* and append it to the global notification buffer."""
    global message_info
    print(str_msg)
    message_info = f"{message_info}\n{str_msg}"
    sys.stdout.flush()
def exitCodeFun(code):
    """Terminate the process with *code*; on Windows/Cygwin keep the console
    window open for ten minutes first so the user can read the output."""
    try:
        if sys.platform in ('win32', 'cygwin'):
            print("进程睡眠10分钟后自动退出。")
            time.sleep(600)
        exit(code)
    except:
        # exit() raises SystemExit, which the bare except also catches:
        # pause briefly, then exit for real.
        time.sleep(3)
        exit(code)
def nowtime():
    """Current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return f"{datetime.datetime.now():%Y-%m-%d %H:%M:%S}"
# Enable each push channel whose credentials are configured.
if PUSH_PLUS_TOKEN:
    notify_mode.append('pushplus')
if TG_BOT_TOKEN and TG_USER_ID:
    notify_mode.append('telegram_bot')
if BARK:
    notify_mode.append('bark')
if QYWX_AM:
    notify_mode.append('wecom_app')
#tg通知
def telegram_bot(title, content):
    """Send *title*/*content* through the Telegram bot API.

    Honours TG_API_HOST as an alternative endpoint and the optional HTTP
    proxy when TG_PROXY_IP/TG_PROXY_PORT are configured. Errors are printed,
    never raised.
    """
    try:
        print("\n")
        bot_token = TG_BOT_TOKEN
        user_id = TG_USER_ID
        if not bot_token or not user_id:
            print("tg服务的bot_token或者user_id未设置!!\n取消推送")
            return
        print("tg服务启动")
        if TG_API_HOST:
            url = f"{TG_API_HOST}/bot{TG_BOT_TOKEN}/sendMessage"
        else:
            url = f"https://api.telegram.org/bot{TG_BOT_TOKEN}/sendMessage"
        headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        payload = {'chat_id': str(TG_USER_ID), 'text': f'{title}\n\n{content}', 'disable_web_page_preview': 'true'}
        proxies = None
        if TG_PROXY_IP and TG_PROXY_PORT:
            proxyStr = "http://{}:{}".format(TG_PROXY_IP, TG_PROXY_PORT)
            proxies = {"http": proxyStr, "https": proxyStr}
        try:
            response = requests.post(url=url, headers=headers, params=payload, proxies=proxies).json()
        except:
            # Bug fix: previously fell through to `response['ok']` with
            # `response` unbound, raising NameError after a failed request.
            print('推送失败!')
            return
        if response['ok']:
            print('推送成功!')
        else:
            print('推送失败!')
    except Exception as e:
        print(e)
# 企业微信 APP 推送
def wecom_app(title, content):
    """Push a message through a WeCom (enterprise WeChat) self-built app.

    QYWX_AM format: ``corpid,corpsecret,touser,agentid[,media_id]``. With a
    media_id the message goes out as mpnews, otherwise as plain text.
    """
    try:
        if not QYWX_AM:
            print("QYWX_AM 并未设置!!\n取消推送")
            return
        QYWX_AM_AY = re.split(',', QYWX_AM)
        # Bug fix: the old check `4 < len(x) > 5` only rejected len > 5,
        # so configs with fewer than 4 fields crashed below with IndexError.
        if not 4 <= len(QYWX_AM_AY) <= 5:
            print("QYWX_AM 设置错误!!\n取消推送")
            return
        corpid = QYWX_AM_AY[0]
        corpsecret = QYWX_AM_AY[1]
        touser = QYWX_AM_AY[2]
        agentid = QYWX_AM_AY[3]
        try:
            media_id = QYWX_AM_AY[4]
        except:
            media_id = ''
        wx = WeCom(corpid, corpsecret, agentid)
        # Without a media_id, fall back to a plain text message.
        if not media_id:
            message = title + '\n\n' + content
            response = wx.send_text(message, touser)
        else:
            response = wx.send_mpnews(title, content, media_id, touser)
        if response == 'ok':
            print('推送成功!')
        else:
            print('推送失败!错误信息如下:\n', response)
    except Exception as e:
        print(e)
class WeCom:
    """Thin client for the WeCom (enterprise WeChat) message-send API."""

    def __init__(self, corpid, corpsecret, agentid):
        # Credentials of the self-built app used for pushing.
        self.CORPID = corpid
        self.CORPSECRET = corpsecret
        self.AGENTID = agentid

    def get_access_token(self):
        """Fetch a fresh API access token (no caching)."""
        url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken'
        values = {'corpid': self.CORPID,
                  'corpsecret': self.CORPSECRET,
                  }
        req = requests.post(url, params=values)
        data = json.loads(req.text)
        return data["access_token"]

    def send_text(self, message, touser="@all"):
        """Send a plain-text message; returns the API errmsg string."""
        send_url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=' + self.get_access_token()
        send_values = {
            "touser": touser,
            "msgtype": "text",
            "agentid": self.AGENTID,
            "text": {
                "content": message
            },
            "safe": "0"
        }
        send_msges = (bytes(json.dumps(send_values), 'utf-8'))
        respone = requests.post(send_url, send_msges)
        respone = respone.json()
        return respone["errmsg"]

    def send_mpnews(self, title, message, media_id, touser="@all"):
        """Send a rich mpnews article (thumbnail via *media_id*); returns
        the API errmsg string."""
        send_url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=' + self.get_access_token()
        send_values = {
            "touser": touser,
            "msgtype": "mpnews",
            "agentid": self.AGENTID,
            "mpnews": {
                "articles": [
                    {
                        "title": title,
                        "thumb_media_id": media_id,
                        "author": "Author",
                        "content_source_url": "",
                        # The article body is HTML: convert newlines.
                        "content": message.replace('\n', '<br/>'),
                        "digest": message
                    }
                ]
            }
        }
        send_msges = (bytes(json.dumps(send_values), 'utf-8'))
        respone = requests.post(send_url, send_msges)
        respone = respone.json()
        return respone["errmsg"]
#push推送
def pushplus_bot(title, content):
    """Push a notification via pushplus (www.pushplus.plus)."""
    try:
        print("\n")
        if not PUSH_PLUS_TOKEN:
            print("PUSHPLUS服务的token未设置!!\n取消推送")
            return
        print("PUSHPLUS服务启动")
        body = json.dumps({
            "token": PUSH_PLUS_TOKEN,
            "title": title,
            "content": content,
        }).encode(encoding='utf-8')
        response = requests.post(
            url='http://www.pushplus.plus/send',
            data=body,
            headers={'Content-Type': 'application/json'},
        ).json()
        print('推送成功!' if response['code'] == 200 else '推送失败!')
    except Exception as e:
        print(e)
# BARK
def bark_push(title, content):
    """Push a notification through the Bark iOS app."""
    print("\n")
    if not BARK:
        print("bark服务的bark_token未设置!!\n取消推送")
        return
    print("bark服务启动")
    try:
        endpoint = 'https://api.day.app/{0}/{1}/{2}'.format(BARK, title, quote_plus(content))
        response = requests.get(endpoint).json()
        if response['code'] == 200:
            print('推送成功!')
        else:
            print('推送失败!')
    except Exception as e:
        print(e)
        print('Bark推送失败!')
def send(title, content):
    """Fan *title*/*content* out to every channel listed in notify_mode
    (bark, telegram bot, pushplus, WeCom app). The global footer is
    appended to the content first."""
    content = content + "\n\n" + footer
    # channel -> (enabled-check, sender, "disabled" message)
    channels = {
        'telegram_bot': (lambda: TG_BOT_TOKEN and TG_USER_ID, telegram_bot, '未启用 telegram机器人'),
        'pushplus': (lambda: PUSH_PLUS_TOKEN, pushplus_bot, '未启用 PUSHPLUS机器人'),
        'bark': (lambda: BARK, bark_push, '未启用Bark APP应用消息推送'),
        'wecom_app': (lambda: QYWX_AM, wecom_app, '未启用企业微信应用消息推送'),
    }
    for name in notify_mode:
        entry = channels.get(name)
        if entry is None:
            print('此类推送方式不存在')
            continue
        enabled, push, disabled_msg = entry
        if enabled():
            push(title=title, content=content)
        else:
            print(disabled_msg)
# 检查是否有更新版本
def gettext(url, _retries=5):
    """GET *url* and return the response body text.

    When the host serves a censorship placeholder instead of the real
    content, retries up to *_retries* times. Returns the last body (or
    None on request errors) — previously this recursed without bound and
    could hit RecursionError on persistently censored content.
    """
    try:
        resp = requests.get(url, timeout=60).text
        if ('该内容无法显示' in resp or '违规' in resp) and _retries > 0:
            return gettext(url, _retries - 1)
        return resp
    except Exception as e:
        print(e)
def isUpdate():
    """Fetch the remote update manifest and honour its kill switch.

    Populates the globals footer/readme/uPversion/scriptName from the
    manifest JSON. Exits the process (code 888) when the manifest disables
    the script, when a newer version exists, or on any fetch/parse error.
    """
    global footer,readme,uPversion,scriptName
    # base64-encoded manifest URL (gitee), decoded at call time.
    url = base64.decodebytes(
        b"aHR0cHM6Ly9naXRlZS5jb20vY3VydGlubHYvUHVibGljL3Jhdy9tYXN0ZXIvRm9sbG93R2lmdHMvdXBkYXRlLmpzb24=")
    try:
        result = gettext(url)
        result = json.loads(result)
        scriptName = result['name']
        isEnable = result['isEnable']
        uPversion = result['version']
        info = result['info']
        readme = result['readme']
        pError = result['m']
        footer = result['footer']
        getWait = result['s']
        # Remote kill switch: only values in (50, 150) keep the script alive.
        if isEnable > 50 and isEnable < 150:
            if version != uPversion:
                # A newer version exists: show it and stop.
                print(f"\n当前最新版本:【{uPversion}】\n\n{info}\n")
                message(f"{readme}")
                exitCodeFun(888)
            else:
                message(f"{readme}")
                # Manifest-controlled startup delay.
                time.sleep(getWait)
        else:
            print(pError)
            exitCodeFun(888)
    except:
        message("请检查您的环境/版本是否正常!")
        exitCodeFun(888)
def outfile(filename, context):
    """Write *context* to *filename* as UTF-8, truncating any existing file."""
    with open(filename, "w+", encoding="utf-8") as handle:
        handle.write(context)
def getRemoteShopid():
    """Fetch the remote shop-id feed and return it split into lines;
    exits the process (code 999) when the feed cannot be retrieved."""
    url = base64.decodebytes(
        b"aHR0cHM6Ly9naXRlZS5jb20vY3VydGlubHYvUHVibGljL3Jhdy9tYXN0ZXIvRm9sbG93R2lmdHMvc2hvcGlkLnR4dA==")
    try:
        return gettext(url).split("\n")
    except:
        print("无法从远程获取shopid")
        exitCodeFun(999)
def createShopidList():
    """Populate the global shopidList/shopidNum from the remote feed.

    The last line of the feed is skipped (assumed to be a trailing blank).
    """
    global shopidNum ,shopidList
    shopidList = []
    shopids = getRemoteShopid()
    shopidNum = len(shopids) - 1
    for i in range(shopidNum):
        shopid = shopids[i]
        # SECURITY: eval() on remotely fetched text executes arbitrary code
        # supplied by the feed owner. Prefer json.loads or
        # ast.literal_eval if the feed format allows it.
        shopid = eval(shopid)
        shopidList.append(shopid)
def memoryFun(pinName, bean):
    """Accumulate *bean* into the per-account gift tally keyed by *pinName*."""
    global usergetGiftinfo
    try:
        key = '{}'.format(pinName)
        if key in usergetGiftinfo:
            usergetGiftinfo[key] += bean
        else:
            usergetGiftinfo[key] = bean
    except Exception as e:
        print(e)
def buildBody(data):
    """Assemble the form body for the drawShopGift client.action request.

    *data* must provide 'shopid', 'venderId', 'activityId' and 'signbody'.
    """
    # Static device fingerprint expected by the endpoint (do not edit).
    ep = ('{"ciphertype":5,"cipher":{"screen":"DzUmAtOzCzG=","area":"CJvpCJYmCV8zDtCzXzYzCtGz",'
          '"wifiBssid":"","osVersion":"CJCkDm==","uuid":"aQf1ZRdxb2r4ovZ1EJZhcxYlVNZSZz09","adid":"",'
          '"openudid":"Y2O2ZWS5CWO4ENrsZJG4EQYnEJHsEWG5CtO2Y2Y3CJPuZNPsCtSnYG=="},"ts":1636156765,'
          '"hdid":"","version":"","appname":"","ridx":-1}')
    payload = ('{"follow":0,"shopId":"%s","activityId":"%s","sourceRpc":"shop_app_home_window","venderId":"%s"}'
               % (data['shopid'], data['activityId'], data['venderId']))
    return ('body=' + payload
            + '&build=167863&client=apple&clientVersion=10.2.2&d_brand=apple&d_model=iPhone8,1&ef=1&eid=&ep='
            + ep + '&' + data['signbody'])
def drawShopGift(cookie, data):
    """POST the prepared body to JD's drawShopGift endpoint for one account.

    Returns the parsed JSON response when it contains 'isSuccess', otherwise
    the sentinel value 9 (also returned on any request error).
    """
    try:
        url = 'https://api.m.jd.com/client.action?functionId=drawShopGift'
        body = data
        # Hard-coded signed header blobs captured from the mobile client.
        headers = {
            'J-E-H' : '%7B%22ciphertype%22:5,%22cipher%22:%7B%22User-Agent%22:%22IuG0aVLeb25vBzO2Dzq2CyUyCMrfUQrlbwU7TJSmaU9JTJSmCJCkDzivCtLJY2PiZI8yBtKmAG==%22%7D,%22ts%22:1636156765,%22hdid%22:%22JM9F1ywUPwflvMIpYPok0tt5k9kW4ArJEU3lfLhxBqw=%22,%22version%22:%221.0.3%22,%22appname%22:%22com.360buy.jdmobile%22,%22ridx%22:-1%7D',
            'Accept-Encoding': 'gzip, deflate, br',
            'Cookie': cookie,
            'Connection': 'close',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Accept': '*/*',
            'Host': 'api.m.jd.com',
            'User-Agent': 'JD4iPhone/167685 (iPhone; iOS 14.3; Scale/3.00)',
            'Referer': '',
            'J-E-C' : '%7B%22ciphertype%22:5,%22cipher%22:%7B%22pin%22:%22TUU5TJuyTJvQTUU3TUOnTJu1TUU1TUSmTUSnTUU2TJu4TUPQTUU0TUS4TJrOTUU1TUSmTJq2TUU1TUSmTUSn%22%7D,%22ts%22:1636157606,%22hdid%22:%22JM9F1ywUPwflvMIpYPok0tt5k9kW4ArJEU3lfLhxBqw=%22,%22version%22:%221.0.3%22,%22appname%22:%22com.360buy.jdmobile%22,%22ridx%22:-1%7D',
            'Accept-Language': 'zh-Hans-CN;q=1'
        }
        response = requests.post(url, headers=headers, verify=False, data=body, timeout=60)
        if 'isSuccess' in response.text:
            return response.json()
        else:
            # Unexpected payload (e.g. risk-control page): signal failure.
            return 9
    except Exception as e:
        print(e)
        return 9
def getGiftresult(result, nickname, pinName, uNum):
    """Print the follow/gift outcome for one account and tally bean rewards.

    result: parsed JSON from drawShopGift. nickname/pinName identify the
    account; uNum is its 1-based position (display only).
    """
    try:
        if result['isSuccess']:
            if result['result']:
                followDesc = result['result']['followDesc']
                giftDesc = result['result']['giftDesc']
                print(f"\t└账号{uNum}【{nickname}】{followDesc}>{giftDesc}")
                if result['result']['giftCode'] == '200':
                    try:
                        alreadyReceivedGifts = result['result']['alreadyReceivedGifts']
                        for g in alreadyReceivedGifts:
                            # prizeType 4 appears to be the bean reward — TODO confirm
                            if g['prizeType'] == 4:
                                bean = g['redWord']
                                memoryFun(pinName, int(bean))
                                print(f"\t\t└获得{g['rearWord']}:{g['redWord']}")
                    except Exception:
                        # Best-effort: missing reward fields are ignored, but
                        # no longer swallow SystemExit/KeyboardInterrupt
                        # (narrowed from a bare except).
                        pass
    except Exception as e:
        print(f"getGiftresult Error {e}")
def start():
    """Entry point: check for updates, load account cookies, follow every
    remote-listed shop with every account, then report the beans gained."""
    print(scriptHeader)
    isUpdate()
    outfile("Readme.md", readmes)
    cookiesList, userNameList, pinNameList = getCk.iscookie()
    userNum = len(cookiesList)
    message(f"有效账号{userNum}个")
    message(f"开始:{scriptName}")
    createShopidList()
    message(f"获取到店铺:{shopidNum}")
    # print(shopidList)
    starttime = time.perf_counter()  # 记录时间开始
    for i in shopidList:
        body = buildBody(i)
        print(f"关注店铺【{i['shopid']}】")
        uNum = 1
        for ck, nickname, pinName in zip(cookiesList, userNameList, pinNameList):
            result = drawShopGift(ck, body)
            if result != 9:
                getGiftresult(result, nickname, pinName, uNum)
            else:
                # NOTE(review): one failed request (sentinel 9) skips the
                # remaining accounts for this shop — confirm this is intended.
                uNum += 1
                break
            uNum += 1
    endtime = time.perf_counter()  # 记录时间结束
    message("\n###【本次统计 {}】###\n".format(nowtime()))
    all_get_bean = 0
    n = 1
    for name, pinname in zip(userNameList, pinNameList):
        try:
            userCountBean = usergetGiftinfo['{}'.format(pinname)]
            message(f"账号{n}:【{name}】\n\t└收获 {userCountBean} 京豆")
            all_get_bean += userCountBean
        except Exception as e:
            # Account has no tally entry: it never received a bean reward.
            message(f"账号{n}:【{name}】\n\t└收获 0 京豆")
        n += 1
    message(f"\n本次总累计获得:{all_get_bean} 京豆")
    message("\n------- 总耗时 : %.03f 秒 seconds -------" % (endtime - starttime))
    print("{0}\n{1}\n{2}".format("*" * 30, scriptHeader, remarks))
    send(f"【{scriptName}】", message_info)
    exitCodeFun(0)
if __name__ == '__main__':
    # Script entry point.
    start()
|
# coding: utf-8
import datetime
import numpy as np
from ..nonlinear import ConstantTurn
from ....types.state import State
def test_ctmodel(CT_model):
    """ ConstantTurn Transition Model test """
    # 5-element state vector; last entry looks like the turn rate — TODO confirm layout
    state = State(np.array([[3.0], [1.0], [2.0], [1.0], [-0.05]]))
    linear_noise_coeffs = np.array([0.1, 0.1])
    turn_noise_coeff = 0.01
    base(CT_model, ConstantTurn, state, linear_noise_coeffs, turn_noise_coeff)
def base(CT_model, model, state, linear_noise_coeffs, turn_noise_coeff):
    """Base test for ConstantTurn transition models.

    CT_model is the fixture-provided model instance used to compute the
    expected values; *model* is the class instantiated locally with the same
    noise parameters. (Docstring corrected: the original said
    ConstantAcceleration.)
    """
    # Create a ConstantTurn model object (removed the original no-op
    # assignment `model = model`).
    model_obj = model(linear_noise_coeffs=linear_noise_coeffs,
                      turn_noise_coeff=turn_noise_coeff)
    # State related variables
    old_timestamp = datetime.datetime.now()
    timediff = 1  # 1sec
    new_timestamp = old_timestamp + datetime.timedelta(seconds=timediff)
    time_interval = datetime.timedelta(seconds=timediff)
    # Expected outputs computed from the fixture model.
    F = CT_model.function(state, time_interval=time_interval)
    Q = CT_model.covar(linear_noise_coeffs, turn_noise_coeff, time_interval)
    # Ensure model_obj.covar(time_interval) returns Q
    assert np.array_equal(Q, model_obj.covar(
        timestamp=new_timestamp, time_interval=time_interval))
    # Propagate a state vector through the model (without noise)
    new_state_vec_wo_noise = model_obj.function(
        state,
        timestamp=new_timestamp,
        time_interval=time_interval)
    assert np.array_equal(new_state_vec_wo_noise, F)
    # Eliminated the pdf based tests since for nonlinear models these will no
    # longer be Gaussian
    # Propagate with internally generated noise: must differ from noiseless F.
    new_state_vec_w_inoise = model_obj.function(
        state,
        noise=True,
        timestamp=new_timestamp,
        time_interval=time_interval)
    assert not np.array_equal(new_state_vec_w_inoise, F)
    # Propagate with externally supplied noise: must equal F + noise exactly.
    noise = model_obj.rvs(timestamp=new_timestamp, time_interval=time_interval)
    new_state_vec_w_enoise = model_obj.function(
        state,
        timestamp=new_timestamp,
        time_interval=time_interval,
        noise=noise)
    assert np.array_equal(new_state_vec_w_enoise, F + noise)
|
import pint
# Shared unit registry for the package (see pint docs for auto_reduce_dimensions).
ureg = pint.UnitRegistry(auto_reduce_dimensions=True)
# Custom base dimension for tagging frame-dependent quantities.
ureg.define("reference_frame = [_reference_frame]")
ureg.define("@alias grade = gradian")
ureg.define("@alias astronomical_unit = ua")
# A "line" is defined here as 1/12 inch.
ureg.define("line = inch / 12")
ureg.define("millitorr = torr / 1000 = mTorr")
ureg.define("@alias torr = Torr")
|
import math
import numpy as np
from copy import deepcopy
from charcad.draw.coordinates import Coordinates
from charcad.draw.utils import DrawingChars
_EMPTY_GRID = np.array([[' ']])
class GraphicObject:
    """Base drawable element: coordinates, an attached graph, transparency."""

    def __init__(self, x=0, y=0, transparent=True):
        self.set_coordinates(x, y)
        self.graph = None
        self.set_transparency(transparent)

    @property
    def x(self):
        """Horizontal coordinate."""
        return self.coord.x

    @property
    def y(self):
        """Vertical coordinate."""
        return self.coord.y

    def set_coordinates(self, x, y=None):
        """Accept a Coordinates instance, an (x, y) pair, or two numbers."""
        if isinstance(x, Coordinates):
            self.coord = x
        elif isinstance(x, (list, tuple)):
            self.coord = Coordinates(*x)
        else:
            self.coord = Coordinates(x, y)

    def set_transparency(self, transparent):
        """Choose whether blank cells let the background show through."""
        self.transparent = transparent

    def set_x(self, x):
        self.coord.x = x

    def set_y(self, y):
        self.coord.y = y

    def show(self, axes=False, frame=False, frame_formatter=None):
        """Print this object's graph, optionally with axes and/or a frame."""
        self.graph.print(
            axes=axes, frame=frame, frame_formatter=frame_formatter)
class GraphicObjectArray:
    """Ordered collection of drawable objects keyed 'object_NNNN'."""

    def __init__(self):
        self._objects = dict()
        self._counter = 0  # suffix for the next auto-generated key

    def __getitem__(self, key):
        """Look up by key name (str), sorted-key position (int), or slice."""
        if isinstance(key, str):
            # BUG FIX: the original performed this lookup but never returned
            # it, so indexing with a string key always yielded None.
            return self._objects[key]
        key_aux = self.keys
        key_aux.sort()
        if isinstance(key, slice):
            return [self._objects[k] for k in key_aux[key]]
        return self._objects[key_aux[key]]

    def __len__(self):
        return len(self._objects)

    def __repr__(self):
        # NOTE(review): math.log10(len(self)) raises ValueError for an empty
        # array — confirm repr is never needed before the first add().
        z = zip(self.keys, self.values)
        p = int(math.log10(len(self)) + 1)
        return 'Graphic Object Array[\n %s\n]' % ',\n '\
            .join([(str(i).zfill(p) + '\t' + k + ': %s') % v
                   for i, (k, v) in enumerate(z)])

    @property
    def keys(self):
        """Stored keys, in insertion order."""
        return list(self._objects.keys())

    @property
    def values(self):
        """Stored objects, in insertion order."""
        return list(self._objects.values())

    def add(self, obj):
        """Store *obj* under a fresh generated key."""
        name = 'object_{:04d}'.format(self._counter)
        self._check_duplicates(obj)
        self._objects.update({name: obj})
        self._counter += 1

    def _check_duplicates(self, obj):
        # NOTE(review): delete_object only unbinds its local parameter, so
        # equal duplicates are not actually removed from self._objects.
        [delete_object(o) for o in self if obj == o]

    def remove(self, key):
        """Remove by key name, position, or an iterable of either."""
        if isinstance(key, (list, tuple)):
            # Delete from the end so earlier positions stay valid.
            for k in sorted(key, reverse=True):
                self.remove(k)
        else:
            if isinstance(key, int):
                key = self.keys[key]
            elif not isinstance(key, str):
                raise TypeError("key must be integer, string or iterable.")
            del self._objects[key]

    def update(self, item):
        """Merge a {key: object} mapping into the collection."""
        self._objects.update(item)
class Graph:
    """A 2-D character canvas backed by a numpy object array.

    Row 0 is the top of the drawing; user-facing y coordinates count from
    the bottom and are converted through flipud().
    """
    def __init__(self, grid=None, w=0, h=0):
        # Accept: nothing (blank h x w canvas), a one-character string
        # (1x1 grid), or a ready-made ndarray.
        if grid is None:
            grid = np.full((h, w), ' ', dtype=object)
        elif isinstance(grid, str):
            grid = np.array([[grid]])
        elif isinstance(grid, np.ndarray):
            pass
        else:
            raise TypeError('grid cant be numeric.')
        self.grid = grid
    def __repr__(self):
        return self.get_str()
    def __len__(self):
        return len(self.grid)
    def __getitem__(self, key):
        # Indexing/slicing returns a Graph wrapping the sub-grid.
        return Graph(self.grid[key])
    def __setitem__(self, key, value):
        # Promote scalar indices to 1-wide slices so that `value` (always a
        # Graph) is assigned onto a 2-D region of the grid.
        key = list(key)
        for i, k in enumerate(key):
            if not isinstance(k, slice):
                key[i] = slice(k, k+1)
        key = tuple(key)
        self.grid[key] = value.grid
    @property
    def h(self):
        # Grid height (number of rows).
        return self.grid.shape[0]
    @property
    def w(self):
        # Grid width (number of columns).
        return self.grid.shape[1]
    @property
    def shape(self):
        # (height, width), numpy-style.
        return (self.h, self.w)
    def add_objects(self, graphicArray):
        """Draw every object in the array, recursing into nested arrays."""
        for obj in graphicArray:
            if isinstance(obj, GraphicObjectArray):
                self.add_objects(obj)
            elif isinstance(obj, GraphicObject):
                self.add_graph(obj)
            else:
                raise TypeError(
                    "obj must be GraphicObject or GraphicObjectArray.")
    def add_graph(self, obj, x=0, y=0):
        """Blit obj.graph onto this canvas at obj.coord plus an offset."""
        if isinstance(x, Coordinates):
            coord_other = x
        else:
            coord_other = Coordinates(x, y)
        coord = obj.coord + coord_other
        xi = coord.x
        xf = coord.x + obj.graph.w
        # y counts from the bottom, so both edges are flipped; the larger
        # user-space y maps to the smaller array row (hence the yi/yf swap).
        yf = self.flipud(coord.y - 1)
        yi = self.flipud(coord.y - 1 + obj.graph.h)
        x_slice = slice(xi, xf)
        y_slice = slice(yi, yf)
        transparent = obj.transparent
        if transparent:
            # Keep the existing background wherever the object is blank.
            new_subgraph = self[y_slice, x_slice].no_background_draw(obj.graph)
        else:
            new_subgraph = obj.graph
        self[y_slice, x_slice] = new_subgraph
    def copy(self):
        """Return a Graph with an independent copy of the grid."""
        grid = self.grid.copy()
        return Graph(grid=grid)
    def flipud(self, yi):
        # Convert a bottom-origin row index to the top-origin array index.
        return self.h - yi - 1
    def get_str(self):
        """Render the grid as a newline-joined string."""
        return '\n'.join([''.join(row) for row in self.grid])
    def inspect(self):
        # Debug helper: print the raw per-row character lists.
        [print(item) for item in [row for row in self.grid]]
    def isempty(self):
        # Elementwise comparison against the 1x1 blank sentinel.
        return self.grid == _EMPTY_GRID
    def no_background_draw(self, other, rng=None):
        """Overlay *other* cell by cell, skipping its blank cells.

        Assumes *other* covers at least this graph's shape — TODO confirm.
        """
        gph_out = self.copy()
        for i in range(self.h):
            for j in range(self.w):
                iother = other[i, j]
                if not iother.isempty():
                    gph_out[i, j] = iother[0, 0]
        return gph_out
    def print(self, frame=False, axes=False, frame_formatter=None):
        """Print the graph, optionally framed and/or with coordinate axes."""
        grph = deepcopy(self)
        if frame:
            grph = add_frame(grph, frame_formatter)
        if axes:
            grph = add_axes(grph, self.w, self.h)
        print(grph)
def add_axes(grph, w, h):
    """Prepend y-axis labels and append x-axis digit rows to *grph*.

    w and h are the dimensions of the original (unframed) drawing; grph may
    already carry a frame, which the offsets below compensate for.
    """
    # Number of digits needed for the largest y / x label.
    yg = int(math.log10(grph.h) + 1)
    xg = int(math.log10(grph.w) + 1)
    # check offset (caused by frame)
    offset_h = int((grph.h - h) / 2)
    offset_w = int((grph.w - w) / 2)
    # y axis (with frame and axis compensation)
    yaxis = (
        [' ' * yg] * offset_h
        + [str(h - y - 1).zfill(yg) for y in range(h)]
        + [' ' * yg] * offset_h
    )
    # x axis: one row per digit; a digit is printed only every 10**i columns
    xaxis = list()
    for i in range(xg):
        n = 10**i
        xaxis.append(
            [' ' * yg]
            + [' '] * offset_w
            + [str(int(x/n) % 10) if (x % n) == 0
               else ' ' for x in range(w)]
            + [' '] * offset_w
        )
    # add to original grid
    grph.grid = np.c_[yaxis, grph.grid]
    grph.grid = np.r_[grph.grid, xaxis]
    return grph
def add_frame(grph, formatter=None):
    """Surround *grph* with box-drawing characters.

    *formatter* is an optional format string applied to every frame
    character (e.g. for ANSI styling).
    """
    chrs = DrawingChars()
    if formatter is not None:
        # Re-style every drawing character in place.
        [chrs.__dict__.update({k: formatter.format(ch)})
         for k, ch in chrs.__dict__.items()]
    # Top and bottom borders, then the two side columns.
    li = [[chrs.ur] + grph.w * [chrs.lr] + [chrs.rd]]
    lf = [[chrs.dr] + grph.w * [chrs.lr] + [chrs.ru]]
    grph.grid = np.c_[[chrs.ud] * grph.h, grph.grid, [chrs.ud] * grph.h]
    grph.grid = np.r_[li, grph.grid, lf]
    return grph
def delete_object(obj):
    # NOTE(review): `del obj` only unbinds the local parameter name; it does
    # not destroy the object or remove it from any container. The caller
    # (GraphicObjectArray._check_duplicates) therefore gets a no-op.
    del obj
|
#!/usr/bin/python3.5
# -*-coding: utf-8 -*
from collections import defaultdict
import concurrent.futures
from queue import Queue
import threading
import uuid
from LspAlgorithms.GeneticAlgorithms.LocalSearch.LocalSearchEngine import LocalSearchEngine
from LspAlgorithms.GeneticAlgorithms.PopulationEvaluator import PopulationEvaluator
from LspRuntimeMonitor import LspRuntimeMonitor
from ..PopInitialization.PopInitializer import PopInitializer
class GeneticAlgorithm:
    """Evolves each initial population to termination, one thread per lineage."""
    def __init__(self):
        """Set up the population initializer/evaluator and, per lineage, a
        (lazily created) local-search daemon thread with input/output queues."""
        self.popInitializer = PopInitializer()
        # Creating a deamon thread to perform local search
        self.daemonThreads = defaultdict(lambda: None)
        self.dThreadPipelines = defaultdict(lambda: {"input": Queue(), "output": Queue()})
        self.popEvaluator = PopulationEvaluator()
    def daemonTask(self, mainThreadUUID):
        """Poll the lineage's input queue forever, running a positive-mutation
        local search on each chromosome and pushing results to the output queue.

        NOTE(review): this loop busy-spins when the queue is empty; a blocking
        Queue.get() would avoid the spin.
        """
        dThreadPipelines = self.dThreadPipelines[mainThreadUUID]
        while True:
            if not dThreadPipelines["input"].empty():
                chromosome = dThreadPipelines["input"].get()
                result = (LocalSearchEngine().process(chromosome, "positive_mutation"))
                dThreadPipelines["output"].put(result)
    def process(self, population):
        """Evolve *population* generation by generation until the evaluator
        reports TERMINATE."""
        generationIndex = 0
        #
        dThreadPipelines = self.dThreadPipelines[population.lineageIdentifier]
        # The daemon thread is created but never started (start() is commented
        # out below), so the local-search pipeline is currently inactive.
        self.daemonThreads[population.lineageIdentifier] = threading.Thread(target=self.daemonTask, args=(population.lineageIdentifier,), daemon=True)
        # (self.daemonThreads[threadUUID]).start()
        population.dThreadOutputPipeline = dThreadPipelines["output"]
        while self.popEvaluator.evaluate(population, dThreadPipelines["input"], generationIndex) != "TERMINATE":
            # if generationIndex == 1:
            #     break
            population = population.evolve()
            LspRuntimeMonitor.output("Population --> " + str(population))
            generationIndex += 1
        # (self.daemonThreads[threadUUID]).
    def solve(self):
        """Initialize the populations and process each one in a thread pool."""
        populations = self.popInitializer.process()
        with concurrent.futures.ThreadPoolExecutor() as executor:
            print(list(executor.map(self.process, populations)))
|
from rest_framework import permissions
class IsAutoRepairShopOrStaff(permissions.BasePermission):
    """Grant access to auto-repair-shop users and to staff members."""

    message = "Auto Repair Shop restricted or Staff member"

    def has_permission(self, request, view):
        user = request.user
        return user and (user.is_from_auto_repair_shop or user.is_staff)
class IsInspectorOrStaff(permissions.BasePermission):
    """Grant access to inspectors and to staff members."""

    message = "Inspector restricted or Staff member"

    def has_permission(self, request, view):
        user = request.user
        return user and (user.is_inspector or user.is_staff)
class IsInsuranceOrStaff(permissions.BasePermission):
    """Grant access to insurance users and to staff members."""

    message = "Insurance restricted or Staff member"

    def has_permission(self, request, view):
        user = request.user
        return user and (user.is_from_insurance or user.is_staff)
class IsStaff(permissions.BasePermission):
    """Grant access to staff members only."""

    message = "Staff restricted"

    def has_permission(self, request, view):
        user = request.user
        return user and user.is_staff
|
from rx import Observable
def subscribe(observer):
    """Push the fixed demo sequence 1, 1, 2, 3, 2, then complete."""
    for value in (1, 1, 2, 3, 2):
        observer.on_next(value)
    observer.on_completed()
# distinct: drops any value already emitted before (1,1,2,3,2 -> 1,2,3).
print("\n== distinct ==")
values = Observable.create(subscribe)
subscription = values.distinct().subscribe(
    on_next=print,
    on_error=lambda e: print("Error: {}".format(e)),
    on_completed=lambda: print("Completed")
)
def subscribe2(observer):
    """Push the ordinal words First through Fifth, then complete."""
    for word in ("First", "Second", "Third", "Fourth", "Fifth"):
        observer.on_next(word)
    observer.on_completed()
print("")
values = Observable.create(subscribe2)
# distinct with a key selector: keeps only the first word per initial letter.
subscription = values.distinct(lambda v: v[0]).subscribe(
    on_next=print,
    on_error=lambda e: print("Error: {}".format(e)),
    on_completed=lambda: print("Completed")
)
# distinct_until_changed: suppresses only *consecutive* duplicates.
print("\n== distinctUntilChanged ==")
values = Observable.create(subscribe)
subscription = values.distinct_until_changed().subscribe(
    on_next=print,
    on_error=lambda e: print("Error: {}".format(e)),
    on_completed=lambda: print("Completed")
)
print("")
values = Observable.create(subscribe2)
# Same, keyed on the first letter of each word.
subscription = values.distinct_until_changed(lambda v: v[0]).subscribe(
    on_next=print,
    on_error=lambda e: print("Error: {}".format(e)),
    on_completed=lambda: print("Completed")
)
|
import json
import sys
def defineKey(key):
    """Map an endpoint type to its unique-identifier field name."""
    # Unknown types fall through to None, matching the original if-chain.
    return {
        "buildings": "buid",
        "floorplans": "fuid",
        "pois": "puid",
    }.get(key)
def compareObjs(couchObjects, mongoObjects, type):
    """Compare two lists of JSON objects field by field.

    Returns 1 when identical, 2 when the object counts differ, and 3 when at
    least one matched object has a differing value. Missing optional fields
    are tallied and printed.
    """
    keys = ["is_building_entrance","floor_number","pois_type","buid","image","coordinates_lon","coordinates_lat","floor_name","description","name","is_door","is_published","username_creator","puid","url"]
    possibleEmpty = ["url", "username_creator", "description", "name"]
    isSame = 1
    emptyFields = 0
    emptyUrl = 0
    emptyUsernameCreator = 0
    emptyDescription = 0
    emptyName = 0
    uniqueKey = defineKey(type)
    # BUG FIX: the original incremented mongoCount inside the couch loop,
    # producing len(couch) * len(mongo) and flagging equal-sized sets as
    # different. Plain len() gives the intended counts.
    couchCount = len(couchObjects)
    mongoCount = len(mongoObjects)
    if couchCount != mongoCount:
        isSame = 2
    print("CouchDB total json objects: ", couchCount)
    print("MongoDB total json objects: ", mongoCount)
    for jsCouch in couchObjects:
        for jsMongo in mongoObjects:
            if jsCouch[uniqueKey] != jsMongo[uniqueKey]:
                continue
            for key in keys:
                if key not in jsCouch:
                    continue
                if key in jsMongo:
                    if jsCouch[key] != jsMongo[key]:
                        print(jsCouch[uniqueKey], "differ at ", key)
                        print("CouchDB: ", jsCouch[key])
                        print("MongoDB: ", jsMongo[key])
                        isSame = 3
                else:
                    # Field present in CouchDB but absent in MongoDB.
                    if key not in possibleEmpty:
                        print(key, "cant be - or \"\" or null")
                    emptyFields += 1
                    if key == "name":
                        emptyName += 1
                    elif key == "description":
                        emptyDescription += 1
                    elif key == "username_creator":
                        emptyUsernameCreator += 1
                    elif key == "url":
                        emptyUrl += 1
    print("There are", emptyFields, "- or \"\" or null fields")
    print("description", emptyDescription)
    print("url", emptyUrl)
    print("name", emptyName)
    print("username_creator", emptyUsernameCreator)
    return isSame
def parseEndpoint(file, jsonKey="pois"):
    """Read one JSON line from *file* and return its *jsonKey* entry.

    jsonKey was hard-coded to "pois"; it is now a backward-compatible
    parameter. Exits the process when the file cannot be opened.
    """
    try:
        fh = open(file, encoding="utf8")
    except OSError:  # narrowed from a bare except
        print("Path was not correct.")
        exit()
    # BUG FIX: the original never closed the file handle.
    with fh:
        # The dump files hold one JSON object on the first line.
        return json.loads(fh.readline())[jsonKey]
# main: expects exactly one CLI argument naming the endpoint type.
if len(sys.argv) - 1 != 1:
    print("CompareJsons::Provide type of endpoint.")
    exit()
# NOTE(review): parseEndpoint always reads the "pois" key regardless of the
# endpoint type passed in argv — confirm for buildings/floorplans dumps.
couchObjects = parseEndpoint("couch.json")
mongoObjects = parseEndpoint("mongo.json")
# isSame: 1 = same, 2 = different # of objects, 3 = at least 1 object has different values
isSame = compareObjs(couchObjects, mongoObjects, sys.argv[1])
if isSame == 1:
    print("Files are same.")
elif isSame == 2:
    print("Different number of Jsons")
elif isSame == 3:
    print("At least one CouchDB json object has different key-value from MongoDB")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-10-12 00:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 1.11.1): re-point VidURLs.launch with CASCADE
    delete and a 'vid_urls' related_name, and make vid_url a plain URLField."""
    dependencies = [
        ('bot', '0007_auto_20171012_0030'),
    ]
    operations = [
        migrations.AlterField(
            model_name='vidurls',
            name='launch',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='vid_urls', to='bot.Launch'),
        ),
        migrations.AlterField(
            model_name='vidurls',
            name='vid_url',
            field=models.URLField(),
        ),
    ]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import time
from faker import Faker
from CustClass_full import Customer
#import transition_matrix
transition_matrix = pd.read_csv("data/transition_matrix.csv", index_col=0)
# In[54]:
# class Customer:
# ''' a single customer that moves through the supermarket in a MCMC simulation'''
# def __init__(self, name, state, c_id = 1, budget=np.random.randint(100,500, dtype=int)):
# self.c_id = c_id
# self.name = name
# self.state = state
# self.budget = budget
# self.transition_matrix = transition_matrix
# self.hist = [self.state]
# self.money = [self.budget]
# def __repr__(self):
# if self.hist[-1] == 'entrance' or self.hist[-1] == 'checkout':
# return f'Customer {self.name} in {self.state} with {self.budget} $'
# else:
# return f'Customer {self.name} in {self.state} with {self.budget} $ left, was in {self.hist[-2]}'
# def is_active(self):
# """
# Returns True if the customer has not reached the checkout
# for the second time yet, False otherwise.
# """
# if self.state == 'checkout':
# return f'Customer {self.name} has been churned and spent {cust1.money[0] - cust1.money[-1]}$!'
# else:
# return f'Customer {self.name} is still active.'
# def next_state(self):
# '''
# Propagates the customer to the next state.
# '''
# next_loc = np.random.choice(self.transition_matrix.columns.values, p=self.transition_matrix.loc[self.state])
# #p = Probs
# #hard coded the revenue
# if next_loc == 'fruit':
# self.budget -= 4
# elif next_loc == 'spices':
# self.budget -= 3
# elif next_loc == 'dairy':
# self.budget -= 5
# elif next_loc == 'drinks':
# self.budget -= 6
# #list for 'path-history'
# self.state = next_loc
# self.hist.append(self.state)
# #list for money spend
# self.budget = self.budget
# self.money.append(self.budget)
# In[76]:
class Supermarket:
    """Manages multiple Customer instances that are currently in the market."""

    def __init__(self, hour=7):
        self.customers = []          # active Customer objects
        self.minutes = 0             # minutes elapsed since opening
        self.current_time = 0
        self.data = pd.DataFrame()   # accumulated customer snapshots
        self.timestamp = []          # history of formatted clock readings
        self.hour = hour             # opening hour (defaults to 7)

    def __repr__(self):
        return f'Supermarket("{self.customers}", "{self.current_time}")'

    def get_time(self):
        """Return the current time as zero-padded HH:MM and record it.

        BUG FIX: the original returned None (so print_customers stored None
        in current_time) and produced unpadded strings such as '7:5'.
        """
        timestamp = f'{self.hour:02d}:{self.minutes:02d}'
        self.timestamp.append(timestamp)
        return timestamp

    def print_customers(self):
        """Return a snapshot of all customers stamped with the current time."""
        self.current_time = self.get_time()
        return f'Supermarket("{self.customers}", "{self.current_time}")'

    def next_minute(self):
        """Advance the simulation one minute: move, admit, and churn customers."""
        self.minutes += 1
        self.move_customers()
        self.add_new_customers()
        self.remove_existing_customers()
        return self.customers

    def move_customers(self):
        """Propagate every active customer to its next state."""
        for cust in self.customers:
            cust.next_state()

    def add_new_customers(self):
        """Randomly create one new customer at the entrance."""
        fake = Faker()
        self.customers.append(Customer(fake.name(), 'entrance', np.random.randint(1, 500)))

    def remove_existing_customers(self):
        """Remove every customer that reached the checkout.

        BUG FIX: the original removed items from the list while iterating it,
        which skips the element that follows each removal.
        """
        still_active = []
        for cust in self.customers:
            if cust.state == 'checkout':
                print(f'Customer {cust.name} has been churned!')
            else:
                still_active.append(cust)
        self.customers = still_active

    def write_all_cust(self):
        """Append one row per active customer to self.data.

        BUG FIX: the original called pd.DataFrame.append with four positional
        arguments, which raises TypeError; build the rows explicitly instead.
        """
        rows = [
            {'name': cust.name, 'state': cust.state,
             'budget': cust.budget, 'time': self.current_time}
            for cust in self.customers
        ]
        if rows:
            self.data = pd.concat([self.data, pd.DataFrame(rows)], ignore_index=True)
# In[77]:
# Supermarket1=Supermarket()
# Supermarket1.next_minute()
# In[75]:
# In[23]:
# In[12]:
# In[ ]:
# for i in customers: #move all customers
# i.next_state()
# if i.state == 'checkout':
# print(f'Customer {i.name} has been churned!') #adding time?
# customers.remove(i)
# else:
# print(i)
# if len(customers) < 20: #new_customers
# x = 20 - len(customers)
# #new_cust = []
# new_cust = ([Customer(fake.name(), 'entrance', np.random.randint(1,500)) for i in range(x)])
# for c in new_cust:
# customers.append(c)
# In[20]:
#print(Supermarket)
# In[24]:
#rint_customers()
# In[ ]:
# def add_new_customers(self, stop, id_suffix, terrain_map, image, x, y):
# """randomly creates new customers.
# """
# for i in range(stop):
# cust = Customer(str(i) + "_" + str(id_suffix), "entrance", transition_matrix,
# terrain_map=terrain_map, image=image, x=x, y=y)
# self.customers.append(cust)
# self.id_suffix += 1
# In[5]:
# In[ ]:
# In[29]:
# import datetime
# # In[33]:
# datetitime(7, 26)
# # In[16]:
# datetime.datetime.now().time()
# # In[35]:
# import time
# # In[37]:
# time.time(7, 26)
# # In[18]:
# time.strftime("%a, %d %b %Y %H:%M:%S")
# # In[19]:
# time.strftime("%H:%M:%S")
|
######## map
# Transform each element of an iterable,
# e.g. multiply each element of range(5) by 3.
x = range(5)
print(list(x))
# NOTE: map() is lazy; y is consumed by list() further below.
y = map(lambda x: x*3,x)
def multiply_5(num):
    """Return *num* scaled by five."""
    return 5 * num
# Consume the lazy map from above, then map a named function instead.
print(list(y))
y = map(multiply_5,x)
print(list(y))
######## filter
# Keep only items satisfying the predicate (here: even numbers).
y = filter(lambda i: i%2==0, x)
print(list(y))
######## reduce
# Fold the sequence left-to-right with the given binary function.
from functools import reduce
y = reduce(lambda a,b: a+b, x)
print(y)
####### Play around with the os module and more map/filter practice.
import os
import time
print(time.time())
print(os.getcwd())
print(os.listdir())
x = ["ABC","ABCD","PQR"]
# Built-in unbound method as the mapping function.
x_lower = list(map(str.lower, x))
print(x_lower)
print([w for w in x if w.startswith('A')])
x = [2,4,6,8,10]
x_2 = list(map(lambda i: i/2, x))
print(x_2)
# Which directory entries start with 'p'? (booleans, one per entry)
value = list(map(lambda x: str(x).startswith('p'), os.listdir()))
print(value)
# Two equivalent ways to list os attributes containing "cwd" (not at index 0).
print(list(filter(lambda x: str(x).find("cwd") > 0, dir(os))))
print([x for x in dir(os) if x.find("cwd") > 0])
######## del keyword
x= [1,2,3]
print(x)
del x[1]
print(x)
x = {"key1":"Value1","key2":"Value2"}
print(x)
del x['key1']
print(x)
######## in keyword
print("a" in ["a","b"])
print("a" in "abc")
x = {"a":1}
# Membership on a dict checks keys by default; use .values() for values.
print("a" in x)
x = {"key":"a"}
print("a" in x.values())
|
import os
from collections import Counter
def main():
    """Read input.txt, normalize 'x1,y1 -> x2,y2' lines to 'x1,y1,x2,y2',
    and run both puzzle parts."""
    # NOTE(review): the file handle is never closed; a with-block would fix it.
    file = open('input.txt', 'r')
    lines = [line.replace('->', ',').strip().replace(' ', '') for line in file.readlines()]
    solutions(lines)
def solutions(lines):
    """Print the part-one and part-two answers for AoC 2021 day 5.

    Each line is 'x1,y1,x2,y2'. Part one counts points covered by at least
    two horizontal/vertical vents; part two also includes 45° diagonals.
    """
    straight, diagonal = [], []
    for line in lines:
        x1, y1, x2, y2 = map(int, line.split(','))
        if x1 == x2:
            straight.extend((x1, y) for y in range(min(y1, y2), max(y1, y2) + 1))
        elif y1 == y2:
            straight.extend((x, y1) for x in range(min(x1, x2), max(x1, x2) + 1))
        else:
            # 45-degree segment: walk both axes one signed step at a time.
            step_x = 1 if x2 > x1 else -1
            step_y = 1 if y2 > y1 else -1
            diagonal.extend(
                (x1 + i * step_x, y1 + i * step_y)
                for i in range(abs(x2 - x1) + 1)
            )
    ### part one
    print(len([p for p, c in Counter(straight).items() if c > 1]))
    ### part two
    print(len([p for p, c in Counter(straight + diagonal).items() if c > 1]))
if __name__ == "__main__":
    # Script entry point.
    main()
|
"""Pytest-compatible classes and functions testing the Few Acres
classes"""
import pprint
import re
import time
import pytest
from few_acres_of_snow.few_acres_classes import \
FewAcresOfSnowController, FewAcresOfSnowHistory
from few_acres_of_snow.tests.test_moves import moves9575653_fr
from site_yucata.classify_games import YucataDownloader
class TestFewAcresAnalyzer():
    # Placeholder: analyzer-level tests not written yet.
    pass
class TestFewAcresHistory():
    # Placeholder: history-level tests not written yet.
    pass
def test_basic_report():
    """Smoke-test FewAcresOfSnowHistory against a saved game page.

    The fixture path differs depending on whether pytest runs from the
    repository root or from inside the package directory.
    """
    try:
        with open('few_acres_of_snow/ph_9575653.js', 'r') as f:
            full_html = f.read()
    except OSError:  # narrowed from a bare except: only catch file-open errors
        with open('ph_9575653.js', 'r') as f:
            full_html = f.read()
    history = FewAcresOfSnowHistory(full_html)
    print(history.basic_report())
def test_via_stdout(capsys):
    """Replay game 9575653 and check a known move report appears on stdout."""
    # https://www.yucata.de/en/Game/FewAcresOfSnow/9575653
    analyzer = FewAcresOfSnowController(moves_list=moves9575653_fr)
    all_moves = analyzer.iterate_through_moves()
    time.sleep(0.5)  # let the debugs finish before printing out
    for i in range(0, len(moves9575653_fr)):
        print(i)
        pprint.pprint(all_moves[i])
    captured = capsys.readouterr()
    # Regex matching the pprint-ed report of move 84.
    patt = """84
\\('uk',
\\["develop: St. Mary's; St. Mary's; Boston",
 'money from: New Haven',"""
    assert re.search(patt, captured.out, re.MULTILINE)
def test3():
    """Decode a single raw move string into its action list."""
    # Trader: Gaspé, Montreal, Tadoussac = 33, 30, 23
    # s = '»Ì°²µ,½Ð'
    # Successful ambush by Rangers on free Reg Inf in reserve:
    s = '¶ãT˰'
    analyzer = FewAcresOfSnowController(moves9575653_fr[:2])
    pprint.pprint(analyzer.move_to_actions(s))
def test_priest():
    """Decode a priest move after seeding the controller with a prior move."""
    code = '¸×TC±'
    analyzer = FewAcresOfSnowController(['°Í³¶µ,½Ø', code])
    pprint.pprint(analyzer.move_to_actions('°Í³¶µ,½Ø'))
    pprint.pprint(analyzer.move_to_actions(code))
def test_download_games():
    """Fetch the top-player list from yucata.de (requires network access)."""
    dl = YucataDownloader('FewAcresOfSnow')
    rv = dl.request_top_players()
    print(pprint.pformat(rv))
@pytest.mark.skip("Logged in interaction may not be possible without browser-driving.")
def test_get_my_id():
    """Look up a user id by name; needs a logged-in session, hence skipped."""
    # NOTE: `id` shadows the builtin of the same name (local scope only).
    id = YucataDownloader.user_id_for('philip9999')
    assert id == 5
def main():
    """Ad-hoc runner for selected tests when invoked outside pytest."""
    # test1()
    # test_priest()
    # NOTE(review): test_via_stdout requires pytest's capsys fixture, so this
    # bare call raises TypeError when run as a script — confirm intent.
    test_via_stdout()
    # test3()
    # TESTS OF DOWNLOADING GAMES
    # test_download_games()
    # test_get_my_id()
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    main()
#import logging
import os
import time
# Service settings for the Google-Vision OCR document-digitization module.
DEBUG = False
API_URL_PREFIX = "/anuvaad-etl/document-processor/gv-document-digitization"
HOST = '0.0.0.0'
PORT = 5001
BASE_DIR = 'upload'
#BASE_DIR = '/home/naresh/anuvaad/anuvaad-etl/anuvaad-extractor/document-processor/ocr/ocr-gv-server/'
download_folder = 'upload'
ENABLE_CORS = False
# kafka
# ENABLE_CORS = False
# # kafka
# Kafka topics: environment variables override the defaults below.
input_topic_default = 'anuvaad-dp-tools-ocr-google-vision-input-v15'
input_topic_identifier = 'KAFKA_ANUVAAD_DP_TOOLS_OCR_GOOGLE_VISION_INPUT_V15'
input_topic = os.environ.get(input_topic_identifier, input_topic_default)
output_topic_default = 'anuvaad-dp-tools-ocr-google-vision-output-v15'
output_topic_identifier = 'KAFKA_ANUVAAD_DP_TOOLS_OCR_GOOGLE_VISION_OUTPUT_V15'
output_topic = os.environ.get(output_topic_identifier, output_topic_default)
kf_local_server = 'localhost:9092'
kafka_ip_host = 'KAFKA_BOOTSTRAP_SERVER_HOST'
bootstrap_server = os.environ.get(kafka_ip_host, kf_local_server)
TASK_STAT = 'GOOGLE-VISION-OCR-15'
CONSUMER_GROUP_default = 'anuvaad-etl-gvocr-15-consumer-group'
CONSUMER_GROUP_identifier = 'ANUVAAD_ETL_GVOCR_CONSUMER_GROUP_V15'
CONSUMER_GROUP = os.environ.get(CONSUMER_GROUP_identifier,CONSUMER_GROUP_default)
# NOTE(review): download_folder is assigned the same value twice (see above).
download_folder = 'upload'
#
#
# logging.basicConfig(
#     filename=os.getenv("SERVICE_LOG", "server.log"),
#     level=logging.DEBUG,
#     format="%(levelname)s: %(asctime)s \
#         pid:%(process)s module:%(module)s %(message)s",
#     datefmt="%d/%m/%y %H:%M:%S",
# )
#SAVE_URL = "https://auth.anuvaad.org/anuvaad/ocr-content-handler/v0/ocr/save-document"
SAVE_VAR = "OCR_CH_URL"
# NOTE(review): the default URL has a double slash before 'anuvaad' — confirm intended.
SAVE_DEFAULT = "http://gateway_anuvaad-ocr-content-handler:5001//anuvaad/ocr-content-handler/v0/ocr/save-document"
SAVE_URL = os.environ.get(SAVE_VAR,SAVE_DEFAULT)
#print(SAVE_URL)
#SAVE_URL = "http://172.30.0.232:5009//anuvaad/ocr-content-handler/v0/ocr/save-document"
SAVE_NO_PAGE = 1
IS_DYNAMIC =True
# NOTE(review): name keeps the original 'EXRACTION' spelling; renaming would break importers.
EXRACTION_RESOLUTION = 300
from django.http import JsonResponse
from dplhooks import settings
class AuthenticationMiddleware:
    """Reject requests to private paths that lack the configured bearer value.

    Paths starting with '/p' are treated as public; everything else requires
    request.META['AUTHORIZATION'] to equal settings.API_BEARER exactly.
    """
    def __init__(self, get_response):
        # Standard Django middleware: store the next handler in the chain.
        self.get_response = get_response
    def __call__(self, request):
        is_private = not request.path.startswith('/p')
        # NOTE(review): Django normally exposes HTTP headers in request.META
        # as 'HTTP_AUTHORIZATION'; confirm something upstream really sets the
        # bare 'AUTHORIZATION' key checked here.
        if is_private and ('AUTHORIZATION' not in request.META or request.META['AUTHORIZATION'] != settings.API_BEARER):
            return JsonResponse(data={'details': 'Need authorization.'}, status=401)
        return self.get_response(request)
|
import argparse
import gzip
from rdkit import Chem
import tqdm
from rdkit import RDLogger
from rdkit.Chem.MolStandardize import rdMolStandardize
def main():
    """
    takes sdf file of initial training molecules and
    sdf file of training molecules (both optionally gzipped)
    and returns only those initial training molecules
    not contained in the training molecules file as sdf file.
    """
    RDLogger.DisableLog("rdApp.*")
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", help="input filename, type: .sdf.gz or .sdf")
    parser.add_argument("--filter", help="filter filename, type: .sdf or .sdf.gz")
    parser.add_argument("--output", help="output filename, type: .sdf.gz or .sdf")
    args = parser.parse_args()
    input_zipped = False
    print("inputfile:", args.input)
    print("outputfile:", args.output)
    ini_list = []
    smi_list = []
    # start with generating INCHI and SMILES for mols in the filter set
    for i in args.filter.split(","):
        # Probe whether the file is gzipped by attempting to read one byte.
        with gzip.open(i, "r") as fh:
            try:
                fh.read(1)
                input_zipped = True
            except gzip.BadGzipFile:
                input_zipped = False
        # The supplier consumes its stream, so each file is opened twice:
        # once for the InChI pass and once for the SMILES pass.
        if input_zipped:
            with gzip.open(i, "r") as fh:
                suppl = Chem.ForwardSDMolSupplier(fh, removeHs=True)
                ini_list.extend(ini_filter(suppl))
            with gzip.open(i, "r") as fh:
                suppl = Chem.ForwardSDMolSupplier(fh, removeHs=True)
                smi_list.extend(smi_filter(suppl))
        else:
            with open(i, "rb") as fh:
                suppl = Chem.ForwardSDMolSupplier(fh, removeHs=True)
                ini_list.extend(ini_filter(suppl))
            with open(i, "rb") as fh:
                suppl = Chem.ForwardSDMolSupplier(fh, removeHs=True)
                smi_list.extend(smi_filter(suppl))
    print(f"{len(ini_list)} inchi test molecules found")
    print(f"{len(smi_list)} smi test molecules found")
    # Same gzip probe for the input file.
    with gzip.open(args.input, "r") as fh:
        try:
            fh.read(1)
            input_zipped = True
        except gzip.BadGzipFile:
            input_zipped = False
    if input_zipped:
        with gzip.open(args.input, "r") as fh:
            suppl = Chem.ForwardSDMolSupplier(fh, removeHs=True)
            processing(suppl, args, ini_list, smi_list)
    else:
        with open(args.input, "rb") as fh:
            suppl = Chem.ForwardSDMolSupplier(fh, removeHs=True)
            processing(suppl, args, ini_list, smi_list)
def smi_filter(suppl):
    """Return canonical SMILES for every parsable molecule in *suppl*,
    neutralized with the RDKit Uncharger."""
    un = rdMolStandardize.Uncharger()
    smi_list = []
    for mol in suppl:
        # BUG FIX: ForwardSDMolSupplier yields None for unparsable records;
        # skip them instead of crashing inside uncharge().
        if mol is None:
            continue
        # the mol is neutralized
        mol = un.uncharge(mol)
        smi_list.append(Chem.MolToSmiles(mol))
    return smi_list
def ini_filter(suppl):
    """Return InChI strings for every parsable molecule in *suppl*,
    neutralized with the RDKit Uncharger."""
    un = rdMolStandardize.Uncharger()
    ini_list = []
    for mol in suppl:
        # BUG FIX: ForwardSDMolSupplier yields None for unparsable records;
        # skip them instead of crashing inside uncharge().
        if mol is None:
            continue
        # the mol is neutralized
        mol = un.uncharge(mol)
        ini_list.append(Chem.inchi.MolToInchi(mol))
    return ini_list
def processing(suppl, args, exclude_ini_list, exclude_smi_list):
    """Copy molecules from *suppl* to args.output, dropping duplicates.

    A molecule is considered a duplicate when its neutralized InChI or
    SMILES appears in one of the exclude lists. The original (still
    charged) molecule is what gets written.

    :param suppl: RDKit mol supplier for the input data set
    :param args: parsed CLI arguments; only args.output is used
    :param exclude_ini_list: InChIs of molecules to filter out
    :param exclude_smi_list: SMILES of molecules to filter out
    """
    dup = 0
    skipped = 0
    written = 0
    # iterate through dataset for which molecules are filtered
    un = rdMolStandardize.Uncharger()
    # NOTE: the output is always written gzip-compressed, regardless of
    # the extension of args.output.
    with gzip.open(args.output, "wt+") as sdf_zip:
        with Chem.SDWriter(sdf_zip) as writer:
            for mol in tqdm.tqdm(suppl):
                if mol:
                    # uncharge so the comparison keys match the filter set
                    mol_uncharged = un.uncharge(mol)
                    smiles = Chem.MolToSmiles(mol_uncharged)
                    try:
                        inchi = Chem.inchi.MolToInchi(mol_uncharged)
                    except Chem.rdchem.KekulizeException:
                        # BUG FIX: previously `inchi` kept the value from the
                        # previous iteration (or was unbound on the very first
                        # one), corrupting the duplicate check below.
                        inchi = None
                        print(smiles)
                    # test if either an inchi or a smiles are in the exclude lists
                    if (inchi is not None and inchi in exclude_ini_list) \
                            or smiles in exclude_smi_list:
                        dup += 1
                    else:
                        # if not write mol to filtered data set
                        written += 1
                        writer.write(mol)
                else:
                    skipped += 1
    print(f"{dup} duplicate molecules found and discarded")
    print(f"{skipped} molecules skipped")
    print(f"{written} molecules")
# Script entry point.
if __name__ == "__main__":
    main()
|
import torch
from torch.nn import functional as F
from openpose.model.matcher import Matcher
from openpose.model.balanced_positive_negative_sampler import (
BalancedPositiveNegativeSampler,
)
from openpose.structures.boxlist_ops import boxlist_iou
from openpose.model.utils import cat
from openpose.layers import smooth_l1_loss
from openpose.structures.boxlist_ops import cat_boxlist
from openpose.structures.body_uv import body_uv_to_heat_map
def project_densepose_to_heatmap(body_uvs, body_uvs_boxes, proposals, discretization_size):
    """Project ground-truth DensePose annotations onto the heatmap grid.

    Converts the proposal boxes to ``xyxy`` format and delegates the actual
    projection to :func:`body_uv_to_heat_map`.
    """
    xyxy_proposals = proposals.convert("xyxy")
    return body_uv_to_heat_map(body_uvs_boxes, body_uvs, xyxy_proposals.bbox, discretization_size)
class UVRCNNLossComputation(object):
    """Computes the DensePose losses for the UV R-CNN head: body-part
    segmentation, part-index (I) classification, and U/V regression."""
    def __init__(self, proposal_matcher, fg_bg_sampler, discretization_size):
        """
        Arguments:
            proposal_matcher (Matcher)
            fg_bg_sampler (BalancedPositiveNegativeSampler)
            discretization_size (int)
        """
        self.proposal_matcher = proposal_matcher
        self.fg_bg_sampler = fg_bg_sampler
        self.discretization_size = discretization_size
    def match_targets_to_proposals(self, proposal, target):
        """Match every proposal to the ground-truth box with highest IoU and
        return the matched targets (with a "matched_idxs" field attached)."""
        match_quality_matrix = boxlist_iou(target, proposal)
        matched_idxs = self.proposal_matcher(match_quality_matrix)
        # Keypoint RCNN needs "labels" and "keypoints "fields for creating the targets
        target = target.copy_with_fields(["labels", "body_uv"])
        # get the targets corresponding GT for each proposal
        # NB: need to clamp the indices because we can have a single
        # GT in the image, and matched_idxs can be -2, which goes
        # out of bounds
        matched_targets = target[matched_idxs.clamp(min=0)]
        matched_targets.add_field("matched_idxs", matched_idxs)
        return matched_targets
    def prepare_targets(self, proposals, targets):
        """Build per-image (labels, body_uvs, body_boxes) training targets.

        Ground-truth instances whose DensePose RLE mask is empty are removed
        before matching, so only UV-annotated targets produce supervision.
        """
        labels = []
        body_boxes = []
        body_uvs = []
        ##TODO only use targets with groundtruth uv label.
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            #filtered targets with only visible ones
            #* Only match proposals to targets with uv annotations
            #*================functions start===================
            targets_uv = targets_per_image.get_field('body_uv')
            targets_uv_mask = targets_uv.dp_masks
            valids = []
            # mark a target valid only if its RLE mask has any content
            for rle in targets_uv_mask:
                if any(rle):
                    valids.append(1)
                else:
                    valids.append(0)
            valids = torch.as_tensor(valids).to(dtype=torch.uint8)
            targets_per_image = targets_per_image[valids]
            #* ============functions end ========================
            matched_targets = self.match_targets_to_proposals(
                proposals_per_image, targets_per_image
            )
            matched_idxs = matched_targets.get_field("matched_idxs")
            labels_per_image = matched_targets.get_field("labels")
            labels_per_image = labels_per_image.to(dtype=torch.int64)
            # this can probably be removed, but is left here for clarity
            # and completeness
            # TODO check if this is the right one, as BELOW_THRESHOLD
            neg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
            labels_per_image[neg_inds] = 0
            uv_per_image = matched_targets.get_field("body_uv")
            body_boxes.append(matched_targets.bbox)
            body_uvs.append(uv_per_image)
            labels.append(labels_per_image)
        return labels, body_uvs, body_boxes
    def subsample(self, proposals, targets):
        """
        This method performs the positive/negative sampling, and return
        the sampled proposals.
        Note: this function keeps a state (self._proposals).
        Arguments:
            proposals (list[BoxList])
            targets (list[BoxList])
        """
        labels, body_uvs, body_boxes = self.prepare_targets(proposals, targets)
        sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
        proposals = list(proposals)
        # add corresponding label and regression_targets information to the bounding boxes
        for labels_per_image, uv_per_image, box_per_image, proposals_per_image in zip(
            labels, body_uvs,body_boxes,proposals
        ):
            proposals_per_image.add_field("labels", labels_per_image)
            proposals_per_image.add_field("body_uv", uv_per_image)
            proposals_per_image.add_field("body_uv_box",box_per_image)
        # distributed sampled proposals, that were obtained on all feature maps
        # concatenated via the fg_bg_sampler, into individual feature map levels
        # NOTE(review): only the positive indices are kept below; negatives are
        # dropped entirely — presumably intentional for the UV head, confirm.
        for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
            zip(sampled_pos_inds, sampled_neg_inds)
        ):
            img_sampled_inds = torch.nonzero(pos_inds_img).squeeze(1)
            proposals_per_image = proposals[img_idx][img_sampled_inds]
            proposals[img_idx] = proposals_per_image
        self._proposals = proposals
        return proposals
    def __call__(self, proposals, heatmaps_total):
        """Compute the four UV-head losses.

        Arguments:
            proposals (list[BoxList]): sampled proposals carrying the
                "body_uv" and "body_uv_box" fields set by subsample().
            heatmaps_total: the four predicted heatmaps, in order
                (mask, I, U, V).
        Returns:
            Tuple (loss_seg, loss_I, loss_U, loss_V).
        """
        sample_grids = []
        uv_weights = []
        I_weights = []
        gt_Us = []
        gt_Vs = []
        gt_Is = []
        mask_dests = []
        mask_weights = []
        for proposals_per_image in proposals:
            uv = proposals_per_image.get_field("body_uv")
            box = proposals_per_image.get_field("body_uv_box")
            sample_grid,I_weight,uv_weight,gt_U,gt_V,gt_I,mask_dest,mask_weight = project_densepose_to_heatmap(
                uv,box, proposals_per_image, self.discretization_size
            )
            sample_grids.append(sample_grid)
            I_weights.append(I_weight)
            uv_weights.append(uv_weight)
            gt_Us.append(gt_U)
            gt_Vs.append(gt_V)
            gt_Is.append(gt_I)
            mask_dests.append(mask_dest)
            mask_weights.append(mask_weight)
            #valid.append(valid_per_image.view(-1))
        sample_corr = cat(sample_grids,dim=0)
        # No annotated sample points at all: return zero losses that still
        # depend on the predictions so gradients remain well-defined.
        if(sample_corr.numel()==0):
            return heatmaps_total[0].sum()*0,heatmaps_total[1].sum()*0,\
                heatmaps_total[2].sum()*0,heatmaps_total[3].sum()*0
        sample_weight_uv = cat(uv_weights,dim=0)
        sample_weight_I = cat(I_weights,dim=0)
        u_target = cat(gt_Us,dim=0)
        v_target = cat(gt_Vs,dim=0)
        I_target = cat(gt_Is,dim=0).to(dtype=torch.int64)
        # replicate the U/V targets across the 25 part channels
        u_target = u_target[:,None,:,:].repeat(1,25,1,1)
        v_target = v_target[:,None,:,:].repeat(1,25,1,1)
        # NOTE(review): hard-coded .cuda() forces GPU execution here —
        # confirm the model is never run on CPU.
        mask_target = cat(mask_dests,dim=0).to(dtype=torch.int64).cuda()
        #scaled by mask_target_w.sum()./mask_target_w.numel()
        #Updated: 04/16/2019. No longer useful as we already discard those targets
        #loss_scale = mask_target_w.numel()/(mask_target_w.sum()+1e-6)
        mask_heat = heatmaps_total[0]
        I_heat = heatmaps_total[1]
        U_heat = heatmaps_total[2]
        V_heat = heatmaps_total[3]
        # sample the predicted heatmaps at the annotated ground-truth points
        I_detect = F.grid_sample(I_heat,sample_corr)
        u_detect = F.grid_sample(U_heat,sample_corr)
        v_detect = F.grid_sample(V_heat,sample_corr)
        #old loss
        #loss_seg = F.cross_entropy(mask_heat,mask_target,reduction='none')*mask_target_w
        #loss_seg = loss_seg.sum()/loss_seg.numel()
        #new loss
        loss_seg = 2.0*F.cross_entropy(mask_heat,mask_target)
        # print(sample_weight_I.sum())
        # print(sample_weight_I.numel())
        #notice that here the loss I,u,v is not scaled properly.
        sample_scale = sample_weight_I.numel()/sample_weight_I.sum()
        loss_I = F.cross_entropy(I_detect,I_target,reduction='none')*sample_weight_I
        loss_I = 0.3*sample_scale*loss_I.sum()/loss_I.numel()
        loss_U = 0.1*sample_scale*smooth_l1_loss(u_detect*sample_weight_uv,u_target*sample_weight_uv)
        loss_V = 0.1*sample_scale*smooth_l1_loss(v_detect*sample_weight_uv,v_target*sample_weight_uv)
        return loss_seg,loss_I,loss_U,loss_V
def make_roi_uv_loss_evaluator(cfg):
    """Build the UV-head loss evaluator from the model config.

    NOTE: the matcher IoU thresholds are deliberately hard-coded to
    0.7/0.7 instead of reading cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD and
    BG_IOU_THRESHOLD (the cfg-based version was disabled).
    """
    matcher = Matcher(0.7, 0.7, allow_low_quality_matches=False)
    sampler = BalancedPositiveNegativeSampler(
        cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE,
        cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION,
    )
    return UVRCNNLossComputation(matcher, sampler, cfg.MODEL.ROI_UV_HEAD.RESOLUTION)
|
import argparse
import os
from pathlib import Path
from random import shuffle
def parse_args():
    """Parse command line arguments for the pairing exporter.

    Returns:
        argparse.Namespace with dataset_path, split, image_dir_name,
        label_dir_name and sep attributes.
    """
    parser = argparse.ArgumentParser(
        description=
        'Export Image Annotations Pairs as TXT'
    )
    parser.add_argument(
        '--ds_path',
        dest='dataset_path',
        help='Base Dataset Path',
        type=str,
        required=True)
    parser.add_argument(
        '--split',
        dest='split',
        help='Desired Split [Train, Validate, Test] e.g. default 0.7, 0.15, 0.15',
        nargs=3,
        type=float,
        # BUG FIX: was required=True, which made the documented default
        # unreachable; optional now, so the default actually applies.
        required=False,
        default=[0.7, 0.15, 0.15]
    )
    parser.add_argument(
        '--image_dir_name',
        dest='image_dir_name',
        help='Name of Image (Input) Folder.',
        type=str,
        required=False,
        default="images"
    )
    parser.add_argument(
        '--label_dir_name',
        dest='label_dir_name',
        # BUG FIX: help text was copy-pasted from --image_dir_name.
        help='Name of Label (Annotation) Folder.',
        type=str,
        required=False,
        default="annotations"
    )
    parser.add_argument(
        '--sep',
        dest='sep',
        help='Separator between image and label path in the output lines',
        type=str,
        required=False,
        default=" "
    )
    return parser.parse_args()
def list_image_annotations_pairs(ds_path, image_dir_name, label_dir_name):
    """Pair every image file with its annotation file.

    :param ds_path: dataset root folder
    :param image_dir_name: name of the image subfolder
    :param label_dir_name: name of the annotation subfolder
    :return: list of (relative_image_path, relative_label_path) tuples
    :raises AssertionError: when the folders do not contain matching file sets
    """
    # BUG FIX: os.listdir order is arbitrary (filesystem dependent), so the
    # zip below could mis-pair files; sort both listings deterministically.
    image_file_names = sorted(os.listdir(Path(ds_path, image_dir_name)))
    label_file_names = sorted(os.listdir(Path(ds_path, label_dir_name)))
    assert len(image_file_names) == len(label_file_names), "Len(Images) != Len(Labels)"

    def same_file_name(img_lbl, IGNORE_FILE_FORMAT=True):
        # Compare file stems only (extension-insensitive) by default.
        img, lbl = img_lbl
        if IGNORE_FILE_FORMAT:
            return img.split(".")[0] == lbl.split(".")[0]
        return img == lbl

    image_labels = list(zip(image_file_names, label_file_names))
    assert all(map(same_file_name, image_labels)), "Annotations != Imgs"

    def relative_paths(img_lbl):
        # Build "<dir>/<file>" paths relative to the dataset root.
        img, lbl = img_lbl
        return f"{image_dir_name}/{img}", f"{label_dir_name}/{lbl}"

    return [relative_paths(pair) for pair in image_labels]
def split_pairs(pairs, splits, shuffle_pairs=True):
    """Split *pairs* into train/val/test sub-lists according to *splits*.

    :param pairs: list of items to split (shuffled in place when
        shuffle_pairs is True)
    :param splits: dict with float fractions under "train", "val", "test";
        the fractions must sum to 1.0
    :param shuffle_pairs: randomize order before splitting
    :return: dict mapping "train"/"val"/"test" to sub-lists of *pairs*
    """
    # Use a tolerance instead of exact float equality (e.g. 0.7 + 0.15 + 0.15
    # is not guaranteed to be exactly 1.0 in binary floating point).
    assert abs(sum(splits.values()) - 1.0) < 1e-9
    if shuffle_pairs:
        shuffle(pairs)
    train_samples = int(splits["train"] * len(pairs))
    validate_samples = int(splits["val"] * len(pairs))
    test_samples = int(splits["test"] * len(pairs))
    # Rounding remainder is assigned to the training split.
    train_samples += (len(pairs) - train_samples - validate_samples - test_samples)
    # BUG FIX: the original negative-index slicing gave the "val" split
    # test_samples items and the "test" split validate_samples items
    # (swapped whenever they differed), and broke completely when
    # validate_samples == 0 because pairs[-0:] is the whole list.
    val_end = train_samples + validate_samples
    ds = {
        "train": pairs[:train_samples],
        "val": pairs[train_samples:val_end],
        "test": pairs[val_end:]
    }
    assert (len(ds["train"]) + len(ds["val"]) + len(ds["test"])) == len(pairs)
    return ds
def save_pairings_to_txt(_args):
    """Write one ``<split>.txt`` file per split into the dataset folder.

    Each line is ``<image_path><sep><label_path>`` relative to the
    dataset root. After writing, each file is re-read to verify its
    line count matches the split size.
    """
    fractions = {
        "train": _args.split[0],
        "val": _args.split[1],
        "test": _args.split[2]
    }
    pairs = list_image_annotations_pairs(_args.dataset_path, _args.image_dir_name, _args.label_dir_name)
    lines = [_args.sep.join(pair) + "\n" for pair in pairs]
    for split_name, split_lines in split_pairs(lines, fractions).items():
        target = Path(_args.dataset_path, split_name + ".txt")
        with open(target, 'w+') as out_file:
            out_file.writelines(split_lines)
        # Sanity check: re-read and compare line counts.
        with open(target, 'r') as in_file:
            assert len(in_file.readlines()) == len(split_lines)
# Script entry point: parse the CLI arguments and export the split files.
if __name__ == "__main__":
    args = parse_args()
    save_pairings_to_txt(args)
|
from collections import deque
def simplemovingaverage(period):
    """Create a simple-moving-average accumulator over the last *period* samples.

    Returns a closure: each call feeds it one value and returns the average
    of the values seen so far, capped at the most recent *period* of them.
    """
    assert period == int(period) and period > 0, "Period must be an integer >0"
    window = deque([0.0] * period)  # ring of recent samples, zero-padded at start
    running_sum = count = 0.0

    def sma(sample):
        nonlocal running_sum, count
        window.append(sample)
        # The popped value is 0.0 until the window is full, so the sum is
        # exactly the sum of the samples seen so far.
        running_sum += sample - window.popleft()
        count = min(count + 1, period)
        return running_sum / count

    return sma
|
# Copyright (c) 2013 Stian Lode
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import random
from triton.vector2d import Vector2d
from triton.sphere import Sphere
from triton.spring_damper_link import SpringDamperLink
from common.systems import *
from common.components import *
def main():
    """Build a spring-linked sphere simulation and run its loop forever."""
    registry = Registry()

    # Populate the world with randomly placed spheres.
    for _ in range(15):
        body = Sphere(
            mass=1.0,
            radius=4.0,
            pos=Vector2d(random.random() * 100.0, random.random() * 100.0),
            vel=Vector2d(20.0, 0.0),
            damping=0.0,
            elasticity=0.97)
        registry.add_entity(
            RigidBody(body),
            Drawable(),
            Movable())

    # Connect every ordered pair of distinct rigid bodies with a spring-damper.
    for ent_a, [body_a] in registry.get_components(RigidBody):
        for ent_b, [body_b] in registry.get_components(RigidBody):
            if ent_a == ent_b:
                continue
            registry.add_entity(
                Link(SpringDamperLink(body_a.sphere, body_b.sphere, damping=0.1, spring=1.0, length=100.0)),
                Drawable())

    # Register the systems in their processing order.
    for system in (InputSystem(), GravitationalSystem(), ScreenBounceSystem(),
                   SimulationSystem(), RenderSystem(), GameLoopSystem()):
        registry.add_system(system)

    # Run until the loop/render system terminates the process.
    while True:
        registry.process()
# Script entry point.
if __name__ == '__main__':
    main()
|
###################################################################################
# Copyright (c) 2021 Rhombus Systems #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
###################################################################################
from typing import List, Dict
import RhombusAPI as rapi
from rhombus_types.human_event import HumanEvent
from rhombus_types.vector import Vec2
from rhombus_types.camera import Camera
def get_human_events(api_client: rapi.ApiClient, camera: Camera, start_time: int, duration: int) -> Dict[int, List[HumanEvent]]:
    """Get human events from a camera

    :param api_client: The API Client for sending requests to Rhombus
    :param camera: The camera to get the human events for
    :param start_time: The start time in seconds to start getting human events
    :param duration: The duration in seconds of time since the start time to look for events
    :return: Returns a map of object ID to HumanEvent array
    """
    api = rapi.CameraWebserviceApi(api_client=api_client)

    # Ask Rhombus for all footage bounding boxes in the requested window.
    request = rapi.CameraGetFootageBoundingBoxesWSRequest(
        camera_uuid=camera.uuid, start_time=start_time, duration=duration)
    response = api.get_footage_bounding_boxes(body=request)

    # Group the human-motion boxes by object ID. Boxes stamped before the
    # requested start time are dropped: the API sometimes returns boxes from
    # before the window (timestamps are in milliseconds, start_time in seconds).
    boxes_by_id: Dict[int, List[rapi.FootageBoundingBoxType]] = dict()
    for box in response.footage_bounding_boxes:
        if box.a != rapi.ActivityEnum.MOTION_HUMAN:
            continue
        if box.ts < start_time * 1000:
            continue
        boxes_by_id.setdefault(box.object_id, []).append(box)

    events: Dict[int, List[HumanEvent]] = dict()
    for object_id, boxes in boxes_by_id.items():
        for box in boxes:
            # Skip boxes that are essentially zero-sized (threshold is in the
            # API's raw units — see the /10000 scaling below).
            if box.r - box.l < 0.02 or box.b - box.t < 0.02:
                continue
            # Box coordinates are scaled by 10000; convert to unit space.
            dimensions = Vec2((box.r - box.l) / 10000, (box.b - box.t) / 10000)
            position = Vec2((box.r + box.l) / 2 / 10000, (box.b + box.t) / 2 / 10000)
            event = HumanEvent(id=box.object_id, position=position, dimensions=dimensions, timestamp=box.ts, camera=camera)
            events.setdefault(box.object_id, []).append(event)

    # Present every object's events in chronological order.
    for event_list in events.values():
        event_list.sort(key=lambda human_event: human_event.timestamp)
    return events
|
import unittest
import mock
from django.conf import settings
from django.contrib import messages
from django.utils import formats
from django.utils import timezone
from django_cradmin import cradmin_testhelpers
from model_mommy import mommy
from devilry.devilry_group import devilry_group_mommy_factories as group_mommy
from devilry.devilry_group import models
from devilry.devilry_group import models as group_models
class TestFeedbackFeedHeaderMixin(cradmin_testhelpers.TestCaseMixin):
    """
    Tests the header of the feedbackfeed and elements that should be rendered inside it.
    """
    def test_get_header(self):
        # tests that that header exists in header
        group = mommy.make('core.AssignmentGroup')
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=group)
        self.assertTrue(mockresponse.selector.exists('.devilry-group-feedbackfeed-header'))
    def test_get_header_assignment_name(self):
        # tests that the name of the assignment exists in header
        group = mommy.make('core.AssignmentGroup', parentnode__long_name='Test Assignment')
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=group)
        assignment_name = mockresponse.selector.one('.devilry-group-feedbackfeed-header-assignment').alltext_normalized
        self.assertEqual(assignment_name, 'Test Assignment')
    def test_get_header_subject_name(self):
        # tests that the name of the subject exists in header
        group = mommy.make('core.AssignmentGroup', parentnode__parentnode__parentnode__long_name='some_subject')
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=group)
        subject_name = mockresponse.selector.one('.devilry-group-feedbackfeed-header-subject').alltext_normalized
        self.assertEqual(subject_name, group.assignment.period.subject.long_name)
    def test_get_header_period_name(self):
        # tests that period name exists in header
        group = mommy.make('core.AssignmentGroup', parentnode__parentnode__long_name='some_period')
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=group)
        # NOTE(review): uses .text_normalized while the other header tests use
        # .alltext_normalized — confirm this difference is intentional.
        period_name = mockresponse.selector.one('.devilry-group-feedbackfeed-header-period').text_normalized
        self.assertEqual(period_name, group.assignment.period.long_name)
def _get_mock_cradmin_instance():
"""
If the subclass that implements a mixin view is :class:`devilry.devilry_group.views.AdminFeedbackFeedView`
we need a admin devilry role for the cradmininstance, so we just mock a returnvalue for the
:func:`~devilry.devilry_group.cradmin_instances.AdminCrInstance.get_devilry_role_for_requestuser`.
Returns:
Mocked cradmininstance.
"""
mockrequest = mock.MagicMock()
mockrequest.cradmin_instance.get_devilryrole_for_requestuser.return_value = 'subjectadmin'
return mockrequest.cradmin_instance
class TestFeedbackFeedGroupCommentMixin(cradmin_testhelpers.TestCaseMixin):
    """
    Tests the rendering of GroupComment in a feedbackfeed.
    """
    def test_get_feedbackfeed_candidate_user_deleted(self):
        # a student comment whose user was deleted renders 'USER DELETED'
        testassignment = mommy.make('core.Assignment')
        testgroup = mommy.make('core.AssignmentGroup', parentnode=testassignment)
        testfeedbackset = group_mommy.feedbackset_first_attempt_unpublished(group=testgroup)
        mommy.make('devilry_group.GroupComment',
                   user_role='student',
                   user=None,
                   feedback_set=testfeedbackset)
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testgroup)
        self.assertEqual('USER DELETED',
                         mockresponse.selector.one('.devilry-group-comment-user-deleted').alltext_normalized)
    def test_get_feedbackfeed_examiner_user_deleted(self):
        # an examiner comment whose user was deleted renders 'USER DELETED'
        testassignment = mommy.make('core.Assignment')
        testgroup = mommy.make('core.AssignmentGroup', parentnode=testassignment)
        testfeedbackset = group_mommy.feedbackset_first_attempt_unpublished(group=testgroup)
        mommy.make('devilry_group.GroupComment',
                   user_role='examiner',
                   user=None,
                   feedback_set=testfeedbackset)
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testgroup)
        self.assertEqual('USER DELETED',
                         mockresponse.selector.one('.devilry-group-comment-user-deleted').alltext_normalized)
    def test_get_feedbackfeed_admin_user_deleted(self):
        # an admin comment whose user was deleted renders 'USER DELETED'
        testassignment = mommy.make('core.Assignment')
        testgroup = mommy.make('core.AssignmentGroup', parentnode=testassignment)
        testfeedbackset = group_mommy.feedbackset_first_attempt_unpublished(group=testgroup)
        mommy.make('devilry_group.GroupComment',
                   user_role='admin',
                   user=None,
                   feedback_set=testfeedbackset)
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testgroup)
        self.assertEqual('USER DELETED',
                         mockresponse.selector.one('.devilry-group-comment-user-deleted').alltext_normalized)
    def test_get_comment_student(self):
        # test that student comment-style is rendered.
        group = mommy.make('core.AssignmentGroup')
        candidate = mommy.make('core.Candidate',
                               relatedstudent=mommy.make('core.RelatedStudent'),
                               assignment_group=group)
        mommy.make('devilry_group.GroupComment',
                   user_role='student',
                   user=candidate.relatedstudent.user,
                   feedback_set=group.feedbackset_set.first())
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=group,
                                                          cradmin_instance=_get_mock_cradmin_instance())
        self.assertTrue(mockresponse.selector.exists('.devilry-group-feedbackfeed-comment-student'))
    def test_get_comment_examiner(self):
        # test that examiner comment-style is rendered.
        group = mommy.make('core.AssignmentGroup')
        examiner = mommy.make('core.Examiner',
                              assignmentgroup=group)
        mommy.make('devilry_group.GroupComment',
                   user_role='examiner',
                   user=examiner.relatedexaminer.user,
                   feedback_set=group.feedbackset_set.first(),
                   visibility=models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE)
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=group,
                                                          cradmin_instance=_get_mock_cradmin_instance())
        self.assertTrue(mockresponse.selector.exists('.devilry-group-feedbackfeed-comment-examiner'))
    def test_get_comment_admin(self):
        # test that admin comment-style is rendered.
        group = mommy.make('core.AssignmentGroup')
        mommy.make('devilry_group.GroupComment',
                   feedback_set=group.feedbackset_set.first(),
                   user_role='admin',
                   visibility=models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE)
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=group)
        self.assertTrue(mockresponse.selector.exists('.devilry-group-feedbackfeed-comment-admin'))
    def test_get_comment_poster_fullname(self):
        # tests that the comment-posters fullname is rendered
        group = mommy.make('core.AssignmentGroup')
        candidate = mommy.make('core.Candidate',
                               relatedstudent=mommy.make('core.RelatedStudent', user__fullname='Jane Doe'),
                               assignment_group=group)
        comment = mommy.make('devilry_group.GroupComment',
                             user=candidate.relatedstudent.user,
                             user_role='student',
                             feedback_set=group.feedbackset_set.first(),
                             visibility=models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE)
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=comment.feedback_set.group,
                                                          cradmin_instance=_get_mock_cradmin_instance())
        # NOTE(review): assertTrue here only checks truthiness of the first
        # argument; the selector result is unused — possibly meant assertEqual.
        self.assertTrue(comment.user.fullname, mockresponse.selector.one('.devilry-user-verbose-inline-fullname'))
    def test_get_comment_poster_shortname(self):
        # tests that the comment-posters shortname is rendered
        group = mommy.make('core.AssignmentGroup')
        candidate = mommy.make('core.Candidate',
                               relatedstudent=mommy.make('core.RelatedStudent', user__fullname='Jane Doe'),
                               assignment_group=group)
        comment = mommy.make('devilry_group.GroupComment',
                             user=candidate.relatedstudent.user,
                             user_role='student',
                             feedback_set=group.feedbackset_set.first(),
                             visibility=models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE)
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=comment.feedback_set.group,
                                                          cradmin_instance=_get_mock_cradmin_instance())
        # NOTE(review): same assertTrue-with-two-arguments pattern as above.
        self.assertTrue(comment.user.shortname, mockresponse.selector.one('.devilry-user-verbose-inline-shortname'))
    def test_get_comment_student_user_role(self):
        # tests that the role of a student comment is 'student'
        group = mommy.make('core.AssignmentGroup')
        candidate = mommy.make('core.Candidate',
                               relatedstudent=mommy.make('core.RelatedStudent'),
                               assignment_group=group)
        comment = mommy.make('devilry_group.GroupComment',
                             user_role='student',
                             user=candidate.relatedstudent.user,
                             feedback_set=group.feedbackset_set.first(),
                             visibility=models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE)
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=comment.feedback_set.group,
                                                          cradmin_instance=_get_mock_cradmin_instance())
        self.assertEqual('(student)', mockresponse.selector.one('.comment-created-by-role-text').alltext_normalized)
    def test_get_comment_examiner_user_role(self):
        # tests that the role of an examiner comment is 'examiner'
        group = mommy.make('core.AssignmentGroup')
        comment = mommy.make('devilry_group.GroupComment',
                             feedback_set=group.feedbackset_set.first(),
                             user_role='examiner',
                             visibility=models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE)
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=group,
                                                          cradmin_instance=_get_mock_cradmin_instance())
        self.assertEqual('(examiner)', mockresponse.selector.one('.comment-created-by-role-text').alltext_normalized)
    def test_get_comment_admin_user_role(self):
        # tests that the role of an admin comment is 'admin'
        group = mommy.make('core.AssignmentGroup')
        comment = mommy.make('devilry_group.GroupComment',
                             feedback_set=group.feedbackset_set.first(),
                             user_role='admin',
                             visibility=models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE)
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=group)
        self.assertEqual('(admin)', mockresponse.selector.one('.comment-created-by-role-text').alltext_normalized)
class TestFeedbackFeedMixin(TestFeedbackFeedHeaderMixin, TestFeedbackFeedGroupCommentMixin):
"""
Mixin testclass for all feedbackfeed tests.
Add tests for functionality and ui that all feedbackfeed views share.
"""
viewclass = None # must be implemented in subclass
def test_get(self):
group = mommy.make('core.AssignmentGroup')
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=group)
self.assertEqual(mockresponse.selector.one('title').alltext_normalized,
group.assignment.get_path())
def test_semester_expired_comment_form_not_rendered(self):
# Test comment/upload form is not rendered if the semester has expired.
testuser = mommy.make(settings.AUTH_USER_MODEL)
test_feedbackset = group_mommy.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=mommy.make_recipe('devilry.apps.core.period_old'))
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=test_feedbackset.group,
requestuser=testuser
)
self.assertFalse(mockresponse.selector.exists('.django-cradmin-form-wrapper'))
def test_semester_expired_comment_form_not_rendered_message_box(self):
# Test comment/upload form is not rendered if the semester has expired.
testuser = mommy.make(settings.AUTH_USER_MODEL)
test_feedbackset = group_mommy.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=mommy.make_recipe('devilry.apps.core.period_old'))
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=test_feedbackset.group,
requestuser=testuser
)
self.assertFalse(mockresponse.selector.exists('.django-cradmin-form-wrapper'))
self.assertTrue(mockresponse.selector.exists('.devilry-feedbackfeed-form-disabled'))
self.assertEqual(mockresponse.selector.one('.devilry-feedbackfeed-form-disabled').alltext_normalized,
'File uploads and comments disabled This assignment is on an inactive semester. '
'File upload and commenting is disabled.')
def test_semester_expired_post_django_message(self):
# Test comment/upload form post django message if the semester has expired.
testuser = mommy.make(settings.AUTH_USER_MODEL)
test_feedbackset = group_mommy.feedbackset_first_attempt_unpublished(
group__parentnode__parentnode=mommy.make_recipe('devilry.apps.core.period_old'))
messagesmock = mock.MagicMock()
self.mock_http302_postrequest(
cradmin_role=test_feedbackset.group,
requestuser=testuser,
viewkwargs={'pk': test_feedbackset.group.id},
messagesmock=messagesmock,
requestkwargs={
'data': {
'text': 'test',
'student_add_comment': 'unused value',
}
})
messagesmock.add.assert_called_once_with(
messages.WARNING, 'This assignment is on an inactive semester. File upload and commenting is disabled.', '')
self.assertEqual(group_models.GroupComment.objects.count(), 0)
def test_get_event_without_any_deadlines_expired(self):
# tests that when a feedbackset has been created and no first deadlines given, either on Assignment
# or FeedbackSet, no 'expired event' is rendered
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_end')
group = mommy.make('core.AssignmentGroup', parentnode=testassignment)
group_mommy.feedbackset_first_attempt_unpublished(group=group)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=group)
self.assertFalse(mockresponse.selector.exists('.devilry-group-feedbackfeed-event-message-deadline-expired'))
def test_get_event_with_assignment_first_deadline_expired(self):
# tests that an 'deadline expired'-event occurs when Assignment.first_deadline expires.
# NOTE: FeedbackSet.deadline_datetime is not set.
assignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start')
group = mommy.make('core.AssignmentGroup', parentnode=assignment)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=group)
self.assertTrue(mockresponse.selector.exists('.devilry-group-feedbackfeed-event-message__deadline-expired'))
def test_get_event_with_feedbackset_deadline_datetime_expired(self):
# tests that an 'deadline expired'-event occurs when FeedbackSet.deadline_datetime expires.
# NOTE: Assignment.first_deadline is not set.
feedbackset = mommy.make('devilry_group.FeedbackSet',
deadline_datetime=timezone.now()-timezone.timedelta(days=1))
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=feedbackset.group)
self.assertTrue(mockresponse.selector.exists('.devilry-group-feedbackfeed-event-message__deadline-expired'))
def test_get_event_without_feedbackset_deadline_datetime_expired(self):
testassignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_end')
group = mommy.make('core.AssignmentGroup', parentnode=testassignment)
group_mommy.feedbackset_first_attempt_unpublished(group=group)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=group)
self.assertFalse(mockresponse.selector.exists('.devilry-group-feedbackfeed-event-message__deadline-expired'))
def test_get_event_two_feedbacksets_deadlines_expired_assignment_firstdeadline(self):
assignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start',
first_deadline=timezone.now() - timezone.timedelta(days=4))
testgroup = mommy.make('core.AssignmentGroup', parentnode=assignment)
group_mommy.feedbackset_first_attempt_published(group=testgroup)
group_mommy.feedbackset_new_attempt_unpublished(
group=testgroup,
deadline_datetime=timezone.now() - timezone.timedelta(days=2))
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testgroup)
expired = mockresponse.selector.list('.devilry-group-feedbackfeed-event-message__deadline-expired')
self.assertEqual(2, len(expired))
self.assertEqual(2, group_models.FeedbackSet.objects.count())
def test_get_feedbackset_header(self):
    # The first-attempt feedbackset gets its own header wrapper element.
    group = mommy.make('core.AssignmentGroup')
    group_mommy.feedbackset_first_attempt_unpublished(group=group)
    response = self.mock_http200_getrequest_htmls(cradmin_role=group)
    self.assertTrue(response.selector.one(
        '.devilry-group-feedbackfeed-feed__feedbackset-wrapper--header-first-attempt'))
def test_get_feedbackset_header_title(self):
    # The header title shows the deadline, rendered with the configured
    # DATETIME_FORMAT (localization disabled to make the output deterministic).
    assignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start')
    group = mommy.make('core.AssignmentGroup', parentnode=assignment)
    group_mommy.feedbackset_first_attempt_unpublished(group=group)
    with self.settings(DATETIME_FORMAT='l j F, Y, H:i', USE_L10N=False):
        response = self.mock_http200_getrequest_htmls(cradmin_role=group)
        self.assertEqual(response.selector.one('.header-title').alltext_normalized,
                         'Deadline: Saturday 15 January, 2000, 23:59')
def test_get_feedbackset_header_attempt(self):
    # The header shows the attempt number for the first attempt.
    assignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start')
    group = mommy.make('core.AssignmentGroup', parentnode=assignment)
    group_mommy.feedbackset_first_attempt_unpublished(group=group)
    response = self.mock_http200_getrequest_htmls(cradmin_role=group)
    self.assertEqual(response.selector.one('.header-attempt-number').alltext_normalized,
                     'Attempt 1')
def test_get_feedbackset_header_grading_info_waiting_for_feedback(self):
    # With the "activeperiod_start" recipe the grading info reads
    # "waiting for feedback".
    assignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start')
    group = mommy.make('core.AssignmentGroup', parentnode=assignment)
    group_mommy.feedbackset_first_attempt_unpublished(group=group)
    response = self.mock_http200_getrequest_htmls(cradmin_role=group)
    self.assertEqual(response.selector.one('.header-grading-info').alltext_normalized,
                     'waiting for feedback')
def test_get_feedbackset_header_grading_info_waiting_for_deliveries_for_feedback(self):
    # With the "activeperiod_middle" recipe the grading info reads
    # "waiting for deliveries".
    assignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_middle')
    group = mommy.make('core.AssignmentGroup', parentnode=assignment)
    group_mommy.feedbackset_first_attempt_unpublished(group=group)
    response = self.mock_http200_getrequest_htmls(cradmin_role=group)
    self.assertEqual(response.selector.one('.header-grading-info').alltext_normalized,
                     'waiting for deliveries')
def test_get_feedbackset_header_two_attempts(self):
    # Each attempt gets its own header with an increasing attempt number.
    assignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start')
    group = mommy.make('core.AssignmentGroup', parentnode=assignment)
    group_mommy.feedbackset_first_attempt_published(group=group)
    group_mommy.feedbackset_new_attempt_unpublished(group=group)
    response = self.mock_http200_getrequest_htmls(cradmin_role=group)
    attempt_headers = response.selector.list('.header-attempt-number')
    self.assertEqual(attempt_headers[0].alltext_normalized, 'Attempt 1')
    self.assertEqual(attempt_headers[1].alltext_normalized, 'Attempt 2')
def test_get_feedbackset_deadline_history_username_rendered(self):
    # The deadline-change event displays the name of the user who changed it
    # as "fullname(shortname)".
    assignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start')
    group = mommy.make('core.AssignmentGroup', parentnode=assignment)
    group_mommy.feedbackset_first_attempt_unpublished(group=group)
    user = mommy.make(settings.AUTH_USER_MODEL, shortname='test@example.com', fullname='Test User')
    mommy.make('devilry_group.FeedbackSetDeadlineHistory',
               feedback_set=group.cached_data.first_feedbackset,
               changed_by=user)
    response = self.mock_http200_getrequest_htmls(cradmin_role=group)
    self.assertEqual(
        response.selector.one('.devilry-group-feedbackfeed-event-message__user_display_name')
        .alltext_normalized,
        'Test User(test@example.com)')
def test_get_feedbackset_grading_updated_one_event_rendered(self):
    # A single grading update yields exactly one rendered event.
    assignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start')
    group = mommy.make('core.AssignmentGroup', parentnode=assignment)
    feedbackset = group_mommy.feedbackset_first_attempt_published(group=group, grading_points=1)
    user = mommy.make(settings.AUTH_USER_MODEL, shortname='test@example.com', fullname='Test User')
    mommy.make('devilry_group.FeedbackSetGradingUpdateHistory', feedback_set=feedbackset,
               old_grading_points=1, updated_by=user)
    # We add an unpublished new attempt, because the feedback view for
    # examiners requires that the last feedbackset is not published.
    group_mommy.feedbackset_new_attempt_unpublished(group=group)
    response = self.mock_http200_getrequest_htmls(cradmin_role=group)
    self.assertEqual(response.selector.count('.devilry-group-event__grading_updated'), 1)
def test_get_feedbackset_grading_updated_multiple_events_rendered(self):
    # One rendered event is expected per grading update (four here).
    assignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start')
    group = mommy.make('core.AssignmentGroup', parentnode=assignment)
    feedbackset = group_mommy.feedbackset_first_attempt_published(group=group, grading_points=1)
    user = mommy.make(settings.AUTH_USER_MODEL, shortname='test@example.com', fullname='Test User')
    for old_points in (1, 0, 1, 0):
        mommy.make('devilry_group.FeedbackSetGradingUpdateHistory', feedback_set=feedbackset,
                   old_grading_points=old_points, updated_by=user)
    # We add an unpublished new attempt, because the feedback view for
    # examiners requires that the last feedbackset is not published.
    group_mommy.feedbackset_new_attempt_unpublished(group=group)
    response = self.mock_http200_getrequest_htmls(cradmin_role=group)
    self.assertEqual(response.selector.count('.devilry-group-event__grading_updated'), 4)
|
from .sub_models.models_accounts import *
from .sub_models.models_verticals import *
from .sub_models.models_spiders import *
from .sub_models.models_contacts import *
|
import pvtoolslib

# Rebuild the S3 filename database, persist it, then load and display it.
pvtoolslib.build_s3_filename_list()
filename_df = pvtoolslib.get_s3_filename_df()
print(filename_df)
import tkinter as tk
from random import randint
import time
import math
from project.MouseController import MouseController
class WheelSpinner(tk.Frame):
    """A drag-to-spin prize wheel drawn on a tkinter Canvas.

    The wheel is divided into one colored ``RotatingArc`` per entry in
    ``wheel_options``. The user drags inside the wheel to throw it; the wheel
    then decelerates until it stops, the label under the 90-degree position
    wins, and a ``<<Finish Spinning Wheel>>`` event is generated on ``master``.
    """

    def __init__(self, master, wheel_options, radius, *args, **kwargs):
        """
        master        : parent widget; must provide show_winning_info()
                        (called when the wheel stops -- TODO confirm callers).
        wheel_options : sequence of option labels shown on the wheel.
        radius        : wheel radius in pixels.
        """
        super().__init__(master, *args, **kwargs)
        self.master = master
        self.radius = radius
        self.display_label = tk.Label(self, height=2)
        self.wheel_options = wheel_options
        self.size = radius * 2.1  # canvas is slightly larger than the wheel
        self.canvas = tk.Canvas(self, width=self.size, height=self.size)
        self.drawn_arc = []          # RotatingArc sectors composing the wheel
        self.count = None            # number of options (set in draw())
        self.angle_increment = None  # sector width in degrees (set in draw())
        self.winner = None           # winning label after the wheel stops
        self.is_rotating = False     # True while the wheel spins freely
        self.frame = 0
        self.speed = 400             # angular speed in degrees/second
        self.display_label.grid(row=0, column=0)
        self.canvas.grid(row=1, column=0)
        self.canvas.bind("<Button-1>", lambda event: self.verify_click_position(event))
        self.canvas.bind("<ButtonRelease-1>", lambda event: self.on_mouse_release(event))
        self.__drawn = False
        self.__rotation_speed_list = []  # recent drag speeds; last one launches the wheel
        self.__is_dragging = False
        self.__init_drag_pos = None      # pointer position at the previous drag step
        self.__current_time = None       # timestamp of the previous update() tick
        self.__delta_time = None         # seconds elapsed since the previous tick
        self.__mouse_controller = MouseController(self.canvas)
        self.update()

    def draw(self):
        """Draw all wheel sectors and reset the prompt text."""
        self.display_label['text'] = "Spin the wheel to find out \nwhat information you'll see!"
        self.count = len(self.wheel_options)
        angle = 0
        self.angle_increment = 360 / self.count
        for option in self.wheel_options:
            if self.wheel_options[self.count - 1] == option:
                # Last sector: extend to 360 so floating-point rounding cannot
                # leave a gap at the end of the circle.
                self.drawn_arc.append(RotatingArc(self, self.size / 2, self.size / 2, self.radius,
                                                  angle, 360, option,
                                                  fill=self.generate_random_color(), width=3))
            else:
                self.drawn_arc.append(RotatingArc(self, self.size / 2, self.size / 2, self.radius,
                                                  angle, angle + self.angle_increment, option,
                                                  fill=self.generate_random_color(), width=3))
            angle = angle + self.angle_increment
        self.__drawn = True

    def erase(self):
        """Clear the whole canvas."""
        self.canvas.delete('all')

    def display_current_winner(self):
        """Show the label of the sector currently covering the 90-degree mark."""
        winner = None
        for arc in self.drawn_arc:
            if 90 >= arc.start_angle >= 90 - self.angle_increment:
                winner = arc.text
        if winner is not None:
            self.display_label['text'] = winner

    def update(self):
        """Animation tick, rescheduled every 33 ms (~30 fps)."""
        if not self.__drawn:
            self.after(33, self.update)
            return
        now = time.time()
        if self.__current_time is None:
            self.__delta_time = 1 / 30  # first frame: assume a nominal 30 fps
        else:
            self.__delta_time = now - self.__current_time
        # BUG FIX: record this tick's timestamp. Previously __current_time was
        # never assigned, so __delta_time stayed at the constant 1/30 and the
        # animation speed did not track real elapsed time.
        self.__current_time = now
        if self.is_rotating:
            self.rotate_all_with_speed()
            self.calculate_new_speed()
            self.display_current_winner()
        if self.__is_dragging:
            self.drag()
        self.after(33, self.update)

    def verify_click_position(self, event):
        """Begin a drag when the press happened inside the wheel circle."""
        if self.__is_dragging or self.is_rotating or not self.__drawn:
            return
        x, y = event.x, event.y
        if math.sqrt(
                math.pow(self.size / 2 - x, 2) + math.pow(self.size / 2 - y, 2)) <= self.radius:
            self.__is_dragging = True
            self.__rotation_speed_list = []
            self.__init_drag_pos = x, y

    def drag(self):
        """Rotate the wheel to follow the pointer while dragging."""
        x0, y0 = self.__init_drag_pos
        # NOTE(review): the drag start uses event (widget) coordinates while
        # updates use MouseController.get_absolute_position() -- confirm both
        # are in the same coordinate system.
        x, y = self.__mouse_controller.get_absolute_position()
        angle_to_rotate = math.atan2(y - self.size / 2, x - self.size / 2) - \
            math.atan2(y0 - self.size / 2, x0 - self.size / 2)
        # Wrap into (-pi, pi] so crossing the +/-180 degree line does not jump.
        if abs(angle_to_rotate) > math.pi:
            angle_to_rotate = -math.copysign(1, angle_to_rotate) * 2 * math.pi + angle_to_rotate
        self.rotate_all(-angle_to_rotate / math.pi * 180)
        self.__rotation_speed_list.append(angle_to_rotate / math.pi * 180 / self.__delta_time)
        self.__init_drag_pos = x, y

    def on_mouse_release(self, event):
        """Finish the drag and possibly launch the wheel."""
        if self.__is_dragging:
            self.__is_dragging = False
            self.__calculate_initial_speed()

    def __calculate_initial_speed(self):
        """Launch the wheel with the last measured drag speed, if fast enough."""
        if len(self.__rotation_speed_list) <= 1:
            self.display_label['text'] = "SPIN HARDER!"
            return
        self.speed = -self.__rotation_speed_list[-1]
        if abs(self.speed) < 300:
            self.display_label['text'] = "SPIN HARDER!"
        else:
            self.is_rotating = True

    def rotate_all(self, degree):
        """Rotate every sector by ``degree`` degrees."""
        for arc in self.drawn_arc:
            arc.rotate(degree)

    def rotate_all_with_speed(self):
        """Rotate every sector according to the current speed and frame time."""
        for arc in self.drawn_arc:
            arc.rotate(self.speed * self.__delta_time)

    def calculate_new_speed(self):
        """Apply speed-dependent deceleration; stop the wheel at sign change."""
        speed_pos = abs(self.speed)
        # Piecewise deceleration: the faster the wheel, the harder it brakes.
        if speed_pos >= 2000:
            acceleration = 1200 * -math.copysign(1, self.speed)
        elif speed_pos >= 1000:
            acceleration = 500 * -math.copysign(1, self.speed)
        elif speed_pos >= 600:
            acceleration = 250 * -math.copysign(1, self.speed)
        elif speed_pos >= 350:
            acceleration = 120 * -math.copysign(1, self.speed)
        elif speed_pos >= 200:
            acceleration = 50 * -math.copysign(1, self.speed)
        elif speed_pos >= 100:
            acceleration = 20 * -math.copysign(1, self.speed)
        else:
            acceleration = 10 * -math.copysign(1, self.speed)
        # If braking would flip the sign of the speed, the wheel has stopped.
        if math.copysign(1, self.speed) != math.copysign(
                1, self.speed + acceleration * self.__delta_time):
            self.speed = 0
            self.finish_rotation()
        else:
            self.speed = self.speed + acceleration * self.__delta_time

    def finish_rotation(self):
        """Record the winner, clear the wheel, and notify the master widget."""
        self.winner = self.display_label['text']
        self.is_rotating = False
        self.erase()
        self.__drawn = False
        self.master.event_generate("<<Finish Spinning Wheel>>", when="tail")
        self.master.show_winning_info()

    def create_circle_arc(self, x, y, r, **kwargs) -> int:
        """
        Draw a circle arc on the canvas.

        :param x: X position of the center of the arc
        :param y: Y position of the center of the arc
        :param r: Radius of the arc
        :param kwargs: 'start' and 'end' (degrees) position the arc; 'end' is
            translated to tkinter's 'extent' keyword before drawing.
        :return: the canvas item id of the created arc.
        """
        if "start" in kwargs and "end" in kwargs:
            kwargs["extent"] = kwargs["end"] - kwargs["start"]
            del kwargs["end"]
        return self.canvas.create_arc(x - r, y - r, x + r, y + r, **kwargs)

    @staticmethod
    def generate_random_color():
        """Return a random '#rrggbb' color string."""
        r = randint(0, 255)
        g = randint(0, 255)
        b = randint(0, 255)
        return "#%02x%02x%02x" % (r, g, b)
class RotatingArc:
    """One colored sector of the wheel; knows how to redraw itself rotated."""

    def __init__(self, frame, position_x, position_y, radius, start_angle, end_angle, text, *args,
                 **kwargs):
        """
        frame       : the WheelSpinner owning the canvas; must provide
                      create_circle_arc() and a ``canvas`` attribute.
        position_x  : x of the arc center on the canvas.
        position_y  : y of the arc center on the canvas.
        radius      : arc radius in pixels.
        start_angle : starting angle in degrees.
        end_angle   : ending angle in degrees.
        text        : option label this sector represents.
        Extra args/kwargs are forwarded to the canvas arc creation.
        """
        self.frame_parent = frame
        self.canvas_parent = frame.canvas
        self.radius = radius
        self.position_x = position_x
        self.position_y = position_y
        self.start_angle = start_angle
        self.end_angle = end_angle
        self.item = None  # canvas item id, set by draw()
        self.text = text
        self.draw(*args, **kwargs)

    def draw(self, *args, **kwargs):
        """Create the arc item on the parent canvas."""
        self.item = self.frame_parent.create_circle_arc(self.position_x, self.position_y,
                                                        self.radius,
                                                        start=self.start_angle, end=self.end_angle,
                                                        *args, **kwargs)

    def rotate(self, angle, *args):
        """Shift the arc by ``angle`` degrees, keeping start_angle in [0, 360).

        The wrap-around below handles at most one full turn per call, i.e. it
        assumes |angle| < 360 -- TODO confirm against callers.
        """
        self.canvas_parent.itemconfigure(self.item, start=self.start_angle + angle)
        self.start_angle += angle
        if self.start_angle >= 360:
            self.start_angle -= 360
        if self.start_angle < 0:
            self.start_angle += 360
if __name__ == '__main__':
    # Stand-alone demo of the spinner wheel.
    # NOTE(review): the WheelSpinner is never pack()ed/grid()ded here and
    # draw() is never called, so the window likely appears empty -- confirm.
    # Also, finish_rotation() calls master.show_winning_info(), which a plain
    # Tk root does not provide; presumably the real master is a custom widget.
    root = tk.Tk()
    options = ['Name', 'Home Phone Numbers', 'Work Phone Numbers', 'Personal Phone Numbers',
               'Emails', 'Home Addresses', 'Notes']
    WheelSpinner(root, options, width=300, height=500, radius=150)
    root.mainloop()
|
from multipledispatch import dispatch
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .colour import PAL
# Array-like types accepted by dispatchers in this module.
a = (list, np.ndarray)

# BUG FIX: the decorator below referenced an undefined name ``function``,
# which raised NameError as soon as this module was imported. Bind it to the
# plain Python function type so ``viz`` dispatches on (ndarray, function).
function = type(lambda: 0)


@dispatch(np.ndarray, function)
def viz(x, f):
    """Plot ``f(x)`` against ``x`` in the first palette colour and show it.

    x : sample points (numpy array).
    f : plain Python function mapping x to y values.
    """
    # Now condition on the observations to make predictions.
    plt.plot(x, f(x), c=PAL[0])
    plt.show()
    return
|
#!/usr/bin/env python
'''
mcu: Modeling and Crystallographic Utilities
Copyright (C) 2019 Hung Q. Pham. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Email: Hung Q. Pham <pqh3.14@gmail.com>
'''
'''This module is modified from vaspwfc.py in QijingZheng's project.
ref: https://github.com/QijingZheng/VaspBandUnfolding/blob/master/vaspwfc.py
'''
import numpy as np
from mcu.vasp import utils, vasp_io, const
from scipy.fftpack import fftfreq, fftn, ifftn
class main:
    """Reader and exporter for VASP WAVECAR files (pseudo-wavefunctions).

    The WAVECAR manipulation is modified from vaspwfc.py in QijingZheng's
    project: https://github.com/QijingZheng/VaspBandUnfolding/blob/master/vaspwfc.py
    """

    def __init__(self, file="WAVECAR", lsorbit=False):
        '''Open the WAVECAR and read its header and band information.

        file    : path to the WAVECAR file.
        lsorbit : True for a spin-orbit (spinor) calculation.
        '''
        if not utils.check_exist(file):
            print('Cannot find the WAVECAR file. Check the path:', file)
            self.success = False
        else:
            self._wavecar = open(file, 'rb')
            self._lsorbit = lsorbit
            self.success = True
            self.read_header()
            self.get_band()

    def read_header(self):
        '''Read the header and calculation info.

        Sets recl/nspin/rtag, the coefficient precision, k-point/band counts,
        the energy cutoff, the cell tuple and the FFT grid size estimate.
        '''
        self._wavecar.seek(0)
        # First record: record length, number of spin channels, precision tag.
        self.recl, self.nspin, self.rtag = np.array(
            np.fromfile(self._wavecar, dtype=np.float64, count=3), dtype=int)
        if self.rtag == 45200:
            self.prec = np.complex64
        elif self.rtag == 45210:
            self.prec = np.complex128
        else:
            raise ValueError("Invalid TAG values: {}".format(self.rtag))

        # Second record: kpts, bands, encut, cell info.
        self._wavecar.seek(self.recl)
        dump = np.fromfile(self._wavecar, dtype=np.float64, count=12)
        self.nkpts = int(dump[0])            # No. of k-points
        self.nbands = int(dump[1])           # No. of bands
        self.encut = dump[2]                 # Energy cutoff
        lattice = dump[3:].reshape((3, 3))   # real space supercell basis
        volume = np.linalg.det(lattice)      # real space supercell volume, unit: A^3
        recip_lattice = 2 * np.pi * np.linalg.inv(lattice).T  # reciprocal basis, unit: A^-1
        self.cell = (lattice, recip_lattice, None, volume)

        # Estimate the FFT grid size.
        # Old version used 2 * CUTOFF + 1; relaxing this condition works for LSORBIT.
        norm = np.linalg.norm(lattice, axis=1)
        CUTOFF = np.ceil(np.sqrt(self.encut / const.RYTOEV) / (2 * np.pi / (norm / const.AUTOA)))
        self.ngrid = np.array(2 * CUTOFF + 3, dtype=np.int64)

    def get_band(self):
        '''Extract per-(spin, kpt) plane-wave counts, k-points, eigenvalues
        and occupations.'''
        self.nplws = np.zeros(self.nkpts, dtype=np.int64)
        self.kpts = np.zeros((self.nkpts, 3), dtype=np.float64)
        self.band = np.zeros((self.nspin, self.nkpts, self.nbands), dtype=np.float64)
        self.co_occ = np.zeros((self.nspin, self.nkpts, self.nbands), dtype=np.float64)
        for spin in range(self.nspin):
            cg_spin = []
            for kpt in range(self.nkpts):
                # Read eigenvalues + occupations for this (spin, kpt) record.
                rec = 2 + spin * self.nkpts * (self.nbands + 1) + kpt * (self.nbands + 1)
                self._wavecar.seek(rec * self.recl)
                dump = np.fromfile(self._wavecar, dtype=np.float64, count=4 + 3 * self.nbands)
                if spin == 0:
                    # Plane-wave count and k-vector are spin-independent.
                    self.nplws[kpt] = int(dump[0])
                    self.kpts[kpt] = dump[1:4]
                dump = dump[4:].reshape((-1, 3))
                self.band[spin, kpt, :] = dump[:, 0]
                self.co_occ[spin, kpt, :] = dump[:, 2]

    def get_coeff(self, spin=0, kpt=1, norm=False):
        '''Extract the plane-wave coefficients of all bands at one k-point.

        spin : spin index (0 or 1).
        kpt  : 1-based k-point index.
        norm : if True, normalize each band's coefficient vector to unit norm.
        Returns an (nbands, nplw) complex128 array.
        '''
        assert 0 < kpt <= self.nkpts, 'Invalid kpoint index!'
        kpt -= 1  # convert to 0-based indexing
        # NOTE: removed an unreachable `if kpt > self.nkpts` check that
        # followed the assert and the decrement and could never trigger.
        # TODO: check spin value
        cg = []
        for band in range(self.nbands):
            rec = 3 + spin * self.nkpts * (self.nbands + 1) + kpt * (self.nbands + 1) + band
            self._wavecar.seek(rec * self.recl)
            dump = np.fromfile(self._wavecar, dtype=self.prec, count=self.nplws[kpt])
            cg.append(np.asarray(dump, dtype=np.complex128))
        cg = np.asarray(cg)
        if norm:
            norm_factor = np.sqrt((cg.conj() * cg).sum(axis=1).real.reshape(self.nbands, -1))
            cg = cg / norm_factor
        return cg

    def get_gvec(self, kpt=1):
        '''
        Generate the G-vectors that satisfy the following relation:
            (G + k)**2 / 2 < ENCUT
        '''
        assert 0 < kpt <= self.nkpts, 'Invalid kpoint index!'
        kpt -= 1
        kvec = self.kpts[kpt]
        # Fast algorithm without for loops: frequencies along each axis in
        # FFT ordering (non-negative first, then negative).
        fx = np.hstack([np.arange(self.ngrid[0]//2 + 2), -np.flip(np.arange(1, self.ngrid[0]//2))])
        fy = np.hstack([np.arange(self.ngrid[1]//2 + 2), -np.flip(np.arange(1, self.ngrid[1]//2))])
        fz = np.hstack([np.arange(self.ngrid[2]//2 + 2), -np.flip(np.arange(1, self.ngrid[2]//2))])
        y, z, x = np.meshgrid(fy, fz, fx, indexing='xy')
        kgrid = np.asarray(np.hstack([x.reshape(-1, 1), y.reshape(-1, 1), z.reshape(-1, 1)]))

        # Kinetic_Energy = (G + k)**2 / 2
        # HSQDTM = hbar**2/(2*ELECTRON MASS)
        recip_lattice = self.cell[1]
        KENERGY = const.HSQDTM * np.linalg.norm(
            np.dot(kgrid + kvec[np.newaxis, :], recip_lattice), axis=1)**2

        # Find G-vectors where (G + k)**2 / 2 < ENCUT.
        Gvec = kgrid[np.where(KENERGY < self.encut)[0]]

        # Check that Gvec is consistent with the G-vectors generated by VASP.
        n = 1
        if self._lsorbit: n = 2  # a SOC wfn stores up + down, doubling the plw count
        assert Gvec.shape[0] == self.nplws[kpt] / n, 'No. of planewaves not consistent! %d %d %d' % \
            (Gvec.shape[0], self.nplws[kpt], np.prod(self.ngrid))

        return Gvec

    def get_unk(self, spin=0, kpt=1, band=1, Gp=[0, 0, 0], ngrid=None, norm=False, norm_c=False):
        '''
        Obtain the pseudo periodic part of the Bloch function in real space.

        Attributes:
            spin   : spin index of the desired KS state
            kpt    : k-point index of the desired KS state, starting from 1
            band   : band index of the desired KS state, starting from 1
            Gp     : shift the G vectors by Gp
            ngrid  : the FFT grid size
            norm   : normalization; False: 1/sqrt(N), True: <unk|unk> = 1
            norm_c : whether to normalize the plane-wave coefficients

        norm=False and norm_c=False: the unk is identical to VASP UNK files.

            u_{n}^{k+Gp}(r) = 1/norm * \sum_G u_{n}^{k}(G).e^{i(G-Gp)r}

        special case when Gp = 0:
            u_{n}^{k}(r) = 1/norm * \sum_G u_{n}^{k}(G).e^{iGr}
            u_{n}^{k}(r) = FFT(u_{n}^{k}(G))

        Note: ``Gp`` keeps its original mutable default for interface
        compatibility; it is only read, never mutated.
        '''
        if ngrid is None:
            ngrid = self.ngrid.copy()
        else:
            ngrid = np.array(ngrid, dtype=np.int64)
            assert ngrid.shape[0] == 3, 'Wrong syntax for ngrid'
            assert np.all(ngrid >= self.ngrid), "Minium FT grid size: (%d, %d, %d)" % \
                (self.ngrid[0], self.ngrid[1], self.ngrid[2])
        assert band <= self.nbands, 'The band index is larger than the number of bands'

        # The iFFT carries a 1/N_G factor; unk exported by VASP does not have
        # this factor, hence the np.prod(ngrid) multiplications below.
        Gp = np.int64(Gp)
        gvec = self.get_gvec(kpt) - Gp
        unk = np.zeros(ngrid, dtype=np.complex128)
        gvec %= ngrid[np.newaxis, :]  # map negative frequencies into the grid
        nx, ny, nz = gvec[:, 0], gvec[:, 1], gvec[:, 2]
        if self._lsorbit:
            wfc_spinor = []
            Cg = self.get_coeff(spin, kpt, norm_c)[band-1]
            nplw = Cg.shape[0] // 2  # first half: spin up, second half: spin down
            # spinor up
            unk[nx, ny, nz] = Cg[:nplw]
            wfc_spinor.append(ifftn(unk))
            # spinor down
            unk[:, :, :] = 0.0j
            unk[nx, ny, nz] = Cg[nplw:]
            wfc_spinor.append(ifftn(unk))
            del Cg
            wfc_spinor = np.asarray(wfc_spinor)
            # BUG FIX: the original multiplied by an undefined name `normfac`
            # (NameError). Mirror the non-spinor branch below: undo ifftn's
            # 1/N factor, or normalize the spinor when norm=True.
            if norm:
                return wfc_spinor / np.linalg.norm(wfc_spinor)
            return wfc_spinor * np.prod(ngrid)
        else:
            unk[nx, ny, nz] = self.get_coeff(spin, kpt, norm_c)[band-1]
            unk = ifftn(unk)
            # Note: ifftn has a norm factor of 1/N.
            if norm == False:
                return unk * np.prod(ngrid)
            else:
                return unk / np.linalg.norm(unk)

    def get_unk_list(self, spin=0, kpt=1, band_list=None, Gp=[0, 0, 0], ngrid=None, norm=True, norm_c=True):
        '''Get unk for a list of states (all bands when band_list is None).'''
        if band_list is None:
            band_list = np.arange(self.nbands) + 1
        else:
            band_list = np.asarray(band_list)
        unk = []
        for i, band in enumerate(band_list):
            unk.append(self.get_unk(spin=spin, kpt=kpt, band=band, Gp=Gp, ngrid=ngrid, norm=norm, norm_c=norm_c))
        return np.asarray(unk)

    def write_vesta(self, unk, realonly=False, poscar='POSCAR', filename='unk',
                    ncol=10):
        '''
        Save the real-space pseudo-wavefunction in VESTA format.

        unk      : 3-D complex array on the FFT grid.
        realonly : if True, write only the real part.
        poscar   : POSCAR file whose header is copied to the output.
        filename : output prefix; writes <filename>_r.vasp (and _i.vasp).
        ncol     : number of values per output row.
        '''
        nx, ny, nz = unk.shape
        try:
            # Context manager so the POSCAR handle is always closed
            # (the original leaked the file object).
            with open(poscar, 'r') as pos:
                head = ''
                for line in pos:
                    if line.strip():
                        head += line
                    else:
                        break
            head += '\n%5d%5d%5d\n' % (nx, ny, nz)
        except OSError:
            # Narrowed from a bare `except:`; only I/O errors mean a bad POSCAR.
            raise IOError('Failed to open %s' % poscar)

        # Faster IO: write most of the grid as full rows, the remainder last.
        nrow = unk.size // ncol
        nrem = unk.size % ncol
        fmt = "%16.8E"

        psi = unk.copy()
        psi = psi.flatten(order='F')  # Fortran order, as VESTA/VASP expect
        psi_h = psi[:nrow * ncol].reshape((nrow, ncol))
        psi_r = psi[nrow * ncol:]

        # Write the real part.
        with open(filename + '_r.vasp', 'w') as out:
            out.write(head)
            out.write(
                '\n'.join([''.join([fmt % xx for xx in row])
                           for row in psi_h.real])
            )
            out.write("\n" + ''.join([fmt % xx for xx in psi_r.real]))

        # Write the imaginary part.
        if not realonly:
            with open(filename + '_i.vasp', 'w') as out:
                out.write(head)
                out.write(
                    '\n'.join([''.join([fmt % xx for xx in row])
                               for row in psi_h.imag])
                )
                out.write("\n" + ''.join([fmt % xx for xx in psi_r.imag]))

    def export_unk(self, spin=0, ngrid=None):
        '''
        Export the periodic part of the BF on a real-space grid for plotting
        with wannier90 (one Fortran-unformatted UNK file per k-point).
        '''
        from scipy.io import FortranFile
        if spin == 0:
            spin_str = '.1'
        else:
            spin_str = '.2'

        if ngrid is None:
            ngrid = self.ngrid.copy()
        else:
            ngrid = np.array(ngrid, dtype=np.int64)
            assert ngrid.shape[0] == 3, 'Wrong syntax for ngrid'
            assert np.all(ngrid >= self.ngrid), "Minium FT grid size: (%d, %d, %d)" % \
                (self.ngrid[0], self.ngrid[1], self.ngrid[2])

        for kpt in range(self.nkpts):
            unk_file = FortranFile('UNK' + "%05d" % (kpt + 1) + spin_str, 'w')
            unk_file.write_record(np.asarray([ngrid[0], ngrid[1], ngrid[2], kpt + 1, self.nbands], dtype=np.int32))
            for band in range(self.nbands):
                unk = self.get_unk(spin=spin, kpt=kpt+1, band=band+1, ngrid=ngrid, norm=False, norm_c=False)
                unk = unk.T.flatten()
                unk_file.write_record(unk)
            unk_file.close()
import os
import re
import html
from udmurt_translit import UdmurtTransliterator
class CsvProcessor:
    """
    Contains methods for adding transliterated columns to a CSV file.
    """
    # Matches the final path separator plus file name; used to strip the
    # file name from a path (handles both / and \ separators).
    rxDir = re.compile('[/\\\\][^/\\\\]+$')

    def __init__(self, transliterator, sep='\t',
                 srcCol=0,
                 tgtCol=1,
                 startLine=1):
        """
        transliterator : object exposing transliterate(str) -> str.
        sep            : column separator character.
        srcCol         : 0-based index of the column to read.
        tgtCol         : 0-based index of the column to write.
        startLine      : first 0-based row to process (rows before it are
                         copied unchanged, e.g. a header row).
        """
        self.transliterator = transliterator
        self.sep = sep
        self.srcCol = srcCol
        self.tgtCol = tgtCol
        self.startLine = startLine

    def process_file(self, fnameCsv, fnameCsvOut):
        """
        Process one CSV file: write the transliteration of column srcCol
        into column tgtCol, padding short rows as needed.
        """
        with open(fnameCsv, 'r', encoding='utf-8-sig') as fIn:
            lines = [list(line.strip('\r\n').split(self.sep)) for line in fIn.readlines()]
        for i in range(self.startLine, len(lines)):
            if len(lines[i]) <= self.srcCol:
                continue  # row too short to contain the source column
            srcText = lines[i][self.srcCol]
            tgtText = self.transliterator.transliterate(srcText)
            if len(lines[i]) <= self.tgtCol:
                # BUG FIX: pad the *row* out to tgtCol. The original used
                # len(lines) (the number of rows in the file) instead of
                # len(lines[i]) (the row length), producing rows that were
                # too short (IndexError) or too long (spurious columns).
                lines[i] += [''] * (self.tgtCol - len(lines[i]) + 1)
            lines[i][self.tgtCol] = tgtText
        lines = [self.sep.join(line) for line in lines]
        with open(fnameCsvOut, 'w', encoding='utf-8-sig') as fOut:
            fOut.write('\n'.join(lines))

    def process_corpus(self):
        """Transliterate every .csv/.tsv under ./csv into ./csv_transliterated,
        mirroring the directory structure."""
        if not os.path.exists('csv'):
            print('All CSV files should be located in the csv folder.')
            return
        if not os.path.exists('csv_transliterated'):
            os.makedirs('csv_transliterated')
        nDocs = 0
        for root, dirs, files in os.walk('csv'):
            for fname in files:
                if not fname.lower().endswith(('.csv', '.tsv')):
                    continue
                fnameCsv = os.path.join(root, fname)
                # Replace the leading 'csv' prefix with 'csv_transliterated'.
                fnameCsvOut = 'csv_transliterated' + fnameCsv[3:]
                outDirName = CsvProcessor.rxDir.sub('', fnameCsvOut)
                if len(outDirName) > 0 and not os.path.exists(outDirName):
                    os.makedirs(outDirName)
                nDocs += 1
                self.process_file(fnameCsv, fnameCsvOut)
        print(str(nDocs) + ' documents processed.')
if __name__ == '__main__':
    # Transliterate the Tatyshly-dialect Cyrillic corpus into the standard
    # orthography. Note: srcCol=1 / tgtCol=0 means the source text is read
    # from the second column and the result written to the first.
    transliterator = UdmurtTransliterator(src='tatyshly_cyr', target='standard',
                                          eafCleanup=True)
    cp = CsvProcessor(transliterator, sep='\t', srcCol=1, tgtCol=0, startLine=1)
    cp.process_corpus()
|
import importlib
import platform
import sys
from argparse import ArgumentParser
from typing import Iterable, List, Optional, Sequence, Set, Tuple
from . import __version__
from .config import Config
from .linter import Linter
from .problem import ProblemType
# Names of the problem types the user may suppress via --ignore.
IGNORABLE_TYPES = [t.name for t in (ProblemType.URI, ProblemType.FLOAT)]
def main(argv: Optional[Sequence[str]] = None) -> int:
    """CLI entry point: lint every input and print any problems found.

    Returns 0 when no problems were found, 1 when problems were printed,
    and 2 when YAML mode was requested without PyYAML installed.
    """
    if argv is None:
        argv = sys.argv[1:]
    args = _make_arg_parser().parse_args(argv)
    config = Config(args.ignore, args.yaml)
    if config.is_yaml and not _is_pyyaml_installed():
        print("Missing optional dependency 'PyYAML'.", file=sys.stderr)
        return 2
    problems_by_input = dict(_get_problems(args.filepath_or_text, config))
    if not problems_by_input:
        return 0
    # Only prefix each problem list with its input when several were given.
    show_input_name = len(args.filepath_or_text) > 1
    for input_name, problems in problems_by_input.items():
        if show_input_name:
            print(input_name)
        count = len(problems)
        header = "One error" if count == 1 else f"{count} errors"
        print(f"{header}:")
        for problem in problems:
            print(f"  {problem}")
    return 1
def _get_problems(
    input_list: List[str], config: Config
) -> Iterable[Tuple[str, List[str]]]:
    """Yield (input, problems) pairs for the inputs that have lint problems."""
    for candidate in input_list:
        found = Linter.validate(candidate, config)
        if found:
            yield candidate, found
def _is_pyyaml_installed() -> bool:
try:
importlib.import_module("yaml")
except ImportError:
return False
return True
def _make_arg_parser() -> ArgumentParser:
    """Build the command-line argument parser for the linter CLI."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument(
        "filepath_or_text",
        nargs="+",
        help="ASL file path or ASL JSON/YAML text.",
    )
    arg_parser.add_argument(
        "--ignore",
        type=_ignore_types,
        help="Comma-separated list of errors to ignore (or skip)."
        " For example, `--ignore=URI,FLOAT`."
        f" Supported values are {IGNORABLE_TYPES}.",
    )
    arg_parser.add_argument(
        "--yaml",
        action="store_true",
        help="Parse as YAML instead of JSON.",
    )
    arg_parser.add_argument(
        "--version",
        "-V",
        action="version",
        version=f"{__version__} {_get_python_version()}",
    )
    return arg_parser
def _ignore_types(value: str) -> Set[ProblemType]:
    """Parse a comma-separated list of problem-type names for --ignore.

    Raises ValueError for any name outside IGNORABLE_TYPES; argparse turns
    that into an "invalid value" error for the option.
    """
    values = set(v.strip() for v in value.split(","))
    unsupported = values - set(IGNORABLE_TYPES)
    if unsupported:
        # Include the offending names for easier debugging; the original
        # raised a bare ValueError() with no message at all.
        raise ValueError(f"unsupported values: {', '.join(sorted(unsupported))}")
    return set(ProblemType[v] for v in values)
def _get_python_version() -> str:
return (
f"{platform.python_implementation()} "
f"{platform.python_version()} on {platform.system()}"
)
|
"""
Find reachable nodes given a set of root nodes and properties
TODO: the --subj, --pred, and --obj options should perhaps be renamed to
--node1-column-name, --label-column-name, and --node2-column-name, with
the old options kept as synonyms.
TODO: the root file name should be parsed with parser.add_input_file(...)
"""
from argparse import Namespace, _MutuallyExclusiveGroup
import typing
from kgtk.cli_argparse import KGTKArgumentParser, KGTKFiles
def parser():
    """Return the command metadata used by the KGTK CLI registry."""
    return dict(help='Find reachable nodes in a graph.')
def add_arguments_extended(parser: KGTKArgumentParser, parsed_shared_args: Namespace):
    """
    Parse arguments
    Args:
        parser (argparse.ArgumentParser)
        parsed_shared_args (Namespace): shared options; only `_expert` is read
            here, to decide whether expert-level options are shown.
    """
    # Imported lazily so the CLI registry can list this command without
    # paying the import cost of the reader machinery.
    from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions
    from kgtk.utils.argparsehelpers import optional_bool
    from kgtk.value.kgtkvalueoptions import KgtkValueOptions
    _expert: bool = parsed_shared_args._expert
    parser.add_input_file(positional=True, who="The KGTK file to find connected components in.")
    parser.add_output_file()
    # parser.add_argument(action="store", type=str, dest="filename", metavar='filename', help='input filename here')
    # parser.add_argument('-o', '--out', action='store', type=str, dest='output', help='File to output the reachable nodes,if empty will be written out to standard output',default=None)
    # Root nodes may come from the command line and/or from a root file.
    parser.add_argument('--root',action='store',dest='root',type=str, nargs="*",
                        help='Set of root nodes to use, space- or comma-separated strings. (default=None)')
    parser.add_argument('--root-file', '--rootfile',action='store',dest='rootfile',help='Option to specify a file containing the set of root nodes',default=None)
    parser.add_argument('--rootfilecolumn',action='store',type=str,dest='rootfilecolumn',
                        help='Specify the name or number of the root file column with the root nodes. (default=node1 or its alias if edge file, id if node file)')
    # Column-name overrides for the input graph file.
    parser.add_argument("--subj", action="store", type=str, dest="subject_column_name", help='Name of the subject column. (default: node1 or its alias)')
    parser.add_argument("--obj", action="store", type=str, dest="object_column_name", help='Name of the object column. (default: label or its alias)')
    parser.add_argument("--pred",action="store" ,type=str, dest="predicate_column_name",help='Name of the predicate column. (default: node2 or its alias)')
    # Property filters: which edges to follow, from args and/or a file.
    parser.add_argument("--prop", "--props", action="store", type=str, dest="props", nargs="*",
                        help='Properties to consider while finding reachable nodes, space- or comma-separated string. (default: all properties)',default=None)
    parser.add_argument('--props-file', action='store', dest='props_file',
                        help='Option to specify a file containing the set of properties',default=None)
    parser.add_argument('--propsfilecolumn', action='store', type=str, dest='propsfilecolumn', default=None,
                        help='Specify the name or number of the props file column with the property names. (default=node1 or its alias if edge file, id if node file)')
    # Inverted edges: follow selected (or all) edges in the reverse direction.
    parser.add_argument('--inverted', dest="inverted",
                        help="When True, and when --undirected is False, invert the source and target nodes in the graph. (default=%(default)s)",
                        type=optional_bool, nargs='?', const=True, default=False, metavar="True|False")
    parser.add_argument("--inverted-prop", "--inverted-props", action="store", type=str, dest="inverted_props", nargs="*",
                        help='Properties to invert, space- or comma-separated string. (default: no properties)',default=None)
    parser.add_argument('--inverted-props-file', action='store', dest='inverted_props_file',
                        help='Option to specify a file containing the set of inverted properties',default=None)
    parser.add_argument('--invertedpropsfilecolumn', action='store', type=str, dest='invertedpropsfilecolumn', default=None,
                        help='Specify the name or number of the inverted props file column with the property names. (default=node1 or its alias if edge file, id if node file)')
    # Undirected edges: treat selected (or all) edges as bidirectional.
    parser.add_argument('--undirected', dest="undirected",
                        help="When True, specify graph as undirected. (default=%(default)s)",
                        type=optional_bool, nargs='?', const=True, default=False, metavar="True|False")
    parser.add_argument("--undirected-prop", "--undirected-props", action="store", type=str, dest="undirected_props", nargs="*",
                        help='Properties to treat as undirected, space- or comma-separated string. (default: no properties)',default=None)
    parser.add_argument('--undirected-props-file', action='store', dest='undirected_props_file',
                        help='Option to specify a file containing the set of undirected properties',default=None)
    parser.add_argument('--undirectedpropsfilecolumn', action='store', type=str, dest='undirectedpropsfilecolumn', default=None,
                        help='Specify the name or number of the undirected props file column with the property names. (default=node1 or its alias if edge file, id if node file)')
    # Output shaping and search strategy.
    parser.add_argument('--label', action='store', type=str, dest='label', help='The label for the reachable relationship. (default: %(default)s)',default="reachable")
    parser.add_argument('--selflink',dest='selflink_bool',
                        help='When True, include a link from each output node to itself. (default=%(default)s)',
                        type=optional_bool, nargs='?', const=True, default=False, metavar="True|False")
    parser.add_argument('--show-properties',dest='show_properties',
                        help='When True, show the graph properties. (default=%(default)s)',
                        type=optional_bool, nargs='?', const=True, default=False, metavar="True|False")
    parser.add_argument('--breadth-first',dest='breadth_first',
                        help='When True, search the graph breadth first. When false, search depth first. (default=%(default)s)',
                        type=optional_bool, nargs='?', const=True, default=False, metavar="True|False")
    parser.add_argument('--depth-limit',dest='depth_limit',
                        help='An optional depth limit for breadth-first searches. (default=%(default)s)',
                        type=int, default=None)
    # Register the shared reader/value option groups (per-file variants too).
    KgtkReader.add_debug_arguments(parser, expert=_expert)
    KgtkReaderOptions.add_arguments(parser, mode_options=True, expert=_expert)
    KgtkReaderOptions.add_arguments(parser, mode_options=True, who="input", expert=_expert, defaults=False)
    KgtkReaderOptions.add_arguments(parser, mode_options=True, who="root", expert=_expert, defaults=False)
    KgtkReaderOptions.add_arguments(parser, mode_options=True, who="props", expert=_expert, defaults=False)
    KgtkReaderOptions.add_arguments(parser, mode_options=True, who="undirected_props", expert=_expert, defaults=False)
    KgtkReaderOptions.add_arguments(parser, mode_options=True, who="inverted_props", expert=_expert, defaults=False)
    KgtkValueOptions.add_arguments(parser, expert=_expert)
def run(input_file: KGTKFiles,
        output_file: KGTKFiles,
        root: typing.Optional[typing.List[str]],
        rootfile,
        rootfilecolumn,
        subject_column_name: typing.Optional[str],
        object_column_name: typing.Optional[str],
        predicate_column_name: typing.Optional[str],
        props: typing.Optional[typing.List[str]],
        props_file: typing.Optional[str],
        propsfilecolumn: typing.Optional[str],
        inverted: bool,
        inverted_props: typing.Optional[typing.List[str]],
        inverted_props_file: typing.Optional[str],
        invertedpropsfilecolumn: typing.Optional[str],
        undirected: bool,
        undirected_props: typing.Optional[typing.List[str]],
        undirected_props_file: typing.Optional[str],
        undirectedpropsfilecolumn: typing.Optional[str],
        label: str,
        selflink_bool: bool,
        show_properties: bool,
        breadth_first: bool,
        depth_limit: typing.Optional[int],
        errors_to_stdout: bool,
        errors_to_stderr: bool,
        show_options: bool,
        verbose: bool,
        very_verbose: bool,
        **kwargs, # Whatever KgtkFileOptions and KgtkValueOptions want.
        ):
    """
    Compute the set of nodes reachable from a set of root nodes in a KGTK
    edge file, and write one (root, label, reachable_node) edge per
    reachable node to the output file.

    Roots come from --root values and/or a root file; properties may
    restrict which edges are followed (--props), be treated as undirected
    (--undirected-props), or have their direction inverted
    (--inverted-props).  Raises KGTKException on option conflicts, unknown
    columns, or reader/writer failures.
    """
    # Delayed imports keep CLI startup fast when this command is not used.
    import sys
    import csv
    from pathlib import Path
    import time
    from graph_tool.search import dfs_iterator, bfs_iterator, bfs_search, BFSVisitor
    # from graph_tool import load_graph_from_csv
    from graph_tool.util import find_edge
    from kgtk.exceptions import KGTKException
    from kgtk.cli_argparse import KGTKArgumentParser
    from kgtk.gt.gt_load import load_graph_from_kgtk
    from kgtk.io.kgtkwriter import KgtkWriter
    from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions
    from kgtk.value.kgtkvalueoptions import KgtkValueOptions

    # Graph-tool names columns that are not subject or object c0, c1...
    # This function finds the number that graph tool assigned to the predicate column.
    def find_pred_position(sub,pred,obj):
        if pred < sub and pred < obj:
            return pred
        elif (pred > sub and pred < obj) or (pred<sub and pred>obj):
            return pred-1
        else:
            return pred-2

    # Helper: find all graph edges whose edge property `p` equals `v`.
    def get_edges_by_edge_prop(g, p, v):
        return find_edge(g, prop=g.properties[('e', p)], match=v)

    input_kgtk_file: Path = KGTKArgumentParser.get_input_file(input_file)
    output_kgtk_file: Path = KGTKArgumentParser.get_output_file(output_file)

    # Select where to send error messages, defaulting to stderr.
    error_file: typing.TextIO = sys.stdout if errors_to_stdout else sys.stderr

    # Build the option structures.
    input_reader_options: KgtkReaderOptions = KgtkReaderOptions.from_dict(kwargs, who="input", fallback=True)
    root_reader_options: KgtkReaderOptions = KgtkReaderOptions.from_dict(kwargs, who="root", fallback=True)
    props_reader_options: KgtkReaderOptions = KgtkReaderOptions.from_dict(kwargs, who="props", fallback=True)
    undirected_props_reader_options: KgtkReaderOptions = KgtkReaderOptions.from_dict(kwargs, who="undirected_props", fallback=True)
    inverted_props_reader_options: KgtkReaderOptions = KgtkReaderOptions.from_dict(kwargs, who="inverted_props", fallback=True)
    value_options: KgtkValueOptions = KgtkValueOptions.from_dict(kwargs)

    # Normalize optional list arguments to empty lists.
    if root is None:
        root = [ ] # This simplifies matters.

    if props is None:
        props = [ ] # This simplifies matters.

    if undirected_props is None:
        undirected_props = [ ] # This simplifies matters.

    if inverted_props is None:
        inverted_props = [ ] # This simplifies matters.

    # Echo the effective options when requested (debugging aid).
    if show_options:
        if root is not None:
            print("--root %s" % " ".join(root), file=error_file)
        if rootfile is not None:
            print("--rootfile=%s" % rootfile, file=error_file)
        if rootfilecolumn is not None:
            print("--rootfilecolumn=%s" % rootfilecolumn, file=error_file)
        if subject_column_name is not None:
            print("--subj=%s" % subject_column_name, file=error_file)
        if object_column_name is not None:
            print("--obj=%s" % object_column_name, file=error_file)
        if predicate_column_name is not None:
            print("--pred=%s" % predicate_column_name, file=error_file)
        if props is not None:
            print("--props=%s" % " ".join(props), file=error_file)
        if props_file is not None:
            print("--props-file=%s" % props_file, file=error_file)
        if propsfilecolumn is not None:
            print("--propsfilecolumn=%s" % propsfilecolumn, file=error_file)
        print("--inverted=%s" % str(inverted), file=error_file)
        if inverted_props is not None:
            print("--inverted-props=%s" % " ".join(inverted_props), file=error_file)
        if inverted_props_file is not None:
            print("--inverted-props-file=%s" % inverted_props_file, file=error_file)
        if invertedpropsfilecolumn is not None:
            print("--invertedpropsfilecolumn=%s" % invertedpropsfilecolumn, file=error_file)
        print("--undirected=%s" % str(undirected), file=error_file)
        if undirected_props is not None:
            print("--undirected-props=%s" % " ".join(undirected_props), file=error_file)
        if undirected_props_file is not None:
            print("--undirected-props-file=%s" % undirected_props_file, file=error_file)
        if undirectedpropsfilecolumn is not None:
            print("--undirectedpropsfilecolumn=%s" % undirectedpropsfilecolumn, file=error_file)
        print("--label=%s" % label, file=error_file)
        print("--selflink=%s" % str(selflink_bool), file=error_file)
        print("--breadth-first=%s" % str(breadth_first), file=error_file)
        if depth_limit is not None:
            print("--depth-limit=%d" % depth_limit, file=error_file)
        input_reader_options.show(out=error_file)
        root_reader_options.show(out=error_file)
        props_reader_options.show(out=error_file)
        undirected_props_reader_options.show(out=error_file)
        inverted_props_reader_options.show(out=error_file)
        value_options.show(out=error_file)
        KgtkReader.show_debug_arguments(errors_to_stdout=errors_to_stdout,
                                        errors_to_stderr=errors_to_stderr,
                                        show_options=show_options,
                                        verbose=verbose,
                                        very_verbose=very_verbose,
                                        out=error_file)
        print("=======", file=error_file, flush=True)

    # Validate mutually-exclusive option combinations.
    if inverted and (len(inverted_props) > 0 or inverted_props_file is not None):
        raise KGTKException("--inverted is not allowed with --inverted-props or --inverted-props-file")

    if undirected and (len(undirected_props) > 0 or undirected_props_file is not None):
        raise KGTKException("--undirected is not allowed with --undirected-props or --undirected-props-file")

    if depth_limit is not None:
        if not breadth_first:
            raise KGTKException("--depth-limit is not allowed without --breadth-first")
        if depth_limit <= 0:
            raise KGTKException("--depth-limit requires a positive argument")

    # Build the root node set from the optional root file ...
    root_set: typing.Set = set()

    if rootfile is not None:
        if verbose:
            print("Reading the root file %s" % repr(rootfile), file=error_file, flush=True)
        try:
            root_kr: KgtkReader = KgtkReader.open(Path(rootfile),
                                                  error_file=error_file,
                                                  who="root",
                                                  options=root_reader_options,
                                                  value_options=value_options,
                                                  verbose=verbose,
                                                  very_verbose=very_verbose,
                                                  )
        except SystemExit:
            raise KGTKException("Exiting.")

        # Choose the root column: numeric index, node1 (edge file),
        # id (node file), or an explicitly named column.
        rootcol: int
        if root_kr.is_edge_file:
            rootcol = int(rootfilecolumn) if rootfilecolumn is not None and rootfilecolumn.isdigit() else root_kr.get_node1_column_index(rootfilecolumn)
        elif root_kr.is_node_file:
            rootcol = int(rootfilecolumn) if rootfilecolumn is not None and rootfilecolumn.isdigit() else root_kr.get_id_column_index(rootfilecolumn)
        elif rootfilecolumn is not None:
            rootcol = int(rootfilecolumn) if rootfilecolumn is not None and rootfilecolumn.isdigit() else root_kr.column_name_map.get(rootfilecolumn, -1)
        else:
            root_kr.close()
            raise KGTKException("The root file is neither an edge nor a node file and the root column name was not supplied.")

        if rootcol < 0:
            root_kr.close()
            raise KGTKException("Unknown root column %s" % repr(rootfilecolumn))

        for row in root_kr:
            rootnode: str = row[rootcol]
            root_set.add(rootnode)
        root_kr.close()

    # ... then add any roots given directly on the command line
    # (comma-separated groups are split).
    if len(root) > 0:
        if verbose:
            print ("Adding root nodes from the command line.", file=error_file, flush=True)
        root_group: str
        for root_group in root:
            r: str
            for r in root_group.split(','):
                if verbose:
                    print("... adding %s" % repr(r), file=error_file, flush=True)
                root_set.add(r)

    if len(root_set) == 0:
        print("Warning: No nodes in the root set, the output file will be empty.", file=error_file, flush=True)
    elif verbose:
        print("%d nodes in the root set." % len(root_set), file=error_file, flush=True)

    # Build the set of properties whose edges may be followed.
    property_set: typing.Set[str] = set()
    if props_file is not None:
        if verbose:
            print("Reading the root file %s" % repr(props_file), file=error_file, flush=True)
        try:
            props_kr: KgtkReader = KgtkReader.open(Path(props_file),
                                                   error_file=error_file,
                                                   who="props",
                                                   options=props_reader_options,
                                                   value_options=value_options,
                                                   verbose=verbose,
                                                   very_verbose=very_verbose,
                                                   )
        except SystemExit:
            raise KGTKException("Exiting.")

        propscol: int
        if props_kr.is_edge_file:
            propscol = int(propsfilecolumn) if propsfilecolumn is not None and propsfilecolumn.isdigit() else props_kr.get_node1_column_index(propsfilecolumn)
        elif props_kr.is_node_file:
            propscol = int(propsfilecolumn) if propsfilecolumn is not None and propsfilecolumn.isdigit() else props_kr.get_id_column_index(propsfilecolumn)
        elif propsfilecolumn is not None:
            propscol = int(propsfilecolumn) if propsfilecolumn is not None and propsfilecolumn.isdigit() else props_kr.column_name_map.get(propsfilecolumn, -1)
        else:
            props_kr.close()
            raise KGTKException("The props file is neither an edge nor a node file and the root column name was not supplied.")

        if propscol < 0:
            props_kr.close()
            raise KGTKException("Unknown props column %s" % repr(propsfilecolumn))

        for row in props_kr:
            property_name: str = row[propscol]
            property_set.add(property_name)
        props_kr.close()

    if len(props) > 0:
        # Filter the graph, G, to include only edges where the predicate (label)
        # column contains one of the selected properties.
        prop_group: str
        for prop_group in props:
            prop: str
            for prop in prop_group.split(','):
                property_set.add(prop)
    if verbose and len(property_set) > 0:
        print("property set=%s" % " ".join(sorted(list(property_set))), file=error_file, flush=True)

    # Build the set of properties treated as undirected links.
    undirected_property_set: typing.Set[str] = set()
    if undirected_props_file is not None:
        if verbose:
            print("Reading the undirected properties file %s" % repr(undirected_props_file), file=error_file, flush=True)
        try:
            undirected_props_kr: KgtkReader = KgtkReader.open(Path(undirected_props_file),
                                                              error_file=error_file,
                                                              who="undirected_props",
                                                              options=undirected_props_reader_options,
                                                              value_options=value_options,
                                                              verbose=verbose,
                                                              very_verbose=very_verbose,
                                                              )
        except SystemExit:
            raise KGTKException("Exiting.")

        undirected_props_col: int
        if undirected_props_kr.is_edge_file:
            undirected_props_col = int(undirectedpropsfilecolumn) if undirectedpropsfilecolumn is not None and undirectedpropsfilecolumn.isdigit() else undirected_props_kr.get_node1_column_index(undirectedpropsfilecolumn)
        elif undirected_props_kr.is_node_file:
            undirected_props_col = int(undirectedpropsfilecolumn) if undirectedpropsfilecolumn is not None and undirectedpropsfilecolumn.isdigit() else undirected_props_kr.get_id_column_index(undirectedpropsfilecolumn)
        elif undirectedpropsfilecolumn is not None:
            undirected_props_col = int(undirectedpropsfilecolumn) if undirectedpropsfilecolumn is not None and undirectedpropsfilecolumn.isdigit() else undirected_props_kr.column_name_map.get(undirectedpropsfilecolumn, -1)
        else:
            undirected_props_kr.close()
            raise KGTKException("The undirected props file is neither an edge nor a node file and the root column name was not supplied.")

        if undirected_props_col < 0:
            undirected_props_kr.close()
            raise KGTKException("Unknown undirected properties column %s" % repr(undirectedpropsfilecolumn))

        for row in undirected_props_kr:
            undirected_property_name: str = row[undirected_props_col]
            undirected_property_set.add(undirected_property_name)
        undirected_props_kr.close()
    if len(undirected_props) > 0:
        # Edges where the predicate (label) column contains one of the selected
        # properties will be treated as undirected links.
        und_prop_group: str
        for und_prop_group in undirected_props:
            und_prop: str
            for und_prop in und_prop_group.split(','):
                undirected_property_set.add(und_prop)
    if verbose and len(undirected_property_set) > 0:
        print("undirected property set=%s" % " ".join(sorted(list(undirected_property_set))), file=error_file, flush=True)

    # Build the set of properties whose edge direction is inverted.
    inverted_property_set: typing.Set[str] = set()
    if inverted_props_file is not None:
        if verbose:
            print("Reading the inverted properties file %s" % repr(inverted_props_file), file=error_file, flush=True)
        try:
            inverted_props_kr: KgtkReader = KgtkReader.open(Path(inverted_props_file),
                                                            error_file=error_file,
                                                            who="inverted_props",
                                                            options=inverted_props_reader_options,
                                                            value_options=value_options,
                                                            verbose=verbose,
                                                            very_verbose=very_verbose,
                                                            )
        except SystemExit:
            raise KGTKException("Exiting.")

        inverted_props_col: int
        if inverted_props_kr.is_edge_file:
            inverted_props_col = int(invertedpropsfilecolumn) if invertedpropsfilecolumn is not None and invertedpropsfilecolumn.isdigit() else inverted_props_kr.get_node1_column_index(invertedpropsfilecolumn)
        elif inverted_props_kr.is_node_file:
            inverted_props_col = int(invertedpropsfilecolumn) if invertedpropsfilecolumn is not None and invertedpropsfilecolumn.isdigit() else inverted_props_kr.get_id_column_index(invertedpropsfilecolumn)
        elif invertedpropsfilecolumn is not None:
            inverted_props_col = int(invertedpropsfilecolumn) if invertedpropsfilecolumn is not None and invertedpropsfilecolumn.isdigit() else inverted_props_kr.column_name_map.get(invertedpropsfilecolumn, -1)
        else:
            inverted_props_kr.close()
            raise KGTKException("The inverted props file is neither an edge nor a node file and the root column name was not supplied.")

        if inverted_props_col < 0:
            inverted_props_kr.close()
            raise KGTKException("Unknown inverted properties column %s" % repr(invertedpropsfilecolumn))

        for row in inverted_props_kr:
            inverted_property_name: str = row[inverted_props_col]
            inverted_property_set.add(inverted_property_name)
        inverted_props_kr.close()
    if len(inverted_props) > 0:
        # Edges where the predicate (label) column contains one of the selected
        # properties will have the source and target columns swapped.
        inv_prop_group: str
        for inv_prop_group in inverted_props:
            inv_prop: str
            for inv_prop in inv_prop_group.split(','):
                inverted_property_set.add(inv_prop)
    if verbose and len(inverted_property_set):
        print("inverted property set=%s" % " ".join(sorted(list(inverted_property_set))), file=error_file, flush=True)

    # Open the main input file and locate the subject/predicate/object columns.
    try:
        kr: KgtkReader = KgtkReader.open(input_kgtk_file,
                                         error_file=error_file,
                                         who="input",
                                         options=input_reader_options,
                                         value_options=value_options,
                                         verbose=verbose,
                                         very_verbose=very_verbose,
                                         )
    except SystemExit:
        raise KGTKException("Exiting.")

    sub: int = kr.get_node1_column_index(subject_column_name)
    if sub < 0:
        print("Unknown subject column %s" % repr(subject_column_name), file=error_file, flush=True)
    pred: int = kr.get_label_column_index(predicate_column_name)
    if pred < 0:
        print("Unknown predicate column %s" % repr(predicate_column_name), file=error_file, flush=True)
    obj: int = kr.get_node2_column_index(object_column_name)
    if obj < 0:
        print("Unknown object column %s" % repr(object_column_name), file=error_file, flush=True)
    if sub < 0 or pred < 0 or obj < 0:
        kr.close()
        raise KGTKException("Exiting due to unknown column.")

    if verbose:
        print("special columns: sub=%d pred=%d obj=%d" % (sub, pred, obj), file=error_file, flush=True)

    # Load the graph into graph-tool, applying the property filters.
    # G = load_graph_from_csv(filename,not(undirected),skip_first=not(header_bool),hashed=True,csv_options={'delimiter': '\t'},ecols=(sub,obj))
    G = load_graph_from_kgtk(kr,
                             directed=not undirected,
                             inverted=inverted,
                             ecols=(sub, obj),
                             pcol=pred,
                             pset=property_set,
                             upset=undirected_property_set,
                             ipset=inverted_property_set,
                             verbose=verbose,
                             out=error_file)

    name = G.vp["name"] # Get the vertex name property map (vertex to node1 (subject) name)

    if show_properties:
        print("Graph name=%s" % repr(name), file=error_file, flush=True)
        print("Graph properties:" , file=error_file, flush=True)
        key: typing.Any
        for key in G.properties:
            print("    %s: %s" % (repr(key), repr(G.properties[key])), file=error_file, flush=True)

    # Map root node names to graph vertices.
    index_list = []
    for v in G.vertices():
        if name[v] in root_set:
            index_list.append(v)
    if len(index_list) == 0:
        print("Warning: No root nodes found in the graph, the output file will be empty.", file=error_file, flush=True)
    elif verbose:
        print("%d root nodes found in the graph." % len(index_list), file=error_file, flush=True)

    output_header: typing.List[str] = ['node1','label','node2']

    try:
        kw: KgtkWriter = KgtkWriter.open(output_header,
                                         output_kgtk_file,
                                         mode=KgtkWriter.Mode.EDGE,
                                         require_all_columns=True,
                                         prohibit_extra_columns=True,
                                         fill_missing_columns=False,
                                         verbose=verbose,
                                         very_verbose=very_verbose)
    except SystemExit:
        raise KGTKException("Exiting.")

    # For each root, traverse the graph (BFS or DFS) and emit one edge per
    # reached vertex.
    for index in index_list:
        if selflink_bool:
            kw.writerow([name[index], label, name[index]])

        if breadth_first:
            if depth_limit is None:
                for e in bfs_iterator(G, G.vertex(index)):
                    kw.writerow([name[index], label, name[e.target()]])
            else:
                # Depth-limited BFS: abort the search via an exception once
                # a tree edge would exceed the depth limit.
                class DepthExceeded(Exception):
                    pass

                class DepthLimitedVisitor(BFSVisitor):
                    def __init__(self, name, pred, dist):
                        self.name = name
                        self.pred = pred
                        self.dist = dist

                    def tree_edge(self, e):
                        self.pred[e.target()] = int(e.source())
                        newdist = self.dist[e.source()] + 1
                        if depth_limit is not None and newdist > depth_limit:
                            raise DepthExceeded
                        self.dist[e.target()] = newdist
                        kw.writerow([name[index], label, name[e.target()]])

                dist = G.new_vertex_property("int")
                pred = G.new_vertex_property("int64_t")
                try:
                    bfs_search(G, G.vertex(index), DepthLimitedVisitor(name, pred, dist))
                except DepthExceeded:
                    pass
        else:
            for e in dfs_iterator(G, G.vertex(index)):
                kw.writerow([name[index], label, name[e.target()]])

    kw.close()
    kr.close()
|
def mat_rows(s, n):
    """Group the flat sequence *s* into consecutive n-tuples (row-major).

    A single shared iterator repeated n times makes zip() pull n items per
    output tuple; any trailing partial group is dropped.
    """
    shared_iter = iter(s)
    return zip(*n * (shared_iter,))
def mat_cols(s, n):
    """Group the flat sequence *s* column-wise into n-tuples.

    Zips n iterators, the k-th starting at offset n*k into *s*; zip()
    stops at the shortest iterator.

    Fix: the original used Python 2's ``xrange``, which raises NameError
    on Python 3 (the sibling ``mat_rows`` is already Python-3 style).
    """
    return zip(*(iter(s[n * k:]) for k in range(n)))
def mat_null(fn, s, n):
    """Count how many groups produced by fn(s, n) consist entirely of None."""
    all_none = (None, ) * n
    return sum(1 for group in fn(s, n) if group == all_none)
|
import numpy as np
from unittest import TestCase
from diffprivlib.mechanisms import Bingham
from diffprivlib.utils import global_seed
class TestBingham(TestCase):
    """Unit tests for the diffprivlib Bingham mechanism."""

    def setup_method(self, method):
        # Seed the global RNG only for probabilistic ("*prob") tests so they
        # are reproducible; other tests use fresh randomness.
        if method.__name__ .endswith("prob"):
            global_seed(314159)

        self.mech = Bingham
        a = np.random.random((5, 3))
        # Symmetric positive semi-definite matrix built as A^T A.
        self.random_array = a.T.dot(a)

    def teardown_method(self, method):
        del self.mech

    @staticmethod
    def generate_data(d=5, n=10):
        # Random symmetric PSD (d x d) matrix from an (n x d) sample.
        a = np.random.random((n, d))
        return a.T.dot(a)

    def test_class(self):
        from diffprivlib.mechanisms import DPMechanism
        self.assertTrue(issubclass(Bingham, DPMechanism))

    def test_neg_epsilon(self):
        with self.assertRaises(ValueError):
            self.mech(epsilon=-1)

    def test_inf_epsilon(self):
        # With infinite epsilon the mechanism adds no noise, so the result
        # must equal the top eigenvector of the input.
        mech = self.mech(epsilon=float("inf"))

        for i in range(100):
            data = self.generate_data()
            eigvals, eigvecs = np.linalg.eigh(data)
            true_data = eigvecs[:, eigvals.argmax()]

            noisy_data = mech.randomise(data)
            self.assertTrue(np.allclose(true_data, noisy_data))

    def test_non_zero_delta(self):
        mech = self.mech(epsilon=1)
        mech.delta = 0.5

        with self.assertRaises(ValueError):
            mech.randomise(self.generate_data())

    def test_default_sensitivity(self):
        mech = self.mech(epsilon=1)
        self.assertEqual(mech.sensitivity, 1.0)

    def test_wrong_sensitivity(self):
        with self.assertRaises(TypeError):
            self.mech(epsilon=1, sensitivity="1")

        with self.assertRaises(ValueError):
            self.mech(epsilon=1, sensitivity=-1)

    def test_zero_sensitivity(self):
        # Zero sensitivity also implies a noiseless (exact) result.
        mech = self.mech(epsilon=1, sensitivity=0)

        for i in range(100):
            data = self.generate_data()
            eigvals, eigvecs = np.linalg.eigh(data)
            true_data = eigvecs[:, eigvals.argmax()]

            noisy_data = mech.randomise(data)
            self.assertTrue(np.allclose(true_data, noisy_data))

    # Input validation: the mechanism only accepts symmetric 2-D numpy arrays.
    def test_numeric_input(self):
        mech = self.mech(epsilon=1, sensitivity=1)

        with self.assertRaises(TypeError):
            mech.randomise(1)

    def test_string_input(self):
        mech = self.mech(epsilon=1, sensitivity=1)

        with self.assertRaises(TypeError):
            mech.randomise("1")

    def test_list_input(self):
        mech = self.mech(epsilon=1, sensitivity=1)

        with self.assertRaises(TypeError):
            mech.randomise([1, 2, 3])

    def test_string_array_input(self):
        mech = self.mech(epsilon=1, sensitivity=1)

        with self.assertRaises(TypeError):
            mech.randomise(np.array([["1", "2"], ["3", "4"]]))

    def test_scalar_input(self):
        mech = self.mech(epsilon=1, sensitivity=1)

        with self.assertRaises(ValueError):
            mech.randomise(np.array([1]))

    def test_scalar_array_input(self):
        mech = self.mech(epsilon=1, sensitivity=1)
        self.assertIsNotNone(mech.randomise(np.array([[1]])))

    def test_vector_input(self):
        mech = self.mech(epsilon=1, sensitivity=1)

        with self.assertRaises(ValueError):
            mech.randomise(np.array([1, 2, 3]))

    def test_non_square_input(self):
        mech = self.mech(epsilon=1, sensitivity=1)

        with self.assertRaises(ValueError):
            mech.randomise(np.ones((3, 4)))

    def test_non_symmetric_input(self):
        mech = self.mech(epsilon=1, sensitivity=1)
        data = self.generate_data()
        # Break symmetry in a single off-diagonal element.
        data[0, 1] -= 1

        with self.assertRaises(ValueError):
            mech.randomise(data)

    def test_3D_input(self):
        mech = self.mech(epsilon=1, sensitivity=1)

        with self.assertRaises(ValueError):
            mech.randomise(np.ones((3, 3, 3)))

    def test_large_input(self):
        # Normalised 21-dimensional covariance-style input should be accepted.
        X = np.random.randn(10000, 21)
        X -= np.mean(X, axis=0)
        X /= np.linalg.norm(X, axis=1).max()
        XtX = X.T.dot(X)

        mech = self.mech(epsilon=1)
        self.assertIsNotNone(mech.randomise(XtX))

    def test_different_result(self):
        mech = self.mech(epsilon=1, sensitivity=1)
        data = self.generate_data()
        noisy_data = mech.randomise(data)

        for i in range(10):
            old_noisy_data = noisy_data
            noisy_data = mech.randomise(self.generate_data())

            # Output is a unit vector, and successive randomisations differ.
            self.assertTrue(np.isclose(noisy_data.dot(noisy_data), 1.0))
            self.assertFalse(np.allclose(noisy_data, old_noisy_data))

    def test_repr(self):
        repr_ = repr(self.mech(epsilon=1, sensitivity=1))
        self.assertIn(".Bingham(", repr_)

    def test_bias(self):
        # bias/variance are not defined for this mechanism.
        self.assertRaises(NotImplementedError, self.mech(epsilon=1, sensitivity=1).bias, np.array([[1]]))

    def test_variance(self):
        self.assertRaises(NotImplementedError, self.mech(epsilon=1, sensitivity=1).variance, np.array([[1]]))
|
import httpx

# A session-scoped pytest fixture was considered for sharing the client,
# but a plain module-level client is used instead; kept for reference:
# from pytest import fixture, mark
# @fixture(scope="session")
# def client():
#     """
#     A shared client for all requests in the test suite.
#     """
#     return httpx.Client()

# Shared HTTP client reused by all requests in the test suite.
client = httpx.Client()
|
"""
config.py
Contains application settings encapsulated using Pydantic BaseSettings.
Settings may be overriden using environment variables.
Example:
override uvicorn_port default setting
export UVICORN_PORT=5200
or
UVICORN_PORT=5200 python bluebutton/main.py
"""
import os
import socket
import ssl
from functools import lru_cache
from os.path import dirname, abspath
from typing import Optional

import certifi
from pydantic import BaseSettings
host_name = socket.gethostname()
class Settings(BaseSettings):
    """
    Application settings.

    Every field may be overridden via environment variables (see the module
    docstring for examples); values are loaded from the package .env file
    when present.
    """

    # uvicorn settings
    uvicorn_app: str = "bluebutton.asgi:app"
    uvicorn_host: str = "0.0.0.0"
    uvicorn_port: int = 5200
    uvicorn_reload: bool = False

    # general certificate settings
    # path to "standard" CA certificates
    certificate_authority_path: str = certifi.where()
    certificate_verify: bool = False

    # bluebutton package settings
    bluebutton_ca_file: str = certifi.where()
    # Fix: a CA directory may legitimately be absent, so this is Optional
    # (it was previously annotated plain `str` with a None default).
    bluebutton_ca_path: Optional[str] = None
    bluebutton_cert_name: str = "lfh-bluebutton-client.pem"
    bluebutton_cert_key_name: str = "lfh-bluebutton-client.key"
    bluebutton_config_directory: str = "/home/lfh/bluebutton/config"
    bluebutton_logging_config_path: str = "logging.yaml"
    bluebutton_rate_limit: str = "5/second"
    bluebutton_timing_enabled: bool = False

    # LFH Blue Button 2.0 Client Endpoint
    bluebutton_authorize_callback: str = f"https://localhost:{uvicorn_port}/bluebutton/authorize_callback"

    # CMS Blue Button 2.0 Endpoints and settings
    cms_authorize_url: str = "https://sandbox.bluebutton.cms.gov/v2/o/authorize/"
    cms_token_url: str = "https://sandbox.bluebutton.cms.gov/v2/o/token/"
    cms_base_url: str = "https://sandbox.bluebutton.cms.gov/v2/fhir/"
    cms_scopes: str = "patient/Patient.read patient/Coverage.read patient/ExplanationOfBenefit.read"
    # SECURITY NOTE(review): sandbox OAuth client credentials are hardcoded
    # here; they should be supplied via environment variables / the .env
    # file rather than committed defaults. Left in place to preserve
    # behavior -- confirm before removing.
    cms_client_id: str = "kAMZfgm43Y27HhCTJ2sZyttdV5pFvGyFvaboXqEf"
    cms_client_secret: str = "OrKYtcPdgzqWgXLx7Q2YJLvPGaybP4zxuiTTKfRlFrLhVpyZeM8PpUNRnadliV2LlPEOCzmRFiOSKGiD7jZl3RlezSC5g0mTVaCgouLLX5yun8mI3r0LQ0jb65WD6lNR"
    return_cms_result: bool = False

    # LFH connect FHIR url (annotation added so pydantic treats it uniformly
    # with the other fields).
    lfh_fhir_url: str = "https://localhost:5000/fhir"

    class Config:
        case_sensitive = False
        env_file = os.path.join(dirname(dirname(abspath(__file__))), ".env")
@lru_cache()
def get_settings() -> Settings:
    """Build the application Settings once and memoize them for reuse."""
    settings = Settings()
    return settings
@lru_cache()
def get_ssl_context(ssl_purpose: ssl.Purpose) -> ssl.SSLContext:
    """
    Return a cached SSLContext for *ssl_purpose*, loading the CA locations
    configured in the application settings.

    :param ssl_purpose: ssl.Purpose value passed to create_default_context
    """
    app_settings = get_settings()
    context = ssl.create_default_context(ssl_purpose)
    context.load_verify_locations(
        cafile=app_settings.bluebutton_ca_file,
        capath=app_settings.bluebutton_ca_path,
    )
    return context
|
from setuptools import setup

# Packaging metadata for the nginx-parser distribution.
setup(
    name='nginx-parser',
    packages=['src'],
    version='0.1',
    description='A simple parser for nginx logs',
    author='George Davaris',
    author_email='davarisg@gmail.com',
    license='MIT',
    url='https://github.com/davarisg/nginx-parser', # use the URL to the github repo
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Development Status :: 4 - Beta',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
    ],
    # Runtime dependencies: terminal rendering (blessed) and YAML config parsing.
    install_requires=['blessed>=1.14.2', 'PyYAML>=3.12'],
    # Installs the `nginx-parser` console command, dispatching to src.parser:main.
    entry_points={
        'console_scripts': [
            'nginx-parser=src.parser:main',
        ],
    },
)
|
#!/usr/bin/env python
# python 3.8.5
import numpy
from PIL import Image
def genImage(img_width=512, img_height=256, filename='key.png'):
    """
    Generate a random RGB noise image ("key") and save it as a PNG.

    Generalization: the output path, previously hard-coded to 'key.png',
    is now a parameter with the same default, so existing callers are
    unaffected.

    :param img_width: output image width in pixels (coerced to int)
    :param img_height: output image height in pixels (coerced to int)
    :param filename: path of the PNG file to write (default 'key.png')
    :return: the filename the image was saved to
    """
    # Coerce dimensions so callers may pass numeric strings or floats.
    img_width = int(img_width)
    img_height = int(img_height)
    # Random floats in [0, 1) scaled to the 0-255 channel range.
    img_array = numpy.random.rand(img_height, img_width, 3) * 255
    '''
    Make image object from array, if u want to get
    grayscale key, use "L" on convert method.
    '''
    image = Image.fromarray(img_array.astype('uint8')).convert('RGB')
    # Save image
    image.save(filename)
    return filename
|
from django.db import models
from datetime import datetime
class Retailer(models.Model):
    """A retail organization, identified by its name."""

    # name doubles as the primary key, so it is unique on its own.
    name = models.CharField(max_length=100, primary_key=True)
    address = models.CharField(max_length=100)
    # Stored single-letter code -> human-readable sector label.
    sector_choices = (('a','Private'),('b','Government'),('c','Semi-Private'),('d','Other'))
    sector = models.CharField(max_length=1, choices=sector_choices)

    class Meta:
        # NOTE(review): name is already the primary key, so the
        # (name, address) uniqueness constraint is implied by it;
        # confirm whether this constraint is intentional.
        unique_together = (("name","address"),)

    def __str__(self):
        return self.name
class Outlet(models.Model):
    """A retail outlet with its own login credentials."""

    username = models.CharField(max_length=50, primary_key=True)
    # NOTE(review): password is stored as a plain CharField; confirm whether
    # it should be hashed (e.g. via Django's auth framework) instead.
    password = models.CharField(max_length=50)
    name = models.CharField(max_length=100)
    category = models.CharField(max_length=100)
    address = models.CharField(max_length=100)

    def __str__(self):
        return self.name
import malmopy

# Connect to Malmo and start the patrol mission.
malmo = malmopy.Malmo()
malmo.start_mission('missions/patrol.xml')
for obs in malmo.observations():
    # `floor` maps relative (x, z) offsets around the agent to block
    # descriptions -- presumably strings; TODO confirm against malmopy.
    floor = obs['floor']
    # (0,1) means in front
    # (-1,0) is to the left
    # (1,0) is to the right
    # front is a string description of the block
    front = floor[(0,1)]
    # implement the following logic:
    # patrol the platform in a clockwise direction by
    # keeping the lava to your left
    # TODO(review): patrol logic is not implemented yet; `front` is
    # currently read but unused.
|
""" Common place to document project logging codes. HL7L = HL7 Listener service."""
# ERROR
HL7_MLLP_CONNECT_ERROR = "HL7LERR001: Error connecting to the HL7 MLLP listener port %s"
NATS_CONNECT_ERROR = "HL7LERR002: Error connecting to the NATS server %s"
NATS_NOT_INITIALIZED = "HL7LERR003: NATS not initialized."
HL7_MLLP_INCOMPLETE_READ = (
"HL7LERR004: HL7 MLLP Unexpected incomplete message read error. peername=%s"
)
HL7_MLLP_RECEIVER_ERR = "HL7LERR005: HL7 MLLP Receiver encounterred exception."
HL7_MLLP_MSG_PARSE_ERR = "HL7LERR006: Received HL7 message is not a valid. peername=%s"
HL7_MLLP_UNKNOWN_ERR = (
"HL7LERR007: Unknown error during HL7 receive message processing. peername=%s"
)
# INFO
STARTUP_ENV_VARS = (
"HL7LLOG001: HL7 Listener started with the follow values from the env:\n"
+ 'HL7 MLLP listening host:port="%s:%s"\n'
+ 'NATs Jetstream Connection="%s" and Subject="%s"'
)
HL7_MLLP_CONNECTED = "HL7LLOG002: HL7 Listener connection established. peername=%s"
HL7_MLLP_MSG_RECEIVED = "HL7LLOG003: HL7 Listener received a message."
HL7_MLLP_DISCONNECTED = "HL7LLOG004: HL7 Listener connection closed. peername=%s"
HL7_MLLP_RECEIVER_CANCELLED = (
"HL7LLOG005: HL7 Listener was cancelled. This is not considered an error."
)
NATS_CONNECTED = "HL7LLOG006: Connected to the NATS server URL %s."
SENDING_MSG_TO_NATS = "HL7LLOG007: Sending message to the NATS JetStream server."
NATS_REQUEST_SEND_MSG_RESPONSE = (
"HL7LLOG008: Response from NATS request for sending an HL7 message: %s"
)
HL7_MLLP_CONNECTION_CLOSING = (
"HL7LLOG008: HL7 Listener connection from a sender peer is closing. peername=%s"
)
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from os.path import dirname, abspath, join
from indra.benchmarks import bioprocesses as bp
# from indra.benchmarks import complexes as cp
from indra.benchmarks import phosphorylations as phos
from indra.util import unicode_strs
from nose.plugins.attrib import attr
import unittest
# Path to the pickled REACH statements used as the shared evaluation input
# for the benchmark smoke tests below (resolved relative to this file).
eval_file = join(dirname(abspath(__file__)),
                 '../benchmarks/assembly_eval/batch4/reach/' +
                 'reach_stmts_batch_4_eval.pkl')
# Removing due to QuickGO web service reliability issues
#def test_bioprocesses():
# """Smoke test to see if bioprocesses analysis works."""
# bp.analyze(eval_file)
#def test_bioprocesses_get_genes():
# gene_set = bp.get_genes_for_go_id('GO:0006915')
# assert gene_set
# assert unicode_strs(gene_set)
@attr('nonpublic', 'webservice')
@unittest.skip('Complex analysis has been removed, test should be too, later.')
def test_complexes():
    """Smoke test to see if complexes analysis works."""
    # NOTE(review): the `cp` (complexes) import is commented out at the top
    # of this module, so this body would raise NameError if the skip
    # decorator were ever removed -- remove the test together with the skip.
    cp.analyze(eval_file)
|
"""
Standard Windows functions required for the system.
The required APIs are:
* running processes
*
* GUI windows
* mapping a window to a process ID
"""
import sys
import importlib
# Top-level global def
from .funcs_any_win import SHELL__CANCEL_CALLBACK_CHAIN
def __load_functions(modules):
    """
    Load platform-specific function implementations from *modules*.

    Each entry is either a module dotted name (imported relative to this
    package) or an already-imported module object; each module's
    ``load_functions(environ, ret)`` populates the shared ``ret`` dict,
    with later modules overwriting earlier, less-specific entries.

    :param modules: iterable of module names (str) or module objects
    :return: dict of function name -> implementation
    """
    import platform
    import struct

    # Ensure we're on Windows
    assert 'windows' in platform.system().lower()

    # Set up environment settings to make inspection of the current
    # platform easy for function modules to check.
    void_ptr_bits = struct.calcsize('P') * 8
    winver = sys.getwindowsversion()
    environ = {
        '32-bit': void_ptr_bits == 32,
        '64-bit': void_ptr_bits == 64,
        'release': platform.release(),
        'version': platform.version(),
        'system': platform.system(),
        'version-major': winver.major,
        'version-minor': winver.minor,
        'version-build': winver.build,
        'version-platform': winver.platform,
        'version-service_pack': winver.service_pack,
    }
    ret = {}
    for name in modules:
        if isinstance(name, str):
            try:
                mod = importlib.import_module(name, __name__)
            except Exception:
                # Fix: was a bare `except:`, which also intercepted
                # KeyboardInterrupt/SystemExit before re-raising. Name the
                # failing module, then re-raise so the failure is visible.
                print("Problem loading module " + name)
                raise
        else:
            mod = name
        mod.load_functions(environ, ret)
    return ret
# Defines the list of modules that contains platform specific functions.
# They are loaded in a specific order to overwrite previous, less-specific
# versions of the functions.
__FUNCTIONS = __load_functions([
    "petronia.arch.windows.funcs_x86_win",
    "petronia.arch.windows.funcs_x64_win",

    # any_win must ALWAYS be after the bit ones, because of dependencies.
    "petronia.arch.windows.funcs_any_win",

    # OS-specific come after the architecture ones
    "petronia.arch.windows.funcs_winXP",
    "petronia.arch.windows.funcs_winVista",
    "petronia.arch.windows.funcs_win7",
    "petronia.arch.windows.funcs_win8",
    "petronia.arch.windows.funcs_win10",
])

# Special code that loads those final, platform-specific functions into
# the current module namespace, so callers can use plain attribute access
# on this module instead of going through the __FUNCTIONS dict.
__current_module = importlib.import_module(__name__)
for __k, __v in __FUNCTIONS.items():
    setattr(__current_module, __k, __v)
|
#!/usr/bin/python3
# ==================================================
"""
File: RMedian - Phase 3
Author: Julian Lorenz
"""
# ==================================================
# Import
import math
import random
# ==================================================
def phase3(X, L, C, R, cnt):
    """
    RMedian phase 3: finish the randomized median selection.

    X is the full (sorted-order) input list, L and R are lists of left/right
    bucket lists, C is the candidate set and cnt a comparison counter that is
    passed through unchanged.  Returns (median_or_-1, cnt, result_tag, s)
    where result_tag is 'DET' (deterministic fallback), 'AKS' (median of the
    small candidate set) or 'EXP' (candidate set was expanded; no median
    computed this round, hence -1), and s = |L| - |R| is the imbalance.
    """
    n = len(X)

    # Total elements held in the left and right bucket lists.
    sumL, sumR = 0, 0
    for l in L:
        sumL += len(l)
    for r in R:
        sumR += len(r)
    # Imbalance between the left and right sides.
    s = sumL - sumR

    # Det Median: one side got too large -> fall back to the
    # deterministic median of X.
    if max(sumL, sumR) > n / 2:
        res = 'DET'
        if len(X) % 2 == 0:
            return (X[int(len(X) / 2 - 1)] + X[int(len(X) / 2)]) / 2, cnt, res, s
        else:
            return X[int(len(X) / 2 - 0.5)], cnt, res, s

    # AKS: candidate set is small (< log2 n) -> sort it and take its median.
    if len(C) < math.log(n) / math.log(2):
        res = 'AKS'
        C.sort()
        if len(C) % 2 == 0:
            return (C[int(len(C) / 2 - 1)] + C[int(len(C) / 2)]) / 2, cnt, res, s
        else:
            return C[int(len(C) / 2 - 0.5)], cnt, res, s

    # Expand: rebalance by moving |s| random elements from the larger side
    # into the candidate set C (removing them from their buckets).
    if s < 0:
        rs = []
        for r in R:
            rs += r
        random.shuffle(rs)
        for i in range(-s):
            C.append(rs[i])
            for r in R:
                if rs[i] in r:
                    r.remove(rs[i])
    else:
        ls = []
        for l in L:
            ls += l
        random.shuffle(ls)
        for i in range(s):
            C.append(ls[i])
            for l in L:
                if ls[i] in l:
                    l.remove(ls[i])

    res = 'EXP'
    return -1, cnt, res, s
# ==================================================
|
import requests
from requests_ntlm2 import HttpNtlm2Auth
# test for mragaei
# NOTE(review): real-looking credentials and target IPs are hard-coded in
# source control -- move them to environment variables / a secrets store
# and rotate the password before any reuse.
session = requests.Session()
# un-authenticated baseline request; verify=False disables TLS certificate
# validation and is acceptable only for throwaway testing.
r1 = session.get("http://52.208.44.235/iisstart.htm", verify=False)
Referer = dict(Referer = "http://192.168.1.20/now")
# set auth handler for authenticated requests, reusing the same connection
session.auth = HttpNtlm2Auth('WIN-QRD0D23AHH3', 'test', '6N%9rEpFqedKdjGw')
r2 = session.get("http://52.208.44.235/secure/iisstart.htm", verify=False, headers=Referer)
print(r2.status_code)
print(r2.content)
|
from numba import jit
from numpy import exp, zeros, meshgrid
from tqdm import tqdm
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from core import create_dir, make_paths, make_animation, make_video, parse_args
@jit(nopython=True)
def intensity_initialization(n_points, x, y, x_0, y_0, M):
    """Build the initial intensity grid: r2**M * exp(-r2), where r2 is the
    squared elliptical radius (x/x_0)**2 + (y/y_0)**2 at each grid node."""
    intensity = zeros((n_points, n_points))
    for row in range(n_points):
        for col in range(n_points):
            r2 = (x[row] / x_0) ** 2 + (y[col] / y_0) ** 2
            intensity[row, col] = r2 ** M * exp(-r2)
    return intensity
def plot_images(**kwargs):
    """Plots intensity distribution in initial condition with different azimuth.

    Renders one 3-D surface frame every 2 degrees of azimuth (180 frames)
    and saves them as numbered images under the run's results directory.

    Keyword Args:
        global_root_dir: root directory for all results.
        global_results_dir_name: name of the shared results directory.
        prefix: per-run sub-directory prefix.
        M: exponent applied to the radial term in the intensity profile.
        figsize: figure size; only (3, 3) and (10, 10) are supported.
        ext: image file extension (default 'png').

    Returns:
        The results directory path (as produced by make_paths).
    """
    global_root_dir = kwargs['global_root_dir']
    global_results_dir_name = kwargs['global_results_dir_name']
    prefix = kwargs['prefix']
    M = kwargs['M']
    figsize = kwargs.get('figsize', (10, 10))
    ext = kwargs.get('ext', 'png')
    _, results_dir, _ = make_paths(global_root_dir, global_results_dir_name, prefix)
    res_dir = create_dir(path=results_dir)
    # spatial grid, centered on the origin
    n_points = 300
    x_max, y_max = 600.0, 600.0  # micrometers
    x_0, y_0 = x_max / 6, y_max / 6
    x, y = zeros(n_points), zeros(n_points)
    for i in range(n_points):
        x[i], y[i] = i * x_max / n_points - x_max / 2, i * y_max / n_points - y_max / 2
    intensity = intensity_initialization(n_points, x, y, x_0, y_0, M)
    xx, yy = meshgrid(x, y)
    # one frame per 2-degree azimuth step; `number` indexes the output files
    for number, gradus in enumerate(tqdm(range(0, 360, 2))):
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111, projection='3d')
        ax.plot_surface(xx, yy, intensity, cmap='jet', rstride=1, cstride=1, linewidth=0, antialiased=False)
        ax.view_init(elev=75, azim=int(gradus + 315))
        ax.set_axis_off()
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_zticks([])
        ax.set_zlim([0, 1.35])
        # hand-tuned crop boxes for the two supported figure sizes --
        # presumably chosen by eye; TODO confirm before changing figsize options
        if figsize == (3,3):
            bbox = fig.bbox_inches.from_bounds(0.6, 0.5, 2, 2)
        elif figsize == (10,10):
            bbox = fig.bbox_inches.from_bounds(2.3, 1.7, 5.7, 5.5)
        else:
            raise Exception('Wrong figsize!')
        plt.savefig(res_dir + '/%04d.' % number + ext, bbox_inches=bbox, transparent=True)
        plt.close()
    return results_dir
def process_initial(M, animation=True, video=True):
    """Render all frames for mode M, then optionally assemble an animation
    and/or a video from them using the shared `core` helpers."""
    cli_args = parse_args()
    run_prefix = 'M=%d' % M
    results_dir = plot_images(global_root_dir=cli_args.global_root_dir,
                              global_results_dir_name=cli_args.global_results_dir_name,
                              M=M,
                              prefix=run_prefix)
    if animation:
        make_animation(root_dir=results_dir, name=run_prefix)
    if video:
        make_video(root_dir=results_dir, name=run_prefix)
# Guard the entry point so importing this module for its helpers no longer
# triggers a full (slow, file-writing) rendering run as a side effect.
if __name__ == "__main__":
    process_initial(M=1)
|
def bar(foo_new, i_new):
    """Return (updated flag, stop?) for index `i_new`.

    Any index greater than 2 clears the flag and asks the caller to stop.
    """
    if i_new > 2:
        return False, True
    return foo_new, False


def main(indices):
    """Fold `bar` over `indices`, starting with a True flag and stopping as
    soon as `bar` signals a break. Returns the final flag value."""
    flag = True
    for index in indices:
        flag, stop = bar(flag, index)
        if stop:
            break
    return flag
"""
In this file one can find the implementation of helpful class and functions in order to handle the given dataset, in the
aspect of its structure.
Here is the implementation of helpful class and functions that handle the given dataset.
"""
import json
import csv
from scipy.stats import zscore
from torch import Tensor
from torch.nn import ConstantPad2d
from torch.utils.data import Dataset, DataLoader
from collections import Counter
from feature_calculators import FeatureMeta
from features_processor import FeaturesProcessor, log_norm
from graph_features import GraphFeatures
from loggers import PrintLogger
from multi_graph import MultiGraph
from dataset.dataset_external_data import ExternalData
import os
import pandas as pd
import networkx as nx
import pickle
import numpy as np
from vertices.betweenness_centrality import BetweennessCentralityCalculator
from vertices.bfs_moments import BfsMomentsCalculator
from sklearn.preprocessing import MinMaxScaler
# some important shortenings
# sub-directory where pickled intermediates are cached
PKL_DIR = "pkl"
# adjacency-normalization modes understood by GraphsDataset._norm_adjacency
NORM_REDUCED = "NORM_REDUCED"
NORM_REDUCED_SYMMETRIC = "NORM_REDUCED_SYMMETRIC"
IDENTITY = "IDENTITY"
RAW_FORM = "RAW_FORM"
# degree-feature selectors, resolved by name via globals() in _init_ftrs
DEG = "DEG"
IN_DEG = "IN_DEG"
OUT_DEG = "OUT_DEG"
# (name, FeatureMeta) pairs for features computed by the graph-features package
CENTRALITY = ("betweenness_centrality", FeatureMeta(BetweennessCentralityCalculator, {"betweenness"}))
BFS = ("bfs_moments", FeatureMeta(BfsMomentsCalculator, {"bfs"}))
class GraphsDataset(Dataset):
    """Torch Dataset over a collection of graphs.

    Each item is (adjacency matrix, node-feature matrix, optional external
    embedding, label), built from an edge-list CSV where each row carries
    (graph id, src, dst, label). Expensive intermediates (the multi-graph and
    the processed data) are cached as pickles under <base_dir>/pkl.
    """
    def __init__(self, params, external_data: ExternalData = None):
        # params may be a ready dict or a path to a JSON file; keep the
        # dataset name, then narrow to the "graphs_data" section.
        self._params = params if type(params) is dict else json.load(open(params, "rt"))
        self._dataset_name = self._params["dataset_name"]
        self._params = self._params["graphs_data"]
        self._logger = PrintLogger("logger")
        # base directory = parent of the directory containing this file
        self._base_dir = __file__.replace("/", os.sep)
        self._base_dir = os.path.join(self._base_dir.rsplit(os.sep, 1)[0], "..")
        self._external_data = external_data
        # init ftr_meta dictionary and other ftr attributes
        self._init_ftrs()
        self._src_file_path = os.path.join(self._params["file_path"])
        self._multi_graph, self._labels, self._label_to_idx, self._idx_to_label = self._build_multi_graph()
        self._data, self._idx_to_name = self._build_data()

    @property
    def all_labels(self):
        # ordered list mapping label index -> original label value
        return self._idx_to_label

    @property
    def label_count(self):
        # Counter {label_idx: number of graphs carrying that label}
        return Counter([v[3] for name, v in self._data.items()])

    def label(self, idx):
        # label index of the idx-th graph (index order = insertion order)
        return self._data[self._idx_to_name[idx]][3]

    @property
    def len_features(self):
        # width (number of columns) of the per-node feature matrix
        return self._data[self._idx_to_name[0]][1].shape[1]

    # Initialization of the requested features
    def _init_ftrs(self):
        self._deg, self._in_deg, self._out_deg, self._is_ftr, self._ftr_meta = False, False, False, False, {}
        self._is_external_data = False if self._external_data is None else True
        # params.FEATURES contains string and list of two elements (matching to key: value)
        # should Deg/In-Deg/Out-Deg be calculated
        for ftr in self._params["features"]:
            # resolve the module-level constant named by the config string
            ftr = globals()[ftr]
            if ftr == DEG:
                self._deg = True
            elif ftr == IN_DEG:
                self._in_deg = True
            elif ftr == OUT_DEG:
                self._out_deg = True
            else:
                self._ftr_meta[ftr[0]] = ftr[1]
        # add directories for pickles
        if len(self._ftr_meta) > 0:
            self._ftr_path = os.path.join(self._base_dir, PKL_DIR, "ftr", self._dataset_name)
            # FIX: os.mkdir raised FileNotFoundError when the intermediate
            # "pkl"/"ftr" directories did not exist yet; makedirs creates the
            # whole chain and tolerates reruns.
            os.makedirs(self._ftr_path, exist_ok=True)
            # if there are other features besides degrees, such as betweenness
            self._is_ftr = True

    """
    build multi graph according to csv
    each community is a single graph, no consideration to time
    """
    def _build_multi_graph(self):
        # percentage is the "amount" of the graph we take. For example, percentage=1 means the whole graph is taken,
        # percentage=0.6 means 60% of the graph is taken , ....
        path_pkl = os.path.join(self._base_dir, PKL_DIR, self._dataset_name + "_split_" +
                                str(self._params["percentage"]) + "_mg.pkl")
        # if the pickle already exists the multi-graph was built before; just
        # load and return it
        if os.path.exists(path_pkl):
            return pickle.load(open(path_pkl, "rb"))
        multi_graph_dict = {}
        labels = {}
        label_to_idx = {}
        # open basic data csv (with all edges of all times)
        data_df = pd.read_csv(self._src_file_path)
        stop = data_df.shape[0] * self._params["percentage"]
        for index, edge in data_df.iterrows():
            if index > stop:
                break
            # write edge to dictionary
            graph_id = str(edge[self._params["graph_col"]])
            src = str(edge[self._params["src_col"]])
            dst = str(edge[self._params["dst_col"]])
            multi_graph_dict[graph_id] = multi_graph_dict.get(graph_id, []) + [(src, dst)]
            label = edge[self._params["label_col"]]
            # assign each new label the next free index
            label_to_idx[label] = len(label_to_idx) if label not in label_to_idx else label_to_idx[label]
            labels[graph_id] = label_to_idx[label]
        mg = MultiGraph(self._dataset_name, graphs_source=multi_graph_dict,
                        directed=self._params["directed"], logger=self._logger)
        idx_to_label = [l for l in sorted(label_to_idx, key=lambda x: label_to_idx[x])]
        # loggers can't be pickled; silence it around the dump
        mg.suspend_logger()
        # make directories
        os.makedirs(os.path.join(self._base_dir, PKL_DIR), exist_ok=True)
        pickle.dump((mg, labels, label_to_idx, idx_to_label), open(path_pkl, "wb"))
        mg.wake_logger()
        return mg, labels, label_to_idx, idx_to_label

    """
    returns a vector x for gnx
    basic version returns degree for each node
    """
    def _gnx_vec(self, gnx_id, gnx: nx.Graph, node_order):
        # final vector that will hold one matrix (column block) per feature
        final_vec = []
        # calculate degree for each node (log-scaled; 1e-3 avoids log(0))
        if self._deg:
            degrees = gnx.degree(gnx.nodes)
            final_vec.append(np.matrix([np.log(degrees[d] + 1e-3) for d in node_order]).T)
        # calculate in degree for each node
        if self._in_deg:
            degrees = gnx.in_degree(gnx.nodes)
            final_vec.append(np.matrix([np.log(degrees[d] + 1e-3) for d in node_order]).T)
        # calculate out degree for each node
        if self._out_deg:
            degrees = gnx.out_degree(gnx.nodes)
            final_vec.append(np.matrix([np.log(degrees[d] + 1e-3) for d in node_order]).T)
        # if external data is given, add its feature too
        if self._is_external_data and self._external_data.is_continuous:
            final_vec.append(np.matrix([self._external_data.continuous_feature(gnx_id, d) for d in node_order]))
        # if there are more features besides degrees and external ones, e.g. betweenness
        if self._is_ftr:
            name = str(gnx_id)
            # FIX: use makedirs so a missing parent no longer crashes here
            gnx_dir_path = os.path.join(self._ftr_path, name)
            os.makedirs(gnx_dir_path, exist_ok=True)
            # GraphFeatures (from the "graph features" package) calculates the requested features
            raw_ftr = GraphFeatures(gnx, self._ftr_meta, dir_path=gnx_dir_path, is_max_connected=False,
                                    logger=PrintLogger("logger"))
            raw_ftr.build(should_dump=True)  # build features
            final_vec.append(FeaturesProcessor(raw_ftr).as_matrix(norm_func=log_norm))
        # stack all per-feature matrices into a single (nodes x features) matrix
        return np.hstack(final_vec)

    # calculate degree matrix (diagonal, in node_order)
    def _degree_matrix(self, gnx, nodelist):
        degrees = gnx.degree(gnx.nodes)
        return np.diag([degrees[d] for d in nodelist])

    # function to standardize the data with zscore, min-max and more
    def _standardize_data(self, data):
        all_data_continuous_vec = []  # stack all vectors for all graphs
        key_to_idx_map = []  # keep ordered list (g_id, num_nodes) according to stack order
        # stack
        for g_id, (A, gnx_vec, embed_vec, label) in data.items():
            all_data_continuous_vec.append(gnx_vec)
            key_to_idx_map.append((g_id, gnx_vec.shape[0]))  # g_id, number of nodes ... ordered
        all_data_continuous_vec = np.vstack(all_data_continuous_vec)
        # z-score data
        if self._params["standardization"] == "zscore":
            standardized_data = zscore(all_data_continuous_vec, axis=0)
        # scale data (still hasn't been implemented)
        elif self._params["standardization"] == "scale":
            # FIX: this branch used to `pass`, leaving standardized_data
            # undefined and crashing below with a confusing NameError.
            raise NotImplementedError("standardization='scale' is not implemented yet")
        # min-max data
        elif self._params["standardization"] == "min_max":
            scalar = MinMaxScaler()
            standardized_data = scalar.fit_transform(all_data_continuous_vec)
        else:
            # FIX: fail fast on an unknown option instead of a later NameError
            raise ValueError("unknown standardization: " + str(self._params["standardization"]))
        # rebuild data to original form -> split stacked matrix according to <list: (g_id, num_nodes)>
        new_data_dict = {}
        start_idx = 0
        for g_id, num_nodes in key_to_idx_map:
            new_data_dict[g_id] = (data[g_id][0], standardized_data[start_idx: start_idx+num_nodes],
                                   data[g_id][2], data[g_id][3])
            start_idx += num_nodes
        return new_data_dict

    # For the GCN the adjacency matrix needs to be normalized
    def _norm_adjacency(self, A, gnx, node_order):
        if self._params["adjacency_norm"] == NORM_REDUCED:
            # D^-0.5 A D^-0.5
            # NOTE(review): the code multiplies by sqrt(D) (i.e. D^0.5 A D^0.5),
            # not the D^-0.5 the comment promises — confirm which is intended
            # before changing, since cached pickles/trained models depend on it.
            D = self._degree_matrix(gnx, nodelist=node_order)
            D_sqrt = np.matrix(np.sqrt(D))
            adjacency = D_sqrt * np.matrix(A) * D_sqrt
        elif self._params["adjacency_norm"] == NORM_REDUCED_SYMMETRIC:
            # D^-0.5 [A + A.T + I] D^-0.5  (same caveat as above)
            D = self._degree_matrix(gnx, nodelist=node_order)
            D_sqrt = np.matrix(np.sqrt(D))
            adjacency = D_sqrt * np.matrix(A + A.T + np.identity(A.shape[0])) * D_sqrt
        elif self._params["adjacency_norm"] == IDENTITY:
            # identity matrix instead of adjacency matrix
            adjacency = np.identity(A.shape[0])
        elif self._params["adjacency_norm"] == RAW_FORM:
            # don't do any normalization
            adjacency = A
        else:
            # FIX: the original message was missing the space before "is not"
            print("Error in adjacency_norm: " + self._params["adjacency_norm"] + " is not a valid option")
            exit(1)
        return adjacency

    """
    builds a data dictionary
    { ... graph_name: ( A = Adjacency_matrix, x = graph_vec, label ) ... }
    We use all the above functions to finally build the whole data model
    """
    def _build_data(self):
        # pickle name encodes the external-data configuration so different
        # configurations never share a cache entry
        ext_data_id = "None" if not self._is_external_data else "_embed_ftr_" + "_".join(self._external_data.embed_headers)\
                      + "_continuous_ftr_" + "_".join(self._external_data.continuous_headers) \
                      + "standardization_" + self._params["standardization"]
        pkl_path = os.path.join(self._base_dir, PKL_DIR, self._dataset_name + ext_data_id + "_data.pkl")
        if os.path.exists(pkl_path):
            return pickle.load(open(pkl_path, "rb"))
        data = {}
        idx_to_name = []
        for gnx_id, gnx in zip(self._multi_graph.graph_names(), self._multi_graph.graphs()):
            # if gnx.number_of_nodes() < 5:
            #     continue
            node_order = list(gnx.nodes)
            idx_to_name.append(gnx_id)
            adjacency = self._norm_adjacency(nx.adjacency_matrix(gnx, nodelist=node_order).todense(), gnx, node_order)
            gnx_vec = self._gnx_vec(gnx_id, gnx, node_order)
            embed_vec = [self._external_data.embed_feature(gnx_id, d) for d in node_order] \
                if self._is_external_data and self._external_data.is_embed else None
            data[gnx_id] = (adjacency, gnx_vec, embed_vec, self._labels[gnx_id])
        data = self._standardize_data(data)
        pickle.dump((data, idx_to_name), open(pkl_path, "wb"))
        return data, idx_to_name

    def collate_fn(self, batch):
        """Pad a batch of variable-size graphs to the largest graph in it."""
        lengths_sequences = []
        # number of nodes per graph; the max drives the padding
        for A, x, e, l in batch:
            lengths_sequences.append(A.shape[0])
        # in order to pad all batch to a single dimension max length is needed
        seq_max_len = np.max(lengths_sequences)
        # new batch variables
        adjacency_batch = []
        x_batch = []
        embeddings_batch = []
        labels_batch = []
        for A, x, e, l in batch:
            # pad the adjacency on both trailing dimensions, features on rows only
            adjacency_pad = ConstantPad2d((0, seq_max_len - A.shape[0], 0, seq_max_len - A.shape[0]), 0)
            adjacency_batch.append(adjacency_pad(A).tolist())
            vec_pad = ConstantPad2d((0, 0, 0, seq_max_len - A.shape[0]), 0)
            x_batch.append(vec_pad(x).tolist())
            embeddings_batch.append(vec_pad(e).tolist() if self._is_external_data and self._external_data.is_embed else e)
            labels_batch.append(l)
        return Tensor(adjacency_batch), Tensor(x_batch), Tensor(embeddings_batch).long(), Tensor(labels_batch).long()

    def __getitem__(self, index):
        gnx_id = self._idx_to_name[index]
        A, x, embed, label = self._data[gnx_id]
        # embed may legitimately be absent; 0 is the sentinel collate_fn expects
        embed = 0 if embed is None else Tensor(embed).long()
        return Tensor(A), Tensor(x), embed, label

    def __len__(self):
        return len(self._idx_to_name)
|
import sys
import types
from unittest.mock import Mock

# Install a stub "arcpy" module so code importing arcpy can run in tests
# without the real (ArcGIS-only) package; its `da` submodule is a Mock.
module_name = 'arcpy'
arcpy = types.ModuleType(module_name)
arcpy.da = Mock(name='{}.da'.format(module_name))
sys.modules[module_name] = arcpy
|
import requests
from util import *
from logger import *
from tgpush import *
class ZJULogin(object):
    """
    Base client that authenticates against the ZJU unified CAS platform.

    Attributes:
        sess: (requests.Session) shared session reused for all follow-up requests
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36',
    }
    BASE_URL = "https://healthreport.zju.edu.cn/ncov/wap/default/index"
    LOGIN_URL = "https://zjuam.zju.edu.cn/cas/login?service=http%3A%2F%2Fservice.zju.edu.cn%2F"

    def __init__(self, config, delay_run=False):
        # config: dict expected to contain at least 'username' and 'password'
        self.config = config
        self.delay_run = delay_run
        self.sess = requests.Session()

    def login(self):
        """Login to ZJU platform; returns the authenticated session.

        Raises LoginError when the CAS page reports bad credentials.
        """
        # the CAS form embeds a one-time "execution" token we must echo back
        # NOTE(review): `re` is presumably provided by `from util import *` — confirm.
        res = self.sess.get(self.LOGIN_URL)
        execution = re.search(
            'name="execution" value="(.*?)"', res.text).group(1)
        # CAS publishes its RSA public key (hex modulus/exponent) for the client
        res = self.sess.get(
            url='https://zjuam.zju.edu.cn/cas/v2/getPubKey').json()
        n, e = res['modulus'], res['exponent']
        username = self.config['username']
        password = self.config['password']
        encrypt_password = self._rsa_encrypt(password, e, n)
        data = {
            'username': username,
            'password': encrypt_password,
            'execution': execution,
            '_eventId': 'submit',
            "authcode": ""
        }
        res = self.sess.post(url=self.LOGIN_URL, data=data)
        # check if login succeeded: on failure the page echoes a
        # "wrong username or password" message (the Chinese literal below)
        if '用户名或密码错误' in res.content.decode():
            raise LoginError('登录失败,请核实账号密码重新登录')
        print("统一认证平台登录成功~")
        return self.sess

    def _rsa_encrypt(self, password_str, e_str, M_str):
        # Textbook (padding-free) RSA: c = m^e mod M, zero-padded to 128 hex
        # chars to mirror the CAS front-end JavaScript implementation.
        password_bytes = bytes(password_str, 'ascii')
        password_int = int.from_bytes(password_bytes, 'big')
        e_int = int(e_str, 16)
        M_int = int(M_str, 16)
        result_int = pow(password_int, e_int, M_int)
        return hex(result_int)[2:].rjust(128, '0')
class HealthCheckInHelper(ZJULogin):
    """Submits the ZJU daily health report after a CAS login.

    run() drives the flow: login -> follow REDIRECT_URL to obtain the report
    app's session cookie -> reverse-geocode the configured coordinates via the
    AMap REST API -> post the (mostly constant) report form.
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36',
    }
    # CAS login URL that redirects into the health-report app (sets its cookie)
    REDIRECT_URL = "https://zjuam.zju.edu.cn/cas/login?service=https%3A%2F%2Fhealthreport.zju.edu.cn%2Fa_zju%2Fapi%2Fsso%2Findex%3Fredirect%3Dhttps%253A%252F%252Fhealthreport.zju.edu.cn%252Fncov%252Fwap%252Fdefault%252Findex%26from%3Dwap"

    def __init__(self,config,delay_run=False):
        super().__init__(config, delay_run=delay_run)

    def get_geo_info(self, location: dict):
        """Reverse-geocode {lng, lat} through AMap's 'regeo' endpoint and
        return the JSON payload extracted from the JSONP response.

        NOTE(review): the API key is hard-coded — confirm it is disposable.
        """
        params = (
            ('key', '729923f88542d91590470f613adb27b5'),
            ('s', 'rsv3'),
            ('language', 'zh_cn'),
            ('location', '{lng},{lat}'.format(lng=location.get("lng"), lat=location.get("lat"))),
            ('extensions', 'base'),
            ('callback', 'jsonp_607701_'),
            ('platform', 'JS'),
            ('logversion', '2.0'),
            ('appname', 'https://healthreport.zju.edu.cn/ncov/wap/default/index'),
            ('csid', '63157A4E-D820-44E1-B032-A77418183A4C'),
            ('sdkversion', '1.4.16'),
        )
        response = self.sess.get('https://restapi.amap.com/v3/geocode/regeo', headers=self.headers, params=params, )
        # the endpoint answers JSONP; take_out_json strips the callback wrapper
        return take_out_json(response.text)

    def fill_geo_info(self, geo_info: dict):
        """Post the daily report form with location fields from geo_info.

        Returns the server's JSON reply, or None when geo_info is incomplete.
        """
        formatted_address = geo_info.get("regeocode").get("formatted_address")
        address_component = geo_info.get("regeocode").get("addressComponent")
        if not formatted_address or not address_component: return
        # fetch the report page (the commented lines below used to scrape the
        # per-user 'id'/'uid' values out of it)
        res = self.sess.get(self.BASE_URL, headers=self.headers)
        #html = res.content.decode()
        #new_info_tmp = json.loads(re.findall(r'def = ({[^\n]+})', html)[0])
        #new_id = new_info_tmp['id']
        #new_uid = new_info_tmp['uid']
        # assemble the geolocation blob the form expects
        lng, lat = address_component.get("streetNumber").get("location").split(",")
        geo_api_info_dict = {"type": "complete", "info": "SUCCESS", "status": 1, "cEa": "jsonp_859544_",
                             "position": {"Q": lat, "R": lng, "lng": lng, "lat": lat},
                             "message": "Get ipLocation success.Get address success.", "location_type": "ip",
                             "accuracy": "null", "isConverted": "true", "addressComponent": address_component,
                             "formattedAddress": formatted_address, "roads": [], "crosses": [], "pois": []}
        data = {
            'sfymqjczrj': '0',
            'zjdfgj': '',
            'sfyrjjh': '0',
            'cfgj': '',
            'tjgj': '',
            'nrjrq': '0',
            'rjka': '',
            'jnmddsheng': '',
            'jnmddshi': '',
            'jnmddqu': '',
            'jnmddxiangxi': '',
            'rjjtfs': '',
            'rjjtfs1': '',
            'rjjtgjbc': '',
            'jnjtfs': '',
            'jnjtfs1': '',
            'jnjtgjbc': '',
            # whether the user confirms the submitted information is true
            'sfqrxxss': '1',
            'sfqtyyqjwdg': '0',
            'sffrqjwdg': '0',
            'sfhsjc': '',
            'zgfx14rfh': '0',
            'zgfx14rfhdd': '',
            'sfyxjzxgym': '1',
            # whether in a group unsuited for vaccination
            'sfbyjzrq': '5',
            'jzxgymqk': '2',
            'tw': '0',
            'sfcxtz': '0',
            'sfjcbh': '0',
            'sfcxzysx': '0',
            'qksm': '',
            'sfyyjc': '0',
            'jcjgqr': '0',
            'remark': '',
            # full formatted address, e.g. province/city/district/street
            # (escaped sample kept below for reference)
            # '\u6D59\u6C5F\u7701\u676D\u5DDE\u5E02\u897F\u6E56\u533A\u4E09\u58A9\u9547\u897F\u6E56\u56FD\u5BB6\u5E7F\u544A\u4EA7\u4E1A\u56ED\u897F\u6E56\u5E7F\u544A\u5927\u53A6',
            'address': formatted_address,
            # sample geo_api_info payload (escaped, verbatim, for reference):
            # '{"type":"complete","info":"SUCCESS","status":1,"cEa":"jsonp_859544_","position":{"Q":30.30678,"R":120.06375000000003,"lng":120.06375,"lat":30.30678},"message":"Get ipLocation success.Get address success.","location_type":"ip","accuracy":null,"isConverted":true,"addressComponent":{"citycode":"0571","adcode":"330106","businessAreas":[],"neighborhoodType":"","neighborhood":"","building":"","buildingType":"","street":"\u897F\u56ED\u4E09\u8DEF","streetNumber":"1\u53F7","country":"\u4E2D\u56FD","province":"\u6D59\u6C5F\u7701","city":"\u676D\u5DDE\u5E02","district":"\u897F\u6E56\u533A","township":"\u4E09\u58A9\u9547"},"formattedAddress":"\u6D59\u6C5F\u7701\u676D\u5DDE\u5E02\u897F\u6E56\u533A\u4E09\u58A9\u9547\u897F\u6E56\u56FD\u5BB6\u5E7F\u544A\u4EA7\u4E1A\u56ED\u897F\u6E56\u5E7F\u544A\u5927\u53A6","roads":[],"crosses":[],"pois":[]}',
            'geo_api_info': geo_api_info_dict,
            # "province city district", space-separated
            # '\u6D59\u6C5F\u7701 \u676D\u5DDE\u5E02 \u897F\u6E56\u533A'
            'area': "{} {} {}".format(address_component.get("province"), address_component.get("city"),
                                      address_component.get("district")),
            # province
            # '\u6D59\u6C5F\u7701'
            'province': address_component.get("province"),
            # city
            # '\u676D\u5DDE\u5E02'
            'city': address_component.get("city"),
            # whether currently on campus
            'sfzx': '1',
            'sfjcwhry': '0',
            'sfjchbry': '0',
            'sfcyglq': '0',
            'gllx': '',
            'glksrq': '',
            'jcbhlx': '',
            'jcbhrq': '',
            'bztcyy': '',
            'sftjhb': '0',
            'sftjwh': '0',
            'ismoved': '0',
            # ---- fields changed in the Dec 1 form revision (begin) ----
            'sfjcqz': '0',
            'jcqzrq': '',
            # ---- fields changed in the Dec 1 form revision (end) ----
            'jrsfqzys': '',
            'jrsfqzfy': '',
            'sfyqjzgc': '',
            # whether the Hangzhou health code has been applied for
            'sfsqhzjkk': '1',
            # Hangzhou health code color: 1 green, 2 red, 3 yellow
            'sqhzjkkys': '1',
            'gwszgzcs': '',
            'szgj': '',
            'fxyy': '',
            'jcjg': '',
            # uid differs per user
            # 'uid': new_uid,
            # id differs per user
            # 'id': new_id,
            # the parameters below were not part of the Dec 1 form revision
            # report date
            'date': get_date(),
            'created': round(time.time()),
            'szsqsfybl': '0',
            'sfygtjzzfj': '0',
            'gtjzzfjsj': '',
            'zgfx14rfhsj': '',
            # NOTE(review): duplicate key — 'jcqzrq' already appears above with
            # the same value; this second entry silently wins. Confirm and drop.
            'jcqzrq': '',
            'gwszdd': '',
            'szgjcs': '',
            # 'jrdqtlqk[]': 0,
            # 'jrdqjcqk[]': 0,
        }
        response = self.sess.post('https://healthreport.zju.edu.cn/ncov/wap/default/save', data=data,
                                  headers=self.headers)
        return response.json()

    def run(self):
        """End-to-end daily check-in for the configured user."""
        print("正在为{}健康打卡".format(self.config["username"]))
        if self.delay_run:
            # jitter so scheduled runs don't all fire at exactly the same time
            time.sleep(random.randint(0, 10))
        # obtain cookies and headers
        try:
            self.login()
            # fetch the eai-sess cookie for the health-report app
            self.sess.get(self.REDIRECT_URL)
            # use configured coordinates: IP-based geolocation would resolve
            # to the server's location when run on a remote host
            lng= self.config["point"]['lng']
            lat= self.config["point"]['lat']
            location = {'info': 'LOCATE_SUCCESS', 'status': 1, 'lng': lng, 'lat': lat}
            geo_info = self.get_geo_info(location)
            print(geo_info)
            res = self.fill_geo_info(geo_info)
            print(res)
            post_tg(self.config['push']['telegram'],'浙江大学每日健康打卡 V1.3 '+ " \n\n 签到结果: " + res.get("m"))
        except requests.exceptions.ConnectionError as err:
            # reraise as KubeException, but log stacktrace.
            # NOTE(review): contrary to the comment above, nothing is re-raised;
            # the error is swallowed after notifying via Telegram — confirm.
            post_tg(self.config['push']['telegram'],'统一认证平台登录失败,请检查服务器网络状态')
|
from matplotlib import pyplot as plt
import numpy as np

# Per-month values for each language series (presumably usage counts,
# Aug-Dec per the labels below) — TODO confirm units.
python = [6, 7, 8, 4, 4]
javascript = [3, 12, 3, 4.1, 6]
# Offset the second series by the bar width so the bars sit side by side.
x1 = np.arange(len(python))
x2 = [x + 0.25 for x in x1]
plt.bar(x1, python, width=0.25, label = 'Python', color = 'deepskyblue')
plt.bar(x2, javascript, width=0.25, label = 'Javascript', color = 'mediumseagreen')
meses = ['Agosto','Setembro','Outubro','Novembro','Dezembro']
# Place the month labels at the second series' bar positions.
plt.xticks([x + 0.25 for x in range(len(python))], meses)
plt.legend()
plt.title("Uso das linguagens")
plt.show()
import names
import random
from django.core.management.base import BaseCommand, CommandError
from crm.models import Contact
def phn():
    """Generate a random US-style phone number formatted 'XXX-XXX-XXXX'.

    Constraints (preserving the original intent):
      - the first digit is 1-9;
      - digits 4-5 are 0-8, and digit 6 avoids 0 when digits 4-5 are both 0
        (so the exchange never starts '000');
      - the last digit differs from digits 7-9 when those three are identical.
    """
    p = list('0000000000')
    p[0] = str(random.randint(1, 9))
    for i in [1, 2, 6, 7, 8]:
        p[i] = str(random.randint(0, 9))
    for i in [3, 4]:
        p[i] = str(random.randint(0, 8))
    # BUG FIX: p holds one-character strings, so the original comparison
    # `p[3] == p[4] == 0` (int zero) was always False; compare against '0'.
    if p[3] == p[4] == '0':
        p[5] = str(random.randint(1, 8))
    else:
        p[5] = str(random.randint(0, 8))
    # BUG FIX: the original filtered int candidates against the *string* p[6]
    # (never equal, so nothing was excluded) and then called random.choice()
    # on a generator, which raises TypeError; use a list of digit strings.
    candidates = [str(d) for d in range(10)]
    if p[6] == p[7] == p[8]:
        candidates = [d for d in candidates if d != p[6]]
    p[9] = random.choice(candidates)
    p = ''.join(p)
    return p[:3] + '-' + p[3:6] + '-' + p[6:]
class Command(BaseCommand):
    """Management command: create <limit> random Contact rows for testing."""
    # NOTE(review): `args` plus positional *args in handle() is the legacy
    # optparse-based command API (removed in Django 1.10); modern commands
    # declare arguments via add_arguments() — confirm the Django version in use.
    args = '<limit>'
    help = 'Generate <limit> random contacts'

    def handle(self, *args, **options):
        # exactly one positional argument (the number of contacts) is required
        if not len(args) == 1:
            raise CommandError("Please provide limit number")
        for i in range(0, int(args[0])):
            first_name = names.get_first_name()
            last_name = names.get_last_name()
            email = "{0}.{1}@example.com".format(
                first_name.lower(), last_name.lower()
            )
            phone_number = phn()
            # title is fixed; only names, email and phone vary per row
            Contact.objects.create(
                title="mr",
                first_name=first_name,
                last_name=last_name,
                email=email,
                phone_number=phone_number
            )
|
import sys
import pytest
from quart import Quart, Response, url_for
from quart.testing import QuartClient
from werkzeug.datastructures import Headers
from .app import create_app
@pytest.fixture
def app() -> Quart:
    """Quart application built via the factory with GraphiQL enabled."""
    # no app-context push needed here; tests enter contexts explicitly
    return create_app(graphiql=True)
@pytest.fixture
def client(app: Quart) -> QuartClient:
    """Test client for issuing requests against the `app` fixture."""
    quart_client = app.test_client()
    return quart_client
async def execute_client(
    app: Quart,
    client: QuartClient,
    method: str = "GET",
    headers: Headers = None,
    **extra_params
) -> Response:
    """Build the graphql URL (extra_params become query args) inside a
    request context and GET it with the test client.

    FIX: dropped the stray `@pytest.mark.asyncio` decorator — this is a
    helper, not a collected test, so the marker was inert and misleading.
    """
    # Quart changed test_request_context's signature; support both orders.
    if sys.version_info >= (3, 7):
        test_request_context = app.test_request_context("/", method=method)
    else:
        test_request_context = app.test_request_context(method, "/")
    async with test_request_context:
        string = url_for("graphql", **extra_params)
        return await client.get(string, headers=headers)
@pytest.mark.asyncio
async def test_graphiql_is_enabled(app: Quart, client: QuartClient):
    """The GraphiQL endpoint should answer 200 to an HTML request."""
    html_headers = Headers({"Accept": "text/html"})
    response = await execute_client(app, client, headers=html_headers, externals=False)
    assert response.status_code == 200
@pytest.mark.asyncio
async def test_graphiql_renders_pretty(app: Quart, client: QuartClient):
    """The GraphiQL page should embed the query result pretty-printed and
    escaped for inclusion inside a JavaScript string literal."""
    response = await execute_client(
        app, client, headers=Headers({"Accept": "text/html"}), query="{test}"
    )
    assert response.status_code == 200
    # Adjacent string literals concatenate *before* the .replace calls apply,
    # so the quote/newline escaping covers the whole expected JSON block.
    pretty_response = (
        "{\n"
        '  "data": {\n'
        '    "test": "Hello World"\n'
        "  }\n"
        "}".replace('"', '\\"').replace("\n", "\\n")
    )
    result = await response.get_data(raw=False)
    assert pretty_response in result
@pytest.mark.asyncio
async def test_graphiql_default_title(app: Quart, client: QuartClient):
    """Without a custom title the page <title> defaults to 'GraphiQL'."""
    html_only = Headers({"Accept": "text/html"})
    response = await execute_client(app, client, headers=html_only)
    body = await response.get_data(raw=False)
    assert "<title>GraphiQL</title>" in body
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "app", [create_app(graphiql=True, graphiql_html_title="Awesome")]
)
async def test_graphiql_custom_title(app: Quart, client: QuartClient):
    """A custom graphiql_html_title must surface as the page <title>."""
    html_only = Headers({"Accept": "text/html"})
    response = await execute_client(app, client, headers=html_only)
    body = await response.get_data(raw=False)
    assert "<title>Awesome</title>" in body
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import rospy
from robotican_demos_upgrade.srv import *
def pick_unknown_client():
    """Call the 'pick_unknown' ROS service and return its response string.

    Blocks until the service is available; returns None when the call fails.
    """
    rospy.wait_for_service('pick_unknown')
    try:
        service = rospy.ServiceProxy('pick_unknown', pick_unknown)
        reply = service("armadillo", "can", "discrete_location")
        print("Responding to pick unknown!")
        return reply.response
    except rospy.ServiceException as e:
        print("Service call failed: %s"%e)
if __name__ == "__main__":
resp = pick_unknown_client()
print('The response is: ', resp)
if(resp == "success" or resp == "failure"):
print("Picked successfully!!")
sys.exit()
|
# -*- coding=utf-8 -*-
from .file_io import *
from .time_it import TimeIt
from .util_doc import pkuseg_postag_loader
from .zip_file import zip_file, unzip_file
|
from math import cos
from math import pi
from math import sin
from typing import List
from random import Random
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
def plot_clusters(clusters: List[List], centroids: List[List], labels: List[int]) -> None:
    """Plot cluster data.
    Args:
        clusters: Cluster data to plot.
        centroids: Centroids of clusters
        labels: The cluster each point belongs to.
    Returns:
        None
    """
    # Setup needed to construct plot
    num_clusters = len(set(labels))
    markers = get_markers(num_clusters)
    palette = get_palette(num_clusters)
    columns = ['x', 'y']
    # Get dataframe for data
    df = pd.DataFrame(clusters, columns=columns)
    df['labels'] = pd.Series(labels, index=df.index)  # Add labels as a column for coloring
    # Add centroids to dataframe
    centroids_df = pd.DataFrame(centroids, columns=columns)
    centroids_df['labels'] = ['centroid' for _ in range(len(centroids))]
    # FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent.
    df = pd.concat([df, centroids_df], ignore_index=True)
    # Plot
    # NOTE(review): positional x/y for sns.lmplot is rejected by seaborn >= 0.12;
    # switch to x='x', y='y' keywords if the environment's seaborn is newer.
    sns.lmplot(*columns, data=df, fit_reg=False, legend=False,
               hue='labels', palette=palette, markers=markers,
               scatter_kws={'s': 50})
    plt.show()
def get_markers(num_clusters):
    """Deterministically pick one marker per cluster, plus 'x' for centroids."""
    rng = Random(0)
    chosen = rng.sample(['*', 'o', '^', '+'], num_clusters)
    return chosen + ['x']
def get_palette(num_clusters):
    """Deterministically pick one color per cluster, plus red for centroids."""
    rng = Random(0)
    chosen = rng.sample(['blue', 'orange', 'green', 'purple'], num_clusters)
    return chosen + ['red']
def generate_clusters(num_clusters, pts_per_cluster, spread, bound_for_x, bound_for_y) -> List[List]:
    """Generate random data for clustering.
    Source:
        https://stackoverflow.com/questions/44356063/how-to-generate-a-set-of-random-points-within-a-given-x-y-coordinates-in-an-x
    Args:
        num_clusters: The number of clusters to generate.
        pts_per_cluster: The number of points per cluster to generate.
        spread: The spread of each cluster. Decrease for tighter clusters.
        bound_for_x: The bounds for possible values of X.
        bound_for_y: The bounds for possible values of Y.
    Returns:
        K clusters consisting of N points.
    """
    seed = 0
    rng = Random(seed)
    x_lo, x_hi = bound_for_x
    y_lo, y_hi = bound_for_y
    all_points = []
    for _ in range(num_clusters):
        # pick a uniform-random center inside the bounds (x first, then y,
        # to keep the RNG draw order stable)
        center_x = x_lo + (x_hi - x_lo) * rng.random()
        center_y = y_lo + (y_hi - y_lo) * rng.random()
        all_points.extend(generate_cluster(pts_per_cluster, (center_x, center_y), spread, seed))
    return all_points


def generate_cluster(num_points, center, spread, seed) -> List[List]:
    """Generates a cluster of random points.
    Source:
        https://stackoverflow.com/questions/44356063/how-to-generate-a-set-of-random-points-within-a-given-x-y-coordinates-in-an-x
    Args:
        num_points: The number of points for the cluster.
        center: The center of the cluster.
        spread: How tightly to cluster the data.
        seed: Seed for random
    Returns:
        A random cluster of consisting of N points.
    """
    center_x, center_y = center
    # mix the center into the seed so each cluster gets a distinct stream
    rng = Random(seed ^ int(center_x * center_y))
    points = []
    for _ in range(num_points):
        # polar sample: angle first, then radius (preserves RNG draw order)
        angle = 2 * pi * rng.random()
        radius = spread * rng.random()
        points.append([center_x + radius * cos(angle), center_y + radius * sin(angle)])
    return points
|
from gi.repository import Gtk
from pychess.Utils.IconLoader import get_pixbuf, load_icon
# from pychess.widgets.WebKitBrowser import open_link
# Module-level reference to the application's top-level window; assigned by
# the application at startup and read back through mainwindow() below.
main_window = None
# 16px close icon — presumably falling back from the stock "gtk-close" name
# to the themed "window-close" icon; confirm against load_icon's signature.
gtk_close = load_icon(16, "gtk-close", "window-close")
def mainwindow():
    # Accessor for the module-level main_window global (set at startup).
    return main_window
def createImage(pixbuf):
    """Wrap a GdkPixbuf in a fresh Gtk.Image widget."""
    img = Gtk.Image()
    img.set_from_pixbuf(pixbuf)
    return img
def createAlignment(top, right, bottom, left):
    """Build a centered Gtk.Alignment with the given edge paddings."""
    alignment = Gtk.Alignment.new(0.5, 0.5, 1, 1)
    for side, padding in (("top", top), ("right", right),
                          ("bottom", bottom), ("left", left)):
        alignment.set_property(side + "-padding", padding)
    return alignment
def new_notebook(name=None):
    """Create a borderless, tabless Gtk.Notebook, optionally named.

    When a name is given, get_tab_label_text is overridden to always report
    that name regardless of the child passed in.
    """
    notebook = Gtk.Notebook()
    if name is not None:
        notebook.set_name(name)
        notebook.get_tab_label_text = lambda child: name
    notebook.set_show_tabs(False)
    notebook.set_show_border(False)
    return notebook
def dock_panel_tab(title, desc, icon, button=None):
    """Build the icon + label (+ optional button) box used as a dock-panel tab."""
    box = Gtk.Box()
    box.set_tooltip_text(desc)
    icon_image = Gtk.Image.new_from_pixbuf(get_pixbuf(icon, 16))
    title_label = Gtk.Label(label=title)
    box.pack_start(icon_image, False, True, 0)
    box.pack_start(title_label, False, True, 0)
    if button is not None:
        box.pack_start(button, False, True, 0)
    box.set_spacing(2)
    box.show_all()
    return box
def insert_formatted(text_view, iter, text, tag=None):
    """Insert `text` into the text_view's buffer at `iter`, converting the
    first URL-looking token into a clickable Gtk.Label anchored inline.

    Only the first link candidate is converted; everything else is inserted
    as plain (optionally tagged) text.
    """
    def insert(text):
        # insert with the optional tag name, else a plain insert
        if tag is not None:
            tb.insert_with_tags_by_name(iter, text, tag)
        else:
            tb.insert(iter, text)
    tb = text_view.get_buffer()
    # I know this is far from perfect but I don't want to use re for this
    if "://" in text or "www" in text:
        parts = text.split()
        position = 0
        for i, part in enumerate(parts):
            if "://" in part or "www" in part:
                # strip a leading quote and anything past a closing quote
                if part.startswith('"'):
                    part = part[1:]
                endpos = part.find('"')
                if endpos != -1:
                    part = part[:endpos]
                # endgame.nl links are routed through the Wayback Machine —
                # presumably because the site went offline; confirm.
                part0 = (
                    "http://web.archive.org/%s" % part
                    if part.startswith("http://www.endgame.nl")
                    else part
                )
                parts[i] = '<a href="%s">%s</a>' % (part0, part)
                position = i
                break
        # text before the link, then the link as an anchored label widget,
        # then the remaining text
        insert("%s " % " ".join(parts[:position]))
        label = Gtk.Label()
        label.set_markup(parts[position])
        # label.connect("activate-link", open_link)
        label.show()
        anchor = tb.create_child_anchor(iter)
        text_view.add_child_at_anchor(label, anchor)
        insert(" %s" % " ".join(parts[position + 1 :]))
    else:
        insert(text)
|
"""A class for the Lemke Howson algorithm with lexicographical ordering"""
from itertools import cycle
import numpy as np
import numpy.typing as npt
from typing import Tuple
from nashpy.integer_pivoting import make_tableau, pivot_tableau_lex
from .lemke_howson import shift_tableau, tableau_to_strategy
def lemke_howson_lex(
    A: npt.NDArray, B: npt.NDArray, initial_dropped_label: int = 0
) -> Tuple[npt.NDArray, npt.NDArray]:
    """
    Obtain the Nash equilibria using the Lemke Howson algorithm implemented
    using lexicographical integer pivoting. (Able to solve degenerate games)
    1. Start at the artificial equilibrium (which is fully labeled)
    2. Choose an initial label to drop and move in the polytope for which
       the vertex has that label to the edge that does not share that label.
       (This is implemented using integer pivoting and the choice of label
       to drop is implemented using lexicographical ordering)
    3. A label will now be duplicated in the other polytope, drop it in a
       similar way.
    4. Repeat steps 2 and 3 until have Nash Equilibrium.
    Parameters
    ----------
    A : array
        The row player payoff matrix
    B : array
        The column player payoff matrix
    initial_dropped_label: int
        The initial dropped label.
    Returns
    -------
    Tuple
        An equilibrium: the row player's and column player's mixed strategies.
    """
    # Shift payoffs to be strictly positive; this leaves the set of
    # equilibria unchanged but is required by the tableau construction.
    if np.min(A) <= 0:
        A = A + abs(np.min(A)) + 1
    if np.min(B) <= 0:
        B = B + abs(np.min(B)) + 1
    # build tableaux
    col_tableau = make_tableau(A)
    col_tableau = shift_tableau(col_tableau, A.shape)
    row_tableau = make_tableau(B.transpose())
    # Labels 0..m-1 are row strategies, m..m+n-1 column strategies.
    full_labels = set(range(sum(A.shape)))
    # slack variables
    row_slack_variables = range(B.shape[0], sum(B.shape))
    col_slack_variables = range(A.shape[0])
    # non-basic variables
    row_non_basic_variables = full_labels - set(row_slack_variables)
    col_non_basic_variables = full_labels - set(col_slack_variables)
    # print(initial_dropped_label)
    # Start pivoting in whichever polytope currently carries the dropped
    # label; cycle() then alternates between the two tableaux.
    if initial_dropped_label in row_non_basic_variables:
        tableaux = cycle(
            (
                (row_tableau, row_slack_variables, row_non_basic_variables),
                (col_tableau, col_slack_variables, col_non_basic_variables),
            )
        )
    else:
        tableaux = cycle(
            (
                (col_tableau, col_slack_variables, col_non_basic_variables),
                (row_tableau, row_slack_variables, row_non_basic_variables),
            )
        )
    # First pivot (to drop a label)
    next_tableau, next_slack_variables, next_non_basic_variables = next(tableaux)
    entering_label = pivot_tableau_lex(
        next_tableau,
        initial_dropped_label,
        next_slack_variables,
        next_non_basic_variables,
    )
    # keeps track of each tableau's non-basic variables
    next_non_basic_variables.add(entering_label)
    next_non_basic_variables.remove(initial_dropped_label)
    # Fully labeled again <=> the union of non-basic labels covers all labels.
    while col_non_basic_variables.union(row_non_basic_variables) != full_labels:
        next_tableau, next_slack_variables, next_non_basic_variables = next(tableaux)
        # the first label is 'entering' in the sense that it will enter the next
        # tableau's set of basic variables
        just_entered_label = entering_label
        entering_label = pivot_tableau_lex(
            next_tableau,
            entering_label,
            next_slack_variables,
            next_non_basic_variables,
        )
        next_non_basic_variables.add(entering_label)
        next_non_basic_variables.remove(just_entered_label)
    # Read the mixed strategies off the final basic feasible solutions.
    row_strategy = tableau_to_strategy(
        row_tableau,
        full_labels - row_non_basic_variables,
        range(A.shape[0]),
    )
    col_strategy = tableau_to_strategy(
        col_tableau,
        full_labels - col_non_basic_variables,
        range(A.shape[0], sum(A.shape)),
    )
    return row_strategy, col_strategy
|
"""
Copyright 2020 Jackpine Technologies Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# coding: utf-8
"""
cons3rt - Copyright Jackpine Technologies Corp.
NOTE: This file is auto-generated. Do not edit the file manually.
"""
import pprint
import re  # noqa: F401
import six
from cons3rt.configuration import Configuration
__author__ = 'Jackpine Technologies Corporation'
__copyright__ = 'Copyright 2020, Jackpine Technologies Corporation'
# BUG FIX: the original line ended in a trailing comma, which made
# __license__ the 1-tuple ('Apache 2.0',) instead of a string.
__license__ = 'Apache 2.0'
__version__ = '1.0.0'
__maintainer__ = 'API Support'
__email__ = 'support@cons3rt.com'
class Cons3rtTemplateData(object):
    """NOTE: This class is auto-generated. Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> OpenAPI type, used by to_dict() for serialization.
    openapi_types = {
        'display_name': 'str',
        'operating_system': 'str',
        'virt_realm_template_name': 'str',
        'cons3rt_agent_installed': 'bool',
        'container_capable': 'bool',
        'disks': 'list[Disk]',
        'fail_count': 'int',
        'has_gpu': 'bool',
        'id': 'int',
        'license': 'str',
        'max_num_cpus': 'int',
        'max_ram_in_megabytes': 'int',
        'note': 'str',
        'package_management_type': 'str',
        'power_on_delay_override': 'int',
        'power_shell_version': 'str',
        'template_registration': 'TemplateRegistration',
        'remote_access_templates': 'list[RemoteAccessTemplate]',
        'service_management_type': 'str',
        'user_count': 'int',
        'virt_realm_id': 'int'
    }
    # Python attribute name -> JSON key in the API payload.
    attribute_map = {
        'display_name': 'displayName',
        'operating_system': 'operatingSystem',
        'virt_realm_template_name': 'virtRealmTemplateName',
        'cons3rt_agent_installed': 'cons3rtAgentInstalled',
        'container_capable': 'containerCapable',
        'disks': 'disks',
        'fail_count': 'failCount',
        'has_gpu': 'hasGpu',
        'id': 'id',
        'license': 'license',
        'max_num_cpus': 'maxNumCpus',
        'max_ram_in_megabytes': 'maxRamInMegabytes',
        'note': 'note',
        'package_management_type': 'packageManagementType',
        'power_on_delay_override': 'powerOnDelayOverride',
        'power_shell_version': 'powerShellVersion',
        'template_registration': 'templateRegistration',
        'remote_access_templates': 'remoteAccessTemplates',
        'service_management_type': 'serviceManagementType',
        'user_count': 'userCount',
        'virt_realm_id': 'virtRealmId'
    }
    def __init__(self, display_name=None, operating_system=None, virt_realm_template_name=None, cons3rt_agent_installed=None, container_capable=None, disks=None, fail_count=None, has_gpu=None, id=None, license=None, max_num_cpus=None, max_ram_in_megabytes=None, note=None, package_management_type=None, power_on_delay_override=None, power_shell_version=None, template_registration=None, remote_access_templates=None, service_management_type=None, user_count=None, virt_realm_id=None, local_vars_configuration=None):  # noqa: E501
        """Cons3rtTemplateData - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        # Private backing fields for the properties declared below.
        self._display_name = None
        self._operating_system = None
        self._virt_realm_template_name = None
        self._cons3rt_agent_installed = None
        self._container_capable = None
        self._disks = None
        self._fail_count = None
        self._has_gpu = None
        self._id = None
        self._license = None
        self._max_num_cpus = None
        self._max_ram_in_megabytes = None
        self._note = None
        self._package_management_type = None
        self._power_on_delay_override = None
        self._power_shell_version = None
        self._template_registration = None
        self._remote_access_templates = None
        self._service_management_type = None
        self._user_count = None
        self._virt_realm_id = None
        self.discriminator = None
        # operating_system and virt_realm_template_name are required fields
        # (assigned unconditionally, so their setters can validate None);
        # all other fields are optional and only assigned when supplied.
        if display_name is not None:
            self.display_name = display_name
        self.operating_system = operating_system
        self.virt_realm_template_name = virt_realm_template_name
        if cons3rt_agent_installed is not None:
            self.cons3rt_agent_installed = cons3rt_agent_installed
        if container_capable is not None:
            self.container_capable = container_capable
        if disks is not None:
            self.disks = disks
        if fail_count is not None:
            self.fail_count = fail_count
        if has_gpu is not None:
            self.has_gpu = has_gpu
        if id is not None:
            self.id = id
        if license is not None:
            self.license = license
        if max_num_cpus is not None:
            self.max_num_cpus = max_num_cpus
        if max_ram_in_megabytes is not None:
            self.max_ram_in_megabytes = max_ram_in_megabytes
        if note is not None:
            self.note = note
        if package_management_type is not None:
            self.package_management_type = package_management_type
        if power_on_delay_override is not None:
            self.power_on_delay_override = power_on_delay_override
        if power_shell_version is not None:
            self.power_shell_version = power_shell_version
        if template_registration is not None:
            self.template_registration = template_registration
        if remote_access_templates is not None:
            self.remote_access_templates = remote_access_templates
        if service_management_type is not None:
            self.service_management_type = service_management_type
        if user_count is not None:
            self.user_count = user_count
        if virt_realm_id is not None:
            self.virt_realm_id = virt_realm_id
    @property
    def display_name(self):
        """Gets the display_name of this Cons3rtTemplateData.  # noqa: E501
        :return: The display_name of this Cons3rtTemplateData.  # noqa: E501
        :rtype: str
        """
        return self._display_name
    @display_name.setter
    def display_name(self, display_name):
        """Sets the display_name of this Cons3rtTemplateData.
        :param display_name: The display_name of this Cons3rtTemplateData.  # noqa: E501
        :type: str
        """
        self._display_name = display_name
    @property
    def operating_system(self):
        """Gets the operating_system of this Cons3rtTemplateData.  # noqa: E501
        :return: The operating_system of this Cons3rtTemplateData.  # noqa: E501
        :rtype: str
        """
        return self._operating_system
    @operating_system.setter
    def operating_system(self, operating_system):
        """Sets the operating_system of this Cons3rtTemplateData.
        :param operating_system: The operating_system of this Cons3rtTemplateData.  # noqa: E501
        :type: str
        """
        # Required field: reject None and restrict to the enum below when
        # client-side validation is enabled in the Configuration.
        if self.local_vars_configuration.client_side_validation and operating_system is None:  # noqa: E501
            raise ValueError("Invalid value for `operating_system`, must not be `None`")  # noqa: E501
        allowed_values = ["AMAZON_LINUX_2_LATEST_X64", "AMAZON_LINUX_LATEST_X64", "CENTOS_6_X64", "CENTOS_6_X86", "CENTOS_7_X64", "CENTOS_8_X64", "CORE_OS_1221_X64", "F5_BIGIP_X64", "FEDORA_23_X64", "FORTISIEM", "GENERIC_LINUX_X64", "GENERIC_WINDOWS_X64", "KALI_ROLLING_X64", "ORACLE_LINUX_6_X64", "ORACLE_LINUX_7_X64", "ORACLE_LINUX_8_X64", "OS_X_10", "OS_X_11", "PALO_ALTO_NETWORKS_PAN_OS_X64", "RASPBIAN", "RHEL_5_X64", "RHEL_5_X86", "RHEL_6_X64", "RHEL_6_X86", "RHEL_7_ATOMIC_HOST", "RHEL_7_PPCLE", "RHEL_7_X64", "RHEL_8_X64", "SOLARIS_11_X64", "UBUNTU_12_X64", "UBUNTU_14_X64", "UBUNTU_16_X64", "UBUNTU_18_X64", "UBUNTU_20_X64", "UBUNTU_CORE", "VYOS_1_1_X64", "VYOS_1_2_X64", "VYOS_1_3_X64", "VYOS_ROLLING_X64", "WINDOWS_10_X64", "WINDOWS_7_X64", "WINDOWS_7_X86", "WINDOWS_8_X64", "WINDOWS_SERVER_2008_R2_X64", "WINDOWS_SERVER_2008_X64", "WINDOWS_SERVER_2012_R2_X64", "WINDOWS_SERVER_2012_X64", "WINDOWS_SERVER_2016_X64", "WINDOWS_SERVER_2019_X64", "WINDOWS_SERVER_2019_CORE_X64", "WINDOWS_XP_X86"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and operating_system not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `operating_system` ({0}), must be one of {1}"  # noqa: E501
                .format(operating_system, allowed_values)
            )
        self._operating_system = operating_system
    @property
    def virt_realm_template_name(self):
        """Gets the virt_realm_template_name of this Cons3rtTemplateData.  # noqa: E501
        :return: The virt_realm_template_name of this Cons3rtTemplateData.  # noqa: E501
        :rtype: str
        """
        return self._virt_realm_template_name
    @virt_realm_template_name.setter
    def virt_realm_template_name(self, virt_realm_template_name):
        """Sets the virt_realm_template_name of this Cons3rtTemplateData.
        :param virt_realm_template_name: The virt_realm_template_name of this Cons3rtTemplateData.  # noqa: E501
        :type: str
        """
        # Required field: must not be None under client-side validation.
        if self.local_vars_configuration.client_side_validation and virt_realm_template_name is None:  # noqa: E501
            raise ValueError("Invalid value for `virt_realm_template_name`, must not be `None`")  # noqa: E501
        self._virt_realm_template_name = virt_realm_template_name
    @property
    def cons3rt_agent_installed(self):
        """Gets the cons3rt_agent_installed of this Cons3rtTemplateData.  # noqa: E501
        :return: The cons3rt_agent_installed of this Cons3rtTemplateData.  # noqa: E501
        :rtype: bool
        """
        return self._cons3rt_agent_installed
    @cons3rt_agent_installed.setter
    def cons3rt_agent_installed(self, cons3rt_agent_installed):
        """Sets the cons3rt_agent_installed of this Cons3rtTemplateData.
        :param cons3rt_agent_installed: The cons3rt_agent_installed of this Cons3rtTemplateData.  # noqa: E501
        :type: bool
        """
        self._cons3rt_agent_installed = cons3rt_agent_installed
    @property
    def container_capable(self):
        """Gets the container_capable of this Cons3rtTemplateData.  # noqa: E501
        :return: The container_capable of this Cons3rtTemplateData.  # noqa: E501
        :rtype: bool
        """
        return self._container_capable
    @container_capable.setter
    def container_capable(self, container_capable):
        """Sets the container_capable of this Cons3rtTemplateData.
        :param container_capable: The container_capable of this Cons3rtTemplateData.  # noqa: E501
        :type: bool
        """
        self._container_capable = container_capable
    @property
    def disks(self):
        """Gets the disks of this Cons3rtTemplateData.  # noqa: E501
        :return: The disks of this Cons3rtTemplateData.  # noqa: E501
        :rtype: list[Disk]
        """
        return self._disks
    @disks.setter
    def disks(self, disks):
        """Sets the disks of this Cons3rtTemplateData.
        :param disks: The disks of this Cons3rtTemplateData.  # noqa: E501
        :type: list[Disk]
        """
        self._disks = disks
    @property
    def fail_count(self):
        """Gets the fail_count of this Cons3rtTemplateData.  # noqa: E501
        :return: The fail_count of this Cons3rtTemplateData.  # noqa: E501
        :rtype: int
        """
        return self._fail_count
    @fail_count.setter
    def fail_count(self, fail_count):
        """Sets the fail_count of this Cons3rtTemplateData.
        :param fail_count: The fail_count of this Cons3rtTemplateData.  # noqa: E501
        :type: int
        """
        self._fail_count = fail_count
    @property
    def has_gpu(self):
        """Gets the has_gpu of this Cons3rtTemplateData.  # noqa: E501
        :return: The has_gpu of this Cons3rtTemplateData.  # noqa: E501
        :rtype: bool
        """
        return self._has_gpu
    @has_gpu.setter
    def has_gpu(self, has_gpu):
        """Sets the has_gpu of this Cons3rtTemplateData.
        :param has_gpu: The has_gpu of this Cons3rtTemplateData.  # noqa: E501
        :type: bool
        """
        self._has_gpu = has_gpu
    @property
    def id(self):
        """Gets the id of this Cons3rtTemplateData.  # noqa: E501
        :return: The id of this Cons3rtTemplateData.  # noqa: E501
        :rtype: int
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this Cons3rtTemplateData.
        :param id: The id of this Cons3rtTemplateData.  # noqa: E501
        :type: int
        """
        self._id = id
    @property
    def license(self):
        """Gets the license of this Cons3rtTemplateData.  # noqa: E501
        :return: The license of this Cons3rtTemplateData.  # noqa: E501
        :rtype: str
        """
        return self._license
    @license.setter
    def license(self, license):
        """Sets the license of this Cons3rtTemplateData.
        :param license: The license of this Cons3rtTemplateData.  # noqa: E501
        :type: str
        """
        self._license = license
    @property
    def max_num_cpus(self):
        """Gets the max_num_cpus of this Cons3rtTemplateData.  # noqa: E501
        :return: The max_num_cpus of this Cons3rtTemplateData.  # noqa: E501
        :rtype: int
        """
        return self._max_num_cpus
    @max_num_cpus.setter
    def max_num_cpus(self, max_num_cpus):
        """Sets the max_num_cpus of this Cons3rtTemplateData.
        :param max_num_cpus: The max_num_cpus of this Cons3rtTemplateData.  # noqa: E501
        :type: int
        """
        self._max_num_cpus = max_num_cpus
    @property
    def max_ram_in_megabytes(self):
        """Gets the max_ram_in_megabytes of this Cons3rtTemplateData.  # noqa: E501
        :return: The max_ram_in_megabytes of this Cons3rtTemplateData.  # noqa: E501
        :rtype: int
        """
        return self._max_ram_in_megabytes
    @max_ram_in_megabytes.setter
    def max_ram_in_megabytes(self, max_ram_in_megabytes):
        """Sets the max_ram_in_megabytes of this Cons3rtTemplateData.
        :param max_ram_in_megabytes: The max_ram_in_megabytes of this Cons3rtTemplateData.  # noqa: E501
        :type: int
        """
        self._max_ram_in_megabytes = max_ram_in_megabytes
    @property
    def note(self):
        """Gets the note of this Cons3rtTemplateData.  # noqa: E501
        :return: The note of this Cons3rtTemplateData.  # noqa: E501
        :rtype: str
        """
        return self._note
    @note.setter
    def note(self, note):
        """Sets the note of this Cons3rtTemplateData.
        :param note: The note of this Cons3rtTemplateData.  # noqa: E501
        :type: str
        """
        self._note = note
    @property
    def package_management_type(self):
        """Gets the package_management_type of this Cons3rtTemplateData.  # noqa: E501
        :return: The package_management_type of this Cons3rtTemplateData.  # noqa: E501
        :rtype: str
        """
        return self._package_management_type
    @package_management_type.setter
    def package_management_type(self, package_management_type):
        """Sets the package_management_type of this Cons3rtTemplateData.
        :param package_management_type: The package_management_type of this Cons3rtTemplateData.  # noqa: E501
        :type: str
        """
        # Optional enum field: validated against allowed_values only.
        allowed_values = ["APP_STORE", "APT_GET", "DNF", "DOCKER", "NONE", "PKGADD", "SNAP", "YUM"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and package_management_type not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `package_management_type` ({0}), must be one of {1}"  # noqa: E501
                .format(package_management_type, allowed_values)
            )
        self._package_management_type = package_management_type
    @property
    def power_on_delay_override(self):
        """Gets the power_on_delay_override of this Cons3rtTemplateData.  # noqa: E501
        :return: The power_on_delay_override of this Cons3rtTemplateData.  # noqa: E501
        :rtype: int
        """
        return self._power_on_delay_override
    @power_on_delay_override.setter
    def power_on_delay_override(self, power_on_delay_override):
        """Sets the power_on_delay_override of this Cons3rtTemplateData.
        :param power_on_delay_override: The power_on_delay_override of this Cons3rtTemplateData.  # noqa: E501
        :type: int
        """
        self._power_on_delay_override = power_on_delay_override
    @property
    def power_shell_version(self):
        """Gets the power_shell_version of this Cons3rtTemplateData.  # noqa: E501
        :return: The power_shell_version of this Cons3rtTemplateData.  # noqa: E501
        :rtype: str
        """
        return self._power_shell_version
    @power_shell_version.setter
    def power_shell_version(self, power_shell_version):
        """Sets the power_shell_version of this Cons3rtTemplateData.
        :param power_shell_version: The power_shell_version of this Cons3rtTemplateData.  # noqa: E501
        :type: str
        """
        allowed_values = ["NONE", "POWERSHELL_1_0", "POWERSHELL_2_0", "POWERSHELL_3_0", "POWERSHELL_4_0", "POWERSHELL_5_0", "POWERSHELL_6_0"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and power_shell_version not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `power_shell_version` ({0}), must be one of {1}"  # noqa: E501
                .format(power_shell_version, allowed_values)
            )
        self._power_shell_version = power_shell_version
    @property
    def template_registration(self):
        """Gets the template_registration of this Cons3rtTemplateData.  # noqa: E501
        :return: The template_registration of this Cons3rtTemplateData.  # noqa: E501
        :rtype: TemplateRegistration
        """
        return self._template_registration
    @template_registration.setter
    def template_registration(self, template_registration):
        """Sets the template_registration of this Cons3rtTemplateData.
        :param template_registration: The template_registration of this Cons3rtTemplateData.  # noqa: E501
        :type: TemplateRegistration
        """
        self._template_registration = template_registration
    @property
    def remote_access_templates(self):
        """Gets the remote_access_templates of this Cons3rtTemplateData.  # noqa: E501
        :return: The remote_access_templates of this Cons3rtTemplateData.  # noqa: E501
        :rtype: list[RemoteAccessTemplate]
        """
        return self._remote_access_templates
    @remote_access_templates.setter
    def remote_access_templates(self, remote_access_templates):
        """Sets the remote_access_templates of this Cons3rtTemplateData.
        :param remote_access_templates: The remote_access_templates of this Cons3rtTemplateData.  # noqa: E501
        :type: list[RemoteAccessTemplate]
        """
        self._remote_access_templates = remote_access_templates
    @property
    def service_management_type(self):
        """Gets the service_management_type of this Cons3rtTemplateData.  # noqa: E501
        :return: The service_management_type of this Cons3rtTemplateData.  # noqa: E501
        :rtype: str
        """
        return self._service_management_type
    @service_management_type.setter
    def service_management_type(self, service_management_type):
        """Sets the service_management_type of this Cons3rtTemplateData.
        :param service_management_type: The service_management_type of this Cons3rtTemplateData.  # noqa: E501
        :type: str
        """
        allowed_values = ["SYSTEMD", "INITD", "LAUNCHD", "UNKNOWN", "UPDATE_RC", "UPSTART", "WINDOWS"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and service_management_type not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `service_management_type` ({0}), must be one of {1}"  # noqa: E501
                .format(service_management_type, allowed_values)
            )
        self._service_management_type = service_management_type
    @property
    def user_count(self):
        """Gets the user_count of this Cons3rtTemplateData.  # noqa: E501
        :return: The user_count of this Cons3rtTemplateData.  # noqa: E501
        :rtype: int
        """
        return self._user_count
    @user_count.setter
    def user_count(self, user_count):
        """Sets the user_count of this Cons3rtTemplateData.
        :param user_count: The user_count of this Cons3rtTemplateData.  # noqa: E501
        :type: int
        """
        self._user_count = user_count
    @property
    def virt_realm_id(self):
        """Gets the virt_realm_id of this Cons3rtTemplateData.  # noqa: E501
        :return: The virt_realm_id of this Cons3rtTemplateData.  # noqa: E501
        :rtype: int
        """
        return self._virt_realm_id
    @virt_realm_id.setter
    def virt_realm_id(self, virt_realm_id):
        """Sets the virt_realm_id of this Cons3rtTemplateData.
        :param virt_realm_id: The virt_realm_id of this Cons3rtTemplateData.  # noqa: E501
        :type: int
        """
        self._virt_realm_id = virt_realm_id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models (anything with a to_dict()).
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality is structural: compares the serialized dicts.
        if not isinstance(other, Cons3rtTemplateData):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, Cons3rtTemplateData):
            return True
        return self.to_dict() != other.to_dict()
|
from ..PyQt.QtGui import QDoubleSpinBox, QApplication
from ..PyQt.QtCore import pyqtProperty, QEvent, Qt
from .base import PyDMWritableWidget
class PyDMSpinbox(QDoubleSpinBox, PyDMWritableWidget):
    """
    A QDoubleSpinBox with support for Channels and more from PyDM.
    Parameters
    ----------
    parent : QWidget
        The parent widget for the Label
    init_channel : str, optional
        The channel to be used by the widget.
    """
    def __init__(self, parent=None, init_channel=None):
        QDoubleSpinBox.__init__(self, parent)
        PyDMWritableWidget.__init__(self, init_channel=init_channel)
        # Guard flag: True while setValue() is applying a value that came FROM
        # the channel, so send_value() does not echo it back (see value_changed).
        self.valueBeingSet = False
        self.setEnabled(False)
        self._alarm_sensitive_border = False
        self._show_step_exponent = True
        # Single-step size is 10 ** step_exponent (see update_step_size).
        self.step_exponent = 0
        self.setDecimals(0)
        self.app = QApplication.instance()
        self.setAccelerated(True)
    def keyPressEvent(self, ev):
        """
        Method invoked when a key press event happens on the QDoubleSpinBox.
        For PyDMSpinBox we are interested on the Keypress events for:
        - CTRL + Left/Right : Increase or Decrease the step exponent;
        - Up / Down : Add or Remove `singleStep` units to the value;
        - PageUp / PageDown : Add or Remove 10 times `singleStep` units
          to the value;
        - Return or Enter : Send the value to the channel using the
          `send_value_signal`.
        Parameters
        ----------
        ev : QEvent
        """
        ctrl_hold = self.app.queryKeyboardModifiers() == Qt.ControlModifier
        if ctrl_hold and (ev.key() in (Qt.Key_Left, Qt.Key_Right)):
            # Left grows the step by x10, Right shrinks it by /10.
            self.step_exponent += 1 if ev.key() == Qt.Key_Left else -1
            # Clamp so the step never goes below the displayed precision.
            self.step_exponent = max(-self.decimals(), self.step_exponent)
            self.update_step_size()
        elif ev.key() in (Qt.Key_Return, Qt.Key_Enter):
            self.send_value()
        else:
            # Up/Down/PageUp/PageDown are handled by QDoubleSpinBox itself.
            super(PyDMSpinbox, self).keyPressEvent(ev)
    def widget_ctx_menu(self):
        """
        Fetch the Widget specific context menu which will be populated with additional tools by `assemble_tools_menu`.
        Returns
        -------
        QMenu or None
            If the return of this method is None a new QMenu will be created by `assemble_tools_menu`.
        """
        def toggle():
            self.showStepExponent = not self.showStepExponent
        menu = self.lineEdit().createStandardContextMenu()
        menu.addSeparator()
        ac = menu.addAction('Toggle Show Step Size')
        ac.triggered.connect(toggle)
        return menu
    def update_step_size(self):
        """
        Update the Single Step size on the QDoubleSpinBox.
        """
        self.setSingleStep(10 ** self.step_exponent)
        self.update_format_string()
    def update_format_string(self):
        """
        Reconstruct the format string to be used when representing the
        output value.
        Returns
        -------
        format_string : str
            The format string to be used including or not the precision
            and unit
        """
        if self._show_units:
            units = " {}".format(self._unit)
        else:
            units = ""
        # Show the step either inline in the suffix or as a tooltip.
        if self._show_step_exponent:
            self.setSuffix("{0} Step: 1E{1}".format(units, self.step_exponent))
            self.lineEdit().setToolTip("")
        else:
            self.setSuffix(units)
            self.lineEdit().setToolTip('Step: 1E{0:+d}'.format(self.step_exponent))
    def value_changed(self, new_val):
        """
        Callback invoked when the Channel value is changed.
        Parameters
        ----------
        new_val : int or float
            The new value from the channel.
        """
        super(PyDMSpinbox, self).value_changed(new_val)
        # Raise the guard so setValue() does not trigger a write-back.
        self.valueBeingSet = True
        self.setValue(new_val)
        self.valueBeingSet = False
    def send_value(self):
        """
        Method invoked to send the current value on the QDoubleSpinBox to
        the channel using the `send_value_signal`.
        """
        value = QDoubleSpinBox.value(self)
        if not self.valueBeingSet:
            self.send_value_signal[float].emit(value)
    def ctrl_limit_changed(self, which, new_limit):
        """
        Callback invoked when the Channel receives new control limit
        values.
        Parameters
        ----------
        which : str
            Which control limit was changed. "UPPER" or "LOWER"
        new_limit : float
            New value for the control limit
        """
        super(PyDMSpinbox, self).ctrl_limit_changed(which, new_limit)
        if which == "UPPER":
            self.setMaximum(new_limit)
        else:
            self.setMinimum(new_limit)
    def precision_changed(self, new_precision):
        """
        Callback invoked when the Channel has new precision value.
        This callback also triggers an update_format_string call so the
        new precision value is considered.
        Parameters
        ----------
        new_precision : int or float
            The new precision value
        """
        super(PyDMSpinbox, self).precision_changed(new_precision)
        self.setDecimals(new_precision)
    @pyqtProperty(bool)
    def showStepExponent(self):
        """
        Whether to show or not the step exponent
        Returns
        -------
        bool
        """
        return self._show_step_exponent
    @showStepExponent.setter
    def showStepExponent(self, val):
        """
        Whether to show or not the step exponent
        Parameters
        ----------
        val : bool
        """
        self._show_step_exponent = val
        self.update_format_string()
|
# Member roster; delete_data() removes an entry by index, compacting the
# list the way a fixed-size array deletion would (shift left, drop tail).
katok = ['다현', '정연', '쯔위', '사나', '지효']
def delete_data(position):
    """Delete the element at *position* and shift the rest one slot left."""
    last = len(katok) - 1
    katok[position] = None
    for idx in range(position, last):
        katok[idx] = katok[idx + 1]
        katok[idx + 1] = None
    del katok[last]
print(katok)
delete_data(1)
print(katok)
delete_data(3)
print(katok)
|
#!/usr/bin/env python3
"""fileio.py"""
from pprint import pprint
def write_json_data(filename: str, json_data: str) -> None:
    """Write *json_data* (an already-serialized JSON string) to *filename*.

    Errors are reported to stdout instead of being raised, keeping the
    original best-effort behaviour.
    """
    try:
        with open(filename, 'w') as write:
            write.write(json_data)
    except Exception as e:
        # BUG FIX: the original called the undefined name ``printf``, which
        # raised NameError instead of reporting the underlying I/O error.
        print(f'{e}')
def main():
    """Entry point placeholder; does nothing and returns None (exit code 0)."""
    return None
if __name__ == '__main__':
    # Local imports: only needed when run as a script, not on import.
    from os import system
    from sys import exit
    # Clear the terminal first (POSIX `clear`; harmless failure elsewhere).
    system('clear')
    # Propagate main()'s return value as the process exit status.
    exit(main())
|
import cv2

# Load the source image (BGR). cv2.imread returns None on failure instead of
# raising, so fail fast with a clear message rather than a cryptic TypeError
# at the slicing step below.
image = cv2.imread("../assets/img.png")
if image is None:
    raise FileNotFoundError("Could not read image: ../assets/img.png")

# Crop rectangle: top-left corner (x, y), width w, height h.
x, y, w, h = 180, 65, 700, 750
cropped_image = image[y:y + h, x:x + w]  # NumPy slicing: rows = y, cols = x

cv2.imshow("Cropped", cropped_image)
cv2.waitKey(0)  # block until a key is pressed
|
#!/usr/bin/env python
# encoding: utf-8
import sys
# Python 2-only hack: force the process default string encoding to UTF-8.
# reload() as a builtin and setdefaultencoding() exist only on Python 2;
# this module cannot run under Python 3.
reload(sys)
sys.setdefaultencoding('utf8')
"""
Created by Cuong Pham on 2012-01-27.
Copyright (c) 2012 Nemo Find Inc. All rights reserved.
Base class for all of our spiders.
"""
from bs4 import BeautifulSoup
import lxml.html
import time
import os
from scrapy import signals
from scrapy.xlib.pydispatch import dispatcher
from scraper.items import Product
from scraper import settings
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.http import Request
from scrapy import signals, log
from scrapy.selector import HtmlXPathSelector
from scraper.pipelines import SavingPipeline
import hashlib
from scraper.common import util
import urlparse
from common import metric_datadog as metr
from common import config_name_datadog as cnd
BLACK_CHAR = ["»"]
def cleanText(data):
    """Strip HTML markup from *data*.

    Lists are cleaned element-wise; plain/unicode strings have their tags
    removed via BeautifulSoup; anything else is returned unchanged.
    """
    if type(data) == list:
        return [cleanText(element) for element in data]
    if type(data) in [str, unicode]:
        # return lxml.html.fromstring(data).text_content()
        # Join every text node, discarding the markup between them.
        return ' '.join(BeautifulSoup(data).findAll(text=True))
    return data
class DetailScraper(CrawlSpider):
name = None
allowed_domains = []
start_urls = []
rules = []
accesstrade_link = None
from_url_file = None
savingPipe = None
handle_httpstatus_list = [404,302,500,301]
debug_call_stop = 0
def __init__(self, xpath_dict = {}, files=None):
CrawlSpider.__init__(self)
self.xpath_dict = xpath_dict
self.from_url_file = files
self.savingPipe = SavingPipeline()
if self.from_url_file:
self.crawl_from_files()
def crawl_from_files(self):
print "============================> Read file", self.from_url_file
f = open(self.from_url_file,'r')
self.start_urls = [url.strip() for url in f.readlines()]
f.close()
def setExpiredItemsBaseOnStatus(self,response):
isExpired = False
if response.status == 404:
record = self.savingPipe.set_item_expired(response.url)
print "==============================> Item expired because of 404:", response.url
isExpired = True
if response.status == 500:
record = self.savingPipe.set_item_expired(response.url)
print "==============================> Item expired because of 500:", response.url
isExpired = True
if response.status == 302:
record = self.savingPipe.set_item_expired(response.url)
print "==============================> Item expired because of 302:", response.url
isExpired = True
if response.status == 301:
record = self.savingPipe.set_item_expired(response.url)
print "==============================> Item expired because of 301:", response.url
isExpired = True
return isExpired
    def parse(self, response):
        """Dispatch a downloaded response.

        In URL-file mode every start URL is itself an item detail page, so
        parse it directly; otherwise defer to CrawlSpider's rule-driven flow.
        """
        #self.setExpiredItemsBaseOnStatus(response)
        if self.from_url_file:
            item = self.parse_item(response)
            if item:
                return item
        else:
            # NOTE(review): _parse_response is CrawlSpider's private API;
            # this ties the spider to the pinned Scrapy version.
            return self._parse_response(response, self.parse_start_url, cb_kwargs={}, follow=True)
def parse_item(self, response):
hxs = HtmlXPathSelector(response)
expired = self.setExpiredItemsBaseOnStatus(response)
if not expired:
print '======================>EXTRACT', response.url
item = Product()
item['source'] = self.name
item['origin_url'] = response.url
if not self.accesstrade_link or self.accesstrade_link is None:
item['url'] = response.url
else:
item['url'] = self.accesstrade_link + response.url
item['timestamp'] = time.time()
for prop, xpath in self.xpath_dict.items():
if xpath.strip():
try:
item[prop] = cleanText(hxs.select(xpath).extract())
except KeyError:
continue
if 'canonical' in item and item['url'] != item['canonical'][0]:
print '==================> Item expired!'
item['expired'] = 1
if not hxs.select((xpath)).extract() or hxs.select(xpath).extract() == "":
del item[prop]
if prop == "description":
item["description"] = hxs.select(xpath +"/node()").extract()
if prop == "properties":
item["properties"] = hxs.select(xpath +"/node()").extract()
item = self.check_item(item)
if item is not None and item.isValid():
self.state['items_count'] = self.state.get('items_count', 0) + 1
return item
def parse_item_and_links(self, response):
item = self.parse_item(response)
if item:
yield item
for rule in self.rules:
if not rule.link_extractor:
continue
links = rule.link_extractor.extract_links(response)
for link in links:
if link.url.startswith("http"):
url = self.parse_links(link.url)
yield Request(url)
def save_item(self,response):
item = self.parse_item(response)
item_id = util.getIdFromUrl(item['url'])
print '=== INSERTING===',item_id
record = self.savingPipe.process_item(item,self.name)
if record:
print '===INSERTED===',record
else:
print '===INSERT ERROR==='
return item
def spider_closed(self,spider,reason):
self.savingPipe.kafka.stop()
self.savingPipe.kafka.client.close()
print "Done processing spider, close kafka"
def check_item(self, item):
if item is not None:
if 'canonical' in item and item['canonical'] is not None and len(item['canonical']) > 1:
prop_canonical = item['canonical'][0]
uri = urlparse.urlparse(item['origin_url'])
if prop_canonical.startswith('/'):
prop_canonical = uri.scheme + "://" + uri.netloc + prop_canonical
elif not prop_canonical.startswith('/') and not prop_canonical.startswith('http'):
prop_canonical = uri.scheme + "://" + uri.netloc + "/" + prop_canonical
if prop_canonical != item['origin_url']:
print "=======> Item duplicate: " + item['url'] + ", we re-update it"
self.savingPipe.set_item_expired(item['url'])
item['url'] = prop_canonical
# return None
if 'name' in item and item['name'] is not None and len(item['name']) > 0:
item_name = item['name'][0]
for char in BLACK_CHAR:
item_name = item_name.replace(char, "")
item_name = item_name.strip()
if len(item_name) <= 1:
print "=======> Item expired because invalid name: " + item['url']
self.savingPipe.set_item_expired(item['url'])
return None
item['name'][0] = item_name
else:
print "=======> Item expired because invalid name: " + item['url']
self.savingPipe.set_item_expired(item['url'])
return None
if (not 'images' in item and not 'price' in item):
return None
return item
""" def spider_opened(self,spider):
log.msg('Opened Spider ' + str(spider.name) + ' ' + self.getMd5( str(spider.name)) + ' ' + str(self.timestamp() ) ,spider = spider)
log.msg('Started Spider!......................... ',spider = spider)
self.collection = self.db['spiders']
res = self.collection.update( {"doc._id":self.getMd5(str(spider.name))} , {"$set":{"doc.status":"CRAWL_STARTING","doc.server_id":settings.CRAWL_SERVER_ID,"doc.last_update":self.timestamp() } }, upsert=False, multi=False,safe=True)
if res:
log.msg('Update status spider CRAWL_STARTING success ',spider = spider)
else:
log.msg('Update status spider CRAWL_STARTING fail ',spider = spider)
return
def spider_closed(self,spider,reason):
#print "Spider", spider, stats.get_stats(spider)
log.msg('Closed Spider ' + self.getMd5( str(spider.name)) + ' ' + str(self.timestamp()),spider = spider)
log.msg('Finish Spider! Update status to mongodb ',spider = spider)
self.collection = self.db['spiders']
res = self.collection.update({"doc._id": self.getMd5( str(spider.name)) } , {"$set":{"doc.status":"CRAWL_STOPPED","doc.last_update":self.timestamp() } }, upsert=False, multi=False,safe=True)
if res:
log.msg('Update status spider CRAWL_STOPPED success ',spider = spider)
else:
log.msg('Update status spider CRAWL_STOPPED fail ',spider = spider)
return
def spider_error(self, failure, response, spider):
log.msg('Spider error! ' + str(response) + str(failure),spider = spider)
self.collection = self.db['spiders']
res = self.collection.update({"doc._id":self.getMd5(str(spider.name))} , {"$set":{"doc.status":"CRAWL_ERROR","doc.note:":str(failure),"doc.last_update":self.timestamp() } }, upsert=False, multi=False,safe=True)
if res:
log.msg("Update status spider error success ",spider = spider)
else:
log.msg("Update status spider error fail ",spider = spider)
return
def timestamp(self):
return int(time.time())
def getMd5(self,text):
# handle the 'ordinal not in range(128)' problem
if type(text) == unicode:
return hashlib.md5(text.encode('utf8')).hexdigest()
else:
return hashlib.md5(text).hexdigest()
"""
|
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
from troop.models import Troop
import csv
import os
class Command(BaseCommand):
    """Import users and their troops from a CSV file.

    The CSV needs a header row containing at least the columns
    email, first_name, last_name, troop_number and troop_name.
    Rows that cannot be parsed or imported are reported on stderr
    with their 1-based file line number.
    """

    help = "Imports a set of users from a CSV file"

    def add_arguments(self, parser):
        parser.add_argument("filename")
        parser.add_argument("--send-emails", action="store_true")

    def handle(self, *args, **options):
        """Read the CSV, collect well-formed rows, then import them."""
        try:
            with open(options["filename"], newline="") as csv_file:
                csv_reader = csv.reader(csv_file)
                columns = self._extract_columns(next(csv_reader))
                lines = []
                for i, row in enumerate(csv_reader):
                    try:
                        line = {
                            "troop_number": row[columns["troop_number"]],
                            "troop_name": row[columns["troop_name"]],
                            "email": row[columns["email"]],
                            "first_name": row[columns["first_name"]],
                            "last_name": row[columns["last_name"]],
                            # +2 = 1-based numbering plus the header row.
                            "line_number": i + 2,
                        }
                        lines.append(line)
                    except Exception as e:
                        # BUG FIX: e.__cause__ is only populated by
                        # "raise ... from" and was almost always None
                        # here; report the exception itself instead.
                        self.stderr.write("Line {}: {}".format(i + 2, e))
                self.import_lines(lines, options["send_emails"])
        except FileNotFoundError:
            raise CommandError(
                "File {} not found".format(os.path.abspath(options["filename"]))
            )

    def import_lines(self, lines, send_emails=False):
        """Create missing troops/users, link them, and report totals.

        NOTE(review): get_or_create matches on every supplied field, so
        a known troop number arriving with a changed name (or a known
        email with changed names) creates a second record instead of
        matching the existing one -- use defaults={...} if dedup by
        number/email is the intent.
        """
        created_users, created_troops = 0, 0
        for line in lines:
            try:
                troop, created = Troop.objects.get_or_create(
                    number=line["troop_number"], name=line["troop_name"],
                )
                if created:
                    created_troops += 1
                user, created = get_user_model().objects.get_or_create(
                    email=line["email"],
                    first_name=line["first_name"],
                    last_name=line["last_name"],
                )
                user.troops.add(troop)
                if created:
                    created_users += 1
                    # Welcome emails only go to newly created accounts.
                    if send_emails:
                        user.send_welcome_email()
            except Exception as e:
                # BUG FIX: report the exception, not its (usually None)
                # __cause__.
                self.stderr.write(
                    "Line {}: {}".format(line["line_number"], e)
                )
        self.stdout.write(
            f"Imported {created_troops} new troops and {created_users} new users"
        )

    def _extract_columns(self, row) -> dict:
        """Map each required column name to its index in the header row.

        Raises CommandError listing any required columns that are absent.
        """
        required = ["email", "first_name", "last_name", "troop_number", "troop_name"]
        missing = [name for name in required if name not in row]
        if missing:
            msg = (
                "Found the following columns in the csv: {}\n"
                "Required columns missing: {}".format(
                    ", ".join(row), ", ".join(missing)
                )
            )
            raise CommandError(msg)
        return {name: i for i, name in enumerate(row)}
|
#!/usr/bin/env python3
import argparse as ap
import json
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess as sp
import sys
import tarfile
import tempfile
import zipfile
from collections import Counter
from datetime import timedelta
from distutils.dir_util import copy_tree
from io import TextIOWrapper
from pathlib import Path
from typing import Mapping, Optional, Sequence, Tuple, Union
from urllib.request import urlretrieve
from generate_stat_html import HTMLPrinter
from summarize_gcov import summarize_gcov
from summarize_sa_stats import summ_stats
def make_dir(path: str) -> None:
    """Create *path* (with any missing parents); no-op if it exists."""
    target = Path(path)
    target.mkdir(parents=True, exist_ok=True)
def load_config(config_path: str) -> dict:
    """Parse the JSON configuration file; exit(1) if it is empty."""
    with open(config_path, "r", encoding="utf-8", errors="ignore") \
            as config_file:
        parsed = json.load(config_file)
    if not parsed:
        logging.error("Empty config file.")
        sys.exit(1)
    return parsed
def run_command(cmd: Union[str, Sequence[str]], print_error: bool = True,
                cwd: Optional[str] = None,
                env: Optional[Mapping[str, str]] = None,
                shell: bool = False) -> Tuple[int, str, str]:
    """Run *cmd* and return (returncode, stdout, stderr).

    The command string is tokenized with shlex unless *shell* is set.
    A missing executable is reported as return code 2 with empty
    output.  When the command fails and *print_error* is true, the
    captured stderr (or stdout as a fallback) is logged.
    """
    invocation = cmd if shell else shlex.split(cmd)
    try:
        child = sp.Popen(invocation, stdin=sp.PIPE, stdout=sp.PIPE,
                         stderr=sp.PIPE, cwd=cwd, env=env, shell=shell,
                         encoding="utf-8", universal_newlines=True,
                         errors="ignore")
        out, err = child.communicate()
        code = child.returncode
    except FileNotFoundError:
        code, out, err = 2, "", ""
    if code != 0 and print_error:
        logging.error("%s\n", str(err if err else out))
    return code, out, err
def count_lines(project: dict, project_dir: str) -> None:
    """Store the project's lines of code under project["LOC"] via cloc.

    Analysis result directories ("cc_results") are excluded from the
    count.  A cloc failure or unexpected output leaves the dict
    untouched, and the LOC is then logged as '?'.
    """
    failed, stdout, _ = run_command(
        'cloc "%s" --json --not-match-d="cc_results"' % project_dir, False)
    if not failed:
        try:
            cloc_json_out = json.loads(stdout)
            project["LOC"] = cloc_json_out["SUM"]["code"]
        # BUG FIX: narrowed from a bare except (which also swallowed
        # KeyboardInterrupt/SystemExit) to the failures cloc can cause:
        # non-JSON output or a missing "SUM" section.
        except (ValueError, KeyError, TypeError):
            pass
    logging.info("[%s] LOC: %s.", project['name'], project.get('LOC', '?'))
def clone_project(project: dict, project_dir: str, source_dir: str,
                  is_subproject: bool = False) -> bool:
    """Clone a single project.
    Its version is specified by a version tag or a commit hash
    found in the config file.
    If a project already exists, we simply overwrite it.
    """
    # Pre-prepared projects are assumed to be on disk already.
    if project.get('prepared', False):
        count_lines(project, project_dir)
        return True
    if os.path.isdir(project_dir):
        shutil.rmtree(project_dir)
    project_str = "subproject" if is_subproject else "project"
    logging.info("[%s] Checking out %s... ", project['name'], project_str)
    # Check if tarball is provided.
    # TODO: support zip files.
    if project['url'].endswith((".tar.gz", ".tar.xz", ".tar.lz", ".tgz",
                                ".tbz", ".tlz", ".txz")):
        path, _ = urlretrieve(project['url'])
        with tarfile.open(path) as tar:
            tar.extractall(project_dir)
        content = os.listdir(project_dir)
        # If the tar contains a single directory, move contents up.
        if len(content) == 1:
            inner = os.path.join(project_dir, content[0])
            # shutil.copytree fails to copy to existing dir.
            copy_tree(inner, project_dir)
            shutil.rmtree(inner)
        count_lines(project, project_dir)
        return True
    # If there is no tag specified, we clone the master branch.
    # This presumes that a master branch exists.
    project['tag'] = project.get('tag', 'master')
    # Heuristic: a tag that parses as a hex number is treated as a
    # commit hash.  NOTE(review): purely numeric version tags (e.g.
    # "1234") would be misclassified -- confirm tags used in configs.
    try:
        int(project['tag'], base=16)
        commit_hash = True
    except ValueError:
        commit_hash = False
    # If the 'tag' value is a version tag, we can use shallow cloning.
    # With a commit hash, we need to clone everything and then checkout
    # the specified commit.
    cmd = {'clone': 'git clone %s "%s"' % (project['url'], project_dir)}
    if commit_hash:
        cmd['checkout'] = 'git -C "%s" checkout %s' % (
            project_dir, project['tag'])
    else:
        cmd['clone'] += ' --depth 1 --branch %s --single-branch' % \
            project['tag']
    sys.stdout.flush()
    clone_failed, _, clone_err = run_command(cmd['clone'], print_error=False)
    # Repos without a 'master' branch: retry with a plain full clone.
    if clone_failed and 'master' in str(clone_err):
        clone_failed, _, _ = run_command(
            'git clone %s "%s"' % (project['url'], project_dir))
    if clone_failed:
        return False
    if 'checkout' in cmd:
        checkout_failed, _, _ = run_command(cmd['checkout'])
        if checkout_failed:
            return False
    # Subprojects are checked out below the parent's source directory.
    for sub_project in project.get("subprojects", []):
        sub_dir = os.path.join(source_dir, sub_project["subdir"])
        if not clone_project(sub_project, sub_dir, sub_dir, True):
            return False
    if project.get('submodules', False):
        submodule_failed, _, _ = run_command('git submodule update --init',
                                             cwd=project_dir)
        if submodule_failed:
            return False
    # LOC is counted once, for the top-level project only.
    if not is_subproject:
        count_lines(project, project_dir)
    return True
def identify_build_system(project_dir: str, configure: bool) -> Optional[str]:
    """Identifies the build system of a project.
    Used heuristics:
    - If there's a 'CMakeLists.txt' file at the project root: 'cmake'.
    - If there's an 'autogen.sh' script at the project root: run it.
    - If there's a 'configure' script at the project root: run it,
      then return 'makefile'.
    FIXME: If no build system found, should we apply the same
    heuristics for src subfolder if exists?

    Returns 'cmake' or 'makefile', or None when detection (or a
    configuration step) fails.
    """
    project_files = os.listdir(project_dir)
    if not project_files:
        logging.error("No files found in '%s'.\n", project_dir)
        return None
    if 'CMakeLists.txt' in project_files:
        return 'cmake'
    if 'Makefile' in project_files:
        return 'makefile'
    # When there is a custom configure command,
    # fall back to make files.
    if not configure:
        return 'makefile'
    if 'autogen.sh' in project_files:
        # Autogen needs to be executed in the project's root directory.
        autogen_failed, _, _ = run_command("sh autogen.sh", cwd=project_dir)
        if autogen_failed:
            return None
        # Need to re-list files, as autogen might have generated a config script.
        project_files = os.listdir(project_dir)
    if 'configure' in project_files:
        configure_failed, _, _ = run_command("./configure", cwd=project_dir)
        if configure_failed:
            return None
        return 'makefile'
    logging.error("Build system cannot be identified.")
    return None
def check_logged(projects_root: str, projects: Sequence[dict]) -> int:
    """Count successfully checked projects.

    A project counts when a directory with its configured name exists
    under *projects_root* (plain files are ignored).
    """
    configured_names = {project["name"] for project in projects}
    return sum(
        1 for entry in os.listdir(projects_root)
        if not os.path.isfile(os.path.join(projects_root, entry))
        and entry in configured_names)
def get_compilation_database(project: dict,
                             project_dir: str) -> Tuple[str, str]:
    """Return (compile_commands.json path, build directory).

    The build directory is the optional project["binary_dir"] below the
    project root (created if missing), otherwise the project root.
    """
    build_dir = project_dir
    if "binary_dir" in project:
        build_dir = os.path.join(build_dir, project["binary_dir"])
    make_dir(build_dir)
    database_path = os.path.join(build_dir, "compile_commands.json")
    return database_path, build_dir
def log_project(project: dict, project_dir: str, num_jobs: int) -> bool:
    """Produce a compilation database (compile_commands.json).

    Preference order: a user-provided make_command from the config,
    else whatever identify_build_system detects.  "$JOBS" placeholders
    in configured commands are substituted with *num_jobs*.  On failure
    the project directory is removed and False is returned.
    """
    # Prepared projects are expected to ship their own database.
    if 'prepared' in project:
        return True
    configure = True
    if 'configure_command' in project:
        # A custom configure step replaces the autogen/configure probing.
        configure = False
        project['configure_command'] = \
            project['configure_command'].replace("$JOBS", str(num_jobs))
        _, _, _ = run_command(project['configure_command'],
                              True, project_dir, shell=True)
    if 'make_command' in project:
        build_sys = 'userprovided'
    else:
        build_sys = identify_build_system(project_dir, configure)
    failed = not build_sys
    logging.info("[%s] Generating build log... ", project['name'])
    json_path, binary_dir = get_compilation_database(project, project_dir)
    if build_sys == 'cmake':
        # CMake can emit the database itself; no CodeChecker wrapper needed.
        cmd = 'cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -B"%s" -H"%s"' \
            % (binary_dir, project_dir)
        failed, _, _ = run_command(cmd, True, binary_dir)
    elif build_sys == 'makefile':
        cmd = "CodeChecker log -b 'make -j%d' -o \"%s\"" \
            % (num_jobs, json_path)
        failed, _, _ = run_command(cmd, True, project_dir)
    elif build_sys == 'userprovided':
        if not project['make_command']:
            logging.info("[%s] 'make_command' is empty. Command invocation skipped.", project['name'])
        else:
            project['make_command'] = \
                project['make_command'].replace("$JOBS", str(num_jobs))
            cmd = "CodeChecker log -b '%s' -o \"%s\"" \
                % (project['make_command'], json_path)
            failed, _, _ = run_command(cmd, True, project_dir, shell=True)
    if failed:
        shutil.rmtree(project_dir)
        return False
    return True
def collect_args(arg_name: str, configuration_sources: Sequence[dict]) -> str:
    """Join the *arg_name* entry of every configuration with spaces.

    Sources lacking the key contribute an empty string (and therefore
    a bare separating space) to the result.
    """
    pieces = [source.get(arg_name, "") for source in configuration_sources]
    return " ".join(pieces)
def update_path(path: str, env: Optional[Mapping[str, str]] = None) \
        -> Mapping[str, str]:
    """Prepend *path* to the PATH entry of *env* and return the mapping.

    NOTE: the mapping is modified in place; calling this without *env*
    mutates the current process environment (os.environ).
    """
    mapping = os.environ if env is None else env
    mapping["PATH"] = "%s:%s" % (path, mapping["PATH"])
    return mapping
def build_package(project: dict, project_dir: str, jobs: int) -> bool:
    """Build a vcpkg or conan package under CodeChecker log.

    The package is removed/pre-installed first so that the logged
    install actually compiles.  Returns True when a compilation
    database was produced; unsupported package types return False.
    NOTE(review): the *jobs* parameter is currently unused.
    """
    logging.info("[%s] Generating build log... ", project['name'])
    make_dir(project_dir)
    json_path, _ = get_compilation_database(project, project_dir)
    if project["package_type"] == "vcpkg":
        # Remove first so the logged 'install' below does real work.
        run_command("vcpkg remove %s" % project["package"], True, project_dir)
        cmd = "CodeChecker log -b 'vcpkg install %s' -o \"%s\"" \
            % (project["package"], json_path)
        failed, _, _ = run_command(cmd, True, project_dir)
        return not failed
    if project["package_type"] == "conan":
        # Warm-up install; the logged run uses --build to force compiling.
        run_command("conan install %s" % project["package"], True, project_dir)
        cmd = "CodeChecker log -b 'conan install %s --build' -o \"%s\"" \
            % (project["package"], json_path)
        failed, _, _ = run_command(cmd, True, project_dir)
        return not failed
    logging.info("[%s] Unsupported package.", project['name'])
    return False
def check_project(project: dict, project_dir: str, config: dict,
                  num_jobs: int) -> None:
    """Analyze project and store the results with CodeChecker.

    Runs one analysis per entry in project["configurations"] (falling
    back to the global config, then to a single unnamed run), writing
    results to a per-configuration cc_results* directory and storing
    them on the configured CodeChecker server.
    """
    json_path, _ = get_compilation_database(project, project_dir)
    if "configurations" not in project:
        project["configurations"] = config.get("configurations",
                                               [{"name": ""}])
    # Shared skip file listing paths excluded from analysis.
    _, skippath = tempfile.mkstemp()
    with open(skippath, 'w', encoding="utf-8", errors="ignore") \
            as skipfile:
        skipfile.write("\n".join(project.get("skip", [])))
    for run_config in project["configurations"]:
        result_dir = "cc_results"
        if run_config["name"]:
            result_dir += "_" + run_config["name"]
        result_path = os.path.join(project_dir, result_dir)
        run_config["result_path"] = result_path
        # Per-run file of extra Clang Static Analyzer arguments.
        # NOTE(review): this temp file is never deleted (only skippath
        # is removed below).
        args_file, filename = tempfile.mkstemp(text=True)
        with open(args_file, 'w') as args:
            if run_config.get("coverage", False):
                coverage_dir = os.path.join(result_path, "coverage")
                run_config["coverage_dir"] = coverage_dir
                args.write(" -Xclang -analyzer-config "
                           "-Xclang record-coverage=%s " % coverage_dir)
            # Later sources override earlier ones when args are joined.
            conf_sources = [config["CodeChecker"], project, run_config]
            args.write(collect_args("clang_sa_args", conf_sources))
        # Run name: project[_tag][_configuration].
        tag = project.get("tag")
        name = project["name"]
        if tag:
            name += "_" + tag
        if run_config["name"]:
            name += "_" + run_config["name"]
        run_config["full_name"] = name
        logging.info("[%s] Analyzing project... ", name)
        env = None
        if "clang_path" in run_config:
            env = update_path(run_config["clang_path"])
        _, version_string, _ = run_command("clang --version", env=env)
        run_config["analyzer_version"] = version_string
        analyzers = config["CodeChecker"].get("analyzers", "clangsa")
        cmd = ("CodeChecker analyze '%s' -j%d -o '%s' -q " +
               "--analyzers %s --capture-analysis-output") \
            % (json_path, num_jobs, result_path, analyzers)
        cmd += " --saargs %s " % filename
        cmd += " --skip %s " % skippath
        cmd += collect_args("analyze_args", conf_sources)
        run_command(cmd, print_error=True, env=env)
        logging.info("[%s] Done. Storing results...", name)
        cmd = "CodeChecker store '%s' --url '%s' -n %s " \
            % (result_path, config["CodeChecker"]["url"], name)
        if tag:
            cmd += " --tag %s " % tag
        cmd += collect_args("store_args", conf_sources)
        run_command(cmd, print_error=True, env=env)
        logging.info("[%s] Results stored.", name)
    os.remove(skippath)
class RegexStat:
    """A compiled pattern paired with a counter of its matches.

    Callers run lines through ``self.regex.search`` and tally the
    captured text (group 1) in ``self.counter``.
    """

    def __init__(self, regex: str):
        # Compile once up front; the pattern is reused per line.
        self.regex = re.compile(regex)
        self.counter = Counter()
def process_success(path: str, statistics: Optional[dict] = None) -> dict:
    """Tally compiler warnings found in the "*.txt" files under *path*.

    Each statistic's regex is searched line by line; the captured text
    (group 1) is counted.  Returns the statistics dict (a fresh one
    when none is supplied); a missing *path* yields empty counters.
    """
    stats = dict() if statistics is None else statistics
    stats["warnings"] = RegexStat(r'warning: (.+)')
    if not os.path.exists(path):
        return stats
    text_files = (n for n in os.listdir(path) if n.endswith(".txt"))
    for name in text_files:
        with open(os.path.join(path, name), encoding="utf-8",
                  errors="ignore") as compiler_output:
            for line in compiler_output:
                for stat in stats.values():
                    found = stat.regex.search(line)
                    if found:
                        stat.counter[found.group(1)] += 1
    return stats
def process_failures(path: str, statistics: Optional[dict] = None) \
        -> Tuple[int, dict]:
    """Scan zipped analysis-failure archives under *path*.

    Each "*.zip" (written by --capture-analysis-output) contains a
    "stderr" member whose lines are matched against every statistic's
    regex; the captured text is tallied.  Returns (number of failure
    archives, statistics dict).
    """
    if statistics is None:
        statistics = dict()
    statistics.update({
        "warnings": RegexStat(r'warning: (.+)'),
        "compilation errors": RegexStat(r'error: (.+)'),
        "assertions": RegexStat(r'(Assertion.+failed\.)'),
        "unreachable": RegexStat(r'UNREACHABLE executed at (.+)')
    })
    if not os.path.exists(path):
        return 0, statistics
    failures = 0
    for name in os.listdir(path):
        if not name.endswith(".zip"):
            continue
        failures += 1
        full_path = os.path.join(path, name)
        with zipfile.ZipFile(full_path) as archive, \
                archive.open("stderr") as stderr:
            # The zip member is binary; wrap it for line-wise text reads.
            for line in TextIOWrapper(stderr, 'utf-8'):
                for _, stat in statistics.items():
                    match = stat.regex.search(line)
                    if match:
                        stat.counter[match.group(1)] += 1
    return failures, statistics
def create_link(url: str, text: str) -> str:
    """Return an HTML anchor pointing at *url* with the given link text."""
    return '<a href="{}">{}</a>'.format(url, text)
def post_process_project(project: dict, project_dir: str, config: dict,
                         printer: HTMLPrinter) -> int:
    """Collect per-configuration statistics and add them to the report.

    Gathers coverage (optional), analyzer statistics, warning/failure
    counts and server-side run info for every configuration, then hands
    the table to *printer*.  Returns the number of fatal analyzer
    errors (assertions + unreachable) seen across configurations.
    """
    _, stdout, _ = run_command(
        "CodeChecker cmd runs --url %s -o json" % config['CodeChecker']['url'])
    runs = json.loads(stdout)
    project_stats = {}
    fatal_errors = 0
    for run_config in project["configurations"]:
        cov_result_html = None
        if run_config.get("coverage", False) and \
                os.path.isdir(run_config["coverage_dir"]):
            cov_result_path = os.path.join(
                run_config["result_path"], "coverage_merged")
            try:
                run_command("MergeCoverage.py -i '%s' -o '%s'" %
                            (run_config["coverage_dir"], cov_result_path))
            except OSError:
                logging.warning("MergeCoverage.py is not found in path.")
            cov_result_html = os.path.join(
                run_config["result_path"], "coverage.html")
            try:
                run_command(
                    "gcovr -k -g '%s' --html --html-details -r '%s' -o '%s'" %
                    (cov_result_path, project_dir, cov_result_html))
            except OSError:
                logging.warning("gcovr is not found in path.")
            cov_summary = summarize_gcov(cov_result_path)
            cov_summary_path = os.path.join(
                run_config["result_path"], "coverage.txt")
            with open(cov_summary_path, "w", encoding="utf-8",
                      errors="ignore") as cov_file:
                cov_file.write(json.dumps(cov_summary, indent=2))
        stats_dir = os.path.join(run_config["result_path"], "success")
        failed_dir = os.path.join(run_config["result_path"], "failed")
        # Statistics from the Analyzer engine (if enabled).
        stats = summ_stats(stats_dir, False)
        # Additional statistics.
        stats["Analyzer version"] = run_config["analyzer_version"]
        if cov_result_html:
            stats["Detailed coverage link"] = create_link(
                cov_result_html, "coverage")
            stats["Coverage"] = cov_summary["overall"]["coverage"]
        # Find this run's record in the server's run list.
        # NOTE(review): if no entry matches, `run` keeps the last list
        # element and the lookups below will misbehave -- confirm the
        # store step guarantees a matching run exists.
        for run in runs:
            if run_config['full_name'] in run:
                run = run[run_config['full_name']]
                break
        stats["Result count"] = run["resultCount"]
        stats["Duration"] = timedelta(seconds=run["duration"])
        stats["CodeChecker link"] = \
            create_link("%s/#run=%s&tab=%s" % (config['CodeChecker']['url'],
                                               run_config['full_name'],
                                               run_config['full_name']),
                        "CodeChecker")
        stats["Successfully analyzed"] = \
            len([name for name in os.listdir(run_config["result_path"])
                 if name.endswith(".plist")])
        success_stats = process_success(stats_dir)
        failure_num, failure_stats = process_failures(failed_dir)
        # Warnings from clean runs and failed runs are reported together.
        failure_stats["warnings"].counter += success_stats["warnings"].counter
        stats["Failed to analyze"] = failure_num
        for name, stat in failure_stats.items():
            stats["Number of %s" % name] = sum(stat.counter.values())
            if stats["Number of %s" % name] > 0:
                top = ["%s [%d]" % x for x in stat.counter.most_common(5)]
                stats["Top %s" % name] = "<br>\n".join(top)
        fatal_errors += sum(failure_stats["assertions"].counter.values()) + \
            sum(failure_stats["unreachable"].counter.values())
        stats["Lines of code"] = project.get("LOC", '?')
        # Total size (bytes) of everything under the result directory.
        disk_usage = 0
        for path, _, files in os.walk(run_config['result_path']):
            for f in files:
                disk_usage += os.path.getsize(os.path.join(path, f))
        stats["Disk usage"] = disk_usage
        project_stats[run_config["name"]] = stats
    printer.extend_with_project(project["name"], project_stats)
    logging.info("[%s] Postprocessed.", project['name'])
    return fatal_errors
def main():
    """Entry point: clone/build/analyze each configured project and
    render an HTML statistics report."""
    logging.basicConfig(format='%(asctime)s (%(levelname)s) %(message)s',
                        datefmt='%H:%M:%S', level=logging.INFO)
    parser = ap.ArgumentParser(description="Run differential analysis "
                               "experiment on a set of projects.",
                               formatter_class=ap.RawTextHelpFormatter)
    parser.add_argument("--config", metavar="FILE",
                        default='test_config.json',
                        help="JSON file holding a list of projects")
    parser.add_argument("-j", "--jobs", metavar="JOBS", type=int,
                        default=multiprocessing.cpu_count(),
                        help="number of jobs")
    parser.add_argument("--fail-on-assert", dest='fail_on_assert',
                        action='store_true',
                        help="Return with non-zero error-code "
                             "when Clang asserts")
    parser.add_argument("-o", "--output", metavar="RESULT_DIR",
                        dest='output', default='projects',
                        help="Directory where results should be generated")
    args = parser.parse_args()
    try:
        _, cc_ver, _ = run_command("CodeChecker version")
    except OSError:
        logging.error("CodeChecker is not available as a command.")
        sys.exit(1)
    if args.jobs < 1:
        # NOTE(review): this only logs; execution continues with the
        # invalid job count -- an explicit exit may be intended here.
        logging.error("Invalid number of jobs.")
    logging.info("Using configuration file '%s'.", args.config)
    config = load_config(args.config)
    # Record tool/script provenance alongside the results.
    config["CodeChecker version"] = cc_ver
    script_dir = os.path.dirname(os.path.realpath(__file__))
    _, out, _ = run_command("git rev-parse HEAD", False, cwd=script_dir)
    config["Script version"] = out
    config["Script args"] = " ".join(sys.argv)
    logging.info("Number of projects to process: %d.\n",
                 len(config['projects']))
    projects_root = os.path.abspath(args.output)
    make_dir(projects_root)
    stats_html = os.path.join(projects_root, "stats.html")
    with HTMLPrinter(stats_html, config) as printer:
        for project in config['projects']:
            project_dir = os.path.join(projects_root, project['name'])
            source_dir = os.path.join(project_dir,
                                      project.get('source_dir', ''))
            package = project.get('package')
            if package:
                # Package projects (vcpkg/conan) build their own log.
                build_package(project, project_dir, args.jobs)
            else:
                if not clone_project(project, project_dir, source_dir):
                    try:
                        shutil.rmtree(project_dir)
                    except:
                        pass
                    continue
                if not log_project(project, source_dir, args.jobs):
                    continue
            check_project(project, source_dir, config, args.jobs)
            fatal_errors = post_process_project(project, source_dir, config,
                                                printer)
            if fatal_errors > 0 and args.fail_on_assert:
                logging.error('Stopping after assertion failure.')
                sys.exit(1)
    logged_projects = check_logged(projects_root, config['projects'])
    logging.info("\nNumber of analyzed projects: %d / %d\n"
                 "Results can be viewed at '%s'.\n"
                 "Stats can be viewed at 'file://%s'.",
                 logged_projects, len(config['projects']),
                 config['CodeChecker']['url'], stats_html)
# Script entry point.
if __name__ == '__main__':
    main()
|
from copy import deepcopy
import torch
from torch import nn
from rltorch.agent import BaseAgent
class SacDiscreteAgent(BaseAgent):
    """Soft Actor-Critic agent for discrete action spaces.

    The policy/critic attributes start as empty nn.Sequential()
    placeholders and the scalar hyper-parameters as None; the training
    harness is expected to wire them up (and self.env / self.device /
    self.shared_weights from BaseAgent) before the agent is used.
    """

    def __init__(self):
        super(SacDiscreteAgent, self).__init__()
        self.writer = None        # summary writer; attached by the trainer
        self.gamma_n = None       # discount factor (gamma**n for n-step returns)
        self.alpha = None         # entropy temperature
        self.tau = None           # soft target-update coefficient
        self.start_steps = None   # number of purely random warm-up steps
        self.steps = None         # environment steps taken so far
        self.policy = nn.Sequential()
        self.critic = nn.Sequential()
        self.critic_target = nn.Sequential()

    def act(self, state):
        """Sample a random action during warm-up, else follow the policy."""
        if self.start_steps > self.steps:
            action = self.env.action_space.sample()
        else:
            action = self.explore(state)
        return action

    def explore(self, state):
        """Stochastic action for *state* (byte image scaled to [0, 1])."""
        state = \
            torch.ByteTensor(state[None, ...]).to(self.device).float() / 255.0
        with torch.no_grad():
            action, _, _, _ = self.policy.sample(state)
        return action.item()

    def exploit(self, state):
        """Deterministic (greedy) action for *state*."""
        state = \
            torch.ByteTensor(state[None, ...]).to(self.device).float() / 255.0
        with torch.no_grad():
            _, _, _, action = self.policy.sample(state)
        return action.item()

    def calc_current_q(self, states, actions, rewards, next_states, dones):
        """Q-values of the taken actions under both critics.

        The unused parameters keep the signature parallel to
        calc_target_q so both can be called uniformly.
        """
        curr_q1, curr_q2 = self.critic(states)
        curr_q1 = curr_q1.gather(1, actions.long())
        curr_q2 = curr_q2.gather(1, actions.long())
        return curr_q1, curr_q2

    def calc_target_q(self, states, actions, rewards, next_states, dones):
        """Soft Bellman target: r + gamma_n * E_pi[min Q' - alpha*log pi]."""
        with torch.no_grad():
            next_actions, next_action_probs, log_next_action_probs, _ =\
                self.policy.sample(next_states)
            next_q1, next_q2 = self.critic_target(next_states)
            next_q = torch.min(next_q1, next_q2)
            next_q = next_action_probs * (
                next_q - self.alpha * log_next_action_probs)
            # NOTE(review): mean over the action dimension; since the
            # probability weights are already applied, a sum would be
            # the exact expectation -- confirm this scaling is intended.
            next_q = next_q.mean(dim=1).unsqueeze(-1)
        target_q = rewards + (1.0 - dones) * self.gamma_n * next_q
        return target_q

    def load_weights(self):
        """Copy shared weights into the local networks.

        Returns False when the shared store is not populated yet.
        """
        try:
            self.policy.load_state_dict(self.shared_weights['policy'])
            self.critic.load_state_dict(self.shared_weights['critic'])
            self.critic_target.load_state_dict(
                self.shared_weights['critic_target'])
            self.alpha = torch.tensor(
                self.shared_weights['alpha'], device=self.device)
            return True
        except KeyError:
            return False

    def save_weights(self):
        """Publish CPU copies of the local network weights and alpha."""
        self.shared_weights['policy'] = deepcopy(
            self.policy).cpu().state_dict()
        self.shared_weights['critic'] = deepcopy(
            self.critic).cpu().state_dict()
        self.shared_weights['critic_target'] = deepcopy(
            self.critic_target).cpu().state_dict()
        self.shared_weights['alpha'] = self.alpha.clone().detach().item()

    def __del__(self):
        # BUG FIX: writer starts as None and env is attached after
        # construction, so unconditional .close() calls raised
        # AttributeError during garbage collection of agents that were
        # never fully wired up.  Close only what actually exists.
        writer = getattr(self, 'writer', None)
        if writer is not None:
            writer.close()
        env = getattr(self, 'env', None)
        if env is not None:
            env.close()
|
import random
import pygame
from pygame.locals import *
import sys
import Grandmas_Game_Closet as Main
# RGB color constants shared by the game's drawing code.
RED = (255, 0, 0)
GREEN = (0, 255, 0)
DARKGREEN = (20, 100, 20)
BLUE = (0, 0, 255)
PURPLE = (255, 0, 255)
YELLOW = (255, 255, 0)
GREY = (100, 100, 100)
WHITE = (255, 255, 255)
NAVY = (60, 60, 100)
DARKGREY = (30, 30, 30)
BLACK = (0, 0, 0)
TAN = (222, 184, 135)
ORANGE = (255, 128, 0)
LIGHTSKIN = (249, 222, 147)
# Window dimensions in pixels.
WINWIDTH = 640
WINHEIGHT = 480
# Card image cache keyed by face+suit (e.g. "KHearts"); filled by loadimages().
IMAGES = {}
class Card(object):
    """A single card, used in a deck.

    attributes: suit, value, face, image
    """

    def __init__(self, suit, value, face):
        """Create a card of a given suit, value and face; when used
        with is_empty in a deck this can be used to pre-set the deck."""
        self.suit = suit
        self.value = value
        self.face = face
        self.image = self.facelookup()

    def __str__(self):
        """print string of card"""
        return "{0} of {1}".format(self.face, self.suit)

    def __repr__(self):
        """save string of card"""
        return "{0} of {1}".format(self.face, self.suit)

    def __gt__(self, other_card):
        """Compare by value; non-cards always compare False."""
        try:
            other_value = other_card.value
        except AttributeError:
            return False
        return self.value > other_value

    def __lt__(self, other_card):
        """Compare by value; non-cards always compare False."""
        try:
            other_value = other_card.value
        except AttributeError:
            return False
        return self.value < other_value

    def __eq__(self, other_card):
        """Compare by value; anything without a value is never equal."""
        try:
            other_value = other_card.value
        except AttributeError:
            return False
        return self.value == other_value

    def facelookup(self):
        """Return this card's pygame surface from the IMAGES cache.

        Falls back to the single-letter face key (e.g. "K" for "King")
        that loadimages() uses for file names."""
        try:
            return IMAGES[self.face + self.suit]
        except KeyError:
            return IMAGES[self.face[0] + self.suit]
class Deck(object):
    """A deck of cards, also containing the dealt hands of the deck.

    attributes: deck (list of Card), hands (list of Deck)
    """

    def __init__(self, num_players=0, is_shuffled=False, is_empty=False):
        """num_players determines the number of hands to deal.
        The zero default creates a central card pool of 52 cards; 1
        creates a single player deck of 52.  If is_shuffled is True the
        deck is shuffled on creation, otherwise it has to be shuffled
        manually.  If is_empty is True an empty deck is created."""
        suits = ['Hearts', 'Diamonds', 'Spades', 'Clubs']
        faces = ['Ace', '2', '3', '4', '5', '6', '7', '8', '9',
                 '10', 'Jack', 'Queen', 'King']
        # Ace is high (14) for the comparisons used in War.
        values = [14, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        self.deck = []
        self.hands = []
        if not is_empty:
            for i in range(13):
                face = faces[i]
                value = values[i]
                for suit in suits:
                    self.deck.append(Card(suit, value, face))
            if is_shuffled:
                self.shuffle()
            if num_players > 1:
                self.deal(num_players)

    def __repr__(self):
        """save string of deck"""
        # BUG FIX: __repr__ must return a str; returning the list
        # itself made repr(deck) raise TypeError.
        return str(self.deck)

    def __str__(self):
        """print string of deck"""
        return str(self.deck)

    def __len__(self):
        """return length of deck"""
        return len(self.deck)

    def __getitem__(self, index):
        """make deck indexable"""
        return self.deck[index]

    def __iter__(self):
        """make deck iterable"""
        # BUG FIX: __iter__ took a spurious ``index`` parameter and
        # returned a single element, so iter(deck) raised TypeError.
        return iter(self.deck)

    def shuffle(self):
        """shuffle self.deck in place"""
        return random.shuffle(self.deck)

    def deal(self, decks):
        """Deal self.deck round-robin into *decks* hands.

        Cards are dealt from the top while full rounds are possible;
        any remainder goes to the hand after the last one dealt to."""
        last_dealt = 0
        for a_player in range(decks):
            self.hands.append(Deck(is_empty=True))
        while len(self.deck) >= len(self.hands):
            for a_deck in range(len(self.hands)):
                self.hands[a_deck].deck.append(self.deck.pop(0))
                last_dealt = a_deck
        for a_card in range(len(self.deck)):
            self.hands[last_dealt].deck.append(self.deck.pop(0))
            last_dealt += 1

    def card(self):
        """return the top card of the deck"""
        return self.deck[0]

    def return_hands(self):
        """return the hands in the deck"""
        return self.hands

    def give_card(self, otherdeck):
        """give the top card of this deck to another deck; used for
        decks in self.hands"""
        otherdeck.deck.append(self.deck.pop(0))

    def move_card(self):
        """move the top card of the deck to the bottom; used for decks
        in self.hands"""
        self.deck.append(self.deck.pop(0))
def loadimages():
    """Populate the global IMAGES map with every card's face image.

    Keys are face + suit (e.g. "QHearts"); values are loaded pygame
    surfaces read from the cards/ directory."""
    global IMAGES
    suits = ("Hearts", "Diamonds", "Spades", "Clubs")
    faces = ("2", "3", "4", "5", "6", "7", "8", "9", "10",
             "J", "Q", "K", "A")
    for suit in suits:
        suit_lower = suit.lower()
        for face in faces:
            filename = "/" + face + "_of_" + suit_lower + ".png"
            IMAGES[face + suit] = pygame.image.load('cards' + filename)
def reset_war_globals(screen):
    """Reset the frame-rate globals, repaint the table background, and
    reload the card images into IMAGES."""
    global FPS, fps_clock
    FPS = 60
    bg_color = DARKGREEN
    fps_clock = pygame.time.Clock()
    # Wipe the whole window back to the table colour.
    screen.fill(bg_color)
    loadimages()
def play_war(player, computer, screen, stop):
    """Play one regular round: show both top cards and move the spoils.

    Returns the (possibly updated) `stop` flag; it becomes True when the
    two cards tie and a war is declared, which pauses normal play until
    the table is cleared via clear_screen()."""
    # card_index + 3 == 1: offset of the first face-up war card.
    card_index = -2
    screen.blit(pygame.transform.scale(player.card().image, CARDSIZE),
                PLAYEDCARD_P)
    screen.blit(pygame.transform.scale(computer.card().image, CARDSIZE),
                PLAYEDCARD_C)
    if player.card() == computer.card():
        # Equal values: go to war.
        declare_war(player, computer, card_index + 3, screen)
        stop = True
    elif player.card() > computer.card():
        # Player wins: cycle own card to the bottom, take the computer's.
        player.move_card()
        computer.give_card(player)
    else:
        computer.move_card()
        player.give_card(computer)
    return stop
def clear_screen(player, computer, screen):
    """Cover the war-card positions with the winner's spoils.

    After a war resolves, the captured cards sit at the bottom of the
    winning deck (global WIN selects which one); blit the last six of
    them face up over the war-card slots, mirroring the layout for the
    player vs. the computer side."""
    if WIN:
        winner = player
        spots = (WAR_PLAYED_P, WAR_PLAYED_C, COVERED_CARD2_P,
                 COVERED_CARD2_C, COVERED_CARD1_P, COVERED_CARD1_C)
    else:
        winner = computer
        spots = (WAR_PLAYED_C, WAR_PLAYED_P, COVERED_CARD2_C,
                 COVERED_CARD2_P, COVERED_CARD1_C, COVERED_CARD1_P)
    # depth 1 is the very bottom card of the winning deck.
    for depth, spot in enumerate(spots, start=1):
        scaled = pygame.transform.scale(winner.deck[-depth].image, CARDSIZE)
        screen.blit(scaled, spot)
def render_screen(player, computer, plen, clen, screen):
    """Redraw both deck counters and detect the end of the game.

    plen/clen act as one-shot flags: on the first call (value 0) the
    counter text rectangles are captured into module globals so they can
    be blanked on every later frame, then the flags are set non-zero.
    Returns the updated (plen, clen) pair. Sets the global game_over
    when either deck has run out of cards."""
    global game_over, clear_text_p, clear_text_c
    player_count = CARD_FONT.render(
        "Player's Deck: {}".format(len(player)), True, BLACK,
        DARKGREEN)
    computer_count = CARD_FONT.render(
        "Computer's Deck: {}".format(len(computer)),
        True, BLACK, DARKGREEN)
    if plen == 0:
        # First frame: remember where the player counter is drawn.
        clear_text_p = player_count.get_rect()
        clear_text_p.center = (130, 415)
        plen = 10
    if clen == 0:
        clear_text_c = computer_count.get_rect()
        clear_text_c.center = (500, 45)
        clen = 10
    # Blank the previous counter text before drawing the new numbers.
    pygame.draw.rect(screen, DARKGREEN, clear_text_p)
    pygame.draw.rect(screen, DARKGREEN, clear_text_c)
    player_rect = player_count.get_rect()
    player_rect.center = (130, 415)
    computer_rect = computer_count.get_rect()
    computer_rect.center = (500, 45)
    pygame.draw.rect(screen, DARKGREEN, computer_rect)
    screen.blit(player_count, player_rect)
    screen.blit(computer_count, computer_rect)
    # An empty deck on either side ends the game.
    if not player or not computer:
        game_over = True
    return plen, clen
def deal(screen):
    """Draw the initial table: the play button and both face-down decks."""
    screen.blit(play_text, play_rect)
    screen.blit(CARDBACK, PLAY_DECK)
    screen.blit(CARDBACK, COMP_DECK)
def war(screen, ind):
    """Run the main event loop for the card game War.

    `ind` ("independent") controls the return behaviour: when falsy the
    function returns end_game()'s result once the game finishes so a
    caller can chain games; otherwise it loops until quit/menu."""
    global CARD_FONT
    global reveal_rect
    global reveal_text
    global play_rect, play_text
    global pause
    global game_over
    pygame.init()
    CARD_FONT = pygame.font.Font('freesansbold.ttf', 25)
    # Shuffled 52-card deck dealt into two hands: player and computer.
    game_deck = Deck(2, True)
    hands = game_deck.return_hands()
    player_deck = hands[0]
    computer_deck = hands[1]
    # One-shot flags consumed by render_screen (0 = rects not captured).
    plen = 0
    clen = 0
    menu_rect, instr_rect = Main.menu_bar()
    reveal_text = CARD_FONT.render("Reveal Cards", True, WHITE, DARKGREEN)
    reveal_rect = reveal_text.get_rect()
    reveal_rect.center = (140, 50)
    play_text = CARD_FONT.render("Play", True, WHITE, DARKGREEN)
    play_rect = play_text.get_rect()
    play_rect.center = (500, 265)
    declared_war = False
    game_over = False
    pause = False
    deal(screen)
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == MOUSEBUTTONUP and \
                    menu_rect.collidepoint(event.pos):
                Main.menu()
            elif event.type == MOUSEBUTTONUP and \
                    instr_rect.collidepoint(event.pos):
                # Show the instructions overlay, then repaint the table.
                instructions()
                reset_war_globals(screen)
                deal(screen)
                Main.menu_bar()
            if not game_over:
                if declared_war:
                    # A war was just revealed: give the player time to see
                    # the result, then wipe the war-card areas clean.
                    pygame.time.wait(2000)
                    pygame.draw.rect(screen, DARKGREEN, COVERED_CARD2_P)
                    pygame.draw.rect(screen, DARKGREEN, COVERED_CARD1_P)
                    pygame.draw.rect(screen, DARKGREEN, COVERED_CARD2_C)
                    pygame.draw.rect(screen, DARKGREEN, COVERED_CARD1_C)
                    pygame.draw.rect(screen, DARKGREEN, WAR_PLAYED_C)
                    pygame.draw.rect(screen, DARKGREEN, WAR_PLAYED_P)
                    pygame.draw.rect(screen, DARKGREEN, PLAYEDCARD_P)
                    pygame.draw.rect(screen, DARKGREEN, PLAYEDCARD_C)
                    pygame.draw.rect(screen, DARKGREEN, reveal_rect)
                    screen.blit(play_text, play_rect)
                    declared_war = False
                    pause = False
                declared_war, pause, player_deck, \
                    computer_deck = next_round(screen, event,
                                               declared_war, pause,
                                               player_deck,
                                               computer_deck)
                plen, clen = render_screen(player_deck, computer_deck,
                                           plen, clen, screen)
            else:
                has_won = end_game(computer_deck, screen)
                if event.type == KEYDOWN and event.key == K_y:
                    Main.resetglobals()
                    reset_war_globals(screen)
                    new_game(screen)
                elif event.type == KEYDOWN and event.key == K_n:
                    Main.menu()
                if not ind:
                    return has_won
        pygame.display.flip()
def next_round(screen, event, declared_war, stop,
               player_deck, computer_deck):
    """Translate one input event into game actions.

    Mouse: 'Play' plays a round (unless paused by a pending war) and
    'Reveal Cards' clears the war spoils and unpauses. Keyboard: Enter
    plays, and R (or Enter while paused) reveals. Returns the updated
    (declared_war, stop, player_deck, computer_deck) tuple."""
    if event.type == MOUSEBUTTONUP:
        if play_rect.collidepoint(event.pos) and not stop:
            stop = play_war(player_deck, computer_deck, screen, stop)
        elif reveal_rect.collidepoint(event.pos):
            clear_screen(player_deck, computer_deck, screen)
            declared_war = True
            stop = False
    elif event.type == KEYDOWN:
        if event.key == K_RETURN and not stop:
            stop = play_war(player_deck, computer_deck, screen, stop)
        elif event.key == K_r or event.key == K_RETURN:
            # Enter falls through to reveal when a war is pending (stop).
            clear_screen(player_deck, computer_deck, screen)
            declared_war = True
            stop = False
    return declared_war, stop, player_deck, computer_deck
def end_game(computer, screen):
    """Show the win/lose banner and the play-again prompt.

    The player wins when the computer's deck is empty. Returns True if
    the player LOST ("not won"), False if the player won."""
    if computer:
        colors = RED
        win_text = CARD_FONT.render(
            "You played to the end, but lost. Too bad", True, RED, BLACK)
        # Blank the player's (now empty) deck position.
        pygame.draw.rect(screen, DARKGREEN, PLAY_DECK + CARDSIZE)
        not_won = True
    else:
        colors = GREEN
        win_text = CARD_FONT.render("You played to the end, and won!. "
                                    "Lucky!", True, GREEN, BLACK)
        pygame.draw.rect(screen, DARKGREEN, COMP_DECK + CARDSIZE)
        not_won = False
    win_rect = win_text.get_rect()
    win_rect.center = (330, 240)
    screen.blit(win_text, win_rect)
    win_text = CARD_FONT.render("Play Again? 'Y' or 'N'", True,
                                colors, BLACK)
    win_rect = win_text.get_rect()
    win_rect.center = (330, 290)
    screen.blit(win_text, win_rect)
    return not_won
def determine_card(deck, index):
    """Pick the card to compare during a war, tolerating short decks.

    Prefers the card two past `index`, then one past it; as a last
    resort tries `index` itself and returns None when even that is out
    of range."""
    for offset in (2, 1):
        if len(deck) - 1 >= index + offset:
            return deck[index + offset]
    try:
        return deck[index]
    except IndexError:
        return None
def declare_war(deck1, deck2, index, screen):
    """Resolve one war: two cards face down, one face up, winner takes all.

    deck1 is the player, deck2 the computer. `index` is the offset of
    the face-up war card; ties recurse with index + 3. Sets the globals
    game_over/WIN when either side cannot field a war card, or when the
    loser drops to three cards or fewer."""
    global reveal_text, reveal_rect, play_rect, game_over, WIN
    deck1_playedcard = determine_card(deck1, index)
    deck2_playedcard = determine_card(deck2, index)
    if deck1_playedcard is None:
        # Player cannot field a war card: computer wins outright.
        game_over = True
        WIN = False
        return None
    elif deck2_playedcard is None:
        game_over = True
        WIN = True
        return None
    pygame.draw.rect(screen, DARKGREEN, play_rect)
    screen.blit(CARDBACK, COVERED_CARD1_P)
    screen.blit(CARDBACK, COVERED_CARD1_C)
    screen.blit(CARDBACK, COVERED_CARD2_P)
    screen.blit(CARDBACK, COVERED_CARD2_C)
    screen.blit(pygame.transform.scale(deck1_playedcard.image,
                                       CARDSIZE), WAR_PLAYED_P)
    screen.blit(pygame.transform.scale(deck2_playedcard.image,
                                       CARDSIZE), WAR_PLAYED_C)
    if deck1_playedcard > deck2_playedcard:
        if len(deck2) <= 3:
            game_over = True
        # Transfer the tied card plus the war stake to the winner.
        for a_card in range(index + 3):
            try:
                deck1.move_card()
                deck2.give_card(deck1)
                WIN = True
            except IndexError:
                # Loser ran out mid-transfer; keep what was taken.
                pass
    elif deck1_playedcard < deck2_playedcard:
        if len(deck1) <= 3:
            game_over = True
        for a_card in range(index + 3):
            try:
                deck2.move_card()
                deck1.give_card(deck2)
                WIN = False
            except IndexError:
                pass
    else:
        # War cards tied again: escalate with three more cards at stake.
        declare_war(deck1, deck2, index + 3, screen)
    screen.blit(reveal_text, reveal_rect)
def instructions():
    """Build the instruction text lines for War and hand them to
    Main.blit_instr for rendering ("__" entries render as spacers)."""
    inst = ("Instructions:", "1: Click 'PLAY' or press Enter to play"
            "a card", "2: If you have the higher value card, "
            "you win both cards", "3: Aces are high", "__",
            "4: If the value of the cards are equal, war is declared",
            "5: Two cards are dealt face down, and a single car "
            "face up. The player with the high valued face up card"
            "wins all eight cards.", "6: Click 'REVEAL CARDS' or "
            "press R to reveal cards and continue play", "__", "__",
            "PRESS 'Q' TO RETURN TO GAME")
    Main.blit_instr(inst)
def new_game(screen, independent=True):
    """Start a new game of War.

    Resets global UI state, lays out all the card rectangles on the
    table, then enters the war() loop. When `independent` is False the
    result of end_game() is propagated back to the caller."""
    global CARDSIZE, CARDBACK, PLAY_DECK, COMP_DECK
    global PLAYEDCARD_C, PLAYEDCARD_P, COVERED_CARD1_P
    global COVERED_CARD1_C, COVERED_CARD2_P, COVERED_CARD2_C
    global WAR_PLAYED_P, WAR_PLAYED_C
    pygame.init()
    Main.resetglobals()
    reset_war_globals(screen)
    # pygame defaults
    CARDSIZE = (116, 162)
    CARDBACK = pygame.image.load('cards/cardback.png')
    PLAY_DECK = (50, 240)
    COMP_DECK = (460, 70)
    # draw.rect returns the bounding Rect; these are reused as blit
    # targets and as areas to blank between rounds.
    PLAYEDCARD_C = pygame.draw.rect(screen, DARKGREEN,
                                    (260, 70, 116, 162))
    PLAYEDCARD_P = pygame.draw.rect(screen, DARKGREEN,
                                    (260, 240, 116, 162))
    COVERED_CARD1_P = pygame.draw.rect(screen, DARKGREEN,
                                       (285, 240, 116, 162))
    COVERED_CARD1_C = pygame.draw.rect(screen, DARKGREEN,
                                       (240, 70, 116, 162))
    COVERED_CARD2_P = pygame.draw.rect(screen, DARKGREEN,
                                       (305, 240, 116, 162))
    COVERED_CARD2_C = pygame.draw.rect(screen, DARKGREEN,
                                       (220, 70, 116, 162))
    WAR_PLAYED_P = pygame.draw.rect(screen, DARKGREEN,
                                    (325, 240, 116, 162))
    WAR_PLAYED_C = pygame.draw.rect(screen, DARKGREEN,
                                    (200, 70, 116, 162))
    pygame.display.flip()
    # play war!
    has_won = war(screen, independent)
    return has_won
|
import hashlib
import hmac
import json
import logging
from metagov.core.plugin_manager import Registry, Parameters, VotingStandard
import metagov.plugins.discourse.schemas as Schemas
import requests
from metagov.core.errors import PluginErrorInternal
from metagov.core.models import GovernanceProcess, Plugin, AuthType, ProcessStatus
logger = logging.getLogger(__name__)
# Event types this plugin can forward to the Driver.
EVENT_POST_CREATED = "post_created"
EVENT_TOPIC_CREATED = "topic_created"
EVENT_USER_FIELDS_CHANGED = "user_fields_changed"
"""
TODO: add actions and events for "user actions":
LIKE = 1
WAS_LIKED = 2
BOOKMARK = 3
NEW_TOPIC = 4
REPLY = 5
RESPONSE= 6
MENTION = 7
QUOTE = 9
EDIT = 11
NEW_PRIVATE_MESSAGE = 12
GOT_PRIVATE_MESSAGE = 13
"""
@Registry.plugin
class Discourse(Plugin):
    """Metagov plugin for a Discourse forum.

    All requests are authenticated with the configured admin API key, so
    actions are taken on behalf of that bot user. Incoming webhooks are
    verified against ``webhook_secret`` (HMAC-SHA256) before any event
    is forwarded to the Driver."""
    name = "discourse"
    auth_type = AuthType.API_KEY
    config_schema = {
        "type": "object",
        "additionalProperties": False,
        "properties": {
            "api_key": {
                "type": "string",
                "description": "Discourse API key for a bot user that is an admin. Actions will be taken on behalf of this user.",
            },
            "server_url": {"type": "string", "description": "URL of the Discourse server"},
            "webhook_secret": {
                "type": "string",
                "description": "A random string. When creating the Metagov webhook in Discourse, enter this string under 'secret.'",
            },
            "webhook_slug": {"type": "string"},
        },
        "required": ["api_key", "server_url", "webhook_secret"],
    }

    class Meta:
        proxy = True

    def initialize(self):
        """Fetch the community title and seed the cached user list."""
        resp = requests.get(f"{self.config['server_url']}/about.json")
        response = resp.json()
        community_name = response.get("about").get("title")
        logger.info(f"Initialized Discourse plugin for community {community_name}")
        self.state.set("community_name", community_name)
        self.store_user_list()

    def construct_post_url(self, post):
        """Return the public URL of a post dict as returned by the API."""
        return f"{self.config['server_url']}/t/{post['topic_slug']}/{post['topic_id']}/{post['post_number']}"

    def construct_topic_url(self, topic):
        """Return the public URL of a topic dict as returned by the API."""
        return f"{self.config['server_url']}/t/{topic['slug']}/{topic['id']}"

    def construct_post_response(self, post):
        """Shape a created post into the plugin's standard response dict."""
        return {"url": self.construct_post_url(post), "topic_id": post["topic_id"], "post_id": post["id"]}

    def discourse_request(self, method, route, json=None, data=None):
        """Send an authenticated request to the Discourse API.

        Returns the decoded JSON body (None for empty responses) and
        raises PluginErrorInternal on any non-2xx response."""
        url = f"{self.config['server_url']}/{route}"
        logger.info(f"{method} {url}")
        headers = {"Api-Key": self.config["api_key"]}
        resp = requests.request(method, url, headers=headers, json=json, data=data)
        if not resp.ok:
            logger.error(f"{resp.status_code} {resp.reason}")
            logger.error(resp.request.body)
            raise PluginErrorInternal(resp.text)
        if resp.content:
            return resp.json()
        return None

    @Registry.action(
        slug="create-message",
        description="Start a new private message thread",
        input_schema=Schemas.send_message_parameters,
        output_schema=Schemas.create_post_or_topic_response,
    )
    def create_message(self, parameters):
        """Send a private message, or a regular reply if topic_id is set."""
        # Discourse expects a comma-separated recipient string.
        parameters["target_recipients"] = ",".join(parameters.pop("target_usernames"))
        if parameters.get("topic_id"):
            parameters["archetype"] = "regular"
        else:
            parameters["archetype"] = "private_message"
        post = self.discourse_request("POST", "posts.json", json=parameters)
        return self.construct_post_response(post)

    @Registry.action(
        slug="create-post",
        description="Create a new post",
        input_schema=Schemas.create_post_parameters,
        output_schema=Schemas.create_post_or_topic_response,
    )
    def create_post(self, parameters):
        """Create a post in an existing topic."""
        post = self.discourse_request("POST", "posts.json", json=parameters)
        return self.construct_post_response(post)

    @Registry.action(
        slug="create-topic",
        description="Create a new topic",
        input_schema=Schemas.create_topic_parameters,
        output_schema=Schemas.create_post_or_topic_response,
    )
    def create_topic(self, parameters):
        """Create a new topic (Discourse creates topics via posts.json)."""
        post = self.discourse_request("POST", "posts.json", json=parameters)
        return self.construct_post_response(post)

    @Registry.action(
        slug="delete-post",
        description="Delete a post",
        input_schema=Schemas.delete_post_or_topic_parameters,
        output_schema=None,
    )
    def delete_post(self, parameters):
        """Delete the post with the given id."""
        self.discourse_request("DELETE", f"posts/{parameters['id']}")
        return {}

    @Registry.action(
        slug="delete-topic",
        description="Delete a topic",
        input_schema=Schemas.delete_post_or_topic_parameters,
        output_schema=None,
    )
    def delete_topic(self, parameters):
        """Delete the topic with the given id."""
        self.discourse_request("DELETE", f"t/{parameters['id']}.json")
        return {}

    @Registry.action(
        slug="recover-post",
        description="Recover a deleted post",
        input_schema=Schemas.delete_post_or_topic_parameters,
        output_schema=None,
    )
    def recover_post(self, parameters):
        """Un-delete the post with the given id."""
        self.discourse_request("PUT", f"posts/{parameters['id']}/recover")
        return {}

    @Registry.action(
        slug="recover-topic",
        description="Recover a deleted topic",
        input_schema=Schemas.delete_post_or_topic_parameters,
        output_schema=None,
    )
    def recover_topic(self, parameters):
        """Un-delete the topic with the given id."""
        self.discourse_request("PUT", f"t/{parameters['id']}/recover")
        return {}

    @Registry.action(
        slug="lock-post",
        description="Lock or unlock a post on discourse",
        input_schema=Schemas.lock_post_parameters,
        output_schema=Schemas.lock_post_response,
    )
    def lock_post(self, parameters):
        """Set the locked state of a post to parameters['locked']."""
        post_id = parameters["id"]
        # The locked endpoint expects form data with a JSON-encoded bool.
        data = {"locked": json.dumps(parameters["locked"])}
        return self.discourse_request("PUT", f"posts/{post_id}/locked", data=data)

    def validate_request_signature(self, request):
        """Verify a webhook's HMAC signature and origin instance.

        Raises PluginErrorInternal if the signature header is missing,
        does not match the shared secret, or the sending instance is not
        the configured server."""
        event_signature = request.headers.get("X-Discourse-Event-Signature")
        if not event_signature:
            raise PluginErrorInternal("Missing event signature")
        key = bytes(self.config["webhook_secret"], "utf-8")
        string_signature = hmac.new(key, request.body, hashlib.sha256).hexdigest()
        expected_signature = f"sha256={string_signature}"
        # compare_digest avoids leaking timing information.
        if not hmac.compare_digest(event_signature, expected_signature):
            raise PluginErrorInternal("Invalid signature header")
        instance = request.headers["X-Discourse-Instance"]
        if instance != self.config["server_url"]:
            raise PluginErrorInternal("Unexpected X-Discourse-Instance")

    def store_user_list(self):
        """Cache the full admin record of every active user in state.

        TODO: paginate the request; this covers only the first page of
        active users on large forums."""
        # FIX: dropped an extraneous f-string prefix (no placeholders).
        response = self.discourse_request("GET", "admin/users/list/active.json")
        logger.info(f"Fetching {len(response)} users...")
        users = {}
        for user in response:
            # FIX: renamed local `id` so it no longer shadows the builtin.
            user_id = str(user["id"])
            users[user_id] = self.discourse_request("GET", f"admin/users/{user_id}.json")
        self.state.set("users", users)
        logger.info(f"Saved {len(response)} users in state.")

    @Registry.webhook_receiver(
        event_schemas=[
            {"type": EVENT_POST_CREATED, "schema": Schemas.post_topic_created_event},
            {"type": EVENT_TOPIC_CREATED, "schema": Schemas.post_topic_created_event},
            {"type": EVENT_USER_FIELDS_CHANGED},
        ]
    )
    def process_discourse_webhook(self, request):
        """Validate an incoming Discourse webhook and forward it as an event."""
        self.validate_request_signature(request)
        event = request.headers.get("X-Discourse-Event")
        body = json.loads(request.body)
        logger.info(f"Received event '{event}' from Discourse")
        # FIX: compare against the declared EVENT_* constants instead of
        # repeating the string literals.
        if event == EVENT_POST_CREATED:
            post = body.get("post")
            data = {
                "raw": post["raw"],
                "topic_id": post["topic_id"],
                "id": post["id"],
                "url": self.construct_post_url(post),
            }
            initiator = {"user_id": post["username"], "provider": "discourse"}
            self.send_event_to_driver(event_type=EVENT_POST_CREATED, initiator=initiator, data=data)
        elif event == EVENT_TOPIC_CREATED:
            topic = body.get("topic")
            data = {
                "title": topic["title"],
                "id": topic["id"],
                "tags": topic["tags"],
                "category": topic["category_id"],
                "url": self.construct_topic_url(topic),
            }
            initiator = {"user_id": topic["created_by"]["username"], "provider": "discourse"}
            self.send_event_to_driver(event_type=EVENT_TOPIC_CREATED, initiator=initiator, data=data)
        elif event == "user_updated":
            # "user_updated" is Discourse's raw event name; it is only
            # re-emitted as EVENT_USER_FIELDS_CHANGED when the custom
            # user_fields actually changed.
            updated_user = body.get("user")
            # Get the old user record from state
            user_map = self.state.get("users")
            user_id = str(updated_user["id"])
            old_user = user_map.get(user_id)
            # Update state so that we have the latest user map
            user_map[user_id] = updated_user
            self.state.set("users", user_map)
            # if `user_fields` changed, send an event to the Driver
            if not old_user or old_user["user_fields"] != updated_user["user_fields"]:
                data = {
                    "id": updated_user["id"],
                    "username": updated_user["username"],
                    "user_fields": updated_user["user_fields"],
                    "old_user_fields": old_user["user_fields"] if old_user else None,
                }
                initiator = {"user_id": updated_user["username"], "provider": "discourse"}
                self.send_event_to_driver(event_type=EVENT_USER_FIELDS_CHANGED, initiator=initiator, data=data)
"""
GOVERNANCE PROCESSES
"""
@Registry.governance_process
class DiscoursePoll(GovernanceProcess):
    """Governance process backed by a native Discourse [poll].

    start() posts a BBCode poll (as a new topic or a reply into an
    existing one), update()/close() read the poll back from Discourse,
    and vote tallies are mirrored into ``outcome["votes"]``."""
    name = "poll"
    plugin_name = "discourse"
    input_schema = VotingStandard.create_input_schema(
        include=["title", "options", "details", "closing_at"],
        extra_properties={
            "topic_id": {"type": "integer", "description": "required if creating the poll as a new post."},
            "category": {
                "type": "integer",
                "description": "optional if creating the poll as a new topic, and ignored if creating it as a new post.",
            },
            "poll_type": {"type": "string", "enum": ["regular", "multiple", "number"], "default": "regular"},
            "public": {"type": "boolean", "description": "whether votes are public"},
            "results": {
                "type": "string",
                "enum": ["always", "on_vote", "on_close", "staff_only"],
                "description": "when to show results",
            },
            "min": {
                "type": "integer",
                "description": "Must be at least 1. For 'number' poll type, this is the minimum number. For 'multiple' poll type, this is the minumum number of options that a user can vote for. For 'regular' poll type, this option is ignored.",
            },
            "max": {
                "type": "integer",
                "description": "Must be at least 1, but less than or equal with the number of options. For 'number' poll type, this is the maximum number. For 'multiple' poll type, this is the maximum number of options that a user can vote for. For 'regular' poll type, this option is ignored.",
            },
            "step": {
                "type": "integer",
                "description": "For 'number' poll type, the step in between numbers. Ignored for other poll types. The minimum step value is 1.",
            },
            "chart_type": {"type": "string", "enum": ["pie", "bar"]},
            "groups": {"type": "array", "items": {"type": "string"}},
        },
        required=["title"],
    )

    class Meta:
        proxy = True

    def start(self, parameters: Parameters) -> None:
        """Create the poll post on Discourse and mark the process PENDING.

        Raises PluginErrorInternal when options are missing for non-number
        polls or when Discourse rejects the post."""
        discourse_server_url = self.plugin_inst.config["server_url"]
        # NOTE(review): `url` is only logged below; the request itself
        # goes to the "posts.json" route via discourse_request.
        url = f"{discourse_server_url}/posts.json"
        poll_type = parameters.poll_type
        if poll_type != "number" and not parameters.options:
            raise PluginErrorInternal(f"Options are required for poll type {poll_type}")
        # Collect the optional [poll ...] BBCode attributes.
        optional_params = []
        if parameters.closing_at:
            optional_params.append(f"close={parameters.closing_at}")
        if parameters.groups:
            optional_params.append(f"groups={','.join(parameters.groups)}")
        if parameters.public is True:
            optional_params.append("public=true")
        if parameters.chart_type:
            optional_params.append(f"chartType={parameters.chart_type}")
        for p in ["min", "max", "step", "results"]:
            if getattr(parameters, p):
                optional_params.append(f"{p}={getattr(parameters, p)}")
        options = "".join([f"* {opt}\n" for opt in parameters.options]) if poll_type != "number" else ""
        raw = f"""
{parameters.details or ""}
[poll type={poll_type} {' '.join(optional_params)}]
# {parameters.title}
{options}
[/poll]
"""
        payload = {"raw": raw, "title": parameters.title}
        if parameters.category is not None:
            payload["category"] = parameters.category
        if parameters.topic_id is not None:
            payload["topic_id"] = parameters.topic_id
        logger.info(payload)
        logger.info(url)
        response = self.plugin_inst.discourse_request("POST", "posts.json", json=payload)
        if response.get("errors"):
            errors = response["errors"]
            raise PluginErrorInternal(str(errors))
        poll_url = self.plugin_inst.construct_post_url(response)
        logger.info(f"Poll created at {poll_url}")
        logger.debug(response)
        # Remember where the poll lives so update()/close() can find it.
        self.state.set("post_id", response.get("id"))
        self.state.set("topic_id", response.get("topic_id"))
        self.state.set("topic_slug", response.get("topic_slug"))
        self.outcome = {"poll_url": poll_url}  # this gets serialized and returned
        self.status = ProcessStatus.PENDING.value
        self.save()

    def update(self):
        """
        We make a request to Discourse EVERY time, here, so that we can catch cases where the poll was closed
        manually by a user. Would be simplified if we disallow that, and instead this function could just
        check if `closing_at` has happened yet (if set) and call close() if it has.
        """
        post_id = self.state.get("post_id")
        if post_id is None:
            raise PluginErrorInternal(f"Missing post ID, can't update {self}")
        response = self.plugin_inst.discourse_request("GET", f"posts/{post_id}.json")
        poll = response["polls"][0]
        self.update_outcome_from_discourse_poll(poll)

    def close(self):
        """
        Invoked by the Driver to manually close the poll. This would be used in cases where `closing_at` param is not set,
        or in cases where the Driver wants to close the poll early (before closing_at time).
        """
        post_id = self.state.get("post_id")
        data = {"post_id": post_id, "poll_name": "poll", "status": "closed"}
        response = self.plugin_inst.discourse_request("PUT", "polls/toggle_status", data=data)
        poll = response["poll"]
        self.update_outcome_from_discourse_poll(poll)
        # Lock the post
        # self.plugin_inst.lock_post({"locked": True, "id": post_id})

    def update_outcome_from_discourse_poll(self, poll):
        """Save changes to outcome and state if changed"""
        dirty = False
        votes = self.outcome.get("votes", {})
        for opt in poll["options"]:
            # Option text (HTML) is used as the vote key.
            key = opt["html"]
            if not opt.get("votes"):
                # votes arent visible right now (depends on 'results' input parameters)
                continue
            val = opt["votes"]
            if votes.get(key) != val:
                votes[key] = val
                dirty = True
        if poll["status"] == "closed":
            self.status = ProcessStatus.COMPLETED.value
            dirty = True
        if dirty:
            logger.info(f"{self}: {self.outcome}")
            self.outcome["votes"] = votes
            self.save()
|
import re
import requests
def get(url: str) -> dict:
    """Scrape the title and video URLs from a peiyinxiu.com page.

    Returns {"title": ..., "videos": [...]} on success, or
    {"msg": ...} when the page could not be fetched."""
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36"
    }
    rep = requests.get(url, headers=headers, timeout=10)
    if rep.status_code != 200:
        return {"msg": "获取失败"}
    html = rep.text
    return {
        "title": re.findall(r'data-title="(.*?)"', html)[0],
        "videos": re.findall(r"\sfilmurl: '(.*?)',", html),
    }
if __name__ == "__main__":
    # Manual smoke test against a known peiyinxiu.com post (needs network).
    url = "http://peiyinxiu.com/m/127066455"
    print(get(url))
|
# Automatically created by: shub deploy
from setuptools import setup, find_packages
# Package metadata consumed by shub when deploying to Scrapy Cloud.
setup(
    name='hubstorage-frontera',
    version='0.1',
    packages=find_packages(),
    install_requires=[
        'frontera',
        'hubstorage',
        'requests',
        'six',
    ],
)
|
import blpapi
import datetime
class BbDownloader:
    """Downloads Bloomberg intraday tick data (ASK/BID/TRADE) through the
    blpapi desktop API and writes it to a tab-separated text file.

    NOTE(review): this class uses Python 2 print statements; it will not
    run under Python 3 without porting."""
    def __init__(self):
        # Output file handle; replaced by the real file object in
        # write_tick_data().
        self.output_file = ""
        # Pre-built blpapi element names used when walking responses.
        self.TICK_DATA = blpapi.Name("tickData")
        self.COND_CODE = blpapi.Name("conditionCodes")
        self.TICK_SIZE = blpapi.Name("size")
        self.TIME = blpapi.Name("time")
        self.TYPE = blpapi.Name("type")
        self.VALUE = blpapi.Name("value")
        self.RESPONSE_ERROR = blpapi.Name("responseError")
        self.CATEGORY = blpapi.Name("category")
        self.MESSAGE = blpapi.Name("message")
        self.SESSION_TERMINATED = blpapi.Name("SessionTerminated")
    def write_tick_data(self, output_filename, security, start_date, end_date):
        """Open a local blpapi session and dump all ticks for `security`
        between start_date and end_date (GMT) into output_filename."""
        self.output_file = open(output_filename, "w")
        # Fill SessionOptions
        sessionOptions = blpapi.SessionOptions()
        sessionOptions.setServerHost("localhost")
        sessionOptions.setServerPort(8194)
        session = blpapi.Session(sessionOptions)
        # Start a Session
        if not session.start():
            print "Failed to start session."
            return
        try:
            # Open service to get historical data from
            if not session.openService("//blp/refdata"):
                print "Failed to open //blp/refdata"
                return
            self.sendIntradayTickRequest(session, security, start_date, end_date)
            # wait for events from session.
            self.eventLoop(session)
        finally:
            self.output_file.flush()
            session.stop()
        print "Finished"
    def sendIntradayTickRequest(self, session, security, start_date, end_date):
        """Build and send an IntradayTickRequest for ASK/BID/TRADE events."""
        refDataService = session.getService("//blp/refdata")
        request = refDataService.createRequest("IntradayTickRequest")
        # only one security/eventType per request
        request.set("security", security)
        # Add fields to request
        eventTypes = request.getElement("eventTypes")
        for event in ["ASK", "BID", "TRADE"]:
            eventTypes.appendValue(event)
        # All times are in GMT
        request.set("startDateTime", start_date)
        request.set("endDateTime", end_date)
        request.set("includeConditionCodes", True)
        print "Sending Request:", request
        session.sendRequest(request)
    def eventLoop(self, session):
        """Consume session events until the final RESPONSE (or session
        termination) arrives, writing ticks as partial responses stream in."""
        done = False
        while not done:
            # nextEvent() method below is called with a timeout to let
            # the program catch Ctrl-C between arrivals of new events
            event = session.nextEvent(500)
            if event.eventType() == blpapi.Event.PARTIAL_RESPONSE:
                self.processResponseEvent(event)
            elif event.eventType() == blpapi.Event.RESPONSE:
                self.processResponseEvent(event)
                done = True
            else:
                for msg in event:
                    if event.eventType() == blpapi.Event.SESSION_STATUS:
                        if msg.messageType() == self.SESSION_TERMINATED:
                            done = True
    def processResponseEvent(self, event):
        """Print any response errors and hand good messages to processMessage."""
        for msg in event:
            if msg.hasElement(self.RESPONSE_ERROR):
                print msg.getElement(self.RESPONSE_ERROR)
                continue
            self.processMessage(msg)
    def processMessage(self, msg):
        """Write each tick in the message as a tab-separated line:
        time, type, value, size, condition codes."""
        data = msg.getElement(self.TICK_DATA).getElement(self.TICK_DATA)
        for item in data.values():
            time = item.getElementAsDatetime(self.TIME)
            timeString = item.getElementAsString(self.TIME)
            type = item.getElementAsString(self.TYPE)
            value = item.getElementAsFloat(self.VALUE)
            size = item.getElementAsInteger(self.TICK_SIZE)
            if item.hasElement(self.COND_CODE):
                cc = item.getElementAsString(self.COND_CODE)
            else:
                cc = ""
            line = format("%s\t%s\t%.3f\t\t%d\t%s\n" % (timeString, type, value, size, cc))
            self.output_file.write(line)
# Example run: dump one day of SPX index ticks (times are GMT).
bbdl = BbDownloader()
bbdl.write_tick_data(output_filename="spx.txt",
                     security="SPX INDEX",
                     start_date="2013-06-24T00:00:00",
                     end_date="2013-06-24T23:00:00")
|
# Copyright (c) 2021 Cybereason Inc
# This code is licensed under MIT license (see LICENSE.md for details)
# Type identifiers for monitored targets.
MT_DATABASE = 'DATABASE'
MT_DATABASE_SYSTEM = 'DATABASE_SYSTEM'
MT_INSTANCE = 'INSTANCE'
|
from .roleplay.main import * |
from .ascii_factory import num2str
def init(screen):
    """Cache the curses screen size in module globals for later drawing."""
    global width, height
    height, width = screen.getmaxyx()
def draw_speedmeter(screen, state):
    """Draw the boxed MPH speedometer near the top-right corner.

    The current speed is rendered as ASCII-art digits (num2str) and
    spliced between the left/right border characters of the box."""
    margin_y, margin_x = 4, 4
    hud = ['▛▀▀▀▀▀▀▀▀▀▀▀▀▀▜',
           '▍ ▐',
           '▍ ▐',
           '▍ ▐',
           '▍ ▐',
           '▙▃▃▃▃▃▃▃▃▃▃▃▃▃▟',
           '▍ MPH ▐',
           '▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀']
    digits = num2str(state['speed'])
    # Replace the interior of rows 1..4 with the speed digits, keeping
    # the border characters on either side.
    for row, (frame_line, digit_line) in enumerate(zip(hud[1:-1], digits),
                                                   start=1):
        hud[row] = frame_line[0] + digit_line + frame_line[-1]
    origin_x = width - margin_x - len(hud[0])
    for row, line in enumerate(hud):
        screen.addstr(margin_y + row, origin_x, line)
def draw_hud(screen, state):
    """Render all HUD widgets (currently just the speedometer)."""
    draw_speedmeter(screen, state)
|
import logging

from flask import Flask, render_template, abort
from flask_talisman import Talisman

from csp import csp
app = Flask(__name__)
# Enforce security headers; CSP nonces are injected into script-src.
Talisman(app,
         content_security_policy=csp,
         content_security_policy_nonce_in=['script-src'])
# Language codes that have a matching templates/<lang>/ directory.
SUPPORTED_LANGS = ('en', 'ja')
@app.route('/')
def index():
    """Serve the English splash page at the site root."""
    return render_template('en/splash.html')
@app.route('/<lang>/')
def index_i18n(lang):
    """Serve the splash page for a supported language code, else 404."""
    if lang in SUPPORTED_LANGS:
        return render_template('%s/splash.html' % lang)
    abort(404)
@app.errorhandler(500)
def server_error(e):
    """Log the stacktrace and return a minimal HTML 500 response.

    BUG FIX: this handler used `logging` without the module ever being
    imported, so any 500 error raised a NameError inside the error
    handler itself (the import is added at the top of the file)."""
    logging.exception('An error occurred during a request.')
    return """
    An internal error occurred: <pre>{}</pre>
    See logs for full stacktrace.
    """.format(e), 500
if __name__ == '__main__':
    # This is used when running locally. Gunicorn is used to run the
    # application on Google App Engine. See entrypoint in app.yaml.
    # NOTE: debug=True is for local development only; never enable it in
    # production.
    app.run(host='127.0.0.1', port=8080, debug=True)
|
'# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
import re
import urllib
import urlparse
import json
from resources.lib.modules import client
from resources.lib.modules import cleantitle
from resources.lib.modules import directstream
from resources.lib.modules import source_utils
class source:
    def __init__(self):
        """Static scraper configuration for putlockertv.to."""
        self.priority = 1
        self.language = ['en']
        self.domains = ['putlockertv.to']
        self.base_link = 'https://www2.putlockertv.to/'
        # URL templates filled in with % formatting by the methods below.
        self.movie_search_path = ('search?keyword=%s')
        self.episode_search_path = ('/filter?keyword=%s&sort=post_date:Adesc'
                                    '&type[]=series')
        self.film_path = '/watch/%s'
        self.info_path = '/ajax/episode/info?ts=%s&_=%s&id=%s&server=28&update=0'
        self.grabber_path = '/grabber-api/?ts=%s&id=%s&token=%s&mobile=0'
        self.tooltip_path = '/ajax/film/tooltip/%s'
def movie(self, imdb, title, localtitle, aliases, year):
try:
clean_title = cleantitle.geturl(title)
query = (self.movie_search_path % (clean_title))
url = urlparse.urljoin(self.base_link, query)
search_response = client.request(url)
results_list = client.parseDOM(
search_response, 'div', attrs={'class': 'item'})[0]
film_id = re.findall('(\/watch\/)([^\"]*)', results_list)[0][1]
query = (self.film_path % film_id)
url = urlparse.urljoin(self.base_link, query)
film_response = client.request(url)
ts = re.findall('(data-ts=\")(.*?)(\">)', film_response)[0][1]
sources_dom_list = client.parseDOM(
film_response, 'ul', attrs={'class': 'episodes range active'})
sources_list = []
for i in sources_dom_list:
source_id = re.findall('([\/])(.{0,6})(\">)', i)[0][1]
sources_list.append(source_id)
servers_dom_list = client.parseDOM(
film_response, 'div', attrs={'class': 'server row'})
servers_list = []
data = {
'imdb': imdb,
'title': title,
'localtitle': localtitle,
'year': year,
'ts': ts,
'sources': sources_list,
'id': film_id
}
url = urllib.urlencode(data)
return url
except Exception:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
data = {
'imdb': imdb,
'tvdb': tvdb,
'tvshowtitle': tvshowtitle,
'year': year
}
url = urllib.urlencode(data)
return url
except Exception:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
data = urlparse.parse_qs(url)
data = dict((i, data[i][0]) for i in data)
clean_title = cleantitle.geturl(data['tvshowtitle'])
query = (self.movie_search_path % clean_title)
url = urlparse.urljoin(self.base_link, query)
search_response = client.request(url)
results_list = client.parseDOM(
search_response, 'div', attrs={'class': 'items'})[0]
film_id = []
film_tries = [
'\/' + (clean_title + '-0' + season) + '[^-0-9](.+?)\"',
'\/' + (clean_title + '-' + season) + '[^-0-9](.+?)\"',
'\/' + clean_title + '[^-0-9](.+?)\"'
]
for i in range(len(film_tries)):
if not film_id:
film_id = re.findall(film_tries[i], results_list)
else:
break
film_id = film_id[0]
query = (self.film_path % film_id)
url = urlparse.urljoin(self.base_link, query)
film_response = client.request(url)
ts = re.findall('(data-ts=\")(.*?)(\">)', film_response)[0][1]
sources_dom_list = client.parseDOM(
film_response, 'ul', attrs={'class': 'episodes range active'})
if not re.findall(
'([^\/]*)\">' + episode + '[^0-9]', sources_dom_list[0]):
episode = '%02d' % int(episode)
sources_list = []
for i in sources_dom_list:
source_id = re.findall(
('([^\/]*)\">' + episode + '[^0-9]'), i)[0]
sources_list.append(source_id)
data.update({
'title': title,
'premiered': premiered,
'season': season,
'episode': episode,
'ts': ts,
'sources': sources_list,
'id': film_id
})
url = urllib.urlencode(data)
return url
except Exception:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
data = urlparse.parse_qs(url)
data = dict((i, data[i][0]) for i in data)
data['sources'] = re.findall("[^', u\]\[]+", data['sources'])
try:
q = re.findall("\.(.*)", data['id'])[0]
except:
q = data['id']
query = (self.tooltip_path % q)
url = urlparse.urljoin(self.base_link, query)
q = client.request(url)
quality = re.findall('ty">(.*?)<', q)[0]
if '1080p' in quality:
quality = '1080p'
elif '720p' in quality:
quality = 'HD'
for i in data['sources']:
token = str(self.__token(
{'id': i, 'server': 28, 'update': 0, 'ts': data['ts']}))
query = (self.info_path % (data['ts'], token, i))
url = urlparse.urljoin(self.base_link, query)
info_response = client.request(url, XHR=True)
grabber_dict = json.loads(info_response)
try:
if grabber_dict['type'] == 'direct':
token64 = grabber_dict['params']['token']
query = (self.grabber_path % (data['ts'], i, token64))
url = urlparse.urljoin(self.base_link, query)
response = client.request(url, XHR=True)
sources_list = json.loads(response)['data']
for j in sources_list:
quality = j['label'] if not j['label'] == '' else 'SD'
#quality = 'HD' if quality in ['720p','1080p'] else 'SD'
quality = source_utils.label_to_quality(quality)
if 'googleapis' in j['file']:
sources.append({'source': 'GVIDEO', 'quality': quality, 'language': 'en', 'url': j['file'], 'direct': True, 'debridonly': False})
continue
#source = directstream.googlepass(j['file'])
valid, hoster = source_utils.is_host_valid(j['file'], hostDict)
urls, host, direct = source_utils.check_directstreams(j['file'], hoster)
for x in urls:
sources.append({
'source': 'gvideo',
'quality': quality,
'language': 'en',
'url': x['url'],
'direct': True,
'debridonly': False
})
elif not grabber_dict['target'] == '':
url = 'https:' + grabber_dict['target'] if not grabber_dict['target'].startswith('http') else grabber_dict['target']
#host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
valid, hoster = source_utils.is_host_valid(url, hostDict)
if not valid: continue
urls, host, direct = source_utils.check_directstreams(url, hoster)
sources.append({
'source': hoster,
'quality': quality,
'language': 'en',
'url': urls[0]['url'], #url.replace('\/','/'),
'direct': False,
'debridonly': False
})
except: pass
return sources
except Exception:
return sources
def resolve(self, url):
try:
if not url.startswith('http'):
url = 'http:' + url
for i in range(3):
if 'google' in url and not 'googleapis' in url:
url = directstream.googlepass(url)
if url:
break
return url
except Exception:
return
def __token(self, r):
def additup(t):
n = 0
for i in range(0, len(t)):
n += ord(t[i]) + i
return n
try:
base = "iQDWcsGqN"
s = additup(base)
for n in r:
t = base + n
i = str(r[n])
e = 0
for x in range(0,max(len(t), len(i))):
e += ord(i[x]) if x < len(i) else 0
e += ord(t[x]) if x < len(t) else 0
s += additup(str(hex(e))[2:])
return s
except Exception:
return 0 |
# Copyright (c) 2015.
# Philipp Wagner <bytefish[at]gmx[dot]de> and
# Florian Lier <flier[at]techfak.uni-bielefeld.de> and
# Norman Koester <nkoester[at]techfak.uni-bielefeld.de>
#
#
# Released to public domain under terms of the BSD Simplified license.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the organization nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# See <http://www.opensource.org/licenses/bsd-license>
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from ocvfacerec.facerec.distance import EuclideanDistance
from ocvfacerec.facerec.util import as_row_matrix
import logging
import numpy as np
import operator as op
class AbstractClassifier(object):
    """Interface every classifier in this framework must implement.

    Subclasses override compute() (train), predict() (classify), and
    optionally update() (incremental training).
    """

    def compute(self, X, y):
        """Train on samples X with labels y."""
        raise NotImplementedError("Every AbstractClassifier must implement the compute method.")

    def predict(self, X):
        """Classify the query sample X."""
        raise NotImplementedError("Every AbstractClassifier must implement the predict method.")

    def update(self, X, y):
        """Incrementally add samples; optional for subclasses."""
        # Fixed garbled message ("is cannot be updated").
        raise NotImplementedError("This classifier cannot be updated.")
class NearestNeighbor(AbstractClassifier):
    """
    Implements a k-Nearest Neighbor Model with a generic distance metric.
    """

    def __init__(self, dist_metric=EuclideanDistance(), k=1):
        AbstractClassifier.__init__(self)
        self.k = k
        self.dist_metric = dist_metric
        # Gallery samples and their integer labels.
        self.X = []
        self.y = np.array([], dtype=np.int32)

    def update(self, X, y):
        """
        Updates the classifier with an additional sample X labelled y.
        """
        self.X.append(X)
        self.y = np.append(self.y, y)

    def compute(self, X, y):
        """Store the training samples; k-NN is lazy, so no real training."""
        self.X = X
        self.y = np.asarray(y)

    def predict(self, q):
        """
        Predicts the k-nearest neighbor for a given query in q.
        Args:
            q: The given query sample, which is an array.
        Returns:
            A list with the classifier output. In this framework it is
            assumed, that the predicted class is always returned as first
            element. Moreover, this class returns the distances for the
            first k-Nearest Neighbors.
        Example:
            [ 0,
               { 'labels'    : [ 0,      0,      1      ],
                 'distances' : [ 10.132, 10.341, 13.314 ]
               }
            ]
            So if you want to perform a thresholding operation, you could
            pick the distances in the second array of the generic classifier
            output.
        """
        distances = []
        for xi in self.X:
            xi = xi.reshape(-1, 1)
            d = self.dist_metric(xi, q)
            distances.append(d)
        if len(distances) > len(self.y):
            raise Exception("More distances than classes. Is your distance metric correct?")
        distances = np.asarray(distances)
        # Get the indices in an ascending sort order:
        idx = np.argsort(distances)
        # Sort the labels and distances accordingly:
        sorted_y = self.y[idx]
        sorted_distances = distances[idx]
        # Take only the k first items:
        sorted_y = sorted_y[0:self.k]
        sorted_distances = sorted_distances[0:self.k]
        # Make a histogram of them:
        hist = dict((key, val) for key, val in enumerate(np.bincount(sorted_y)) if val)
        # And get the bin with the maximum frequency. Use dict.items()
        # instead of the Python-2-only dict.iteritems(), so this code also
        # runs unchanged under Python 3 (items() is valid on both).
        predicted_label = max(hist.items(), key=op.itemgetter(1))[0]
        # A classifier should output a list with the label as first item and
        # generic data behind. The k-nearest neighbor classifier outputs the
        # distance of the k first items. So imagine you have a 1-NN and you
        # want to perform a threshold against it, you should take the first
        # item
        return [predicted_label, {'labels': sorted_y, 'distances': sorted_distances}]

    def __repr__(self):
        return "NearestNeighbor (k=%s, dist_metric=%s)" % (self.k, repr(self.dist_metric))
# libsvm
# Optional dependency: the SVM class below only works when the libsvm
# python bindings (svmutil) are importable; otherwise we just log and
# leave the names undefined.
try:
    from svmutil import *
except ImportError:
    logger = logging.getLogger("facerec.classifier.SVM")
    logger.debug("Import Error: libsvm bindings not available.")
except:
    logger = logging.getLogger("facerec.classifier.SVM")
    logger.debug("Import Error: libsvm bindings not available.")
import sys
from StringIO import StringIO  # Python 2 module; this file predates py3
# Keep a handle on the real stdout so SVM.predict can silence libsvm's
# console output and restore it afterwards.
bkp_stdout = sys.stdout
class SVM(AbstractClassifier):
    """
    This class is just a simple wrapper to use libsvm in the
    CrossValidation module. If you don't use this framework
    use the validation methods coming with LibSVM, they are
    much easier to access (simply pass the correct class
    labels in svm_predict and you are done...).

    The grid search method in this class is somewhat similar
    to libsvm grid.py, as it performs a parameter search over
    a logarithmic scale. Again if you don't use this framework,
    use the libsvm tools as they are much easier to access.

    Please keep in mind to normalize your input data, as expected
    for the model. There's no way to assume a generic normalization
    step.
    """

    def __init__(self, param=None):
        AbstractClassifier.__init__(self)
        self.logger = logging.getLogger("facerec.classifier.SVM")
        self.svm = svm_model()
        # (the original assigned self.param twice; once is enough)
        self.param = param
        if self.param is None:
            self.param = svm_parameter("-q")

    def compute(self, X, y):
        """Train the SVM on samples X with labels y."""
        self.logger.debug("SVM TRAINING (C=%.2f,gamma=%.2f,p=%.2f,nu=%.2f,coef=%.2f,degree=%.2f)" % (
            self.param.C, self.param.gamma, self.param.p, self.param.nu, self.param.coef0, self.param.degree))
        # turn data into a row vector (needed for libsvm)
        X = as_row_matrix(X)
        y = np.asarray(y)
        problem = svm_problem(y, X.tolist())
        self.svm = svm_train(problem, self.param)
        self.y = y

    def predict(self, X):
        """
        Args:
            X: The query image, which is an array.
        Returns:
            A list with the classifier output. In this framework it is
            assumed, that the predicted class is always returned as first
            element. Moreover, this class returns the libsvm output for
            p_labels, p_acc and p_vals. The libsvm help states:
                p_labels: a list of predicted labels
                p_acc: a tuple including accuracy (for classification), mean-squared
                    error, and squared correlation coefficient (for regression).
                p_vals: a list of decision values or probability estimates (if '-b 1'
                    is specified). If k is the number of classes, for decision values,
                    each element includes results of predicting k(k-1)/2 binary-class
                    SVMs. For probabilities, each element contains k values indicating
                    the probability that the testing instance is in each class.
                    Note that the order of classes here is the same as 'model.label'
                    field in the model structure.
        """
        X = np.asarray(X).reshape(1, -1)
        # Silence libsvm's console chatter during prediction, then restore
        # the process-wide stdout captured at import time.
        sys.stdout = StringIO()
        p_lbl, p_acc, p_val = svm_predict([0], X.tolist(), self.svm)
        sys.stdout = bkp_stdout
        predicted_label = int(p_lbl[0])
        return [predicted_label, {'p_lbl': p_lbl, 'p_acc': p_acc, 'p_val': p_val}]

    def __repr__(self):
        return "Support Vector Machine (kernel_type=%s, C=%.2f,gamma=%.2f,p=%.2f,nu=%.2f,coef=%.2f,degree=%.2f)" % (
            KERNEL_TYPE[self.param.kernel_type], self.param.C, self.param.gamma, self.param.p, self.param.nu,
            self.param.coef0, self.param.degree)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Old starter program currently not in use and not maintained?

Mounts the Rst2Html cherrypy application and serves it over FastCGI
via flup's WSGIServer on a unix socket.
"""
import sys
import os
# Send all prints to stderr so FastCGI's stdout stays clean.
sys.stdout = sys.stderr
import atexit
import threading
import cherrypy
from rst2html import Rst2Html
from flup.server.fcgi import WSGIServer
import cgitb
cgitb.enable()
cherrypy.config.update({'environment': 'embedded'})
## if cherrypy.__version__.startswith('3.0') and cherrypy.engine.state == 0:
## cherrypy.engine.start(blocking=False)
## atexit.register(cherrypy.engine.stop)
## os.chdir(os.path.split(__file__)[0])
application = cherrypy.tree.mount(Rst2Html())
# Autoreload must be off when embedded behind an external server.
cherrypy.config.update({'engine.autoreload_on': False,
## "tools.sessions.on": True,
## "tools.sessions.timeout": 5,
## "log.screen": False,
## "log.access_file": "/tmp/cherry_access.log",
## "log.error_file": "/tmp/cherry_error.log",
'server.socket_file': "/var/run/rst2html.sock",
## 'server.socket_host': 'rst2html.lemoncurry.nl',
## 'server.socket_port': 80,
})
#try:
# Blocks serving requests until the process is terminated.
WSGIServer(application).run()
#finally:
#cherrypy.engine.stop()
|
"""
Assignment 1:
- Check is the given number is even or not.
- Your function should return True if the number is even, False if the number is odd.
- Name your function `is_even`
Input: An integer
Output: A boolean
Example:
> is_even(2) == True
> is_even(5) == False
> is_even(0) == True
"""
...
# tests
if __name__ == '__main__':
assert is_even(2) is True
assert is_even(5) is False
assert is_even(0) is True
|
from collections import defaultdict
import django
from django.db.models import (
BooleanField,
DateField,
ExpressionWrapper,
OuterRef,
Q,
Subquery,
functions,
)
from .orm_fields import OrmBaseField, OrmBoundField
from .types import (
ASC,
DateTimeType,
DateType,
IsNullType,
NumberChoiceType,
NumberType,
StringType,
)
from .util import annotation_path
# Functions that can be applied to date-valued fields.
_DATE_FUNCTIONS = [
    "is_null",
    "year",
    "quarter",
    "month",
    "day",
    "week_day",
    "month_start",
    "iso_year",
    "iso_week",
    "week_start",
]
# Maps a field type to the list of function names applicable to it;
# any type not listed explicitly only supports "is_null".
_TYPE_FUNCTIONS = defaultdict(
    lambda: ["is_null"],
    {
        DateType: _DATE_FUNCTIONS,
        DateTimeType: _DATE_FUNCTIONS + ["hour", "minute", "second", "date"],
        StringType: ["is_null", "length"],
    },
)
# Display choices for the numeric values Django's Extract* functions return.
_month_choices = [
    (1, "January"),
    (2, "February"),
    (3, "March"),
    (4, "April"),
    (5, "May"),
    (6, "June"),
    (7, "July"),
    (8, "August"),
    (9, "September"),
    (10, "October"),
    (11, "November"),
    (12, "December"),
]
# Django's ExtractWeekDay numbers days 1-7 starting from Sunday.
_weekday_choices = [
    (1, "Sunday"),
    (2, "Monday"),
    (3, "Tuesday"),
    (4, "Wednesday"),
    (5, "Thursday"),
    (6, "Friday"),
    (7, "Saturday"),
]
def _get_django_function(name, qs):
    """Look up the Django expression factory and metadata for function *name*.

    Returns a 5-tuple: (expression factory, result type, choices,
    default sort direction, format hints). *qs* is only consulted by the
    "is_null" factory, which may need it to build a workaround subquery.
    """

    def IsNull(field_name):
        # https://code.djangoproject.com/ticket/32200
        if django.VERSION[:3] == (3, 1, 3):  # pragma: django != 3.1.3
            # Django 3.1.3 regression: evaluate the null test through a
            # correlated subquery instead of a plain annotation.
            inner = qs.annotate(
                ddb_is_null=ExpressionWrapper(
                    Q(**{field_name: None}), output_field=BooleanField()
                )
            )
            return Subquery(
                inner.filter(pk=OuterRef("pk")).values("ddb_is_null")[:1],
                output_field=BooleanField(),
            )
        return ExpressionWrapper(
            Q(**{field_name: None}), output_field=BooleanField()
        )

    month_start = lambda x: functions.TruncMonth(x, DateField())
    week_start = lambda x: functions.TruncWeek(x, DateField())

    return {
        "year": (functions.ExtractYear, NumberType, (), ASC, {"useGrouping": False}),
        "quarter": (functions.ExtractQuarter, NumberType, (), ASC, {}),
        "month": (functions.ExtractMonth, NumberChoiceType, _month_choices, ASC, {}),
        "month_start": (month_start, DateType, (), ASC, {}),
        "day": (functions.ExtractDay, NumberType, (), ASC, {}),
        "week_day": (functions.ExtractWeekDay, NumberChoiceType, _weekday_choices, ASC, {}),
        "hour": (functions.ExtractHour, NumberType, (), ASC, {}),
        "minute": (functions.ExtractMinute, NumberType, (), ASC, {}),
        "second": (functions.ExtractSecond, NumberType, (), ASC, {}),
        "date": (functions.TruncDate, DateType, (), ASC, {}),
        "is_null": (IsNull, IsNullType, (), None, {}),
        "length": (functions.Length, NumberType, (), None, {}),
        "iso_year": (functions.ExtractIsoYear, NumberType, (), ASC, {}),
        "iso_week": (functions.ExtractWeek, NumberType, (), ASC, {}),
        "week_start": (week_start, DateType, (), ASC, {}),
    }[name]
class OrmBoundFunctionField(OrmBoundField):
    """A function field bound to a concrete position in a field path."""

    def _annotate(self, request, qs, debug=False):
        # Build the Django expression for this function over the previous
        # field's queryset path and attach it as an annotation.
        factory = _get_django_function(self.name, qs)[0]
        return self._annotate_qs(qs, factory(self.previous.queryset_path_str))

    def parse_lookup(self, lookup, value):
        """Parse *value* for *lookup*, clamping year filters to a sane range."""
        parsed, err_message = super().parse_lookup(lookup, value)
        year_like = self.name in ["year", "iso_year"]
        if year_like and parsed is not None and lookup != "is_null":
            # Years outside [2, 9998] break downstream date arithmetic.
            if parsed < 2:
                err_message = "Can't filter to years less than 2"
            if parsed > 9998:
                err_message = "Can't filter to years greater than 9998"
        return parsed, err_message
class OrmFunctionField(OrmBaseField):
    """Static description of a function (year, length, ...) applicable to a field type."""

    def __init__(self, model_name, name, type_, choices, default_sort, format_hints):
        pretty = name.replace("_", " ")
        super().__init__(
            model_name,
            name,
            pretty,
            type_=type_,
            concrete=True,
            can_pivot=True,
            choices=choices,
            default_sort=default_sort,
            format_hints=format_hints,
        )

    def bind(self, previous):
        """Attach this function to the end of the field path *previous*."""
        assert previous
        path = previous.full_path + [self.name]
        return OrmBoundFunctionField(
            field=self,
            previous=previous,
            full_path=path,
            pretty_path=previous.pretty_path + [self.pretty_name],
            queryset_path=[annotation_path(path)],
            filter_=True,
        )
def get_functions_for_type(type_):
    """Build the name -> OrmFunctionField mapping for every function on *type_*."""
    fields = {}
    for func_name in _TYPE_FUNCTIONS[type_]:
        # Skip the factory (index 0); the remaining metadata feeds the field.
        _, result_type, choices, default_sort, hints = _get_django_function(func_name, None)
        fields[func_name] = OrmFunctionField(
            type_.name, func_name, result_type, choices, default_sort, hints
        )
    return fields
|
# Number-word data for Irish (Gaeilge): maps spoken/written forms to
# integer values. Several entries appear both with and without the
# numeral particle "a", and with counting vs. standalone forms.
info = {
    "UNIT_NUMBERS": {
        "a náid": 0,
        "náid": 0,
        "a haon": 1,
        "aon": 1,
        "a dó": 2,
        "dhá": 2,
        "dó": 2,
        "a trí": 3,
        "trí": 3,
        "a ceathair": 4,
        "ceathair": 4,
        "ceithre": 4,
        "a cúig": 5,
        "cúig": 5,
        "a sé": 6,
        "sé": 6,
        "a seacht": 7,
        "seacht": 7,
        "a hocht": 8,
        "ocht": 8,
        "a naoi": 9,
        "naoi": 9
    },
    "DIRECT_NUMBERS": {
        "a deich": 10,
        "deich": 10
    },
    "TENS": {
        "fiche": 20,
        "tríocha": 30,
        "daichead": 40,
        "caoga": 50,
        "seasca": 60,
        "seachtó": 70,
        "ochtó": 80,
        "nócha": 90
    },
    # Not yet populated for this language.
    "HUNDREDS": {},
    "BIG_POWERS_OF_TEN": {},
    "SKIP_TOKENS": [],
    "USE_LONG_SCALE": False
}
|
# Driver script: compute the photometric SNR for a magnitude-12 star using
# measured reference values from the HATS-24 field.
from SNR_Calculation import SNR_Calculation

# Detector operating mode: conventional amplifier (no EM gain).
mode = {
    "em_mode": "Conv",
    "em_gain": [1],
    "readout_rate": 1,
    "preamp": 1,
    "bin": 1,
    "sub_img": 1024,
    "t_exp": 1,
}
_SKY_FLUX = 12.298897076737294  # e-/pix/s
_HATS24_FLUX = 56122.295000000006  # e-/s
_N_PIX_STAR = 305
_HATS24_MAG = 12.25
mag = 12
# Scale the HATS-24 reference flux to the target magnitude (Pogson relation).
aux = 10 ** ((_HATS24_MAG - mag) / 2.5)
star_flux = _HATS24_FLUX * aux
# NOTE(review): -70 is presumably the detector temperature (°C) and 9917 an
# exposure/bias parameter — confirm against SNR_Calculation's signature.
snr_calc = SNR_Calculation(mode, -70, _SKY_FLUX, star_flux, _N_PIX_STAR, 9917)
snr_calc.calc_DC()  # dark current
snr_calc.calc_RN()  # read noise
snr_calc.calc_SNR()
snr = snr_calc.get_SNR()
print(snr)
|
# *******************************************************************************
#
# Copyright (c) 2020-2021 David Briant. All rights reserved.
#
# *******************************************************************************
import sys
if hasattr(sys, '_TRACE_IMPORTS') and sys._TRACE_IMPORTS: print(__name__)
import os, os.path, json
from io import TextIOWrapper
from coppertop.pipe import *
from bones.core.types import pystr
from coppertop.std.text import strip
from coppertop.std.transforming import each
# Lift selected os / os.path callables into coppertop unary pipeline functions
# so they can be used with the library's piping operators.
getCwd = coppertop(style=unary1, name='getCwd')(os.getcwd)
isFile = coppertop(style=unary1, name='isFile')(os.path.isfile)
isDir = coppertop(style=unary1, name='isDir')(os.path.isdir)
dirEntries = coppertop(style=unary1, name='dirEntries')(os.listdir)
@coppertop(style=binary2)
def joinPath(a, b):
    """Join path *a* with *b*, where *b* is a single segment or a sequence of segments."""
    segments = b if isinstance(b, (list, tuple)) else [b]
    return os.path.join(a, *segments)
@coppertop
def readlines(f: TextIOWrapper) -> list:
    """Consume *f* and return all of its remaining lines as a list."""
    return [line for line in f]
@coppertop
def linesOf(pfn: pystr):
    """Read the text file at path *pfn* and return its lines with trailing
    newlines stripped, using coppertop's pipeline (>>) operators."""
    # NOTE(review): strip(_, '\\n') passes the two characters backslash+n;
    # presumably coppertop's strip interprets that as the newline escape —
    # confirm against its implementation.
    with open(pfn) as f:
        return f >> readlines >> each >> strip(_,'\\n')
@coppertop(style=binary)
def copyTo(src, dest):
    """Copy *src* to *dest* — placeholder, not implemented yet."""
    raise NotImplementedError()
# Two readJson overloads: coppertop dispatches on the annotated argument
# type (path string vs. already-open text stream).
@coppertop
def readJson(pfn: pystr):
    """Parse the JSON file at path *pfn* and return the decoded object."""
    with open(pfn) as f:
        return json.load(f)


@coppertop
def readJson(f: TextIOWrapper):
    """Parse JSON from the open text stream *f*."""
    return json.load(f)
|
def foo(*, x=None, y=None):
    """Print the keyword-only arguments *x* and *y*, space-separated."""
    print(x, y)
# Find K pairs with smallest sums
# ttungl@gmail.com
def kSmallestPairs(self, nums1, nums2, k):
    """Return up to k pairs [u, v] (u from nums1, v from nums2) with the
    smallest sums u + v, in ascending order of sum.

    Lazy best-first search: start from (0, 0) and only expand a pair's
    right neighbour (i, j+1) — plus the next row (i+1, 0) when j == 0 —
    so the heap stays small (O(k) pops, O(log k) per operation).
    """
    import heapq  # local import: no top-level heapq import in this module

    queue = []

    def push(i, j):
        # Push only valid index pairs, keyed by their sum.
        if i < len(nums1) and j < len(nums2):
            heapq.heappush(queue, [nums1[i] + nums2[j], i, j])

    push(0, 0)  # init
    pairs = []  # output
    while queue and len(pairs) < k:
        _, i, j = heapq.heappop(queue)  # heappop returns smallest item from the heap
        pairs.append([nums1[i], nums2[j]])  # add to the output
        push(i, j + 1)  # next pair (next column j) in the same row i
        if j == 0:
            # first column of row i popped -> row i+1 becomes reachable
            push(i + 1, 0)
    return pairs
|
import random
import unittest
import requests

import common

# Per-module schema name and the table names used by these fixtures/tests.
_S = 'basics'
_T0 = 'minimal'
_T1 = 'basictable1'
_T2 = 'basictable2'
_T2b = 'ambiguous2'
_Tc1 = 'composite1'
_Tc2 = 'composite2'
_Tr1 = 'column_removal_a'
_Tr2 = 'column_removal_b'

# Shared model-document builders and system-column shorthand.
from common import Int8, Text, Timestamptz, \
    RID, RCT, RMT, RCB, RMB, RidKey, \
    ModelDoc, SchemaDoc, TableDoc, ColumnDoc, KeyDoc, FkeyDoc
def defs(S):
    """Build the shared test model document for schema name *S*.

    The model contains a minimal table, two related basic tables, a table
    with two ambiguous fkeys to the same target, a composite-key pair, and
    a pair used by column-removal tests. Annotations exercise the
    "tag:misd.isi.edu,2015:test0" round-trip at every document level.
    """
    # these table definitions get reused in multiple test modules under different per-module schema
    return ModelDoc(
        [
            SchemaDoc(
                S,
                [
                    # Smallest possible table: system columns plus one value.
                    TableDoc(
                        _T0,
                        [ RID, RCT, RMT, RCB, RMB, ColumnDoc('value', Text) ],
                        [ RidKey ],
                    ),
                    # Referenced parent table.
                    TableDoc(
                        _T1,
                        [
                            RID, RCT, RMT, RCB, RMB,
                            ColumnDoc('id', Int8, nullok=False),
                            ColumnDoc('name', Text),
                        ],
                        [ RidKey, KeyDoc(['id']) ]
                    ),
                    # Child table with a single fkey to _T1.
                    TableDoc(
                        _T2,
                        [
                            RID, RCT, RMT, RCB, RMB,
                            ColumnDoc('id', Int8, nullok=False),
                            ColumnDoc('level1_id', Int8),
                            ColumnDoc('name', Text),
                        ],
                        [ RidKey, KeyDoc(['id']) ],
                        [ FkeyDoc(S, _T2, ['level1_id'], S, _T1, ['id']) ]
                    ),
                    # Two fkeys to the same target -> ambiguous joins;
                    # annotations attached at every level.
                    TableDoc(
                        _T2b,
                        [
                            RID, RCT, RMT, RCB, RMB,
                            ColumnDoc('id', Int8, {"tag:misd.isi.edu,2015:test0": "value 0"}, nullok=False),
                            ColumnDoc('level1_id1', Int8, {"tag:misd.isi.edu,2015:test0": "value 0"}),
                            ColumnDoc('level1_id2', Int8, {"tag:misd.isi.edu,2015:test0": "value 0"}),
                            ColumnDoc('name', Text, {"tag:misd.isi.edu,2015:test0": "value 0"}),
                        ],
                        [
                            KeyDoc(['RID'], {"tag:misd.isi.edu,2015:test0": "value 0"}),
                            KeyDoc(['id'], {"tag:misd.isi.edu,2015:test0": "value 0"}),
                        ],
                        [
                            FkeyDoc(S, _T2b, ['level1_id1'], S, _T1, ['id'], {"tag:misd.isi.edu,2015:test0": "value 0"}),
                            FkeyDoc(S, _T2b, ['level1_id2'], S, _T1, ['id'], {"tag:misd.isi.edu,2015:test0": "value 0"}),
                        ],
                        {"tag:misd.isi.edu,2015:test0": "value 0"}
                    ),
                    # Composite-key parent.
                    TableDoc(
                        _Tc1,
                        [
                            RID, RCT, RMT, RCB, RMB,
                            ColumnDoc('id', Int8, nullok=False),
                            ColumnDoc('last_update', Timestamptz),
                            ColumnDoc('name', Text),
                            ColumnDoc('site', Int8, nullok=False),
                        ],
                        [ RidKey, KeyDoc(['id', 'site']) ],
                    ),
                    # Composite-key child referencing _Tc1.
                    TableDoc(
                        _Tc2,
                        [
                            RID, RCT, RMT, RCB, RMB,
                            ColumnDoc('id', Int8, nullok=False),
                            ColumnDoc('last_update', Timestamptz),
                            ColumnDoc('name', Text),
                            ColumnDoc('site', Int8, nullok=False),
                        ],
                        [ RidKey, KeyDoc(['id', 'site']) ],
                        [
                            FkeyDoc(S, _Tc2, ['id', 'site'], S, _Tc1, ['id', 'site'])
                        ]
                    ),
                    # Tables used by column-removal tests.
                    TableDoc(
                        _Tr1,
                        [
                            RID, RCT, RMT, RCB, RMB,
                            ColumnDoc('id', Int8, nullok=False),
                            ColumnDoc('id2', Int8, nullok=False),
                            ColumnDoc('name', Int8),
                        ],
                        [ RidKey, KeyDoc(['id']), KeyDoc(['id2']) ],
                        schema_name=S
                    ),
                    TableDoc(
                        _Tr2,
                        [
                            RID, RCT, RMT, RCB, RMB,
                            ColumnDoc('id', Int8, nullok=False),
                            ColumnDoc('t1_id1', Int8, nullok=False),
                            ColumnDoc('t1_id2', Int8, nullok=False),
                        ],
                        [ RidKey, KeyDoc(['id']) ],
                        [
                            FkeyDoc(S, _Tr2, ['t1_id1'], S, _Tr1, ['id']),
                            FkeyDoc(S, _Tr2, ['t1_id2'], S, _Tr1, ['id']),
                        ],
                        schema_name=S
                    ),
                ],
                {"tag:misd.isi.edu,2015:test0": "value 0"}
            )
        ],
        {"tag:misd.isi.edu,2015:test0": "value 0"}
    )
# Shared module fixtures: the model document for this module's schema and
# a name -> table-document index into it.
_defs = defs(_S)
_table_defs = _defs['schemas'][_S]['tables']
def expand_table_resources(S, table_defs, table):
    """Enumerate the schema-API resource paths belonging to *table*.

    Returns the schema path, the table path, then one path per column,
    key, and foreign key, in that order.
    """
    table_doc = table_defs[table]
    table_res = 'schema/%s/table/%s' % (S, table)
    resources = ['schema/%s' % S, table_res]
    resources.extend(
        '%s/column/%s' % (table_res, coldef['name'])
        for coldef in table_doc['column_definitions']
    )
    resources.extend(
        '%s/key/%s' % (table_res, ','.join(keydef['unique_columns']))
        for keydef in table_doc['keys']
    )
    for fkeydef in table_doc['foreign_keys']:
        fk_cols = ','.join([ "%(column_name)s" % c for c in fkeydef['foreign_key_columns']])
        ref_table = "%(schema_name)s:%(table_name)s" % fkeydef['referenced_columns'][0]
        ref_cols = ','.join([ "%(column_name)s" % c for c in fkeydef['referenced_columns']])
        resources.append(
            '%s/foreignkey/%s/reference/%s/%s' % (table_res, fk_cols, ref_table, ref_cols)
        )
    return resources
def setUpModule():
    """Create the shared `basics` schema fixtures once per test run."""
    # this setup covers a bunch of basic table-creation feature tests
    # but we put them here so they are shared fixtures for the rest of the detailed feature tests below...
    r = common.primary_session.get('schema/%s' % _S)
    if r.status_code == 404:
        # idempotent because unittest can re-enter module several times...
        common.primary_session.post('schema', json=_defs).raise_for_status()
class BasicColumn (common.ErmrestTest):
    """Column sub-API tests: list, fetch, delete, and recreate a column."""
    table = _T2b
    column = 'name'
    # Definition used to recreate the column after deletion.
    coldef = {
        "type": { "typename": "text" },
        "name": "name",
        "annotations": {"tag:misd.isi.edu,2015:test0": "value 0"}
    }

    def _cols_url(self): return 'schema/%s/table/%s/column' % (_S, self.table)

    def test_get_all(self):
        self.assertHttp(self.session.get(self._cols_url()), 200, 'application/json')

    def test_get_one(self):
        self.assertHttp(self.session.get('%s/%s' % (self._cols_url(), self.column)), 200, 'application/json')

    # mutate tests are numbered so unittest runs delete before recreate
    def test_mutate_1_delete(self):
        self.assertHttp(self.session.delete('%s/%s' % (self._cols_url(), self.column)), 204)
        self.assertHttp(self.session.get('%s/%s' % (self._cols_url(), self.column)), 404)

    def test_mutate_3_recreate(self):
        self.assertHttp(self.session.post(self._cols_url(), json=self.coldef), 201)
        self.test_get_one()
class ServiceAdvertisement (common.ErmrestTest):
    """The service root must advertise its version and feature flags."""
    def test_service_ad(self):
        r = self.session.get('/ermrest')
        self.assertHttp(r, 200, 'application/json')
        ad = r.json()
        self.assertIn('version', ad)
        self.assertIn('features', ad)
class CatalogNaming (common.ErmrestTest):
    """Tests for client-chosen catalog IDs and catalog aliases."""
    # Includes a junk entry to exercise ACL round-tripping.
    my_acl = [ common.primary_client_id, "extra junk" ]

    def _check_repr(self, r, required={}):
        """Assert that response/dict *r* contains the *required* structure
        (recursing into nested dicts) and return the decoded dict.

        The mutable default is safe here: `required` is never mutated.
        """
        if isinstance(r, requests.Response):
            rep = r.json()
        else:
            rep = r
        self.assertIsInstance(rep, dict)
        for k, v in required.items():
            self.assertIn(k, rep)
            if isinstance(v, dict):
                self._check_repr(rep[k], v)
            else:
                self.assertEqual(v, rep[k])
        return rep

    def test_catalog_post_input(self):
        """Create a catalog with a client-chosen ID and verify its status doc."""
        my_id = "my_catalog_%d" % random.randint(0,2**30)
        my_url = '/ermrest/catalog/%s' % (common.urlquote(my_id),)
        delete_id = {my_id}
        try:
            # claim ID as new catalog
            doc1 = { "id": my_id, "owner": self.my_acl }
            r = self.session.post('/ermrest/catalog', json=doc1)
            # check post response
            self.assertHttp(r, 201, 'application/json')
            doc2 = self._check_repr(r, {
                "id": my_id,
            })
            delete_id.add(doc2['id'])
            # check catalog status representation
            r = self.session.get(my_url)
            self.assertHttp(r, 200, 'application/json')
            self._check_repr(r, {
                'id': my_id,
                'acls': {"owner": self.my_acl},
            })
        finally:
            # best-effort cleanup; 404 tolerated if creation failed earlier
            for cid in delete_id:
                self.assertHttp(self.session.delete('/ermrest/catalog/%s' % common.urlquote(cid)), (200, 204, 404))

    def test_alias(self):
        """Claim an unbound alias, bind it to a catalog, and resolve it."""
        my_id = "my_alias_%d" % random.randint(0,2**30)
        my_url = '/ermrest/alias/%s' % (common.urlquote(my_id),)
        delete_id = {my_id}
        try:
            # claim ID as unbound alias
            doc1 = { "id": my_id, "owner": self.my_acl }
            r = self.session.post('/ermrest/alias', json=doc1)
            # check post response
            self.assertHttp(r, 201, 'application/json')
            doc2 = self._check_repr(r, {
                "id": my_id,
            })
            delete_id.add(doc2['id'])
            # check alias status representation
            r = self.session.get(my_url)
            self.assertHttp(r, 200, 'application/json')
            self._check_repr(r, {
                "id": my_id,
                "owner": self.my_acl,
                "alias_target": None,
            })
            # check alias update
            # (dict.update returns None; the original pointlessly bound it to r)
            doc1.update({"alias_target": common.cid})
            r = self.session.put(my_url, json=doc1)
            self.assertHttp(r, 200, 'application/json')
            self._check_repr(r, {
                "id": my_id,
                "owner": self.my_acl,
                "alias_target": common.cid,
            })
            # check aliased catalog status representation
            r = self.session.get('/ermrest/catalog/%s' % (common.urlquote(my_id),))
            self.assertHttp(r, 200, 'application/json')
            self._check_repr(r, {
                "id": my_id,
                "alias_target": common.cid,
            })
        finally:
            for cid in delete_id:
                self.assertHttp(self.session.delete('/ermrest/alias/%s' % common.urlquote(cid)), (200, 204, 404))
class BasicKey (common.ErmrestTest):
    """Key sub-API tests: list, fetch, create, and delete a uniqueness key."""
    table = _T1
    key = 'id'
    # Additional composite key created/deleted during the test run.
    newkey = 'id,name'
    newkeydef = {"unique_columns": ["id", "name"]}

    def _keys_url(self): return 'schema/%s/table/%s/key' % (_S, self.table)

    def test_get_keys(self):
        self.assertHttp(self.session.get(self._keys_url()), 200, 'application/json')

    def test_get_key(self):
        self.assertHttp(self.session.get('%s/%s' % (self._keys_url(), self.key)), 200, 'application/json')

    def test_newkey_absent(self):
        self.assertHttp(self.session.get('%s/%s' % (self._keys_url(), self.newkey)), 404)

    def test_newkey_create(self):
        self.assertHttp(self.session.post(self._keys_url(), json=self.newkeydef), 201, 'application/json')
        self.assertHttp(self.session.get('%s/%s' % (self._keys_url(), self.newkey)), 200)

    def test_newkey_delete(self):
        self.assertHttp(self.session.delete('%s/%s' % (self._keys_url(), self.newkey)), 204)
# Same key tests against a table whose primary key spans two columns.
class CompositeKey (BasicKey):
    table = _Tc1
    key = 'id,site'
def add_fkey_gets(klass):
    """Class decorator: add one GET test per foreignkey URL path depth (1-5).

    Each generated test fetches the foreignkey resource truncated to that
    depth and asserts the request succeeds.
    """
    # generate tests for each level of foreignkey rest path hierarchy
    for depth in range(1,6):
        # Bind `depth` as a default argument: a plain closure would capture
        # the loop variable by reference, so every generated test would use
        # the final value (5) instead of its own depth.
        def test(self, depth=depth):
            self.assertHttp(self.session.get(self._fkey_url(depth)))
        setattr(klass, 'test_0_access_fkey_depth%d' % depth, test)
    return klass
@add_fkey_gets
class ForeignKey (common.ErmrestTest):
    """Foreignkey sub-API tests: access at each path depth, delete, and
    recreate with every supported on_delete/on_update action."""
    table = _T2

    def _entity_url(self):
        return 'entity/%s:%s' % (_S, self.table)

    def _fkey_url(self, depth=1):
        # Build the fkey resource URL truncated to `depth` path components.
        fkey = _table_defs[self.table]['foreign_keys'][0]
        parts = [
            'schema/%s/table/%s/foreignkey' % (_S, self.table),
            ','.join([ c['column_name'] for c in fkey['foreign_key_columns']]),
            'reference',
            '%(schema_name)s:%(table_name)s' % fkey['referenced_columns'][0],
            ','.join([ c['column_name'] for c in fkey['referenced_columns']])
        ]
        return '/'.join(parts[0:depth])

    def test_1_delete_fkey(self):
        self.assertHttp(self.session.delete(self._fkey_url(5)), 204)
        self.assertHttp(self.session.get(self._fkey_url(5)), 409)

    def _recreate_fkey(self, on_delete, on_update):
        # Drop and recreate the fkey with the given referential actions,
        # then verify the actions round-trip (None reads back as NO ACTION).
        self.session.delete(self._fkey_url(5))
        fkey_def = dict(_table_defs[self.table]['foreign_keys'][0])
        fkey_def['on_delete'] = on_delete
        fkey_def['on_update'] = on_update
        self.assertHttp(self.session.post(self._fkey_url(1), json=fkey_def), 201)
        r = self.session.get(self._fkey_url(5))
        self.assertHttp(r, 200, 'application/json')
        self.assertEqual(r.json()[0]['on_update'], on_update or 'NO ACTION')
        self.assertEqual(r.json()[0]['on_delete'], on_delete or 'NO ACTION')

    def test_2_recreate_none_none(self): self._recreate_fkey(None, None)
    def test_2_recreate_noact_noact(self): self._recreate_fkey('NO ACTION', 'NO ACTION')
    def test_2_recreate_restrict_noact(self): self._recreate_fkey('RESTRICT', 'NO ACTION')
    def test_2_recreate_cascade_noact(self): self._recreate_fkey('CASCADE', 'NO ACTION')
    def test_2_recreate_setnull_noact(self): self._recreate_fkey('SET NULL', 'NO ACTION')
    def test_2_recreate_setdefault_noact(self): self._recreate_fkey('SET DEFAULT', 'NO ACTION')
    def test_2_recreate_noact_restrict(self): self._recreate_fkey('NO ACTION', 'RESTRICT')
    def test_2_recreate__noactcascade(self): self._recreate_fkey('NO ACTION', 'CASCADE')
    def test_2_recreate_noact_setnull(self): self._recreate_fkey('NO ACTION', 'SET NULL')
    def test_2_recreate_noact_setdefault(self): self._recreate_fkey('NO ACTION', 'SET DEFAULT')
# Same foreignkey tests against the composite (id, site) fkey.
class ForeignKeyComposite (ForeignKey):
    table = _Tc2
def add_url_parse_tests(klass):
    """Class decorator: generate URL-parser GET tests for every combination
    of data API, filter form, and (good or bad) projection.

    Good projections expect 200 when the class supplies a table path
    (`base`), 400 otherwise; malformed projections always expect 400.
    """
    # generate tests for combinations of API, filter, projection
    filters = {
        "unfiltered": "",
        "number": "/id=4",
        "text": "/name=foo",
        "empty": "/name=",
        "null": "/name::null::",
        "regexp": "/name::regexp::x.%2A",
    }
    apis = [ "entity", "attribute", "attributegroup", "aggregate" ]
    good_projections = {
        "entity": "",
        "attribute": "/id,name",
        "attributegroup": "/id;name",
        "aggregate": "/n:=cnt(id)"
    }
    bad_projections = {
        "entity": "/id,name",
        "attribute": "/id;name",
        "attributegroup": "",
        "aggregate": ""
    }
    for api in apis:
        for fk in filters:
            # Bind api/fk as default arguments: plain closures would capture
            # the loop variables by reference, so every generated test would
            # run with the final ("aggregate", "regexp") pair.
            def goodproj(self, api=api, fk=fk):
                url = '%s%s%s%s' % (api, self.base, filters[fk], good_projections[api])
                self.assertHttp(self.session.get(url), self.base and 200 or 400)
            def badproj(self, api=api, fk=fk):
                url = '%s%s%s%s' % (api, self.base, filters[fk], bad_projections[api])
                self.assertHttp(self.session.get(url), 400)
            setattr(klass, 'test_%s_%s_proj' % (api, fk), goodproj)
            setattr(klass, 'test_%s_%s_badproj' % (api, fk), badproj)
    return klass
# Generated URL-parse tests against a concrete schema:table base URL.
@add_url_parse_tests
class ParseTable (common.ErmrestTest): base = '/%s:%s' % (_S, _T1)

# Same generated tests with an empty base: the decorator's "good
# projection" tests then expect 400 instead of 200.
class ParseNoTable (ParseTable): base = ''
class ConstraintNameNone (common.ErmrestTest):
    """Create a table whose key/fkey constraint names are omitted (None)
    and verify the expected HTTP status and echoed names.

    Subclasses vary keynames/fkeynames (and the expected status) to
    cover empty, custom, and malformed constraint-name documents.
    """
    table = 'test_constr_names'
    keynames = None   # names= payload for the extra KeyDoc, or None
    fkeynames = None  # names= payload for the FkeyDoc, or None
    status = 201      # expected HTTP status of the schema POST

    def defs(self):
        # Table document with one extra key and one foreign key whose
        # constraint names come from the class attributes above.
        return [
            TableDoc(
                self.table,
                [
                    RID, RCT, RMT, RCB, RMB,
                    ColumnDoc('id', Int8, nullok=False),
                    ColumnDoc('level1_id1', Int8),
                    ColumnDoc('name', Text),
                ],
                [ RidKey, KeyDoc(['id'], names=self.keynames) ],
                [
                    FkeyDoc(_S, self.table, ['level1_id1'], _S, _T1, ['id'], names=self.fkeynames),
                ],
                schema_name=_S
            )
        ]

    def test_1_create(self):
        # POST the schema; on success, confirm the server echoes the
        # requested constraint names back in the table document.
        self.assertHttp(self.session.post('schema', json=self.defs()), self.status)
        if self.status == 201:
            r = self.session.get('schema/%s/table/%s' % (_S, self.table))
            self.assertHttp(r, 200)
            if self.keynames:
                self.assertIn(tuple(self.keynames[0]), [ tuple(k['names'][0]) for k in r.json()['keys'] ])
            if self.fkeynames:
                self.assertIn(self.fkeynames[0], r.json()['foreign_keys'][0]['names'])

    def tearDown(self):
        # Drop the table between tests, but only if it was created.
        if self.status == 201:
            self.session.delete('schema/%s/table/%s' % (_S, self.table))
# An explicit empty names list should be accepted like None (201).
class ConstraintNameEmpty (ConstraintNameNone):
    keynames = []
    fkeynames = []

# Custom [schema, name] pairs must be accepted and echoed back (201).
class ConstraintNameCustom (ConstraintNameNone):
    keynames = [[_S, "mykey"]]
    fkeynames = [[_S, "myfkey"]]

# The variants below supply malformed name documents and must be
# rejected with 400.

# Non-string constraint name (number) inside a key name pair.
class KeyNamesNumber (ConstraintNameNone):
    keynames = [[_S, 5]]
    status = 400

# Non-string constraint name (number) inside a fkey name pair.
class FKeyNamesNumber (ConstraintNameNone):
    fkeynames = [[_S, 5]]
    status = 400

# A name entry must be a [schema, name] list, not a scalar.
class KeyNamesNonlist (ConstraintNameNone):
    keynames = [5]
    status = 400

class FKeyNamesNonlist (ConstraintNameNone):
    fkeynames = [5]
    status = 400

# A name pair must have exactly two elements.
class KeyNameTooLong (ConstraintNameNone):
    keynames = [[_S, "mykey", "extra"]]
    status = 400

class FKeyNameTooLong (ConstraintNameNone):
    fkeynames = [[_S, "mykey", "extra"]]
    status = 400
class ColumnRemoval (common.ErmrestTest):
    """Delete columns (including key and foreign-key members) and verify
    the table document remains retrievable afterwards."""

    def _delete_col(self, tname, cname):
        # Drop the column, then re-fetch _Tr1 to check the model survived.
        self.assertHttp(self.session.delete('schema/%s/table/%s/column/%s' % (_S, tname, cname)), 204)
        self.assertHttp(self.session.get('schema/%s/table/%s' % (_S, _Tr1)), 200)

    def test_remove_col(self):
        # Plain (non-constraint) column.
        self._delete_col(_Tr1, 'name')

    def test_remove_fkey_fcol(self):
        # Column that is the referencing side of a foreign key.
        self._delete_col(_Tr2, 't1_id2')

    def test_remove_fkey_pcol(self):
        # Column on the referenced side of a foreign key.
        self._delete_col(_Tr2, 't1_id1')

    def test_remove_keycol(self):
        # Column that is a member of a key.
        self._delete_col(_Tr1, 'id2')
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author:
Mikolaj Metelski
"""
import json
import pathlib
import pickle
import uuid
from typing import Any, Dict, List, Tuple, Union
import helpers
from core import AbstractModel
def run_on_param_grid(model: Union[AbstractModel.__class__, AbstractModel],
                      data: Any, **params_ranges: Dict[str, Any]):
    """Run model on grid of its params.

    If you just want to run the model (on one set of parameters and one
    dataset), use `model(**params)(data)` instead.

    If you pass a model object as `model`, it will call
    `model.reparam(**new_parameters)` for each point on the grid. If you
    pass a class inheriting from `AbstractModel` instead, it will create
    the models from scratch every time. This may be very important if your
    `model.__init__` and `model.__call__` have different calibration
    procedures, or when the model depends on the history of its
    reparametrisation.

    Pass the arguments of the model as keyword arguments with list values.

    Examples:
        >>> class MyModel(AbstractModel):
        >>>     ...
        >>> run_on_param_grid(MyModel, mydata, a=[1, 2], b=[True, False])
        will run the following models on mydata:
        >>> MyModel(a=1, b=True)(mydata)
        >>> MyModel(a=2, b=True)(mydata)
        >>> MyModel(a=1, b=False)(mydata)
        >>> MyModel(a=2, b=False)(mydata)
        But this code:
        >>> sample_model = MyModel(a=5, b=6)
        >>> run_on_param_grid(sample_model, mydata, a=[1, 2], b=[True, False])
        will call `sample_model.reparam(...)` followed by
        `sample_model(mydata)` for each of the four grid points.

    Todo:
        [ ] Input validation
        [-] Use recursive_dict/fix the key convention
        [x] do not rely on key ordering
        [ ] if you don't pass a list, fix value and do
            not show up in results as separate key
        [ ] parallelize

    Args:
        model (Union[AbstractModel.__class__, AbstractModel]): inherits
            from AbstractModel or is a model object itself
        data (Any): model will be called with the same data
            for each parameter combination
        params_ranges: keyword arguments of the model with lists of values

    Returns:
        dict: run label -> tuple pairs; first element of the tuple is the
        parameter dict, second the model output.
    """
    def inner(params):
        # One grid point: run the model once and return a single-entry
        # dict keyed by a unique "<name>_<uuid4>" run label.
        run_uuid = uuid.uuid4()
        if isinstance(model, AbstractModel.__class__):
            # NOTE(review): AbstractModel.__class__ is the *metaclass*, so
            # this branch matches any class object, not only AbstractModel
            # subclasses -- confirm this is intentional.
            run_label = '_'.join([model.__name__, str(run_uuid)])
            return {run_label: (params, model(**params)(data))}
        elif isinstance(model, AbstractModel):
            # Model instance: re-parametrise in place, then run.
            run_label = '_'.join([model.__class__.__name__, str(run_uuid)])
            model.reparam(**params)
            return {run_label: (params, model(data))}
        else:
            raise TypeError(
                "can only pass class inheriting from AbstractModel or an object of such class as model"
            )
    # TODO parallelize here
    from collections import ChainMap
    # Merge the per-run single-entry dicts into one flat result dict.
    return dict(
        ChainMap(*map(inner, helpers.product_of_dicts(**params_ranges))))
def calibrate_on_param_grid(model, data, target, **params_ranges):
    """Sweep *model* over a parameter grid and return the run that
    minimises *target*.

    Convenience wrapper equivalent to run_on_param_grid followed by
    calibrate_on_run_results; if you already hold sweep results, call
    calibrate_on_run_results directly.

    Todo:
        [ ] Input validation

    Args:
        model: inherits from AbstractModel (class or instance)
        data: passed unchanged to every model run
        target: callable taking (parameters, model_output) and returning
            a comparable score to minimise
        params_ranges: keyword arguments of the model with lists of values

    Returns:
        tuple: (optimal run label, (parameters, model output))

    See:
        calibrate_on_run_results
    """
    sweep_results = run_on_param_grid(model, data, **params_ranges)
    return calibrate_on_run_results(sweep_results, target)
def calibrate_on_run_results(results, target):
    """Select the run that minimises *target* from grid-sweep results.

    Use calibrate_on_param_grid instead if you have not run the sweep
    yet.

    Todo:
        [ ] Input validation

    Args:
        results: dict mapping run label -> (parameters, model_output),
            as returned by run_on_param_grid.
        target: callable scoring one run; invoked as
            target(parameters, model_output) and expected to return a
            comparable value.

    Returns:
        tuple: (best run label, (parameters, model_output) of that run).

    See:
        calibrate_on_param_grid
        run_on_param_grid
    """
    # min() evaluates the key once per label and keeps the first minimum,
    # preserving the original tie-breaking by insertion order.
    best_label = min(results, key=lambda label: target(*results[label]))
    return best_label, results.get(best_label)
def run_and_pickle(model_object: AbstractModel, model_input: Any) -> Any:
    """Run a model object on an input and persist the results.

    Creates './out/<ClassName>_<uuid4>/' containing:
      * parameters.json     -- JSON dump of model_object.model_tree
      * model_output.pickle -- pickled model output

    Args:
        model_object (AbstractModel): self-explanatory
        model_input (Any): self-explanatory

    Returns:
        Any: the model output (also pickled to disk).
        (The previous docstring claimed None, contradicting the code.)

    Raises:
        FileExistsError: if the run folder already exists (exist_ok=False;
            practically impossible thanks to the uuid4 label).
    """
    run_label = '_'.join([model_object.__class__.__name__, str(uuid.uuid4())])
    model_output = model_object(model_input)
    out_path = pathlib.Path("./out") / run_label
    # mkdir returns None; the old code pointlessly bound it to a variable.
    out_path.mkdir(parents=True, exist_ok=False)
    with (out_path / "parameters.json").open("w+") as file:
        json.dump(model_object.model_tree, file)
    with (out_path / "model_output.pickle").open("wb+") as file:
        pickle.dump(model_output, file)
    return model_output
def pickle_sweep_results(results):
    """Persist the results of run_on_param_grid to disk.

    For each run, creates './out/<run label>/' containing
    parameters.json (the parameter combination) and output.pickle
    (the pickled model output).

    Previously this was a NotImplementedError stub whose dead code also
    unpacked the results dict as flat (label, param, output) triples,
    which does not match run_on_param_grid's
    {label: (params, output)} shape; both issues are fixed here.

    Args:
        results: dict mapping run label -> (parameters, model_output),
            as returned by run_on_param_grid.

    Raises:
        FileExistsError: if a run folder already exists (exist_ok=False).
    """
    for label, (param, model_output) in results.items():
        out_path = pathlib.Path("./out") / label
        out_path.mkdir(parents=True, exist_ok=False)
        with (out_path / "parameters.json").open("w+") as file:
            json.dump(param, file)
        with (out_path / "output.pickle").open("wb+") as file:
            pickle.dump(model_output, file)
|
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
class PercentField(models.IntegerField):
    """An integer model field restricted to the inclusive range 0-100."""

    # Enforced by Django validation (forms / Model.full_clean), not by a
    # database-level CHECK constraint.
    default_validators = [MinValueValidator(0), MaxValueValidator(100)]
|
import logging
import os
from dataclasses import dataclass, field
from datetime import datetime
from shutil import copy2
from typing import Generator, List, Optional
from jinja2 import Environment, FileSystemLoader
from PIL import ImageGrab
logger = logging.getLogger(__name__)
class ReportError(Exception):
pass
def dispatch_screenshot_number(max_ss: int) -> Generator[int, None, None]:
    """Yield sequential screenshot numbers 0 .. max_ss - 1.

    Used to name screenshot files and to cap how many screenshots a run
    may take: the generator is simply exhausted after max_ss values.

    :param max_ss: maximum number of screenshots for a run
    :return: generator of ints
    """
    # range() is already an iterable; the old list(range(...)) wrapper
    # only materialized an unnecessary list.
    yield from range(max_ss)
class Status:
    """String constants accepted by Report.write_step's status argument."""
    Start: str = 'Start'
    Pass: str = 'Pass'
    Fail: str = 'Fail'
    Warn: str = 'Warn'
    Highlight: str = 'Highlight'
@dataclass
class Step:
    """A single reported action within a test."""
    description: str
    status: str                  # one of the Status constants
    screenshot: Optional[str]    # screenshot file number (stem), or None
@dataclass
class Test:
    """A single test: its ordered steps plus a rolled-up status."""
    number: int
    description: str
    # Derived from step statuses by Tests.append; None until appended.
    status: Optional[str] = field(default=None)
    steps: List[Step] = field(default_factory=list)
class Tests(list):
    """A list of Test objects that derives each test's overall status as
    it is appended:

      * any failed step                        -> 'Fail'
      * warnings but no failures               -> 'Warn'
      * passes with no failures or warnings    -> 'Pass'
    """

    def append(self, test):
        if not isinstance(test, Test):
            raise TypeError(f'{test} is not of type Test')
        step_statuses = [step.status for step in test.steps]
        # The three conditions are mutually exclusive, so an if/elif
        # chain reproduces the original independent checks exactly.
        if 'Fail' in step_statuses:
            test.status = 'Fail'
        elif 'Warn' in step_statuses:
            test.status = 'Warn'
        elif 'Pass' in step_statuses:
            test.status = 'Pass'
        super().append(test)
class Report:
    """HTML automation report builder.

    Collects tests and steps via write_step(), captures screenshots
    (through Selenium when a driver is supplied, otherwise a desktop
    grab via PIL.ImageGrab), and renders '<module_folder>/Report.html'
    from the bundled Jinja2 'report.html' template.
    """

    def __init__(self):
        self.report_folder = None       # root folder for all report runs
        self.module_folder = None       # per-run folder "<module> <timestamp>"
        self.screenshots_folder = None  # "<module_folder>/Screenshots"
        self.module_name = None
        self.release_name = None
        self.max_screenshots = 1000     # default cap, overridable in setup()
        self.selenium_driver = None
        # NOTE(review): this joins onto __file__ (a file, not its
        # directory) and relies on '..' being resolved through a
        # non-directory path component; the conventional, portable form is
        # os.path.join(os.path.dirname(__file__), 'templates') -- confirm
        # template loading works on all target platforms before changing.
        self.env = Environment(
            loader=FileSystemLoader(
                searchpath=os.path.join(__file__, '../templates')
            )
        )
        self.report_template = self.env.get_template('report.html')
        self.tests = Tests()
        self.test = None                # the Test currently being built
        self.attachments = []
        self.screenshot = None          # screenshot number of the last step
        self.status = Status            # convenience alias for callers
        # Generator dispensing unique screenshot file numbers; recreated
        # in setup() if the caller overrides max_screenshots.
        self.screenshot_num = dispatch_screenshot_number(self.max_screenshots)

    def setup(
        self,
        report_folder,
        module_name='default',
        release_name='default',
        max_screenshots=None,
        selenium_driver=None
    ):
        """
        This method should follow Report Class Initialization

        :param report_folder: Report folder (Root) Path
        :param module_name: Module/Application/Function Name
        :param release_name: Release Name
        :param max_screenshots: optional cap on screenshots for this run
        :param selenium_driver: Selenium Webdriver Instance
        :return: None
        """
        if not report_folder:
            raise ReportError('Report Folder Path Required')
        if module_name == 'default':
            import warnings
            warnings.warn('Module name set to default')
        if release_name == 'default':
            import warnings
            warnings.warn('Release name set to default')
        if max_screenshots:
            self.max_screenshots = max_screenshots
            # Fix: rebuild the number generator. The one created in
            # __init__ was bound to the default cap of 1000, so a
            # caller-supplied limit was previously ignored.
            self.screenshot_num = dispatch_screenshot_number(
                self.max_screenshots
            )
        self.selenium_driver = selenium_driver
        self.report_folder = report_folder
        self.module_name = module_name
        self.release_name = release_name
        self.module_folder = os.path.join(
            self.report_folder,
            '{name} {date}'.format(
                name=self.module_name,
                date=datetime.now().strftime('%m_%d_%Y %H_%M_%S')
            )
        )
        self.screenshots_folder = os.path.join(
            self.module_folder, 'Screenshots'
        )
        # Create the folder hierarchy: root -> module run -> screenshots.
        if not os.path.exists(self.report_folder):
            os.mkdir(self.report_folder)
        if not os.path.exists(self.module_folder):
            os.mkdir(self.module_folder)
        if not os.path.exists(self.screenshots_folder):
            os.mkdir(self.screenshots_folder)

    def __repr__(self):
        return 'Report(Module=%s, Release=%s)' % (
            self.module_name,
            self.release_name
        )

    def capture_screenshot(self):
        """
        Capture screenshot
        If selenium_driver, screenshot of the browser view port is captured

        :return: the screenshot number (file stem) as a str
        """
        current_screenshot_num = str(next(self.screenshot_num))
        current_screenshot = os.path.join(
            self.screenshots_folder,
            current_screenshot_num + '.png'
        )
        if self.selenium_driver:
            self.selenium_driver.save_screenshot(current_screenshot)
        else:
            ImageGrab.grab().save(current_screenshot)
        return current_screenshot_num

    def write_step(self, step, status, test_number=None, *, screenshot=False):
        """Record one step.

        'Start' opens a new test (flushing the previous one, if any);
        the other Status values append a Step to the open test.

        :param step: step description text
        :param status: one of the Status constants
        :param test_number: required when status == 'Start'
        :param screenshot: capture a screenshot for this step when True
        """
        if screenshot:
            self.screenshot = self.capture_screenshot()
        else:
            self.screenshot = None
        if status == 'Start':
            # NOTE(review): a falsy test_number (e.g. 0) is rejected here;
            # numbering presumably starts at 1 -- confirm.
            if not test_number:
                raise ReportError('Test Number Required with Start Status')
            if not self.test:
                self.test = Test(test_number, step)
            else:
                # Flush the previous test before opening the next one.
                self.tests.append(self.test)
                self.test = Test(test_number, step)
        elif status in ['Pass', 'Fail', 'Warn', 'Highlight']:
            if not self.test:
                raise ReportError(
                    'Start step missing, please call Start status first'
                )
            self.test.steps.append(Step(step, status, self.screenshot))
        else:
            raise ReportError('Invalid Status')

    def add_attachment(self, attachment):
        """Queue a file to be copied next to the report on generation."""
        if not os.path.isfile(attachment):
            raise ReportError(f'{attachment} is not of type file')
        self.attachments.append(attachment)

    def generate_report(self):
        """Render the HTML report and copy any queued attachments."""
        # Flush the in-progress test. Guarded so that calling
        # generate_report() before any 'Start' step no longer raises the
        # TypeError that Tests.append(None) used to produce.
        if self.test is not None:
            self.tests.append(self.test)
        if self.attachments:
            attachments_folder = os.path.join(
                self.module_folder,
                'Attachments'
            )
            if not os.path.exists(attachments_folder):
                os.mkdir(attachments_folder)
            for attachment in self.attachments:
                copy2(attachment, attachments_folder)
        total_tests = len(self.tests)
        passed_tests = len(
            [test for test in self.tests if test.status == 'Pass']
        )
        failed_tests = len(
            [test for test in self.tests if test.status == 'Fail']
        )
        warning_tests = len(
            [test for test in self.tests if test.status == 'Warn']
        )
        report_output = self.report_template.render(
            module_name=self.module_name,
            release_name=self.release_name,
            run_date=datetime.now().strftime('%m:%d:%Y'),
            total_tests=total_tests,
            passed_tests=passed_tests,
            failed_tests=failed_tests,
            warning_tests=warning_tests,
            tests=self.tests
        )
        with open(os.path.join(self.module_folder, 'Report.html'), 'w') as f:
            f.write(report_output)
|
# Module providing an in-place block transform (e.g. encryption) of files.
def change_files(filename, cryptoFn, block_size=16):
    """Transform a file in place, one block at a time.

    Reads *filename* in chunks of *block_size* bytes, passes each chunk
    through *cryptoFn* and writes the result back over the original
    bytes.

    Raises:
        ValueError: if cryptoFn returns a block whose length differs
            from its input (the in-place overwrite requires equal sizes).
    """
    with open(filename, 'r+b') as handle:
        plain_block = handle.read(block_size)
        while plain_block:
            cipher_block = cryptoFn(plain_block)
            # The cipher block must match the plain block's size exactly.
            if len(plain_block) != len(cipher_block):
                raise ValueError('O valor cifrado {} tem um tamanho diferente d ovalor plano {}'.format(len(cipher_block), len(plain_block)))
            # Rewind over the block just read, overwrite it, then read on.
            handle.seek(-len(plain_block), 1)
            handle.write(cipher_block)
            plain_block = handle.read(block_size)
# Generated by Django 3.1.2 on 2020-12-16 13:16
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace the standalone LikeModel with a ManyToMany 'likes' field
    on SuggestionModel."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('myapp', '0019_auto_20201216_1305'),
    ]

    operations = [
        migrations.AddField(
            model_name='suggestionmodel',
            name='likes',
            # NOTE(review): related_name='z' is what this migration was
            # generated with; renaming it would require a new migration.
            field=models.ManyToManyField(related_name='z', to=settings.AUTH_USER_MODEL),
        ),
        migrations.DeleteModel(
            name='LikeModel',
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 28 18:01:13 2021
@author: matisse
"""
import board, pieces, ia
# Returns a move object based on user input. Does not check validity.
def get_utilisateur_move():
    """Prompt the user for a move (e.g. 'a2a4') and return an ia.Move.

    The move's legality is NOT checked here. Re-prompts in a loop on
    malformed input; the previous implementation retried by recursing,
    which could exhaust the recursion limit on repeated bad input.
    """
    while True:
        tour_str = input("Votre tour: ")
        tour_str = tour_str.replace(" ", "")
        try:
            xfrom = lettre_to_xpos(tour_str[0:1])
            # The board is drawn "upside down", so flip the y coordinate.
            yfrom = 8 - int(tour_str[1:2])
            xto = lettre_to_xpos(tour_str[2:3])
            yto = 8 - int(tour_str[3:4])
            return ia.Move(xfrom, yfrom, xto, yto, False)
        except ValueError:
            print("Format Invalide")
# Returns a valid move based on user input.
def get_valid_utilisateur_move(board):
    """Prompt until the user enters a legal move for White.

    Returns 0 when White has no legal moves at all (checkmate or
    stalemate), otherwise the validated move object.
    """
    while True:
        move = get_utilisateur_move()
        valid = False
        possible_moves = board.get_possible_moves(pieces.Piece.BLANC)
        # No possible move: signal game over to the caller with 0.
        if (not possible_moves):
            return 0
        for possible_move in possible_moves:
            if (move.equals(possible_move)):
                # Carry over the castling flag from the engine's move.
                move.castling_move = possible_move.castling_move
                valid = True
                break
        if (valid):
            break
        else:
            print("Mouvement Invalide")
    return move
# Converts a letter (A-H) into an x position on the chess board.
def lettre_to_xpos(lettre):
    """Convert a file letter ('A'-'H', case-insensitive) to an x
    position 0-7 on the board.

    Raises:
        ValueError: for anything that is not exactly one letter in A-H
            (including the empty string).
    """
    files = "ABCDEFGH"
    lettre = lettre.upper()
    # The length check matters: '' and multi-letter substrings such as
    # 'AB' would otherwise pass the `in` test.
    if len(lettre) == 1 and lettre in files:
        return files.index(lettre)
    raise ValueError("Lettre Invalide.")
#
# Entry point: alternate user (White) and AI (Black) moves until
# checkmate or stalemate.
#
# NOTE: this rebinds the module name 'board' to a Board instance.
board = board.Board.new()
print(board.to_string())
while True:
    move = get_valid_utilisateur_move(board)
    # 0 means White has no legal move: checkmate or stalemate.
    if (move == 0):
        if (board.is_check(pieces.Piece.BLANC)):
            print("Checkmate. Les noirs ont gagnés.")
            break
        else:
            print("Pat.")
            break
    board.perform_move(move)
    print("Tour utilisateur: " + move.to_string())
    print(board.to_string())
    # AI (Black) replies; 0 likewise means no legal move.
    IA_move = ia.IA.get_ia_move(board, [])
    if (IA_move == 0):
        if (board.is_check(pieces.Piece.NOIR)):
            print("Checkmate.Les blancs ont gagnés.")
            break
        else:
            print("Pat.")
            break
    board.perform_move(IA_move)
    print("Tour IA: " + IA_move.to_string())
    print(board.to_string())
import sys
from TeproAlgo import TeproAlgo
class TeproDTO(object):
    """This class will encapsulate all data that is
    sent back and forth among NLP apps that belong
    to the TEPROLIN platform."""

    def __init__(self, text: str, conf: dict):
        """Create a DTO for *text*; *conf* maps each operation name to
        the algorithm configured to perform it."""
        # The original text to be preprocessed
        self._text = text
        # The sentence splitter will store each
        # sentence in this list, as a str
        self._sentences = []
        # This is a list of lists of TeproTok(s) with
        # all available information
        self._tokenized = []
        # The set of all performed operations on
        # this DTO object
        self._performedOps = set()
        # This is the configuration dict
        # that comes from Teprolin
        self._opsConf = conf
        # Number of processed tokens in
        # this DTO
        self._proctoks = 0

    def getProcessedTokens(self):
        """Total number of tokens added so far via addSentenceTokens()."""
        return self._proctoks

    def getConfiguredAlgoForOper(self, oper: str):
        """Return the algorithm configured for operation *oper*,
        or None when the operation is not configured."""
        if oper in self._opsConf:
            return self._opsConf[oper]
        return None

    def addPerformedOp(self, op: str):
        """Mark *op* as performed; *op* must be a valid TeproAlgo
        operation name."""
        if op in TeproAlgo.getAvailableOperations():
            self._performedOps.add(op)
        else:
            raise RuntimeError("Operation '" + op +
                               "' is not a valid TeproAlgo operation!")

    def isOpPerformed(self, op: str) -> bool:
        """True if *op* has been performed on this DTO; raises for an
        unknown operation name."""
        if op in TeproAlgo.getAvailableOperations():
            return op in self._performedOps
        else:
            raise RuntimeError("Operation '" + op +
                               "' is not a valid TeproAlgo operation!")

    def setText(self, text: str):
        self._text = text

    def getText(self) -> str:
        return self._text

    def getNumberOfSentences(self) -> int:
        return len(self._sentences)

    def getSentenceString(self, i: int):
        """Get the i-th sentence (str), or None if i is out of range."""
        if i >= 0 and i < len(self._sentences):
            return self._sentences[i]
        return None

    def getSentenceTokens(self, i: int):
        """Get the i-th sentence as a list of TeproTok(s), or None if
        i is out of range."""
        if i >= 0 and i < len(self._tokenized):
            return self._tokenized[i]
        return None

    def addSentenceTokens(self, tokens: list):
        """Adds a new list of TeproTok(s) to the internal
        list of tokenized sentences."""
        self._tokenized.append(tokens)
        self._proctoks += len(tokens)

    def addSentenceString(self, sentence: str):
        """Adds a str sentence to the list of internal
        list of sentences."""
        self._sentences.append(sentence)

    def dumpConllX(self, outfile=sys.stdout):
        """Prints the CoNLL-X format in outfile,
        for the current DTO. Sentences are separated by a blank line."""
        for ts in self._tokenized:
            for tt in ts:
                print(tt.getConllXRecord(), file=outfile)
            print(file=outfile, flush=True)

    def jsonDict(self):
        """Returns the dict representation of this DTO
        for JSON encoding.

        NOTE(review): 'tokenized' holds TeproTok objects; presumably a
        custom JSON encoder handles them downstream -- confirm.
        """
        return {
            'text': self._text,
            'sentences': self._sentences,
            'tokenized': self._tokenized
        }

    def alignSentences(self, fromSent: list, sid: int):
        """Token-align *fromSent* against the stored tokenized sentence
        with index *sid*.

        Returns a list of (i, j) pairs mapping fromSent positions to
        stored-sentence positions, or None when the streams cannot be
        re-synchronized (or sid is out of range). On divergence, a
        10x10 token look-ahead window is scanned for the next matching
        pair, and all skipped source/target positions are cross-linked
        pairwise.
        """
        if sid < len(self._tokenized):
            toSent = self._tokenized[sid]
            # Indexes into fromSent
            i = 0
            # Indexes into toSent
            j = 0
            alignment = []
            while i < len(fromSent) and j < len(toSent):
                fromTok = fromSent[i]
                toTok = toSent[j]
                if fromTok == toTok:
                    # Sentences are in sync
                    alignment.append((i, j))
                    # And advance one position
                    i += 1
                    j += 1
                else:
                    oi = i
                    oj = j
                    aFound = False
                    # NOTE: the nested loops deliberately reuse i and j so
                    # that the outer while resumes from the resync point.
                    for i in range(oi, oi + 10):
                        if i >= len(fromSent):
                            break
                        fromTok = fromSent[i]
                        for j in range(oj, oj + 10):
                            if j >= len(toSent):
                                break
                            toTok = toSent[j]
                            if fromTok == toTok:
                                # Add all sources indexes which do
                                # not match with all target indexes which
                                # do not match.
                                for ii in range(oi, i):
                                    for jj in range(oj, j):
                                        alignment.append((ii, jj))
                                # Sentences are in sync
                                alignment.append((i, j))
                                # And advance one position
                                i += 1
                                j += 1
                                aFound = True
                                break
                        # end for y
                        if aFound:
                            break
                    # end for x
                    if not aFound:
                        return None
                # end else (alignment out of sync)
            # end while
            return alignment
        else:
            return None

    def copyTokenAnnotation(self, fromSent: list, sid: int, align: list, oper: str):
        """Copy the annotation corresponding to oper from fromSent into
        the sentence with sid in self._tokenized.
        Use the align list (pairs of fromSent index -> target index, as
        produced by alignSentences) to map between the two sentences."""
        if align is None:
            return
        if sid < len(self._tokenized):
            toSent = self._tokenized[sid]
            for (i, j) in align:
                fromTok = fromSent[i]
                toTok = toSent[j]
                toTok.copyFrom(fromTok, align, oper)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.