import math
import numpy as np
import tributary.streaming as ts
rng = range(-10, 11)
def foo_range():
for _ in rng:
yield (_, 1)
pos_rng = range(1, 11)
def foo_pos():
for _ in pos_rng:
yield (_, 1)
neg_rng = range(-10, 0)
def foo_neg():
for _ in neg_rng:
yield (_, 1)
zero_one_rng = np.arange(0, 1, 0.05) # [0,1)
def foo_zero_one():
for _ in zero_one_rng:
yield (_, 0.05)
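# Each generator yields dual numbers as (value, seed) tuples, where the second
# element is the derivative seed propagated through the chain rule. The integer
# ranges use a seed of 1 and the [0, 1) range a seed of 0.05, which is why the
# expected derivatives below are scaled by those factors.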
class TestDualOps:
def test_Noop(self):
"""
No-op
"""
t = ts.Timer(foo_range, count=len(rng), use_dual=True)
out = ts.Noop(t)
assert ts.run(out) == list(foo_range())
def test_Negate(self):
"""
f = -x
f' = -1
"""
expected_pos = [(-1 * x, -1) for x in pos_rng]
expected_neg = [(-1 * x, -1) for x in neg_rng]
t_pos = ts.Timer(foo_pos, count=len(pos_rng), use_dual=True)
t_neg = ts.Timer(foo_neg, count=len(neg_rng), use_dual=True)
out_pos = ts.Negate(t_pos)
out_neg = ts.Negate(t_neg)
assert ts.run(out_pos) == expected_pos
assert ts.run(out_neg) == expected_neg
def test_Invert(self):
"""
f = 1/x
f' = -x^-2
"""
expected_pos = [(1 / x, -1 * x ** (-2)) for x in pos_rng]
expected_neg = [(1 / x, -1 * x ** (-2)) for x in neg_rng]
t_pos = ts.Timer(foo_pos, count=len(pos_rng), use_dual=True)
t_neg = ts.Timer(foo_neg, count=len(neg_rng), use_dual=True)
out_pos = ts.Invert(t_pos)
out_neg = ts.Invert(t_neg)
assert ts.run(out_pos) == expected_pos
assert ts.run(out_neg) == expected_neg
def test_Add(self):
"""
f = x+x
f' = 2
"""
expected = [(x + x, 2) for x in rng]
t = ts.Timer(foo_range, count=len(rng), use_dual=True)
out = ts.Add(t, t)
assert ts.run(out) == expected
def test_Sub(self):
"""
f = x-x
f' = 0
"""
expected = [(x - x, 0) for x in rng]
t = ts.Timer(foo_range, count=len(rng), use_dual=True)
out = ts.Sub(t, t)
assert ts.run(out) == expected
def test_Mult(self):
"""
f = x*x
f' = 2x
"""
expected = [(x * x, 2 * x) for x in rng]
t = ts.Timer(foo_range, count=len(rng), use_dual=True)
out = ts.Mult(t, t)
assert ts.run(out) == expected
def test_Div(self):
"""
f = x/x
f' = 0
"""
expected = [(1, 0) for x in pos_rng]
t = ts.Timer(foo_pos, count=len(pos_rng), use_dual=True)
out = ts.Div(t, t)
assert ts.run(out) == expected
def test_RDiv(self):
"""
f = x/x
f' = 0
"""
expected = [(1, 0) for x in pos_rng]
t = ts.Timer(foo_pos, count=len(pos_rng), use_dual=True)
out = ts.RDiv(t, t)
assert ts.run(out) == expected
def test_Pow(self):
"""
f = x^2
f' = 2x
"""
expected = [(x ** 2, 2 * x) for x in rng]
t = ts.Timer(foo_range, count=len(rng), use_dual=True)
out = ts.Pow(t, 2)
assert ts.run(out) == expected
def test_Sum(self):
"""
f = x+x+2
f' = 2
"""
expected = [(x + x + 2, 2) for x in rng]
t = ts.Timer(foo_range, count=len(rng), use_dual=True)
t2 = ts.Timer(foo_range, count=len(rng), use_dual=True)
c = ts.Const((2, 0), use_dual=True)
out = ts.Sum(t, t2, c)
assert ts.run(out) == expected
def test_Average(self):
"""
f = (x + x + 1)/3
f' = 2/3
"""
expected = [((x + x + 1) / 3, 2 / 3) for x in rng]
t = ts.Timer(foo_range, count=len(rng), use_dual=True)
t2 = ts.Timer(foo_range, count=len(rng), use_dual=True)
c = ts.Const((1, 0), use_dual=True)
out = ts.Average(t, t2, c)
assert ts.run(out) == expected
def test_Not(self):
t = ts.Timer(foo_range, count=2, use_dual=True)
out = ts.Not(t)
assert ts.run(out) == [False, False]
def test_And(self):
t = ts.Timer(foo_range, count=2, use_dual=True)
out = ts.And(t, t)
assert ts.run(out) == [(-10, 1), (-9, 1)]
def test_Or(self):
t = ts.Timer(foo_range, count=2, use_dual=True)
out = ts.Or(t, t)
assert ts.run(out) == [(-10, 1), (-9, 1)]
def test_Equal(self):
t = ts.Timer(foo_range, count=2, use_dual=True)
out = ts.Equal(t, t)
assert ts.run(out) == [True, True]
def test_NotEqual(self):
t = ts.Timer(foo_range, count=2, use_dual=True)
c = ts.Const((-10, 1), use_dual=True)
out = ts.NotEqual(t, c)
assert ts.run(out) == [False, True]
def test_Lt(self):
t = ts.Timer(foo_range, count=2, use_dual=True)
c = ts.Const((1, 1), use_dual=True)
out = ts.Lt(c, t)
assert ts.run(out) == [False, False]
def test_Le(self):
t = ts.Timer(foo_range, count=2, use_dual=True)
c = ts.Const((-9, 1), use_dual=True)
out = ts.Le(c, t)
assert ts.run(out) == [False, True]
def test_Gt(self):
t = ts.Timer(foo_range, count=2, use_dual=True)
c = ts.Const((-9, 1), use_dual=True)
out = ts.Gt(t, c)
assert ts.run(out) == [False, False]
def test_Ge(self):
t = ts.Timer(foo_range, count=2, use_dual=True)
c = ts.Const((-9, 1), use_dual=True)
out = ts.Ge(t, c)
assert ts.run(out) == [False, True]
def test_Log(self):
"""
f = ln(x)
f' = 1/x
"""
expected = [(math.log(x), 1 / x) for x in pos_rng]
t = ts.Timer(foo_pos, count=len(pos_rng), use_dual=True)
out = ts.Log(t)
assert ts.run(out) == expected
def test_Sin(self):
"""
f = sin(x)
f' = cos(x)
"""
expected = [(math.sin(x), math.cos(x)) for x in rng]
t = ts.Timer(foo_range, count=len(rng), use_dual=True)
out = ts.Sin(t)
assert ts.run(out) == expected
def test_Cos(self):
"""
f = cos(x)
f' = -sin(x)
"""
expected = [(math.cos(x), -1 * math.sin(x)) for x in rng]
t = ts.Timer(foo_range, count=len(rng), use_dual=True)
out = ts.Cos(t)
assert ts.run(out) == expected
def test_Tan(self):
"""
f = tan(x)
f' = (1/cos(x))^2
"""
expected = [(math.tan(x), (1 / math.cos(x)) ** 2) for x in rng]
t = ts.Timer(foo_range, count=len(rng), use_dual=True)
out = ts.Tan(t)
assert ts.run(out) == expected
def test_Arcsin(self):
"""
f = arcsin(x)
f' = 1/sqrt(1-x^2)
"""
expected = [(math.asin(x), 0.05 / math.sqrt(1 - x ** 2)) for x in zero_one_rng]
t = ts.Timer(foo_zero_one, count=len(zero_one_rng), use_dual=True)
out = ts.Arcsin(t)
assert ts.run(out) == expected
def test_Arccos(self):
"""
f = arccos(x)
f' = -1/sqrt(1-x^2)
"""
expected = [(math.acos(x), -0.05 / math.sqrt(1 - x ** 2)) for x in zero_one_rng]
t = ts.Timer(foo_zero_one, count=len(zero_one_rng), use_dual=True)
out = ts.Arccos(t)
assert ts.run(out) == expected
def test_Arctan(self):
"""
f = arctan(x)
f' = 1/(1+x^2)
"""
expected = [(math.atan(x), 0.05 / (1 + x ** 2)) for x in zero_one_rng]
t = ts.Timer(foo_zero_one, count=len(zero_one_rng), use_dual=True)
out = ts.Arctan(t)
assert ts.run(out) == expected
def test_Sqrt(self):
"""
f = sqrt(x)
f' = 0.5/sqrt(x)
"""
expected = [(math.sqrt(x), 0.5 / math.sqrt(x)) for x in pos_rng]
t = ts.Timer(foo_pos, count=len(pos_rng), use_dual=True)
out = ts.Sqrt(t)
assert ts.run(out) == expected
def test_Abs(self):
"""
f = abs(x)
f' = x/abs(x)
"""
expected = [(abs(x), x / abs(x)) for x in neg_rng]
t = ts.Timer(foo_neg, count=len(neg_rng), use_dual=True)
out = ts.Abs(t)
assert ts.run(out) == expected
def test_Exp(self):
"""
f = exp(x)
f' = exp(x)
"""
expected = [(math.exp(x), math.exp(x)) for x in rng]
t = ts.Timer(foo_range, count=len(rng), use_dual=True)
out = ts.Exp(t)
assert ts.run(out) == expected
def test_Erf(self):
"""
f = erf(x)
f' = (2/sqrt(pi))*e^(-x^2)
"""
expected = [
(math.erf(x), (2 / math.sqrt(math.pi)) * math.exp(-(x ** 2))) for x in rng
]
t = ts.Timer(foo_range, count=len(rng), use_dual=True)
out = ts.Erf(t)
assert ts.run(out) == expected
def test_Floor(self):
expected = [(math.floor(x), math.floor(0.05)) for x in zero_one_rng]
t = ts.Timer(foo_zero_one, count=len(zero_one_rng), use_dual=True)
out = ts.Floor(t)
assert ts.run(out) == expected
def test_Ceil(self):
expected = [(math.ceil(x), math.ceil(0.05)) for x in zero_one_rng]
t = ts.Timer(foo_zero_one, count=len(zero_one_rng), use_dual=True)
out = ts.Ceil(t)
assert ts.run(out) == expected
def test_Round(self):
expected = [(round(x, ndigits=1), round(0.05, ndigits=1)) for x in zero_one_rng]
t = ts.Timer(foo_zero_one, count=len(zero_one_rng), use_dual=True)
out = ts.Round(t, 1)
assert ts.run(out) == expected
def test_Int(self):
t = ts.Timer(foo_range, count=len(rng), use_dual=True)
out = ts.Int(t)
assert ts.run(out) == list(rng)
def test_Float(self):
t = ts.Timer(foo_range, count=len(rng), use_dual=True)
out = ts.Float(t)
assert ts.run(out) == [float(x) for x in rng]
def test_Bool(self):
t = ts.Timer(foo_range, count=2, use_dual=True)
out = ts.Bool(t)
assert ts.run(out) == [True, True]
def test_Str(self):
t = ts.Timer(foo_range, count=1, use_dual=True)
out = ts.Str(t)
assert ts.run(out) == ["-10+1ε"]
def test_Len(self):
t = ts.Timer(foo_range, count=1, use_dual=True)
out = ts.Len(t)
assert ts.run(out) == [2]
|
import sys
import logging
import re
from typing import Text, Dict, Any
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from py2neo import Graph
from markdownify import markdownify as md
logger = logging.getLogger(__name__)
p = 'data/medical/lookup/Diseases.txt'
with open(p, 'r', encoding='UTF-8') as f:
    disease_names = [line.strip() for line in f]
# default neo4j account should be user="neo4j", password="neo4j"
try:
graph = Graph(host="127.0.0.1", http_port=7474, user="neo4j", password="myneo")
except Exception as e:
logger.error('Neo4j connection error: {}, check your Neo4j'.format(e))
sys.exit(-1)
else:
logger.debug('Neo4j Database connected successfully.')
def retrieve_disease_name(name):
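    """Fuzzy-match a (possibly partial) disease name against the lookup list.

    Builds a regex that allows arbitrary characters between the characters of the
    input (e.g. "头痛" becomes ".*头.*痛.*") and returns every known disease name
    that matches.
    """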
names = []
name = '.*' + '.*'.join(list(name)) + '.*'
pattern = re.compile(name)
for i in disease_names:
candidate = pattern.search(i)
if candidate:
names.append(candidate.group())
return names
def make_button(title, payload):
return {'title': title, 'payload': payload}
class ActionEcho(Action):
def name(self) -> Text:
return "action_echo"
def run(self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]):
user_say = "You said: " + tracker.latest_message['text']
dispatcher.utter_message(user_say)
return []
class ActionFirst(Action):
def name(self) -> Text:
return "action_first"
def run(self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]):
# dispatcher.utter_template("utter_first", tracker)
# print('ActionFirst'*10)
dispatcher.utter_message(template="utter_first")
# dispatcher.utter_template("utter_howcanhelp", tracker)
# print('dispatcher.utter_message')
dispatcher.utter_message(md("您可以这样向我提问: <br/>头痛怎么办<br/>\
什么人容易头痛<br/>\
头痛吃什么药<br/>\
头痛能治吗<br/>\
头痛属于什么科<br/>\
头孢地尼分散片用途<br/>\
如何防止头痛<br/>\
头痛要治多久<br/>\
糖尿病有什么并发症<br/>\
糖尿病有什么症状"))
return []
class ActionDonKnow(Action):
def name(self) -> Text:
return "action_donknow"
def run(self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]):
# dispatcher.utter_template("utter_donknow", tracker)
dispatcher.utter_message(template="utter_donknow")
# dispatcher.utter_template("utter_howcanhelp", tracker)
dispatcher.utter_message(md("您可以这样向我提问: <br/>头痛怎么办<br/>\
什么人容易头痛<br/>\
头痛吃什么药<br/>\
头痛能治吗<br/>\
头痛属于什么科<br/>\
头孢地尼分散片用途<br/>\
如何防止头痛<br/>\
头痛要治多久<br/>\
糖尿病有什么并发症<br/>\
糖尿病有什么症状"))
return []
class ActionSearchTreat(Action):
def name(self) -> Text:
return "action_search_treat"
def run(self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]):
disease = tracker.get_slot("disease")
pre_disease = tracker.get_slot("sure")
print("pre_disease::::" + str(pre_disease))
possible_diseases = retrieve_disease_name(disease)
# if len(possible_diseases) == 1 or sure == "true":
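        # The "sure" slot carries the disease name the user confirmed via a button;
        # proceed when it matches, or when the fuzzy lookup is unambiguous.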
if disease == pre_disease or len(possible_diseases) == 1:
a = graph.run("match (a:Disease{name: {disease}}) return a", disease=disease).data()[0]['a']
if "intro" in a:
intro = a['intro']
template = "{0}的简介:{1}"
retmsg = template.format(disease, intro)
else:
retmsg = disease + "暂无简介"
dispatcher.utter_message(retmsg)
if "treat" in a:
treat = a['treat']
template = "{0}的治疗方式有:{1}"
retmsg = template.format(disease, "、".join(treat))
else:
retmsg = disease + "暂无常见治疗方式"
dispatcher.utter_message(retmsg)
elif len(possible_diseases) > 1:
buttons = []
for d in possible_diseases:
buttons.append(make_button(d, '/search_treat{{"disease":"{0}", "sure":"{1}"}}'.format(d, d)))
dispatcher.utter_button_message("请点击选择想查询的疾病,若没有想要的,请忽略此消息", buttons)
else:
dispatcher.utter_message("知识库中暂无与 {0} 疾病相关的记录".format(disease))
return []
class ActionSearchFood(Action):
def name(self) -> Text:
return "action_search_food"
def run(self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]):
disease = tracker.get_slot("disease")
pre_disease = tracker.get_slot("sure")
print("pre_disease::::" + str(pre_disease))
possible_diseases = retrieve_disease_name(disease)
""" search_food db action here """
food = dict()
if disease == pre_disease or len(possible_diseases) == 1:
m = [x['m.name'] for x in graph.run("match (a:Disease{name: {disease}})-[:can_eat]->(m:Food) return m.name",
disease=disease).data()]
food['can_eat'] = "、".join(m) if m else "暂无记录"
m = [x['m.name'] for x in graph.run("match (a:Disease{name: {disease}})-[:not_eat]->(m:Food) return m.name",
disease=disease).data()]
food['not_eat'] = "、".join(m) if m else "暂无记录"
retmsg = "在患 {0} 期间,可以食用:{1},\n但不推荐食用:{2}".\
format(disease, food['can_eat'], food['not_eat'])
dispatcher.utter_message(retmsg)
elif len(possible_diseases) > 1:
buttons = []
for d in possible_diseases:
buttons.append(make_button(d, '/search_food{{"disease":"{0}", "sure":"{1}"}}'.format(d, d)))
dispatcher.utter_button_message("请点击选择想查询的疾病,若没有想要的,请忽略此消息", buttons)
else:
dispatcher.utter_message("知识库中暂无与 {0} 相关的饮食记录".format(disease))
return []
class ActionSearchSymptom(Action):
def name(self) -> Text:
return "action_search_symptom"
def run(self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]):
disease = tracker.get_slot("disease")
pre_disease = tracker.get_slot("sure")
print("pre_disease::::" + str(pre_disease))
possible_diseases = retrieve_disease_name(disease)
if disease == pre_disease or len(possible_diseases) == 1:
a = [x['s.name'] for x in graph.run("MATCH (p:Disease{name: {disease}})-[r:has_symptom]->\
(s:Symptom) RETURN s.name", disease=disease).data()]
template = "{0}的症状可能有:{1}"
retmsg = template.format(disease, "、".join(a))
dispatcher.utter_message(retmsg)
elif len(possible_diseases) > 1:
buttons = []
for d in possible_diseases:
buttons.append(make_button(d, '/search_symptom{{"disease":"{0}", "sure":"{1}"}}'.format(d, d)))
dispatcher.utter_button_message("请点击选择想查询的疾病,若没有想要的,请忽略此消息", buttons)
else:
dispatcher.utter_message("知识库中暂无与 {0} 相关的症状记录".format(disease))
return []
class ActionSearchCause(Action):
def name(self) -> Text:
return "action_search_cause"
def run(self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]):
disease = tracker.get_slot("disease")
pre_disease = tracker.get_slot("sure")
print("pre_disease::::" + str(pre_disease))
possible_diseases = retrieve_disease_name(disease)
if disease == pre_disease or len(possible_diseases) == 1:
a = graph.run("match (a:Disease{name: {disease}}) return a.cause", disease=disease).data()[0]['a.cause']
if "treat" in a:
treat = a['treat']
template = "{0}的治疗方式有:{1}"
retmsg = template.format(disease, "、".join(treat))
else:
retmsg = disease + "暂无该疾病的病因的记录"
dispatcher.utter_message(retmsg)
elif len(possible_diseases) > 1:
buttons = []
for d in possible_diseases:
buttons.append(make_button(d, '/search_cause{{"disease":"{0}", "sure":"{1}"}}'.format(d, d)))
dispatcher.utter_button_message("请点击选择想查询的疾病,若没有想要的,请忽略此消息", buttons)
else:
dispatcher.utter_message("知识库中暂无与 {0} 相关的原因记录".format(disease))
return []
class ActionSearchNeopathy(Action):
def name(self) -> Text:
return "action_search_neopathy"
def run(self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]):
disease = tracker.get_slot("disease")
pre_disease = tracker.get_slot("sure")
print("pre_disease::::" + str(pre_disease))
possible_diseases = retrieve_disease_name(disease)
if disease == pre_disease or len(possible_diseases) == 1:
a = [x['s.name'] for x in graph.run("MATCH (p:Disease{name: {disease}})-[r:has_neopathy]->\
(s:Disease) RETURN s.name", disease=disease).data()]
template = "{0}的并发症可能有:{1}"
retmsg = template.format(disease, "、".join(a))
dispatcher.utter_message(retmsg)
elif len(possible_diseases) > 1:
buttons = []
for d in possible_diseases:
buttons.append(make_button(d, '/search_neopathy{{"disease":"{0}", "sure":"{1}"}}'.format(d, d)))
dispatcher.utter_button_message("请点击选择想查询的疾病,若没有想要的,请忽略此消息", buttons)
else:
dispatcher.utter_message("知识库中暂无与 {0} 相关的并发症记录".format(disease))
return []
class ActionSearchDrug(Action):
def name(self) -> Text:
return "action_search_drug"
def run(self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]):
disease = tracker.get_slot("disease")
pre_disease = tracker.get_slot("sure")
print("pre_disease::::" + str(pre_disease))
possible_diseases = retrieve_disease_name(disease)
if disease == pre_disease or len(possible_diseases) == 1:
a = [x['s.name'] for x in graph.run("MATCH (p:Disease{name: {disease}})-[r:can_use_drug]->\
(s:Drug) RETURN s.name", disease=disease).data()]
if a:
template = "在患 {0} 时,可能会用药:{1}"
retmsg = template.format(disease, "、".join(a))
else:
retmsg = "无 %s 的可能用药记录" % disease
dispatcher.utter_message(retmsg)
elif len(possible_diseases) > 1:
buttons = []
for d in possible_diseases:
buttons.append(make_button(d, '/search_drug{{"disease":"{0}", "sure":"{1}"}}'.format(d, d)))
dispatcher.utter_button_message("请点击选择想查询的疾病,若没有想要的,请忽略此消息", buttons)
else:
dispatcher.utter_message("知识库中暂无与 {0} 相关的用药记录".format(disease))
return []
class ActionSearchPrevention(Action):
def name(self) -> Text:
return "action_search_prevention"
def run(self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]):
disease = tracker.get_slot("disease")
pre_disease = tracker.get_slot("sure")
print("pre_disease::::" + str(pre_disease))
possible_diseases = retrieve_disease_name(disease)
if disease == pre_disease or len(possible_diseases) == 1:
a = graph.run("match (a:Disease{name: {disease}}) return a.prevent", disease=disease).data()[0]
if 'a.prevent' in a:
prevent = a['a.prevent']
template = "以下是有关预防 {0} 的知识:{1}"
retmsg = template.format(disease, md(prevent.replace('\n', '<br/>')))
else:
retmsg = disease + "暂无常见预防方法"
dispatcher.utter_message(retmsg)
elif len(possible_diseases) > 1:
buttons = []
for d in possible_diseases:
buttons.append(make_button(d, '/search_prevention{{"disease":"{0}", "sure":"{1}"}}'.format(d, d)))
dispatcher.utter_button_message("请点击选择想查询的疾病,若没有想要的,请忽略此消息", buttons)
else:
dispatcher.utter_message("知识库中暂无与 {0} 相关的预防记录".format(disease))
return []
class ActionSearchDrugFunc(Action):
def name(self) -> Text:
return "action_search_drug_func"
def run(self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]):
drug = tracker.get_slot("drug")
if drug:
a = [x['n.name'] for x in graph.run("match (n:Disease)-[:can_use_drug]->(a:Drug{name: {drug}})"
"return n.name", drug=drug).data()]
template = "{0} 可用于治疗疾病:{1}"
retmsg = template.format(drug, "、".join(a))
else:
            retmsg = str(drug) + " 在疾病库中暂无可治疗的疾病"
dispatcher.utter_message(retmsg)
return []
class ActionSearchDiseaseTreatTime(Action):
def name(self) -> Text:
return "action_search_disease_treat_time" # treat_period
def run(self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]):
disease = tracker.get_slot("disease")
pre_disease = tracker.get_slot("sure")
print("pre_disease::::" + str(pre_disease))
possible_diseases = retrieve_disease_name(disease)
if disease == pre_disease or len(possible_diseases) == 1:
a = graph.run("match (a:Disease{name: {disease}}) return a", disease=disease).data()[0]['a']
if "treat_period" in a:
treat_period = a['treat_period']
template = "{0}需要的治疗时间:{1}"
retmsg = template.format(disease, treat_period)
else:
retmsg = disease + "暂无治疗时间的记录"
dispatcher.utter_message(retmsg)
elif len(possible_diseases) > 1:
buttons = []
for d in possible_diseases:
buttons.append(make_button(d, '/search_disease_treat_time{{"disease":"{0}", "sure":"{1}"}}'.format(d, d)))
dispatcher.utter_button_message("请点击选择想查询的疾病,若没有想要的,请忽略此消息", buttons)
else:
dispatcher.utter_message("知识库中暂无与 {0} 相关的治疗时间记录".format(disease))
return []
class ActionSearchEasyGet(Action):
def name(self) -> Text:
return "action_search_easy_get"
def run(self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]):
disease = tracker.get_slot("disease")
pre_disease = tracker.get_slot("sure")
print("pre_disease::::" + str(pre_disease))
possible_diseases = retrieve_disease_name(disease)
if disease == pre_disease or len(possible_diseases) == 1:
a = graph.run("match (a:Disease{name: {disease}}) return a", disease=disease).data()[0]['a']
easy_get = a['easy_get']
template = "{0}的易感人群是:{1}"
retmsg = template.format(disease, easy_get)
dispatcher.utter_message(retmsg)
elif len(possible_diseases) > 1:
buttons = []
for d in possible_diseases:
buttons.append(make_button(d, '/search_easy_get{{"disease":"{0}", "sure":"{1}"}}'.format(d, d)))
dispatcher.utter_button_message("请点击选择想查询的疾病,若没有想要的,请忽略此消息", buttons)
else:
dispatcher.utter_message("知识库中暂无与 {0} 相关的易感人群记录".format(disease))
return []
class ActionSearchDiseaseDept(Action):
def name(self) -> Text:
return "action_search_disease_dept"
def run(self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]):
disease = tracker.get_slot("disease")
pre_disease = tracker.get_slot("sure")
print("pre_disease::::" + str(pre_disease))
possible_diseases = retrieve_disease_name(disease)
if disease == pre_disease or len(possible_diseases) == 1:
a = graph.run("match (a:Disease{name: {disease}})-[:belongs_to]->(s:Department) return s.name",
disease=disease).data()[0]['s.name']
template = "{0} 属于 {1}"
retmsg = template.format(disease, a)
dispatcher.utter_message(retmsg)
elif len(possible_diseases) > 1:
buttons = []
for d in possible_diseases:
buttons.append(make_button(d, '/search_disease_dept{{"disease":"{0}", "sure":"{1}"}}'.format(d, d)))
dispatcher.utter_button_message("请点击选择想查询的疾病,若没有想要的,请忽略此消息", buttons)
else:
dispatcher.utter_message("知识库中暂无与 {0} 疾病相关的科室记录".format(disease))
return []
|
#!/usr/bin/env python3
# import sys
import dpkt
import os
from pyfiglet import Figlet
import time
#Simple ASCII Art Banner Display
def banner_message(message):
if message == "start":
f = Figlet(font='slant')
return(f.renderText("PCAParser"))
#Live Capture function
#Live Capture was the trickiest for a while, as you have the option upon installing Wireshark to allow non-sudo privileged users to generate live captures. This is a mistake if you do;
#as the old adage goes, deny by default. This is also the first instance in the code that asks about display/capture filters, one of the biggest headaches of this code executing fluidly.
def live_capture(param):
option = input_check("Did you want to scan the live capture for anything specific? If so, please refer to our Github for DISPLAY FILTER SYNTAX. Type Y or N " , "Invalid input: Expected Y or N" , validate_list, ["Y", "y", "N", "N"])
if option in ["Y", "y"]:
search = input("What display filter option(s): ")
#Tuples are immutable, so need to double convert to add the search filter
param_list = list(param)
param_list.append(search)
new_param = tuple(param_list)
print(new_param)
        #Simple .format rendered os.system call function. Need the * as we are injecting a tuple into the formatted string.
return(os.system("tshark -i {} -w {}.pcapng {} -f {} ".format(*new_param)))
else:
return(os.system("tshark -i {} -w {}.pcapng {} ".format(*param)))
#PCAP > CSV conversion function
#More filter options live here; these are considered display filters for formatting purposes.
#We just included as many as possible in the CSV file. Of course this can be edited in your final version.
def convert(param2):
return(os.system ('tshark -r {} -T fields -E header=y -E separator=, -E quote=d -E occurrence=f -e ip.version -e ip.hdr_len -e ip.tos -e ip.id -e ip.flags -e ip.flags.rb -e ip.flags.df -e ip.flags.mf -e ip.frag_offset -e ip.ttl -e ip.proto -e ip.checksum -e ip.src -e ip.dst -e ip.len -e ip.dsfield -e tcp.srcport -e tcp.dstport -e tcp.seq -e tcp.ack -e tcp.len -e tcp.hdr_len -e tcp.flags -e tcp.flags.fin -e tcp.flags.syn -e tcp.flags.reset -e tcp.flags.push -e tcp.flags.ack -e tcp.flags.urg -e tcp.flags.cwr -e tcp.window_size -e tcp.checksum -e tcp.urgent_pointer -e tcp.options.mss_val > {}.csv'.format(*param2)))
#Seeker function
#Listing and formatting every potential filter option would be nearly impossible, so we stripped it way down.
#I may have to create my own helper guide in the near future to help anyone trying to master Wireshark.
def seeker(filename):
print("So, this is a bit advanced, so we don't have all 100 or so Display\
Filters ready show, so we have displayed only options as follows:\
\nip.<addr/dst/src> (for searching for a specific IP Address)\nipv6.\
<addr/dst/src> (ipv6 IP Address)\n<udp/tcp>.<port/dst/src> (Searching for TCP/UPD port numbers\n\
So if you want a better break down of how to use Search Filter options, please refer to our additional documentation\
available on our github.")
search = str(input("With that out of the way, please enter your desired Display Filter search:"))
destination = "/tmp/" + str(input("We are going to put this into your tmp folder, please enter file name:")) + ".pcapng"
return(os.system('tshark -r {} -Y "{}" -w {}'.format(filename, search, destination)))
#Very simple counter function to display upon entering any PCAP
def counters(filename):
counter=0
ipcounter=0
tcpcounter=0
udpcounter=0
#such a simple stupid method for checking pcap vs pcapng formats, couldn't find a way to check file type in a clean manner.
if filename[-2:] == "ng":
x = dpkt.pcapng.Reader(open(filename, 'rb'))
else:
x = dpkt.pcap.Reader(open(filename, 'rb'))
for ts, pkt in x:
counter+=1
eth=dpkt.ethernet.Ethernet(pkt)
if eth.type!=dpkt.ethernet.ETH_TYPE_IP:
continue
ip=eth.data
ipcounter+=1
if ip.p==dpkt.ip.IP_PROTO_TCP:
tcpcounter+=1
if ip.p==dpkt.ip.IP_PROTO_UDP:
udpcounter+=1
return ("Rough amount of data in your current PCAP:\n\
Total number of packets in the pcap file: {}\n\
Total number of ip packets: {}\n\
Total number of tcp packets: {}\n\
Total number of udp packets: {}").format(counter,ipcounter,tcpcounter,udpcounter)
#Exporting was very straight forward
def export(param4):
return(os.system("tshark -r {} --export-objects {},{}".format(*param4)))
#This input checker is thanks to Eddie Qi and Thaddeus Pearson
#https://github.com/thaddeuspearson/Supersploit
def input_check(usr_prompt, error, is_valid, valid_list):
user_input = input(usr_prompt)
while not is_valid(user_input, valid_list):
print(error)
user_input = input(usr_prompt)
return user_input
# input_check helper function.
def validate_list(item, lst):
if item not in lst:
return False
return True
# input_check helper function.
def validate_number(item, lst):
if item.isdigit() and int(item) <= len(lst):
return True
return False
#Main Function
def main():
#Print ASCII Text
print(banner_message("start"))
print("Welcome to PCAParse, your handy dandy swiss army knife to help you easily parse PCAP files.")
print("Are you working on a live capture or existing PCAP?")
#Call to Live, or continue to Existing
choice = input_check("Type L for live Capture OR E for Existing PCAP ", "Invalid input. Expected L or M." , validate_list, ["E", "e", "L", "l"])
if choice in ["L", "l"]:
print("Live Capture may need to be ran as sudo/root user. Check with Wireshark Admin if unsure, or wait for program to crash.")
#Sleeper function to delay code to allow user to read prior prompt
time.sleep(2)
#Displays network interfaces on current computer
os.system("tshark -D")
cap_int = str(input("Which interface are you looking to scan? (name or number)" ))
out_file = "/tmp/" + str(input("Where do you want this file saved?" ))
        #Since you don't have to be sudo to run this portion, we create all new files in the /tmp directory
print("Your file will be saved as " + out_file)
print("Are you looking to save a particular number of packets or a timed capture?")
choices = input_check("Type c for Count or a for Time: ", "Invalid input. Expected c or a." , validate_list, ["a", "A", "c", "C"])
if choices in ["a", "A"]:
option = int(input("How long do you want to scan for (in seconds): "))
            #Have to make sure the syntax is correct for the prior os.system functions
packet_count ="-a duration:%d" % (option)
else:
packet_count = "-c " + (input("How many packets do you want captured: "))
param = cap_int, out_file, packet_count
print(live_capture(param))
elif choice in ["E", "e"]:
#Simple way to display pcaps in the current working directory
os.system("ls -hl *pcap*")
        #Didn't have time to build a tab-complete function into the code, maybe later
filename = input("Enter your filename: ")
#Call to the counter function
print(counters(filename))
print("Are you looking to convert this PCAP to a CSV?")
choice2 = input_check("Y or N? ", "Invalid input, Expected Y or N." , validate_list, ["yes", "Yes", "Y", "y", "No", "no", "n", "N"])
if choice2 in ["yes", "Yes", "Y", "y"]:
dst_file = "/tmp/" + str(input("Enter your destination file here: "))
print("Your file will be saved as" + dst_file+".csv")
param2 = filename, dst_file
print(convert(param2))
elif choice2 in ["No", "no", "n", "N"]:
print("So are we looking to Export[X] or Search[S] within the PCAP?")
choice3 = input_check("S or X? ", "Invalid input, Expected S or X." , validate_list, ["S", "s", "X", "x"])
if choice3 in ["x", "X"]:
dst_dir = "/tmp/" +str(input("Enter your destination directory: "))
print("Your directory will be saved as" + dst_dir)
#JPG files will be exported with the HTTP data dump
print("What file type are you looking for? Enter only 1:\n dicom \n http \n imf \n smb \n tftp")
exports = str(input("Enter export object type: "))
param4 = filename, exports, dst_dir
print(export(param4))
print("You can find your destination folder here: {}".format(dst_dir))
elif choice3 in ["S", "s"]:
#Call to seeker function, which is the second most complex portion
print(seeker(filename))
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : memory.py
# @Author: zixiao
# @Date : 2019-04-07
# @Desc :
import numpy as np
class Memory:
def __init__(self, size, w, h, frame_len):
self.size = size
self.index = 0
self.count = 0
self.num_in_memory = 0
self.frame_len = frame_len
self.obs = np.zeros((size, w, h), dtype=np.uint8)
self.actions = np.zeros((size,), dtype=np.uint8)
self.rewards = np.zeros((size,), dtype=np.float32)
self.obs_shape = [w, h]
self.w = w
self.h = h
def store_transition(self, action, reward, obs_):
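        # action/reward for step t are stored at self.index, while the *next*
        # observation obs_ is written one slot ahead (circularly), so that
        # obs[i], actions[i], rewards[i] and obs[(i + 1) % size] form one transition.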
index = int((self.index + 1) % self.size)
self.actions[self.index] = action
self.rewards[self.index] = reward
self.obs[index] = obs_
self.index = index
self.count += 1
self.num_in_memory = min(self.size, self.count)
def get_memory(self, batch_size):
nums = np.random.choice(self.num_in_memory, size=batch_size)
obs_batch = np.zeros((batch_size, self.frame_len, self.w, self.h))
obs_batch_ = np.zeros((batch_size, self.frame_len, self.w, self.h))
for i in range(len(nums)):
obs_start = nums[i] - self.frame_len + 1
obs_end = nums[i]
if obs_start < 0:
obs_start += self.num_in_memory
obs_batch[i] = np.concatenate((self.obs[obs_start:self.num_in_memory ], self.obs[0:obs_end + 1]))
else:
obs_batch[i] = self.obs[obs_start:obs_end + 1]
obs_start_ = nums[i]
obs_end_ = nums[i] + self.frame_len - 1
if obs_end_ >=self.num_in_memory:
obs_end_ -= self.num_in_memory
obs_batch_[i] = np.concatenate((self.obs[obs_start_:self.num_in_memory ], self.obs[0:obs_end_ + 1]))
else:
obs_batch_[i] = self.obs[obs_start_:obs_end_ + 1]
action_batch = self.actions[nums]
reward_batch = self.rewards[nums]
return obs_batch, action_batch, reward_batch, obs_batch_
def get_last_frame(self):
start = self.index - self.frame_len + 1
end = self.index
if start < 0:
start += self.num_in_memory
            obs_frame = np.concatenate((self.obs[start:self.num_in_memory], self.obs[0:end + 1]))
else:
obs_frame = self.obs[start:end + 1]
return obs_frame
def store_obs(self, obs):
self.obs[self.index] = obs
|
#!/usr/bin/env python2.7
import json
import jwt
import os
import random
import re
import select
import subprocess
import socket
import time
import threading
import traceback
import zmq
import requests
import six.moves.queue
from datetime import datetime, timedelta
from functools import partial
from jsonrpc import JSONRPCResponseManager, dispatcher
from websocket import create_connection, WebSocketTimeoutException, ABNF
from selfdrive.loggerd.config import ROOT
import selfdrive.crash as crash
import selfdrive.messaging as messaging
from common.api import Api
from common.params import Params
from selfdrive.services import service_list
from selfdrive.swaglog import cloudlog
from selfdrive.version import version, dirty
ATHENA_HOST = os.getenv('ATHENA_HOST', 'wss://athena.comma.ai')
HANDLER_THREADS = int(os.getenv('HANDLER_THREADS', 4))
LOCAL_PORT_WHITELIST = set([8022])
dispatcher["echo"] = lambda s: s
payload_queue = six.moves.queue.Queue()
response_queue = six.moves.queue.Queue()
def handle_long_poll(ws):
end_event = threading.Event()
threads = [
threading.Thread(target=ws_recv, args=(ws, end_event)),
threading.Thread(target=ws_send, args=(ws, end_event))
] + [
threading.Thread(target=jsonrpc_handler, args=(end_event,))
for x in xrange(HANDLER_THREADS)
]
map(lambda thread: thread.start(), threads)
try:
while not end_event.is_set():
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
end_event.set()
raise
finally:
for i, thread in enumerate(threads):
thread.join()
def jsonrpc_handler(end_event):
dispatcher["startLocalProxy"] = partial(startLocalProxy, end_event)
while not end_event.is_set():
try:
data = payload_queue.get(timeout=1)
response = JSONRPCResponseManager.handle(data, dispatcher)
response_queue.put_nowait(response)
except six.moves.queue.Empty:
pass
except Exception as e:
cloudlog.exception("athena jsonrpc handler failed")
traceback.print_exc()
response_queue.put_nowait(json.dumps({"error": str(e)}))
# security: user should be able to request any message from their car
# TODO: add service to, for example, start visiond and take a picture
@dispatcher.add_method
def getMessage(service=None, timeout=1000):
if service is None or service not in service_list:
raise Exception("invalid service")
socket = messaging.sub_sock(service_list[service].port)
socket.setsockopt(zmq.RCVTIMEO, timeout)
ret = messaging.recv_one(socket)
return ret.to_dict()
@dispatcher.add_method
def listDataDirectory():
files = [os.path.relpath(os.path.join(dp, f), ROOT) for dp, dn, fn in os.walk(ROOT) for f in fn]
return files
@dispatcher.add_method
def uploadFileToUrl(fn, url, headers):
if len(fn) == 0 or fn[0] == '/' or '..' in fn:
return 500
with open(os.path.join(ROOT, fn), "rb") as f:
ret = requests.put(url, data=f, headers=headers, timeout=10)
return ret.status_code
def startLocalProxy(global_end_event, remote_ws_uri, local_port):
try:
cloudlog.event("athena startLocalProxy", remote_ws_uri=remote_ws_uri, local_port=local_port)
if local_port not in LOCAL_PORT_WHITELIST:
raise Exception("Requested local port not whitelisted")
params = Params()
dongle_id = params.get("DongleId")
private_key = open("/persist/comma/id_rsa").read()
identity_token = jwt.encode({'identity':dongle_id, 'exp': datetime.utcnow() + timedelta(hours=1)}, private_key, algorithm='RS256')
ws = create_connection(remote_ws_uri,
cookie="jwt=" + identity_token,
enable_multithread=True)
ssock, csock = socket.socketpair()
local_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_sock.connect(('127.0.0.1', local_port))
local_sock.setblocking(0)
proxy_end_event = threading.Event()
threads = [
threading.Thread(target=ws_proxy_recv, args=(ws, local_sock, ssock, proxy_end_event, global_end_event)),
threading.Thread(target=ws_proxy_send, args=(ws, local_sock, csock, proxy_end_event))
]
map(lambda thread: thread.start(), threads)
return {"success": 1}
except Exception as e:
traceback.print_exc()
raise e
@dispatcher.add_method
def getPublicKey():
if not os.path.isfile('/persist/comma/id_rsa.pub'):
return None
with open('/persist/comma/id_rsa.pub', 'r') as f:
return f.read()
@dispatcher.add_method
def getSshAuthorizedKeys():
with open('/system/comma/home/.ssh/authorized_keys', 'r') as f:
return f.read()
@dispatcher.add_method
def getSimInfo():
sim_state = subprocess.check_output(['getprop', 'gsm.sim.state']).strip().split(',')
network_type = subprocess.check_output(['getprop', 'gsm.network.type']).strip().split(',')
mcc_mnc = subprocess.check_output(['getprop', 'gsm.sim.operator.numeric']).strip() or None
sim_id_aidl_out = subprocess.check_output(['service', 'call', 'iphonesubinfo', '11'])
sim_id_aidl_lines = sim_id_aidl_out.split('\n')
if len(sim_id_aidl_lines) > 3:
sim_id_lines = sim_id_aidl_lines[1:4]
sim_id_fragments = [re.search(r"'([0-9\.]+)'", line).group(1) for line in sim_id_lines]
sim_id = reduce(lambda frag1, frag2: frag1.replace('.', '') + frag2.replace('.', ''), sim_id_fragments)
else:
sim_id = None
return {
'sim_id': sim_id,
'mcc_mnc': mcc_mnc,
'network_type': network_type,
'sim_state': sim_state
}
def ws_proxy_recv(ws, local_sock, ssock, end_event, global_end_event):
while not (end_event.is_set() or global_end_event.is_set()):
try:
data = ws.recv()
local_sock.sendall(data)
except WebSocketTimeoutException:
pass
except Exception:
traceback.print_exc()
break
ssock.close()
end_event.set()
def ws_proxy_send(ws, local_sock, signal_sock, end_event):
while not end_event.is_set():
try:
r, _, _ = select.select((local_sock, signal_sock), (), ())
if r:
if r[0].fileno() == signal_sock.fileno():
# got end signal from ws_proxy_recv
end_event.set()
break
data = local_sock.recv(4096)
if not data:
# local_sock is dead
end_event.set()
break
ws.send(data, ABNF.OPCODE_BINARY)
except Exception:
traceback.print_exc()
end_event.set()
def ws_recv(ws, end_event):
while not end_event.is_set():
try:
data = ws.recv()
payload_queue.put_nowait(data)
except WebSocketTimeoutException:
pass
except Exception:
traceback.print_exc()
end_event.set()
def ws_send(ws, end_event):
while not end_event.is_set():
try:
response = response_queue.get(timeout=1)
ws.send(response.json)
except six.moves.queue.Empty:
pass
except Exception:
traceback.print_exc()
end_event.set()
def backoff(retries):
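    # exponential backoff with random jitter, capped at 128 seconds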
return random.randrange(0, min(128, int(2 ** retries)))
def main(gctx=None):
params = Params()
dongle_id = params.get("DongleId")
ws_uri = ATHENA_HOST + "/ws/v2/" + dongle_id
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
crash.install()
private_key = open("/persist/comma/id_rsa").read()
api = Api(dongle_id, private_key)
conn_retries = 0
while 1:
try:
print("connecting to %s" % ws_uri)
ws = create_connection(ws_uri,
cookie="jwt=" + api.get_token(),
enable_multithread=True)
ws.settimeout(1)
conn_retries = 0
handle_long_poll(ws)
except (KeyboardInterrupt, SystemExit):
break
except Exception:
conn_retries += 1
traceback.print_exc()
time.sleep(backoff(conn_retries))
params.delete("AthenadPid")
if __name__ == "__main__":
main()
|
def init():
return {
"kafka_metrics_topic": "telemetry.metrics",
"kafka_job_queue": "{}.analytics.job_queue"
}
|
import torch.optim as optim
def sgd_optimizer(model, learning_rate, momentum, l2_factor=0.0):
"""Create optimizer.
Args:
model: Model instance.
learning_rate: Learning rate for the optimizer.
momentum: Momentum of optimizer.
l2_factor: Factor for L2 regularization.
Returns:
SGD optimizer.
"""
return optim.SGD(
model.parameters(), lr=learning_rate, momentum=momentum, weight_decay=l2_factor
)
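# A minimal usage sketch (illustrative only; the nn.Linear model below is a stand-in,
# not part of this module):
#
#   import torch.nn as nn
#   model = nn.Linear(10, 2)
#   optimizer = sgd_optimizer(model, learning_rate=0.01, momentum=0.9, l2_factor=5e-4)
#   ...
#   loss.backward()
#   optimizer.step()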
|
import py_compile
import sys
import csv
import os.path as _p
import pathlib
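# Usage (inferred from the branches below):
#   <this script> <dir>        directory mode: reads <dir>/.py_compile, a CSV manifest of
#                              "<in_path>,<out_path>" rows (escapechar '\', no doublequoting),
#                              byte-compiles each entry and appends the finished out_path to
#                              <dir>/.py_compile.done
#   <this script> <src> <dst>  single-file mode: byte-compiles <src> into <dst>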
if len(sys.argv) == 2:
# directory mode
dir = sys.argv[1]
with open(_p.join(dir, '.py_compile')) as fin, open(_p.join(dir,'.py_compile.done'), 'w') as fout:
wrt = csv.writer(fout, doublequote=False, escapechar='\\')
for in_path, out_path in csv.reader(fin, doublequote=False, escapechar='\\'):
dout_path = _p.join(dir, out_path)
subdir = _p.dirname(dout_path)
pathlib.Path(subdir).mkdir(parents=True, exist_ok=True)
py_compile.compile(in_path, dout_path)
wrt.writerow((out_path,))
elif len(sys.argv) == 3:
# file compile mode
dir = _p.dirname(sys.argv[2])
pathlib.Path(dir).mkdir(parents=True, exist_ok=True)
py_compile.compile(sys.argv[1], sys.argv[2])
|
from . import TestCase
class TestGetSet(TestCase):
def test_get(self):
self.assertEqual(None, self.ssdb.get('None'))
def test_set(self):
self.assertEqual(1, self.ssdb.set(b'set', b'set'))
self.assertEqual(1, self.ssdb.set(b'set', b'set'))
self.assertEqual(b'set', self.ssdb.get('set'))
|
from django.shortcuts import render
from rest_framework import generics, permissions
from .models import Employee
from .serializers import EmployeeSerializer
from django.views.generic.base import TemplateResponseMixin
# Create your views here.
class EmployeeListView(generics.ListCreateAPIView):
'''
This generates a list view of all employee entities
'''
# permission_classes = (permissions.IsAuthenticated,)
queryset = Employee.objects.all()
serializer_class = EmployeeSerializer
class EmployeeDetailView(generics.RetrieveUpdateDestroyAPIView):
'''
This generates a detailed view of specified employee entity using the unique employee ID generated.
Employee ID usually starts with 'E' and has 5 digits generated sequentially. E.g. 'E00005'
'''
# permission_classes = (permissions.IsAuthenticated,)
queryset = Employee.objects.all()
serializer_class = EmployeeSerializer
lookup_field = 'employee_id'
|
from random import randint
from time import sleep
def turn(player_lives: int, player_name: str, sentence: str, sentence_out: str) -> list:  # [state_of_choice, updated_sentence, points]
found = False
letters = "qwertzuiopasdfghjklyxcvbnm"
counter = 0
if player_lives > 0:
letter = input(f"{player_name}, please enter a letter: ")
if len(letter) == 1 and letter in letters:
for i in range(len(sentence)):
if sentence.lower()[i] == letter:
found = True
sentence_out = sentence_out[:i] + letter + sentence_out[i+1:]
counter += 1
elif len(letter) > 1:
if letter.lower() == sentence.lower():
return ["correct_sentence", sentence_out, 0]
else:
return ["incorrect", sentence_out, 1]
else:
return ["incorrect", sentence_out, 1]
if found:
return ["correct", sentence_out, counter] # return[2] -> Num of letter appearences
else:
return ["incorrect", sentence_out, 1]
def count(sentence: str) -> int:
counter = 0
for i in sentence:
if i == '*':
counter += 1
return counter
def game(player_1: str, player_2: str) -> None:
pl_1_lives = 3
pl_2_lives = 3
sentences = [
"Politicians in croatia are dumb",
"I can not believe you are acctually playing this",
"Bruh just do something useful",
"Assume penguin is a cylindrical object",
"Chess is a great game that improves your brain functions",
"People are living in a simulation",
"People are acctually very similar to artificial intelligence",
"Writing this without API makes me wanna die",
"Can not wait to find help with creating sentences"
    ]  # TODO -> Try taking data from some API instead of making your own sentences
print (f"Hello, today {player_1} will be playing hangman against {player_2}.\n")
sleep(5)
print(f"1. Every single one of you will have a choice to pick a letter until you find the sentence .\n")
sleep(5)
print(f"2. Every correct letter gives you letter appearence points and every correct sentence gives you\n that points that are left in a sentence(exact number of letters left)\n")
sleep(7)
print("If you enter more than one letter it is considered as an sentence input (until I upgrade code)")
print("\nSTARTING NOW, GLHF!\n")
sleep(5)
letters = "qwertzuiopasdfghjklyxcvbnm"
player_1_points = 0
player_2_points = 0
while len(sentences) > 0:
sentence = sentences[randint(0, len(sentences) - 1)]
sentence_out = ""
for i in sentence.lower():
if i in letters:
sentence_out += "*"
else:
sentence_out += " "
while "*" in sentence_out:
pl_1_lives_curr = 1
pl_2_lives_curr = 1
print("\nSentence: " + sentence_out)
results1 = turn(pl_1_lives_curr, player_1, sentence, sentence_out)
if results1[0] == "correct_sentence":
print(f"\nGreat job! That is correct sentence!\n {str(count(results1[1]))} points for {player_1}!")
player_1_points += count(results1[1])
sentences.remove(sentence)
break
elif results1[0] == "correct":
print(f"\nGreat job! That is correct letter!\nOne point for {player_1}!")
player_1_points += results1[2]
sentence_out = results1[1]
else:
print(f"\nINCORRECT!\nOne point taken from {player_1}!")
player_1_points -= 1
print("\nSentence: " + sentence_out)
results2 = turn(pl_2_lives_curr, player_2, sentence, sentence_out)
if results2[0] == "correct_sentence":
print(f"\nGreat job! That is correct sentence!\n {str(count(results2[1]))} points for {player_2}!")
player_2_points += count(results2[1])
sentences.remove(sentence)
break
elif results2[0] == "correct":
print(f"\nGreat job! That is correct letter!\nOne point for {player_2}!")
player_2_points += results2[2]
sentence_out = results2[1]
else:
print(f"\nINCORRECT!\nOne point taken from {player_2}!")
player_2_points -= 1
print(f"\nAND FOR FINAL CHECKING OF POINTS WINNER IS ...\n")
sleep(9)
if player_1_points == player_2_points:
print(f"NOBODY, WOW, THAT IS SO RARE!!!\nBOTH OF YOU HAD WON {player_1_points}.\n")
sleep(2)
elif player_1_points > player_2_points:
print(f"{player_1} with {player_1_points}!!!\n {player_2} had {player_2_points}.\n")
sleep(2)
    else:
print(f"{player_2} with {player_2_points}!!!\n {player_1} had {player_1_points}.\n")
sleep(2)
def main():
player_1 = input("Enter name of player 1: ")
print()
if len(player_1) == 0:
print("I'll call you than Mr_X")
player_1 = "Mr_X"
player_2 = input("Enter name of player 2: ")
print()
if len(player_2) == 0:
print("I'll call you than Mr_Y")
player_2 = "Mr_Y"
game(player_1, player_2)
if __name__ == '__main__':
main()
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
from dataclasses import dataclass
from typing import Optional
from neptune_load.sigv4_signer.sigv4_signer import SigV4Signer
from pipeline_control.adapters.neptune_loader.neptune_loader_configuration import (
NeptuneBulkloaderConfiguration,
)
from pipeline_control.service_layer.handlers import util
from .neptune_loader import NeptuneLoader
@dataclass
class NeptuneLoaderFactory:
neptune_load_configuration: Optional[NeptuneBulkloaderConfiguration]
signer: SigV4Signer
def make_read_only_loader(
self,
neptune_load_id: str,
override_neptune_load_configuration: Optional[
NeptuneBulkloaderConfiguration
] = None,
):
neptune_configuration = self.resolve_neptune_configuration(
override_neptune_load_configuration
)
if not neptune_configuration.source:
neptune_configuration.source = "s3://loader/only/data"
loader = NeptuneLoader(
signer=self.signer,
neptune_load_configuration=neptune_configuration,
)
loader.bulk_loader = neptune_load_id
return loader
def make_loader(
self,
override_neptune_load_configuration: Optional[
NeptuneBulkloaderConfiguration
] = None,
):
neptune_configuration = self.resolve_neptune_configuration(
override_neptune_load_configuration
)
return NeptuneLoader(
neptune_load_configuration=neptune_configuration, signer=self.signer
)
def resolve_neptune_configuration(self, override) -> NeptuneBulkloaderConfiguration:
neptune_configuration = util.neptune_config_from_app_config()
neptune_configuration = neptune_configuration & self.neptune_load_configuration
neptune_configuration = neptune_configuration & override
return neptune_configuration
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import sys
from python import invoker
if len(sys.argv) != 2:
raise ValueError('Please specify exactly one [root] directory.')
root_dir = sys.argv[1]
output_path = os.path.join(root_dir, 'polyglot_snippet_data.json')
json_array = invoker.get_json_for_dir(root_dir)
with open(output_path, 'w') as file:
json.dump(json_array, file)
print(f'JSON written to: {output_path}')
print('Do not move this file!')
|
from django.shortcuts import render
from django.template.loader import render_to_string
from django.http import HttpResponse,JsonResponse
from .models import *
from django.views.generic import ListView
from my_utils.decorator import ajax_login_requird
from django.views.decorators.http import require_http_methods
from segmentfault.apps.notice.views import post_notice
# Create your views here.
class CircleList(ListView):
    '''
    Circle post (feed) list view
    '''
model = CircleMessage
paginate_by = 5
template_name = 'circle/circle_list.html'
context_object_name = 'circle_list'
ordering = ('like',)
def get_queryset(self,**kwargs):
return CircleMessage.objects.filter(is_comment=False,is_delete=False).select_related('user','parent')
def get_context_data(self, *, object_list=None, **kwargs):
kwargs['testob'] = CircleMessage.objects.all().first()
return super().get_context_data(**kwargs)
@ajax_login_requird
@require_http_methods(["POST"])
def new_circle(request):
    '''
    API endpoint for publishing a new post
    :param request:
    :return:
    '''
import uuid
ob = CircleMessage.objects.create(uuid=uuid.uuid4(), user=request.user, context=request.POST.get('message').strip())
return render(request, 'circle/circle_single.html', {"circle": ob})
@ajax_login_requird
@require_http_methods(["POST"])
def change_like(request):
    '''
    API endpoint for liking / unliking a post
    :param request:
    :return:
    '''
uuid = request.POST.get("uuid", '')
ob = CircleMessage.objects.filter(uuid=uuid).first()
if ob:
res = ob.change_like(request.user)
return JsonResponse({"status": 2000, "message": True, 'like_num': ob.get_like_num()})
else:
return JsonResponse({"status": 5000, "message": False})
@ajax_login_requird
@require_http_methods(["POST"])
def delete_circle(request):
    '''
    API endpoint for deleting a post or a comment
    :param request:
    :return:
    '''
uuid = request.POST.get("uuid", '')
ob = CircleMessage.objects.filter(uuid=uuid).first()
ob.is_delete = True
ob.save()
return JsonResponse({'status': 2000, 'message': True})
@require_http_methods(["POST"])
def get_reply(request):
    '''
    API endpoint for the comment list
    :param request:
    :return:
    '''
uuid = request.POST.get('uuid','')
ob = CircleMessage.objects.filter(uuid=uuid).first()
reply_num = ob.get_comment_num()
reply_query = ob.get_comment(10)
context = {
'reply_list': reply_query,
'reply_num':reply_num,
'uuid':ob.uuid,
}
html_text = render_to_string("circle/circle_reply.html",context)
return JsonResponse({"status":2000,"html_text":html_text})
@ajax_login_requird
@require_http_methods(["POST"])
def reply(request):
    '''
    API endpoint for posting a reply
    :param request:
    :return:
    '''
uuid = request.POST.get("uuid",'')
text = request.POST.get('text','').strip()
parent = CircleMessage.objects.filter(uuid=uuid).first()
ob = CircleMessage.objects.create(user=request.user,context=text,parent=parent,is_comment=True)
ob.save()
    post_notice(request.user, parent.user, 'R', parent)  # send a websocket notification
return JsonResponse({"status":2000})
def test(request):
from .tasks import app1
a = app1.delay()
return HttpResponse('ok')
|
import argparse
import sys
from pathlib import Path
from hairy_hashes.hashes import rm_duplicate_hashes
def main() -> None:
"""Remove files with duplicate SHA256 hashes."""
try:
parser = argparse.ArgumentParser(
description="Remove files with duplicate SHA256 hashes.",
formatter_class=argparse.MetavarTypeHelpFormatter,
)
parser.add_argument(
"file",
type=Path,
nargs="+",
help="file to compare",
)
parser.add_argument(
"-d",
"--date",
action="store_true",
help="choose remaining copy by most recently modified "
"(default: choose by filename)",
)
parser.add_argument(
"-r",
"--reverse",
action="store_true",
help="reverse selection process",
)
args = parser.parse_args()
rm_duplicate_hashes(args.file, args.date, args.reverse)
except Exception as e:
print(str(e))
sys.exit(2)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Danila?
# - Paul Nilsson, paul.nilsson@cern.ch, 2019
import re
import logging
import json
import numbers
import traceback
import threading
from pilot.util.auxiliary import is_python3
log = logging.getLogger(__name__)
def camel_to_snake(name):
"""
Changes CamelCase to snake_case, used by python.
:param name: name to change
:return: name in snake_case
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def snake_to_camel(snake_str):
"""
Changes snake_case to firstLowCamelCase, used by server.
:param snake_str: name to change
:return: name in camelCase
"""
components = snake_str.split('_')
# We capitalize the first letter of each component except the first one
# with the 'title' method and join them together.
return components[0] + "".join(x.title() for x in components[1:])
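# Illustrative round-trips:
#   camel_to_snake("jobDefinitionID")   -> "job_definition_id"
#   snake_to_camel("job_definition_id") -> "jobDefinitionId"  (note: all-caps "ID" is lost,
#                                          which is why __key_back_aliases exists below)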
def split(val, separator=",", min_len=0, fill_last=False):
"""
Splits comma separated values and parses them.
:param val: values to split
:param separator: comma or whatever
:param min_len: minimum needed length of array, array is filled up to this value
    :param fill_last: flag stating the array filler to use when min_len is greater than the extracted array length.
                      If True, the array is padded with the first parsed value, else with Nones.
:return: parsed array
"""
if val is None:
return [None for _ in range(min_len)]
v_arr = val.split(separator)
for i, v in enumerate(v_arr):
v_arr[i] = parse_value(v)
if min_len > len(v_arr):
filler = None if not fill_last or len(v_arr) < 1 else v_arr[0]
v_arr.extend([filler for _ in range(min_len - len(v_arr))])
return v_arr
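# Illustrative examples (Python 3):
#   split("1,2,x")                          -> [1, 2, 'x']
#   split("1,2", min_len=4)                 -> [1, 2, None, None]
#   split("foo", min_len=3, fill_last=True) -> ['foo', 'foo', 'foo']
#   split(None, min_len=2)                  -> [None, None]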
def get_nulls(val):
"""
Converts every "NULL" string to python's None.
:param val: string or whatever
:return: val or None if val is "NULL"
"""
return val if val != "NULL" else None
def is_float(val):
"""
    Test whether the string value can be converted to a float.
:param val: string or whatever
:return: True if the value may be converted to Float
"""
try:
float(val)
return True
except ValueError:
return False
def is_int(val):
"""
    Test whether the string value can be converted to an int.
:param val: string or whatever
:return: True if the value may be converted to int
"""
try:
int(val)
return True
except ValueError:
return False
def is_long(s):
"""
    Test whether the value can be converted to a long integer (Python 2 only).
:param s: string or whatever
:return: True if the value may be converted to Long
"""
try:
if not isinstance(s, basestring): # Python 2
try:
long(s)
return True
except ValueError:
return False
except Exception:
return False # Python 3 - this function should not be used on Python 3
if s and s[0] in ('-', '+'):
return s[1:].isdigit()
return s.isdigit()
def parse_value(value):
"""
    Tries to parse the value as a number or as None. If that succeeds, the parsed value is returned; otherwise the
    value is returned unparsed.
:param value:
:return: mixed
"""
try:
if not isinstance(value, basestring): # Python 2
return value
except Exception:
if not isinstance(value, str): # Python 3
return value
if is_python3(): # Python 3
if is_int(value): # Python 3
return int(value)
else:
if is_long(value): # Python 2
return long(value)
if is_float(value):
return float(value)
return get_nulls(value)
def stringify_weird(arg):
"""
Converts None to "NULL"
:param arg:
:return: arg or "NULL"
"""
if arg is None:
return "NULL"
if isinstance(arg, numbers.Number):
return arg
return str(arg)
def join(arr):
"""
Joins arrays, converting contents to strings.
:param arr:
:return: joined array
"""
return ",".join(str(stringify_weird(x)) for x in arr)
def get_input_files(description):
"""
Extracts input files from the description.
:param description:
:return: file list
"""
log.info("Extracting input files from job description")
files = {}
if description['inFiles'] and description['inFiles'] != "NULL":
in_files = split(description["inFiles"])
length = len(in_files)
ddm_endpoint = split(description.get("ddmEndPointIn"), min_len=length)
destination_se = split(description.get("destinationSE"), min_len=length)
dispatch_dblock = split(description.get("dispatchDblock"), min_len=length)
dispatch_dblock_token = split(description.get("dispatchDBlockToken"), min_len=length)
datasets = split(description.get("realDatasetsIn"), min_len=length, fill_last=True)
dblocks = split(description.get("prodDBlocks"), min_len=length)
dblock_tokens = split(description.get("prodDBlockToken"), min_len=length)
size = split(description.get("fsize"), min_len=length)
c_sum = split(description.get("checksum"), min_len=length)
scope = split(description.get("scopeIn"), min_len=length, fill_last=True)
guids = split(description.get("GUID"), min_len=length, fill_last=True)
for i, f in enumerate(in_files):
if f is not None:
files[f] = {
"ddm_endpoint": ddm_endpoint[i],
"storage_element": destination_se[i],
"dispatch_dblock": dispatch_dblock[i],
"dispatch_dblock_token": dispatch_dblock_token[i],
"dataset": datasets[i],
"dblock": dblocks[i],
"dblock_token": dblock_tokens[i],
"size": size[i],
"checksum": c_sum[i],
'scope': scope[i],
"guid": guids[i]
}
return files
def fix_log(description, files):
"""
Fixes log file description in output files (changes GUID and scope).
:param description:
:param files: output files
:return: fixed output files
"""
log.info("modifying log-specific values in a log file description")
if description["logFile"] and description["logFile"] != "NULL":
if description["logGUID"] and description["logGUID"] != "NULL" and description["logFile"] in \
files:
files[description["logFile"]]["guid"] = description["logGUID"]
files[description["logFile"]]["scope"] = description["scopeLog"]
return files
def get_output_files(description):
"""
Extracts output files from the description.
:param description:
:return: output files
"""
log.info("Extracting output files in description")
files = {}
if description['outFiles'] and description['outFiles'] != "NULL":
out_files = split(description["outFiles"])
length = len(out_files)
ddm_endpoint = split(description.get("ddmEndPointOut"), min_len=length)
destination_se = split(description.get("fileDestinationSE"), min_len=length)
dblock_token = split(description.get("dispatchDBlockTokenForOut"), min_len=length)
dblock_tokens = split(description.get("prodDBlockTokenForOut"), min_len=length)
datasets = split(description.get("realDatasets"), min_len=length)
dblocks = split(description.get("destinationDblock"), min_len=length)
destination_dblock_token = split(description.get("destinationDBlockToken"), min_len=length)
scope = split(description.get("scopeOut"), min_len=length, fill_last=True)
for i, f in enumerate(out_files):
if f is not None:
files[f] = {
"ddm_endpoint": ddm_endpoint[i],
"storage_element": destination_se[i],
"dispatch_dblock_token": dblock_token[i],
"destination_dblock_token": destination_dblock_token[i],
"dblock_token": dblock_tokens[i],
"dataset": datasets[i],
"dblock": dblocks[i],
"scope": scope[i]
}
return fix_log(description, files)
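# one_or_set() below collapses a list to a single stringified value when every element is
# identical, and falls back to a comma-separated join otherwise (illustrative sketch with
# hypothetical scope values):
#   one_or_set(["mc16", "mc16"])   -> "mc16"
#   one_or_set(["mc16", "data18"]) -> "mc16,data18"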
def one_or_set(array):
if len(array) < 1:
return join(array)
zero = array[0]
for i in array:
if i != zero:
return join(array)
return stringify_weird(zero)
class JobDescription(object):
__holder = None
__key_aliases = {
'PandaID': 'jobid', # it is job id, not PanDA
'transformation': 'script', # making it more convenient
'jobPars': 'script_parameters', # -.-
'coreCount': 'number_of_cores',
'prodUserID': 'user_dn',
'prodSourceLabel': 'label', # We don't have any other labels in there. And this is The Label, or just label
'homepackage': 'home_package', # lowercase, all of a sudden, splitting words
"nSent": 'throttle', # as it's usage says
'minRamCount': 'minimum_ram', # reads better
'maxDiskCount': 'maximum_input_file_size',
'maxCpuCount': 'maximum_cpu_usage_time',
'attemptNr': 'attempt_number',  # bad practice to strip words; the API needs to be readable
}
__key_back_aliases = {
'task_id': 'taskID',  # all IDs go here, because snake case loses the all-caps abbreviation info
'jobset_id': 'jobsetID',
'job_definition_id': 'jobDefinitionID',
'status_code': 'StatusCode',  # names starting with an uppercase letter also go here
}
__soft_key_aliases = {
'id': 'jobid',
'command': 'script',
'command_parameters': 'script_parameters'
}
__input_file_keys = { # corresponding fields in input_files
'inFiles': '',
"ddmEndPointIn": 'ddm_endpoint',
"destinationSE": 'storage_element',
"dispatchDBlockToken": 'dispatch_dblock_token',
"realDatasetsIn": 'dataset',
"prodDBlocks": 'dblock',
"fsize": 'size',
"dispatchDblock": 'dispatch_dblock',
'prodDBlockToken': 'dblock_token',
"GUID": 'guid',
"checksum": 'checksum',
"scopeIn": 'scope'
}
__may_be_united = ['guid', 'scope', 'dataset']  # can be sent as one value for all files, if it is the same for all of them
__output_file_keys = { # corresponding fields in output_files
'outFiles': '',
'ddmEndPointOut': 'ddm_endpoint',
'fileDestinationSE': 'storage_element',
'dispatchDBlockTokenForOut': 'dispatch_dblock_token',
'prodDBlockTokenForOut': 'dblock_token',
'realDatasets': 'dataset',
'destinationDblock': 'dblock',
'destinationDBlockToken': 'destination_dblock_token',
'scopeOut': 'scope',
'logGUID': 'guid',
'scopeLog': 'scope'
}
__key_back_aliases_from_forward = None
__key_reverse_aliases = None
__key_aliases_snake = None
input_files = None
output_files = None
def __init__(self):
super(JobDescription, self).__init__()
self.__key_back_aliases_from_forward = self.__key_back_aliases.copy()
self.__key_reverse_aliases = {}
self.__key_aliases_snake = {}
self.input_files = {}
self.output_files = {}
for key in self.__key_aliases:
alias = self.__key_aliases[key]
self.__key_back_aliases_from_forward[alias] = key
self.__key_aliases_snake[camel_to_snake(key)] = alias
def get_input_file_prop(self, key):
corresponding_key = self.__input_file_keys[key]
ret = []
for f in self.input_files:
ret.append(f if corresponding_key == '' else self.input_files[f][corresponding_key])
if corresponding_key in self.__may_be_united:
return one_or_set(ret)
return join(ret)
def get_output_file_prop(self, key):
log_file = self.log_file
if key == 'logGUID':
return stringify_weird(self.output_files[log_file]['guid'])
if key == 'scopeLog':
return stringify_weird(self.output_files[log_file]['scope'])
corresponding_key = self.__output_file_keys[key]
ret = []
for f in self.output_files:
if key != 'scopeOut' or f != log_file:
ret.append(f if corresponding_key == '' else self.output_files[f][corresponding_key])
if corresponding_key in self.__may_be_united:
return one_or_set(ret)
return join(ret)
def load(self, new_desc):
try:
if isinstance(new_desc, basestring): # Python 2
new_desc = json.loads(new_desc)
except Exception:
if isinstance(new_desc, str): # Python 3
new_desc = json.loads(new_desc)
if "PandaID" in new_desc:
log.info("Parsing description to be of readable, easy to use format")
fixed = {}
self.input_files = get_input_files(new_desc)
self.output_files = get_output_files(new_desc)
for key in new_desc:
value = new_desc[key]
if key not in self.__input_file_keys and key not in self.__output_file_keys:
old_key = key
if key in self.__key_aliases:
key = self.__key_aliases[key]
else:
key = camel_to_snake(key)
if key != old_key:
self.__key_back_aliases_from_forward[key] = old_key
self.__key_reverse_aliases[old_key] = key
fixed[key] = parse_value(value)
new_desc = fixed
else:
self.input_files = new_desc['input_files']
self.output_files = new_desc['output_files']
self.__holder = new_desc
def to_json(self, decompose=False, **kwargs):
if decompose:
prep = {}
for k in self.__holder:
if k not in ['input_files', 'output_files']:
if k in self.__key_back_aliases_from_forward:
rev = self.__key_back_aliases_from_forward[k]
else:
rev = snake_to_camel(k)
prep[rev] = stringify_weird(self.__holder[k])
for k in self.__output_file_keys:
prep[k] = self.get_output_file_prop(k)
for k in self.__input_file_keys:
prep[k] = self.get_input_file_prop(k)
else:
prep = self.__holder.copy()
prep['input_files'] = self.input_files
prep['output_files'] = self.output_files
return json.dumps(prep, **kwargs)
def get_description_parameter(self, key):
if self.__holder is not None:
if key in self.__holder:
return self.__holder[key]
if key in self.__input_file_keys:
log.warning(("Old key JobDescription.%s is used. Better to use JobDescription.input_files[][%s] to"
" access and manipulate this value.\n" % (key, self.__input_file_keys[key])) +
self.get_traceback())
return self.get_input_file_prop(key)
if key in self.__output_file_keys:
log.warning(("Old key JobDescription.%s is used. Better to use JobDescription.output_files[][%s] to"
" access and manipulate this value.\n" % (key, self.__output_file_keys[key])) +
self.get_traceback())
return self.get_output_file_prop(key)
snake_key = camel_to_snake(key)
if snake_key in self.__key_aliases_snake:
log.warning(("Old key JobDescription.%s is used. Better to use JobDescription.%s to access and "
"manipulate this value.\n" % (key, self.__key_aliases_snake[snake_key])) +
self.get_traceback())
return stringify_weird(self.__holder[self.__key_aliases_snake[snake_key]])
if key in self.__soft_key_aliases:
return self.get_description_parameter(self.__soft_key_aliases[key])
raise AttributeError("Description parameter not found")
def set_description_parameter(self, key, value):
if self.__holder is not None:
if key in self.__holder:
self.__holder[key] = value
return True
if key in self.__input_file_keys:
err = "Key JobDescription.%s is read-only\n" % key
if key == 'inFiles':
err += "Use JobDescription.input_files to manipulate input files"
else:
err += "Use JobDescription.input_files[][%s] to set up this parameter in files description" %\
self.__input_file_keys[key]
raise AttributeError(err)
if key in self.__output_file_keys:
err = "Key JobDescription.%s is read-only\n" % key
if key == 'outFiles':
err += "Use JobDescription.output_files to manipulate output files"
else:
err += "Use JobDescription.output_files[][%s] to set up this parameter in files description" %\
self.__output_file_keys[key]
raise AttributeError(err)
snake_key = camel_to_snake(key)
if snake_key in self.__key_aliases_snake:
log.warning(("Old key JobDescription.%s is used. Better to use JobDescription.%s to access and"
"manipulate this value.\n" % (key, self.__key_aliases_snake[snake_key])) +
self.get_traceback())
self.__holder[self.__key_aliases_snake[snake_key]] = parse_value(value)
if key in self.__soft_key_aliases:
return self.set_description_parameter(self.__soft_key_aliases[key], value)
return False
def get_traceback(self):
tb = list(reversed(traceback.extract_stack()))
tb_str = '\n'
for ii in enumerate(tb):
if ii[0] < 3:
continue # we don't need inner scopes of this and subsequent calls
i = ii[1]
tb_str += '{file}:{line} (in {module}): {call}\n'.format(file=i[0],
line=i[1],
module=i[2],
call=i[3])
thread = threading.currentThread()
return 'Traceback: (latest call first)' + tb_str + 'Thread: %s(%d)' % (thread.getName(), thread.ident)
def __getattr__(self, key):
"""
Reflection of description values into Job instance properties if they are not shadowed.
If there is no own property with corresponding name, the value of Description is used.
Params and return described in __getattr__ interface.
"""
try:
return object.__getattribute__(self, key)
except AttributeError:
return self.get_description_parameter(key)
def __setattr__(self, key, value):
"""
Reflection of description values into Job instance properties if they are not shadowed.
If there is no own property with corresponding name, the value of Description is set.
Params and return described in __setattr__ interface.
"""
try:
object.__getattribute__(self, key)
return object.__setattr__(self, key, value)
except AttributeError:
if not self.set_description_parameter(key, value):
return object.__setattr__(self, key, value)
if __name__ == "__main__":
import sys
logging.basicConfig()
log.setLevel(logging.DEBUG)
jd = JobDescription()
with open(sys.argv[1], "r") as f:
contents = f.read()
jd.load(contents)
log.debug(jd.id)
log.debug(jd.command)
log.debug(jd.PandaID)
log.debug(jd.scopeOut)
log.debug(jd.scopeLog)
log.debug(jd.fileDestinationSE)
log.debug(jd.inFiles)
log.debug(json.dumps(jd.output_files, indent=4, sort_keys=True))
log.debug(jd.to_json(True, indent=4, sort_keys=True))
|
#!/usr/bin/env python
"""
Module :py:class:`DLDGraphics` for MCP DLD detectors for COLTRIMS experiments
===============================================================================
from psana.hexanode.DLDGraphics import DLDGraphics
kwargs = {'STAT_NHITS':True,...}
p = DLDProcessor()
s = DLDStatistics(p, **kwargs)
# event loop with statistics accumulation
draw_plots(s, **kwargs)
Created on 2019-11-20 by Mikhail Dubrovin
"""
#----------
USAGE = 'Run example: python .../psana/hexanode/examples/ex-....py'
#----------
import logging
logger = logging.getLogger(__name__)
import numpy as np
#import psana.pyalgos.generic.Utils as gu
from psana.pyalgos.generic.NDArrUtils import print_ndarr
from psana.pyalgos.generic.Graphics import hist1d, show, move_fig, save_fig, move, save, plotImageLarge, plotGraph
#----------
def plot_image(img,\
figsize=(11,10),\
axwin=(0.10, 0.08, 0.88, 0.88),\
cmap='inferno',\
title='x-y image',\
xlabel='x',\
ylabel='y',\
titwin=None,\
fnm='img.png',\
amp_range=None,\
img_range=None,\
origin='upper',\
hwin_x0y0=(10,10),\
prefix='plot',\
do_save=False) :
"""draws figure with image
"""
s = img.shape
_img_range = (0, s[1], s[0], 0) if img_range is None else img_range
imgnb = img[1:-2,1:-2]
_amp_range = (0, imgnb.mean() + 4*imgnb.std()) if amp_range is None else amp_range
#_amp_range = (0, 0.2*img.max())
axim = plotImageLarge(img, img_range=_img_range, amp_range=_amp_range, figsize=figsize,\
title=title, origin=origin, window=axwin, cmap=cmap)
axim.set_xlabel(xlabel, fontsize=18)
axim.set_ylabel(ylabel, fontsize=18)
axim.set_title(title, fontsize=12)
move(hwin_x0y0[0], hwin_x0y0[1])
save('%s-%s' % (prefix, fnm), do_save)
#show()
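# Minimal usage sketch for plot_image() (hypothetical array, not from the original module):
#   img = np.random.random((100, 100))
#   plot_image(img, title='test image', prefix='test', do_save=False)
# When amp_range is None the colour scale is clipped at mean + 4*sigma of the image with
# its border rows/columns excluded, which keeps hot edge pixels from washing out the plot.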
#----------
def h1d(hlst,\
bins=None,\
amp_range=None,\
weights=None,\
color=None,\
show_stat=True,\
log=False,\
figsize=(6,5),\
axwin=(0.15, 0.12, 0.78, 0.80),\
title='Title',\
xlabel='x',\
ylabel='y',\
titwin=None,\
fnm='hist.png',\
hwin_x0y0=(10,10),\
prefix='plot',\
do_save=False) :
"""draws figure with 1d- histogram
"""
fig, axhi, hi = hist1d(np.array(hlst), bins, amp_range, weights, color, show_stat,\
log, figsize, axwin, title, xlabel, ylabel, titwin)
move(hwin_x0y0[0], hwin_x0y0[1])
save('%s-%s' % (prefix, fnm), do_save)
return fig, axhi, hi
#----------
def plot_graph(x, y,\
figsize=(7,6),\
pfmt='r-',\
lw=2,\
xlimits=None,\
ylimits=None,\
title='py vs. px',\
xlabel='px',\
ylabel='py',\
fnm='graph.png',\
hwin_x0y0=(10,10),\
prefix='plot',\
do_save=False) :
"""draws figure with graph
"""
fig, ax = plotGraph(x, y, figsize=figsize, pfmt=pfmt, lw=lw)
ax.set_xlim(xlimits)
ax.set_ylim(ylimits)
ax.set_xlabel(xlabel, fontsize=18)
ax.set_ylabel(ylabel, fontsize=18)
ax.set_title(title, fontsize=12)
move(hwin_x0y0[0], hwin_x0y0[1])
save('%s-%s' % (prefix, fnm), do_save)
#----------
#class DLDGraphics :
# """ holds, fills, and provide access to statistical arrays for MCP DLD data processing
# """
# def __init__(self, stats, **kwargs) :
# self.stats = stats
# logger.info('In DLDGraphics, **kwargs: %s' % str(kwargs))
# def draw_histograms(self, prefix='plot', do_save=True, hwin_x0y0=(0,0)) :
# plot_histograms(self.stats, prefix, do_save, hwin_x0y0)
# show()
#----------
def draw_plots(sp, prefix='plot', do_save=True, hwin_x0y0=(0,400)) :
"""Plots/saves histograms
"""
#---------
if sp.STAT_NHITS :
#---------
nbins = 16
limits = (-0.5,15.5)
is_log = True
h1d(np.array(sp.lst_nhits_u1), bins=nbins, amp_range=limits, log=is_log,\
title ='Number of hits U1', xlabel='Number of hits U1', ylabel='Events',\
fnm='nhits_u1.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
h1d(np.array(sp.lst_nhits_u2), bins=nbins, amp_range=limits, log=is_log,\
title ='Number of hits U2', xlabel='Number of hits U2', ylabel='Events',\
fnm='nhits_u2.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
h1d(np.array(sp.lst_nhits_v1), bins=nbins, amp_range=limits, log=is_log,\
title ='Number of hits V1', xlabel='Number of hits V1', ylabel='Events',\
fnm='nhits_v1.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
h1d(np.array(sp.lst_nhits_v2), bins=nbins, amp_range=limits, log=is_log,\
title ='Number of hits V2', xlabel='Number of hits V2', ylabel='Events',\
fnm='nhits_v2.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#h1d(np.array(sp.lst_nhits_w1), bins=nbins, amp_range=limits, log=is_log,\
# title ='Number of hits W1', xlabel='Number of hits W1', ylabel='Events',\
# fnm='nhits_w1.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#h1d(np.array(sp.lst_nhits_w2), bins=nbins, amp_range=limits, log=is_log,\
# title ='Number of hits W2', xlabel='Number of hits W2', ylabel='Events',\
# fnm='nhits_w2.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
h1d(np.array(sp.lst_nhits_mcp), bins=nbins, amp_range=limits, log=is_log,\
title ='Number of hits MCP', xlabel='Number of hits MCP', ylabel='Events',\
fnm='nhits_mcp.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
h1d(np.array(sp.lst_nparts), bins=nbins, amp_range=limits, log=is_log,\
title ='Number of particles', xlabel='Number of particles', ylabel='Events',\
fnm='nparticles.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#---------
if sp.STAT_TIME_CH :
#---------
nbins = 300
limits = (1000,4000)
#limits = (0,10000)
#print_ndarr(sp.lst_u1, 'U1')
h1d(np.array(sp.lst_u1), bins=nbins, amp_range=limits, log=True,\
title ='Time U1', xlabel='U1 (ns)', ylabel='Events',\
fnm='time_u1_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#print_ndarr(sp.lst_u2, 'U2')
h1d(np.array(sp.lst_u2), bins=nbins, amp_range=limits, log=True,\
title ='Time U2', xlabel='U2 (ns)', ylabel='Events',\
fnm='time_u2_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
h1d(np.array(sp.lst_v1), bins=nbins, amp_range=limits, log=True,\
title ='Time V1', xlabel='V1 (ns)', ylabel='Events',\
fnm='time_v1_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
h1d(np.array(sp.lst_v2), bins=nbins, amp_range=limits, log=True,\
title ='Time V2', xlabel='V2 (ns)', ylabel='Events',\
fnm='time_v2_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#h1d(np.array(sp.lst_w1), bins=nbins, amp_range=limits, log=True,\
# title ='Time W1', xlabel='W1 (ns)', ylabel='Events',\
# fnm='time_w1_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#h1d(np.array(sp.lst_w2), bins=nbins, amp_range=limits, log=True,\
# title ='Time W2', xlabel='W2 (ns)', ylabel='Events',\
# fnm='time_w2_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#print_ndarr(sp.lst_mcp, 'MCP')
h1d(np.array(sp.lst_mcp), bins=nbins, amp_range=limits, log=True,\
title ='Time MCP', xlabel='MCP (ns)', ylabel='Events',\
fnm='time_mcp_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#---------
if sp.STAT_TIME_SUMS :
#---------
nbins = 200
limits = (0,200) # (50,180)
#nbins = 250
#limits = (0,5000)
#print_ndarr(sp.lst_time_sum_u, 'U')
h1d(np.array(sp.lst_time_sum_u), bins=nbins, amp_range=limits, log=True,\
title ='Time sum U', xlabel='Time sum U (ns)', ylabel='Events',\
fnm='time_sum_u_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#print_ndarr(sp.lst_time_sum_v, 'V')
h1d(np.array(sp.lst_time_sum_v), bins=nbins, amp_range=limits, log=True,\
title ='Time sum V', xlabel='Time sum V (ns)', ylabel='Events',\
fnm='time_sum_v_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#print_ndarr(sp.lst_time_sum_w, 'W')
#h1d(np.array(sp.lst_time_sum_w), bins=nbins, amp_range=limits, log=True,\
# title ='Time sum W', xlabel='Time sum W (ns)', ylabel='Events',\
# fnm='time_sum_w_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#---------
if sp.STAT_TIME_SUMS :
#---------
nbins = 160
limits = (-80,80)
h1d(np.array(sp.lst_time_sum_u_corr), bins=nbins, amp_range=limits, log=True,\
title ='Time sum U corrected', xlabel='Time sum U (ns) corrected', ylabel='Events',\
fnm='time_sum_u_ns_corr.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
h1d(np.array(sp.lst_time_sum_v_corr), bins=nbins, amp_range=limits, log=True,\
title ='Time sum V corrected', xlabel='Time sum V (ns) corrected', ylabel='Events',\
fnm='time_sum_v_ns_corr.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#h1d(np.array(sp.lst_time_sum_w_corr), bins=nbins, amp_range=limits, log=True,\
# title ='Time sum W corrected', xlabel='Time sum W (ns) corrected', ylabel='Events',\
# fnm='time_sum_w_ns_corr.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#---------
if sp.STAT_UVW :
#---------
nbins = 200
limits = (-100,100)
h1d(np.array(sp.lst_u), bins=nbins, amp_range=limits, log=True,\
title ='U (mm)', xlabel='U (mm)', ylabel='Events',\
fnm='u_mm.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
h1d(np.array(sp.lst_v), bins=nbins, amp_range=limits, log=True,\
title ='V (mm)', xlabel='V (mm)', ylabel='Events',\
fnm='v_mm.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#h1d(np.array(sp.lst_w), bins=nbins, amp_range=limits, log=True,\
# title ='W (mm)', xlabel='W (mm)', ylabel='Events',\
# fnm='w_mm.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#---------
if sp.STAT_UVW :
#---------
nbins = 300
limits = (-150,150)
h1d(np.array(sp.lst_u_ns), bins=nbins, amp_range=limits, log=True,\
title ='U (ns)', xlabel='U (ns)', ylabel='Events',\
fnm='u_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
h1d(np.array(sp.lst_v_ns), bins=nbins, amp_range=limits, log=True,\
title ='V (ns)', xlabel='V (ns)', ylabel='Events',\
fnm='v_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#h1d(np.array(sp.lst_w_ns), bins=nbins, amp_range=limits, log=True,\
# title ='W (ns)', xlabel='W (ns)', ylabel='Events',\
# fnm='w_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#---------
if sp.STAT_CORRELATIONS :
#---------
#print_ndarr(sp.lst_time_sum_u, 'time_sum_u')
#print_ndarr(sp.lst_u_ns, 'lst_u_ns ')
xlimits=(-100,100)
#ylimits=(20,120)
ylimits=(50,180)
plot_graph(sp.lst_u_ns, sp.lst_time_sum_u, figsize=(8,7), pfmt='b, ', lw=1, xlimits=xlimits, ylimits=ylimits,\
title='t sum vs. U', xlabel='U (ns)', ylabel='t sum U (ns)',\
fnm='t_sum_vs_u_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
plot_graph(sp.lst_v_ns, sp.lst_time_sum_v, figsize=(8,7), pfmt='b, ', lw=1, xlimits=xlimits, ylimits=ylimits,\
title='t sum vs. V', xlabel='V (ns)', ylabel='t sum V (ns)',\
fnm='t_sum_vs_v_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#plot_graph(sp.lst_w_ns, sp.lst_time_sum_w, figsize=(8,7), pfmt='b, ', lw=1, xlimits=xlimits, ylimits=ylimits,\
# title='t sum vs. W', xlabel='W (ns)', ylabel='t sum W (ns)',\
# fnm='t_sum_vs_w_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#---------
xlimits=(-100,100)
ylimits=(-80,20)
#---------
plot_graph(sp.lst_u_ns, sp.lst_time_sum_u_corr, figsize=(8,7), pfmt='b, ', lw=1, xlimits=xlimits, ylimits=ylimits,\
title='t sum corrected vs. U', xlabel='U (ns)', ylabel='t sum corrected U (ns)',\
fnm='t_sum_corr_vs_u_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
plot_graph(sp.lst_v_ns, sp.lst_time_sum_v_corr, figsize=(8,7), pfmt='b, ', lw=1, xlimits=xlimits, ylimits=ylimits,\
title='t sum_corrected vs. V', xlabel='V (ns)', ylabel='t sum corrected V (ns)',\
fnm='t_sum_corr_vs_v_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#plot_graph(sp.lst_w_ns, sp.lst_time_sum_w_corr, figsize=(8,7), pfmt='b, ', lw=1, xlimits=xlimits, ylimits=ylimits,\
# title='t sum_corrected vs. W', xlabel='W (ns)', ylabel='t sum corrected W (ns)',\
# fnm='t_sum_corr_vs_w_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#---------
if sp.STAT_XY_COMPONENTS :
#---------
nbins = 200
limits = (-50,50)
h1d(np.array(sp.lst_Xuv), bins=nbins, amp_range=limits, log=True,\
title ='Xuv', xlabel='Xuv (mm)', ylabel='Events',\
fnm='Xuv_mm.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#h1d(np.array(sp.lst_Xuw), bins=nbins, amp_range=limits, log=True,\
# title ='Xuw', xlabel='Xuw (mm)', ylabel='Events',\
# fnm='Xuw_mm.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#h1d(np.array(sp.lst_Xvw), bins=nbins, amp_range=limits, log=True,\
# title ='Xvw', xlabel='Xvw (mm)', ylabel='Events',\
# fnm='Xvw_mm.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
h1d(np.array(sp.lst_Yuv), bins=nbins, amp_range=limits, log=True,\
title ='Yuv', xlabel='Yuv (mm)', ylabel='Events',\
fnm='Yuv_mm.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#h1d(np.array(sp.lst_Yuw), bins=nbins, amp_range=limits, log=True,\
# title ='Yuw', xlabel='Yuw (mm)', ylabel='Events',\
# fnm='Yuw_mm.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#h1d(np.array(sp.lst_Yvw), bins=nbins, amp_range=limits, log=True,\
# title ='Yvw', xlabel='Yvw (mm)', ylabel='Events',\
# fnm='Yvw_mm.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#---------
if sp.STAT_REFLECTIONS :
#---------
#nbins = 150
#limits = (-100, 5900)
nbins = 300
limits = (-500, 2500)
h1d(np.array(sp.lst_refl_u1), bins=nbins, amp_range=limits, log=True,\
title ='Reflection U1', xlabel='Reflection U1 (ns)', ylabel='Events',\
fnm='refl_u1_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
h1d(np.array(sp.lst_refl_u2), bins=nbins, amp_range=limits, log=True,\
title ='Reflection U2', xlabel='Reflection U2 (ns)', ylabel='Events',\
fnm='refl_u2_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
h1d(np.array(sp.lst_refl_v1), bins=nbins, amp_range=limits, log=True,\
title ='Reflection V1', xlabel='Reflection V1 (ns)', ylabel='Events',\
fnm='refl_v1_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
h1d(np.array(sp.lst_refl_v2), bins=nbins, amp_range=limits, log=True,\
title ='Reflection V2', xlabel='Reflection V2 (ns)', ylabel='Events',\
fnm='refl_v2_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#h1d(np.array(sp.lst_refl_w1), bins=nbins, amp_range=limits, log=True,\
# title ='Reflection W1', xlabel='Reflection W1 (ns)', ylabel='Events',\
# fnm='refl_w1_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#h1d(np.array(sp.lst_refl_w2), bins=nbins, amp_range=limits, log=True,\
# title ='Reflection W2', xlabel='Reflection W2 (ns)', ylabel='Events',\
# fnm='refl_w2_ns.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#---------
if sp.STAT_MISC :
#---------
h1d(np.array(sp.list_dr), bins=160, amp_range=(0,40), log=True,\
title ='Deviation', xlabel='Deviation (mm)', ylabel='Events',\
fnm='deviation_mm.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
h1d(np.array(sp.lst_consist_indicator), bins=64, amp_range=(0,64), log=True,\
title ='Consistence indicator', xlabel='Consistence indicator (bit)', ylabel='Events',\
fnm='consistence_indicator.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
h1d(np.array(sp.lst_rec_method), bins=64, amp_range=(0,32), log=True,\
title ='Reconstruction method', xlabel='Method id (bit)', ylabel='Events',\
fnm='reconstruction_method.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#---------
if sp.STAT_XY_2D :
#---------
amp_limits = (0,5)
imrange=(sp.img_x_bins.vmin(), sp.img_x_bins.vmax(), sp.img_y_bins.vmax(), sp.img_y_bins.vmin())
plot_image(sp.img_xy_uv, amp_range=amp_limits, img_range=imrange, fnm='xy_uv.png',\
title='XY_uv image', xlabel='x', ylabel='y', titwin='XY_uv image',\
hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#plot_image(sp.img_xy_uw, amp_range=amp_limits, img_range=imrange, fnm='xy_uw.png',\
# title='XY_uw image', xlabel='x', ylabel='y', titwin='XY_uw image',\
# hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#plot_image(sp.img_xy_vw, amp_range=amp_limits, img_range=imrange, fnm='xy_vw.png',\
# title='XY_vw image', xlabel='x', ylabel='y', titwin='XY_vw image',\
# hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
plot_image(sp.img_xy_1, amp_range=amp_limits, img_range=imrange, fnm='xy_1.png',\
title='XY image hit1', xlabel='x', ylabel='y', titwin='XY image hit1',\
hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
plot_image(sp.img_xy_2, amp_range=amp_limits, img_range=imrange, fnm='xy_2.png',\
title='XY image hit2', xlabel='x', ylabel='y', titwin='XY image hit2',\
hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#---------
if sp.STAT_PHYSICS :
#---------
#sp.t_ns_bins = HBins((1400., 2900.), t_ns_nbins, vtype=np.float32)
ht = sp.t_ns_bins
amp_limits = (0,5)
imrange=(ht.vmin(), ht.vmax(), ht.vmin(), ht.vmax())
plot_image(sp.ti_vs_tj, amp_range=amp_limits, img_range=imrange, fnm='ti_vs_tj.png',\
title='ti vs tj correlations', xlabel='tj (ns)', ylabel='ti (ns)', titwin='PIPICO', origin='lower',\
hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
limits = ht.vmin(), ht.vmax()
t_arr = np.array(sp.lst_t_all)
h1d(t_arr, bins=ht.nbins(), amp_range=limits, log=True,\
title ='time of all hits', xlabel='t_all (ns)', ylabel='Events',\
fnm='t_all.png', hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
t_all = ht.bin_count(t_arr)
sum_bkg = t_all.sum()
sum_cor = sp.ti_vs_tj.sum()
print('number of entries for 1-d ti (bkg):', sum_bkg)
print('number of entries for ti vs tj (cor):', sum_cor)
bkg = np.outer(t_all,t_all)/sum_bkg
print_ndarr(bkg, 'bkg:\n')
plot_image(bkg, amp_range=amp_limits, img_range=imrange, fnm='t_corr_bkg.png',\
title='ti vs tj background', xlabel='tj (ns)', ylabel='ti (ns)', titwin='PIPICO', origin='lower',\
hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
imrange=(ht.vmin(), ht.vmax(), sp.r_mm_bins.vmin(), sp.r_mm_bins.vmax())
plot_image(sp.rsy_vs_t, amp_range=amp_limits, img_range=imrange, fnm='rsy_vs_t.png',\
title='r*sign(y) vs t (All hits)', xlabel='t (ns)', ylabel='r*sign(y) (mm)', titwin='r vs t (All hits)',\
origin='lower', figsize=(12,5), axwin=(0.08, 0.10, 0.95, 0.84),\
hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#=========
#=========
show()
#=========
#=========
#---------
#if sp.STAT_XY_RESOLUTION :
#---------
# npa_binx = np.array(sp.lst_binx)
# npa_biny = np.array(sp.lst_biny)
# max_binx = npa_binx.max()
# max_biny = npa_biny.max()
# print('binx.min/max: %d %d' % (npa_binx.min(), max_binx))
# print('biny.min/max: %d %d' % (npa_biny.min(), max_biny))
# max_bins = max(max_binx, max_biny) + 1
# sp.img_xy_res = np.zeros((max_bins, max_bins), dtype=np.float64)
# sp.img_xy_sta = np.zeros((max_bins, max_bins), dtype=np.int32)
# sp.img_xy_res[npa_biny, npa_binx] += sp.lst_resol_fwhm # np.maximum(arr_max, nda)
# sp.img_xy_res[npa_biny, npa_binx] += 1
# sp.img_xy_res /= np.maximum(sp.img_xy_sta,1)
# plot_image(sp.img_xy_res, amp_range=None, img_range=(0,max_bins, 0,max_bins),\
# fnm='xy_res.png', title='Resolution FWHM (mm)', xlabel='x bins', ylabel='y bins', titwin='Resolution FWHM',\
# hwin_x0y0=hwin_x0y0, prefix=prefix, do_save=do_save)
#----------
|
def count_robots(a):
|
'''
Created on Aug 4, 2020
@author: willg
'''
import fnmatch
import json
import os
import re
from datetime import datetime
import humanize
from pathlib import Path
import shutil
import common
user_delimiter = "C,'6WeWq~w,S24!z;L+EM$vL{3M,HMKjy9U2dfH8F-'mwH'2@K.qaQGpg*!StX*:D7^&P;d4@AcWS3)8f64~6CB^B4{s`>9+*brV"
backup_folder = "../backups/"
meta = {
"command_count": {}
}
SLASH_TERMS_CONVERSIONS = {
"flag show": 'getflag',
"flag set": 'setflag',
"flag remove": 'setflag',
"update rt": 'rtupdate',
"update ct": 'ctupdate',
"setting prefix": 'setprefix',
"setting theme": 'defaulttheme',
"setting graph": 'defaultgraph',
"setting ignore_large_times": 'defaultlargetimes',
"blacklist user add": 'blacklistuser',
"blacklist user remove": 'blacklistuser',
"blacklist word add": 'blacklistword',
"blacklist word remove": 'removeblacklistword',
"sha add": 'addsha',
"sha remove": 'removesha',
"admin add": 'addadmin',
"admin remove": 'removeadmin'
}
def initialize():
global meta
if os.path.isfile(common.JSON_META_FILE):
with open(common.JSON_META_FILE) as f:
meta = json.load(f)
def save_metadata():
counts = meta["command_count"]
meta["command_count"] = {k:counts[k] for k in sorted(counts.keys(),reverse=True)}
with open(common.JSON_META_FILE,'w') as f:
json.dump(meta, f)
def log_command(command):
command = SLASH_TERMS_CONVERSIONS.get(command, command)
for name in dir(common.main):
if re.fullmatch("([A-Z]+_)*TERMS",name):
command_terms = common.main.__getattribute__(name)
if command in command_terms:
meta["command_count"][name] = meta["command_count"].get(name, 0) + 1
def backup_files(to_back_up=common.FILES_TO_BACKUP):
Path(backup_folder).mkdir(parents=True, exist_ok=True)
todays_backup_path = backup_folder + str(datetime.date(datetime.now())) + "/"
Path(todays_backup_path).mkdir(parents=True, exist_ok=True)
#Create backup folders
for local_dir in common.ALL_PATHS:
Path(f"{todays_backup_path}{local_dir}").mkdir(parents=True, exist_ok=True)
for file_name in to_back_up:
try:
common.check_create(file_name)
temp_file_n = file_name
if os.path.exists(todays_backup_path + temp_file_n):
# don't backup the database more than once, otherwise server will run out of disk
#To the above comment, we'll lose the current day's data on Ctrl+C if we don't ^
#if file_name == common.ROOM_DATA_TRACKING_DATABASE_FILE:
# continue
for i in range(50):
temp_file_n = file_name + "_" + str(i)
if not os.path.exists(todays_backup_path + temp_file_n):
break
shutil.copy2(file_name, todays_backup_path + temp_file_n)
except Exception as e:
print(e)
else:
if file_name == common.FULL_MESSAGE_LOGGING_FILE:
os.remove(common.FULL_MESSAGE_LOGGING_FILE)
common.check_create(common.FULL_MESSAGE_LOGGING_FILE)
async def prune_backups():
print(f"{str(datetime.now())}: Pruning backups...")
for folder in os.listdir(backup_folder):
try:
# Fix previously zipped files
path = backup_folder + folder
if ".zip" in path:
print("Unzipping", path)
new_path = path.replace(".zip","")
await common.run_command_async(f'unzip {path} -d {new_path}')
await common.run_command_async(f'rm {path}')
os.system(f'mv {new_path}/*/*/* {new_path}/')
await common.run_command_async(f'rm -rf {new_path}/backups')
path = new_path
create_time = datetime.strptime(folder.replace(".zip", ""),'%Y-%m-%d').date()
delta = datetime.date(datetime.now()) - create_time
if delta.days > 14 and create_time.day != 1:
if os.path.exists(path+"/tablebot_data"):
print("Deleting", path)
shutil.rmtree(path+"/tablebot_data")
shutil.rmtree(path + "/discord_server_settings")
elif delta.days >= 1:
db_path = path+"/tablebot_data/room_data_tracking.db"
db_path_zip = db_path + ".zip"
if not os.path.exists(db_path_zip) and os.path.exists(db_path):
print("Zipping", db_path)
await common.run_command_async(f'zip -r {db_path_zip} {db_path}')
await common.run_command_async(f'rm -rf {db_path}')
except Exception as e:
print(f"{str(datetime.now())}: Pruning backups has exception: {e}")
pass
print(f"{str(datetime.now())}: Pruning backups complete data")
def get_commands_from_txt(to_find, needle_function, log_file, limit=None):
results = []
needle = needle_function(to_find)
with open(log_file, "r", encoding='utf-8') as f:
for line in f:
if "?lookup " in line.lower():
continue
if needle.lower() in line.lower():
results.append(line)
if limit is not None and len(results) >= limit:
return results
return results
def get_all_commands(discord_id, limit=None):
results = []
backups_path = Path(backup_folder)
current_logging_path = Path(common.LOGGING_PATH)
all_paths = list(backups_path.iterdir()) + [current_logging_path]
needle_function = lambda x: f"User ID: {x}"
for dated_folder in all_paths:
if dated_folder.is_dir():
full_log_files = [p for p in dated_folder.glob(f'**/{common.FULL_LOGGING_FILE_NAME}*') if p.is_file()]
for log_file in full_log_files:
new_limit = None if limit is None else limit - len(results)
results.extend(get_commands_from_txt(discord_id, needle_function, log_file, limit=new_limit))
if limit is not None and len(results) >= limit:
return results
return results
def hard_check(discord_username, limit=None):
results = []
backups_path = Path(backup_folder)
current_logging_path = Path(common.LOGGING_PATH)
all_paths = sorted(list(backups_path.iterdir()), key=lambda x:x.name) + [current_logging_path]
needle_function = lambda x: x.lower()
for dated_folder in all_paths:
if dated_folder.is_dir():
full_log_files = [p for p in dated_folder.glob(f'**/messages_logging*') if p.is_file()]
for log_file in full_log_files:
new_limit = None if limit is None else limit - len(results)
results.extend(get_commands_from_txt(discord_username, needle_function, log_file, limit=new_limit))
if limit is not None and len(results) >= limit:
return results
return results
def count_lines_of_code():
lines_count = 0
for file in os.listdir('.'):
if fnmatch.fnmatch(file, '*.py'):
with open(file, encoding='utf-8') as f:
for _ in f:
lines_count += 1
return lines_count
def get_from_stats_file(stats_file=common.STATS_FILE):
global user_delimiter
total_pictures = 0
total_commands = 0
total_code_lines = 0
servers = set()
users = set()
with open(stats_file, "r+", encoding='utf-8') as f:
total_pictures = int(f.readline().strip("\n"))
total_commands = int(f.readline().strip("\n"))
total_code_lines = int(f.readline().strip("\n"))
for line_ in f:
line_ = line_.strip("\n")
if line_ == user_delimiter:
break
servers.add(line_)
for line_ in f:
line_ = line_.strip("\n")
users.add(line_)
return total_pictures, total_commands, total_code_lines, servers, users
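# Layout of the stats file as implied by the parser above (a sketch, not an authoritative
# format description):
#   line 1: total table pictures generated
#   line 2: total commands received
#   line 3: total lines of code
#   following lines: server names, up to (and excluding) the user_delimiter line
#   remaining lines: user names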
def get_combined_stats_from_both(stats_file=common.STATS_FILE, commands_logging=common.MESSAGE_LOGGING_FILE):
stats_1 = get_from_stats_file(stats_file)
stats_2 = get_from_messages_logging_file(commands_logging)
total_pictures = stats_1[0] + stats_2[0]
total_commands = stats_1[1] + stats_2[1]
total_code_lines = stats_1[2] + stats_2[2]
stats_1[3].update(stats_2[3])
stats_1[4].update(stats_2[4])
return total_pictures, total_commands, total_code_lines, stats_1[3], stats_1[4]
def get_from_messages_logging_file(commands_logging=common.MESSAGE_LOGGING_FILE):
users = set()
servers = set()
war_picture_count = 0
total_commands = 0
common.check_create(commands_logging)
with open(commands_logging, "r+", encoding='utf-8') as f:
for line_ in f:
total_commands += 1
try:
if "?wp" in line_[line_.index(" - Command: ") + len(" - Command: "):]:
war_picture_count += 1
index_start = line_.index(" - User: ") + len(" - User: ")
end_index = line_.index(" - Command: ", index_start)
users.add(line_[index_start:end_index])
index_start = line_.index("Server: ") + len("Server: ")
end_index = line_.index(" - Channel: ", index_start)
servers.add(line_[index_start:end_index].strip())
except Exception:
pass
return war_picture_count, total_commands, 0, servers, users
def dump_to_stats_file(stats_file=common.STATS_FILE, commands_logging=common.MESSAGE_LOGGING_FILE):
global user_delimiter
war_picture_count, total_commands, total_code_lines, servers, users = get_combined_stats_from_both(stats_file, commands_logging)
temp_stats = f"{stats_file}_temp"
with open(temp_stats, "w+", encoding="utf-8", errors="replace") as temp_out:
temp_out.write(str(war_picture_count) + "\n")
temp_out.write(str(total_commands) + "\n")
temp_out.write(str(total_code_lines) + "\n")
for server in servers:
temp_out.write(server + "\n")
temp_out.write(user_delimiter + "\n")
for user in users:
temp_out.write(str(user) + "\n")
os.remove(stats_file)
os.rename(temp_stats, stats_file)
os.remove(commands_logging)
common.check_create(commands_logging)
def stats(num_bots:int, client=None, stats_file=common.STATS_FILE, commands_logging=common.MESSAGE_LOGGING_FILE):
str_build = ""
war_picture_count, total_commands, total_code_lines, servers, users = get_combined_stats_from_both(stats_file, commands_logging)
str_build += "Number of servers that have MKW Table Bot: **" + str(len(client.guilds if client is not None else servers)) + "**\n"
str_build += "First server ever: **The Funkynuts" + "**\n"
str_build += "\n"
str_build += "Number of people who have used MKW Table Bot: **" + str(len(users)) + "**\n"
str_build += "First user ever: **Chippy#8126" + "**\n"
str_build += "\n"
str_build += "Number of table pictures generated: **" + str(war_picture_count) + "**\n"
str_build += "Total commands MKW Table Bot has recieved: **" + str(total_commands) + "**\n"
str_build += "\n"
#4133
#str_build += "Lines of high quality code written to make this bot a reality: **" + str(count_lines_of_code()) + "**\n"
str_build += "Lines of high quality code written to make this bot a reality: **" + str(total_code_lines) + "**\n"
str_build += "\n"
right_now = datetime.now()
current_time = right_now.strftime('%I:%M:%S%p')
str_build += "Current server (and BadWolf's) time: **" + current_time + "**\n"
ago = None
with open(commands_logging, "rb+") as f:
line_num = 0
try:
f.seek(-2, os.SEEK_END)
while True:
if f.read(1) == b'\n':
line_num += 1
if line_num == 2:
break
f.seek(-2, os.SEEK_CUR)
last_line = f.readline().decode()
last_message_time = last_line.split("S")[0][:-2]
last_message_obj = datetime.strptime(last_message_time, '%Y-%m-%d %H:%M:%S.%f')
ago = right_now - last_message_obj
except:
pass
if ago is not None:
str_build += "Last command before your stats command was **" + humanize.naturaltime(ago) + "**\n"
else:
str_build += "Last command before your stats command was **" + "N/A" + "**\n"
str_build += "Number of wars being tabled with the bot right now: **" + str(num_bots) + "**\n"
str_build += "\n\nNotable beta testers: **\n\t- Chippy#8126\n\t- callum#6560\n\t- PhillyGator#0850**"
str_build += "\n\nSpecial thanks to: **\n\t- callum#6560's dad for solving the last piece to the tag recognition AI**"
return str_build
if __name__ == '__main__':
print(hard_check("Dash8r#2342"))
print(count_lines_of_code())
|
#
# @author Kevin Jesse
# @email kevin.r.jesse@gmail.com
#
from operator import itemgetter
import re
import random
import control
import responseCtrl
import movieCtrl
import database_connect
cur = database_connect.db_connect()
def genre(meta_info):
"""
:param meta_info: this contains the input scoring information
:return: the result of searching the database (via the module-level cursor cur) for movies similar to the user's top-scored input
"""
glist = sorted(meta_info, key=itemgetter(2), reverse=True)
# sqlstring = """SELECT primarytitle FROM title INNER JOIN ratings ON title.tconst=ratings.tconst WHERE genres = ANY (VALUES ('Comedy'))"""
# cur.execute(sqlstring)
# rows = cur.fetchall()
#sqlstring = """SELECT * FROM ratings"""
sqlstring = """SELECT title.tconst, primarytitle FROM title INNER JOIN ratings ON title.tconst=ratings.tconst WHERE genres LIKE '%""" + glist[0][0]
if len(glist) > 2:
sqlstring += """%' AND genres LIKE '%""" + glist[1][0] + """%' AND genres LIKE '%""" +glist[2][0] + """%'"""
elif len(glist) == 2:
sqlstring += """%' AND genres LIKE '%""" + glist[1][0] + """%'"""
else:
sqlstring += """%'"""
sqlstring += """ORDER BY numvotes DESC LIMIT 1000"""
cur.execute(sqlstring)
rows = cur.fetchall()
return rows
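# Illustrative sketch of the query built above for a hypothetical top-3 genre list such as
# [('Comedy', ..., 0.9), ('Drama', ..., 0.5), ('Romance', ..., 0.2)]:
#   SELECT title.tconst, primarytitle FROM title
#   INNER JOIN ratings ON title.tconst=ratings.tconst
#   WHERE genres LIKE '%Comedy%' AND genres LIKE '%Drama%' AND genres LIKE '%Romance%'
#   ORDER BY numvotes DESC LIMIT 1000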
def genreStrat(input, strategies, cache_results, curr_movie, qLib, model, resource, database, history, tfidfmodel, tfidfdict):
"""
:param curr_movie: the movie currently being recommended, if any
:param input: user text input
:param strategies: what strategy the application is on (what task is being accomplished)
:param cache_results: results of previous database query used for follow up requests
:param qLib: question library
:param model: word to vec (not used for now)
:param resource: language resource
:param database: (not used for now)
:param history: user and AI response history used to know what the previous question was
:param tfidfmodel: tf-idf model
:param tfidfdict: tf-idf dict
:return: text output, next question, strategies (updated), cache results (updated)
"""
output = ''
qtup = None
prev_qtup = history[-1]
if prev_qtup[1] == "g0" or prev_qtup[1] == "g1":
meta_info = control.ScoreInput(model, database, resource, input, history, tfidfmodel, tfidfdict)
cache_results = genre(meta_info), meta_info
if not cache_results[0]:
output += "I could not find anything from the genre you specified. "
qtup = random.choice(filter(lambda x: x[1] == 'g0', qLib[strategies[-1]]))
output += qtup[0]
return output, qtup, strategies, cache_results, None
curr_movie = random.choice(cache_results[0])
output += "I found " + curr_movie[1] + " from the genre you specified. "
qtup = random.choice(filter(lambda x: x[1] == 'g2', qLib[strategies[-1]]))
output += qtup[0]
elif prev_qtup[1] == "g2":
if responseCtrl.responseBinSim(input):
output += "Great! "
qtup = random.choice(filter(lambda x: x[1] == 'g6', qLib[strategies[-1]]))
output += qtup[0]
#movieCtrl.storeMovieRec(curr_movie)
else:
output += "That is too bad. "
qtup = random.choice(filter(lambda x: x[1] == 'g3', qLib[strategies[-1]]))
output += qtup[0]
elif prev_qtup[1] == "g3":
if responseCtrl.responseBinSim(input):
cache_results[0].pop(0)
curr_movie = random.choice(cache_results[0])
output += "Cool! I found " + curr_movie[1] + " from the genre you specified. "
qtup = random.choice(filter(lambda x: x[1] == 'g2', qLib[strategies[-1]]))
output += qtup[0]
else:
output += "OK. "
qtup = random.choice(filter(lambda x: x[1] == 'g4', qLib[strategies[-1]]))
output += qtup[0]
elif prev_qtup[1] == "g4":
if responseCtrl.responseBinSim(input):
output += "Genres it is then! "
qtup = random.choice(filter(lambda x: x[1] == 'g1', qLib[strategies[-1]]))
output += qtup[0]
else:
output += "OK. "
qtup = random.choice(filter(lambda x: x[1] == 'g5', qLib[strategies[-1]]))
output += qtup[0]
elif prev_qtup[1] == "g5":
if responseCtrl.responseBinSim(input):
output += "OK. "
strategies.append("continue")
qtup = random.choice(filter(lambda x: x[1] == 'c1', qLib[strategies[-1]]))
output += qtup[0]
else:
qtup = random.choice(filter(lambda x: x[1] == 'g1', qLib[strategies[-1]]))
output += qtup[0]
elif prev_qtup[1] == "g6":
strategies.append("continue")
if responseCtrl.responseBinSim(input):
#print curr_movie
data = movieCtrl.moviebyID(curr_movie[0])
output += data[1] + " (" + data[3] + ") is " + data[8] + " minutes and is a " + \
data[4].replace(' ', ', ') + " film. Produced by " + data[7] + ", this film's rating is " + data[6] + ". "
qtup = random.choice(filter(lambda x: x[1] == 'c1', qLib[strategies[-1]]))
output += qtup[0]
else:
qtup = random.choice(filter(lambda x: x[1] == 'c2', qLib[strategies[-1]]))
output += qtup[0]
return output, qtup, strategies, cache_results, curr_movie
|
import rhinoscriptsyntax as rs
obj = rs.GetObject("Select a srf", rs.filter.surface)
# obj = rs.GetObject("Select object", rs.filter.surface + rs.filter.polysurface)
intervalx = rs.GetReal("intervalx", 1)
intervaly = rs.GetReal("intervaly", 2)
Secx = rs.GetReal("mullion width", 0.15)
Secy = rs.GetReal("mullion depth", 0.05)
louverW = rs.GetReal("louverW width", 0.5)
vec1 = (-Secx/2, -Secy, 0)
vec2 = (-Secx/2, -Secy/2, 0)
vec3 = (-louverW/2, -louverW/2, 0)
def profile2(plane, vec):
rs.ViewCPlane(None, plane)
sec = rs.AddLine((0,0,0), (louverW,0,0))
sec = rs.RotateObject(sec, plane.Origin, 45.0, plane.ZAxis, copy=True)
# if sec: rs.DeleteObjects(sec)
return sec
def sweepSec(crv, plane, vec):
# rs.AddPlaneSurface( plane, 1, 1 )
rect = profile2(plane, vec)
sweep = rs.AddSweep1(crv, rect, closed=True)
sweep = rs.CapPlanarHoles(sweep)
if rect: rs.DeleteObjects(rect)
if crv: rs.DeleteObjects(crv)
return sweep
def flipBool(tf):
return abs(tf-1)
def intervals(srf, uv, spacing):
domains = []
domain = rs.SurfaceDomain(srf, uv)
i = spacing
while i < domain[1]:
domains.append(i)
i = i+spacing
return domains
def intervalpts(srf, uv, spacing):
spacings = intervals(srf, uv, spacing)
ptlist = []
for i in spacings:
coord = []
coord.append(i)
coord.insert(flipBool(uv), 0)
ptlist.append(coord)
return ptlist
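# Illustrative sketch: intervalpts() yields parameter-space points along one direction,
# with the other parameter pinned to 0 via flipBool():
#   uv=0, spacing s  -> [[s, 0], [2*s, 0], ...]   (steps along u)
#   uv=1, spacing s  -> [[0, s], [0, 2*s], ...]   (steps along v)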
def isoframe(srf, uv, spacing, vec):
points = intervalpts(srf, uv, spacing)
print points
sweeps = []
for i in points:
point = rs.EvaluateSurface(srf, i[0], i[1])
parameter = rs.SurfaceClosestPoint(srf, point)
plane = rs.SurfaceFrame(srf, parameter)
crv = rs.ExtractIsoCurve( srf, parameter, flipBool(uv))
direction = rs.CurveTangent(crv, 0)
newplane = rs.PlaneFromNormal(point, direction, plane.ZAxis)
sweeps.append(sweepSec(crv, newplane, vec))
return sweeps
def framelouver(srf):
frames = []
frames.append(isoframe(srf, 0, intervalx, vec3))
return frames
framelouver(obj)
|
from fontbakery.checkrunner import (
INFO
, WARN
, ERROR
, SKIP
, PASS
, FAIL
, Section
)
import os
from .shared_conditions import is_variable_font
from fontbakery.callable import condition, check, disable
from fontbakery.message import Message
from fontbakery.constants import(
# TODO: priority levels are not yet part of the new runner/reporters.
# How did we ever use this information?
# Check priority levels:
CRITICAL
, IMPORTANT
# , NORMAL
# , LOW
# , TRIVIAL
)
from fontbakery.fonts_spec import spec_factory
spec_imports = (
('.', ('general', 'cmap', 'head', 'os2', 'post', 'name',
'hhea', 'dsig', 'hmtx', 'gpos', 'gdef', 'kern', 'glyf',
'fvar', 'shared_conditions', 'loca')
),
)
# this is from the output of
# $ fontbakery check-specification fontbakery.specifications.googlefonts -L
expected_check_ids = [
'com.google.fonts/check/001' # Checking file is named canonically.
, 'com.google.fonts/check/002' # Checking all files are in the same directory.
, 'com.google.fonts/check/003' # Does DESCRIPTION file contain broken links?
, 'com.google.fonts/check/004' # Is this a proper HTML snippet?
, 'com.google.fonts/check/005' # DESCRIPTION.en_us.html must have more than 200 bytes.
, 'com.google.fonts/check/006' # DESCRIPTION.en_us.html must have less than 1000 bytes.
, 'com.google.fonts/check/007' # Font designer field in METADATA.pb must not be 'unknown'.
, 'com.google.fonts/check/008' # Fonts have consistent underline thickness?
, 'com.google.fonts/check/009' # Fonts have consistent PANOSE proportion?
, 'com.google.fonts/check/010' # Fonts have consistent PANOSE family type?
, 'com.google.fonts/check/011' # Fonts have equal numbers of glyphs?
, 'com.google.fonts/check/012' # Fonts have equal glyph names?
, 'com.google.fonts/check/013' # Fonts have equal unicode encodings?
, 'com.google.fonts/check/014' # Make sure all font files have the same version value.
, 'com.google.fonts/check/015' # Font has post table version 2?
, 'com.google.fonts/check/016' # Checking OS/2 fsType.
, 'com.google.fonts/check/018' # Checking OS/2 achVendID.
, 'com.google.fonts/check/019' # Substitute copyright, registered and trademark symbols in name table entries.
, 'com.google.fonts/check/020' # Checking OS/2 usWeightClass.
, 'com.google.fonts/check/028' # Check font has a license.
, 'com.google.fonts/check/029' # Check copyright namerecords match license file.
, 'com.google.fonts/check/030' # License URL matches License text on name table?
, 'com.google.fonts/check/031' # Description strings in the name table must not contain copyright info.
, 'com.google.fonts/check/032' # Description strings in the name table must not exceed 200 characters.
, 'com.google.fonts/check/033' # Checking correctness of monospaced metadata.
, 'com.google.fonts/check/034' # Check if OS/2 xAvgCharWidth is correct.
, 'com.google.fonts/check/035' # Checking with ftxvalidator.
, 'com.google.fonts/check/036' # Checking with ots-sanitize.
, 'com.google.fonts/check/037' # Checking with Microsoft Font Validator.
, 'com.google.fonts/check/038' # FontForge validation outputs error messages?
, 'com.google.fonts/check/039' # FontForge checks.
, 'com.google.fonts/check/040' # Checking OS/2 usWinAscent & usWinDescent.
, 'com.google.fonts/check/041' # Checking Vertical Metric Linegaps.
, 'com.google.fonts/check/042' # Checking OS/2 Metrics match hhea Metrics.
, 'com.google.fonts/check/043' # Checking unitsPerEm value is reasonable.
, 'com.google.fonts/check/044' # Checking font version fields.
, 'com.google.fonts/check/045' # Does the font have a DSIG table?
, 'com.google.fonts/check/046' # Font contains the first few mandatory glyphs (.null or NULL, CR and space)?
, 'com.google.fonts/check/047' # Font contains glyphs for whitespace characters?
, 'com.google.fonts/check/048' # Font has **proper** whitespace glyph names?
, 'com.google.fonts/check/049' # Whitespace glyphs have ink?
, 'com.google.fonts/check/050' # Whitespace glyphs have coherent widths?
, 'com.google.fonts/check/052' # Font contains all required tables?
, 'com.google.fonts/check/053' # Are there unwanted tables?
, 'com.google.fonts/check/054' # Show hinting filesize impact.
, 'com.google.fonts/check/055' # Version format is correct in 'name' table?
, 'com.google.fonts/check/056' # Font has old ttfautohint applied?
, 'com.google.fonts/check/057' # Name table entries should not contain line-breaks.
, 'com.google.fonts/check/058' # Glyph names are all valid?
, 'com.google.fonts/check/059' # Font contains unique glyph names?
, 'com.google.fonts/check/061' # EPAR table present in font?
, 'com.google.fonts/check/062' # Is 'gasp' table correctly set?
, 'com.google.fonts/check/063' # Does GPOS table have kerning information?
, 'com.google.fonts/check/064' # Is there a caret position declared for every ligature?
, 'com.google.fonts/check/065' # Is there kerning info for non-ligated sequences?
, 'com.google.fonts/check/066' # Is there a "kern" table declared in the font?
, 'com.google.fonts/check/067' # Make sure family name does not begin with a digit.
, 'com.google.fonts/check/068' # Does full font name begin with the font family name?
, 'com.google.fonts/check/069' # Is there any unused data at the end of the glyf table?
, 'com.google.fonts/check/070' # Font has all expected currency sign characters?
, 'com.google.fonts/check/071' # Font follows the family naming recommendations?
, 'com.google.fonts/check/072' # Font enables smart dropout control in "prep" table instructions?
, 'com.google.fonts/check/073' # MaxAdvanceWidth is consistent with values in the Hmtx and Hhea tables?
, 'com.google.fonts/check/074' # Are there non-ASCII characters in ASCII-only NAME table entries?
, 'com.google.fonts/check/075' # Check for points out of bounds.
, 'com.google.fonts/check/076' # Check glyphs have unique unicode codepoints.
, 'com.google.fonts/check/077' # Check all glyphs have codepoints assigned.
#, 'com.google.fonts/check/078' # Check that glyph names do not exceed max length.
, 'com.google.fonts/check/079' # Monospace font has hhea.advanceWidthMax equal to each glyph's advanceWidth?
, 'com.google.fonts/check/081' # METADATA.pb: Fontfamily is listed on Google Fonts API?
, 'com.google.fonts/check/083' # METADATA.pb: check if fonts field only has unique "full_name" values.
, 'com.google.fonts/check/084' # METADATA.pb: check if fonts field only contains unique style:weight pairs.
, 'com.google.fonts/check/085' # METADATA.pb license is "APACHE2", "UFL" or "OFL"?
, 'com.google.fonts/check/086' # METADATA.pb should contain at least "menu" and "latin" subsets.
, 'com.google.fonts/check/087' # METADATA.pb subsets should be alphabetically ordered.
, 'com.google.fonts/check/088' # METADATA.pb: Copyright notice is the same in all fonts?
, 'com.google.fonts/check/089' # Check that METADATA.pb family values are all the same.
, 'com.google.fonts/check/090' # METADATA.pb: According Google Fonts standards, families should have a Regular style.
, 'com.google.fonts/check/091' # METADATA.pb: Regular should be 400.
, 'com.google.fonts/check/092' # Checks METADATA.pb font.name field matches family name declared on the name table.
, 'com.google.fonts/check/093' # Checks METADATA.pb font.post_script_name matches postscript name declared on the name table.
, 'com.google.fonts/check/094' # METADATA.pb font.full_name value matches fullname declared on the name table?
, 'com.google.fonts/check/095' # METADATA.pb font.name value should be same as the family name declared on the name table.
, 'com.google.fonts/check/096' # METADATA.pb font.full_name and font.post_script_name fields have equivalent values ?
, 'com.google.fonts/check/097' # METADATA.pb font.filename and font.post_script_name fields have equivalent values?
, 'com.google.fonts/check/098' # METADATA.pb font.name field contains font name in right format?
, 'com.google.fonts/check/099' # METADATA.pb font.full_name field contains font name in right format?
, 'com.google.fonts/check/100' # METADATA.pb font.filename field contains font name in right format?
, 'com.google.fonts/check/101' # METADATA.pb font.post_script_name field contains font name in right format?
, 'com.google.fonts/check/102' # Copyright notice on METADATA.pb matches canonical pattern?
, 'com.google.fonts/check/103' # Copyright notice on METADATA.pb does not contain Reserved Font Name?
, 'com.google.fonts/check/104' # METADATA.pb: Copyright notice shouldn't exceed 500 chars.
, 'com.google.fonts/check/105' # Filename is set canonically in METADATA.pb?
, 'com.google.fonts/check/106' # METADATA.pb font.style "italic" matches font internals?
, 'com.google.fonts/check/107' # METADATA.pb font.style "normal" matches font internals?
, 'com.google.fonts/check/108' # METADATA.pb font.name and font.full_name fields match the values declared on the name table?
, 'com.google.fonts/check/109' # METADATA.pb: Check if fontname is not camel cased.
, 'com.google.fonts/check/110' # METADATA.pb: Check font name is the same as family name.
, 'com.google.fonts/check/111' # METADATA.pb: Check that font weight has a canonical value.
, 'com.google.fonts/check/112' # Checking OS/2 usWeightClass matches weight specified at METADATA.pb.
, 'com.google.fonts/check/113' # METADATA.pb weight matches postScriptName.
, 'com.google.fonts/check/115' # METADATA.pb: Font styles are named canonically?
, 'com.google.fonts/check/116' # Is font em size (ideally) equal to 1000?
, 'com.google.fonts/check/117' # Version number has increased since previous release on Google Fonts?
, 'com.google.fonts/check/118' # Glyphs are similar to Google Fonts version?
, 'com.google.fonts/check/119' # TTFAutohint x-height increase value is same as in previous release on Google Fonts ?
, 'com.google.fonts/check/129' # Checking OS/2 fsSelection value.
, 'com.google.fonts/check/130' # Checking post.italicAngle value.
, 'com.google.fonts/check/131' # Checking head.macStyle value.
, 'com.google.fonts/check/152' # Name table strings must not contain 'Reserved Font Name'.
, 'com.google.fonts/check/153' # Check if each glyph has the recommended amount of contours.
, 'com.google.fonts/check/154' # Check font has same encoded glyphs as version hosted on fonts.google.com
, 'com.google.fonts/check/155' # Copyright field for this font on METADATA.pb matches all copyright notice entries on the name table?
, 'com.google.fonts/check/156' # Font has all mandatory 'name' table entries?
, 'com.google.fonts/check/157' # Check name table: FONT_FAMILY_NAME entries.
, 'com.google.fonts/check/158' # Check name table: FONT_SUBFAMILY_NAME entries.
, 'com.google.fonts/check/159' # Check name table: FULL_FONT_NAME entries.
, 'com.google.fonts/check/160' # Check name table: POSTSCRIPT_NAME entries.
, 'com.google.fonts/check/161' # Check name table: TYPOGRAPHIC_FAMILY_NAME entries.
, 'com.google.fonts/check/162' # Check name table: TYPOGRAPHIC_SUBFAMILY_NAME entries.
, 'com.google.fonts/check/163' # Combined length of family and style must not exceed 20 characters.
, 'com.google.fonts/check/164' # Length of copyright notice must not exceed 500 characters.
, 'com.google.fonts/check/165' # Familyname must be unique according to namecheck.fontdata.com
, 'com.google.fonts/check/166' # Check for font-v versioning
, 'com.google.fonts/check/167' # The variable font 'wght' (Weight) axis coordinate must be 400 on the 'Regular' instance.
, 'com.google.fonts/check/168' # The variable font 'wdth' (Width) axis coordinate must be 100 on the 'Regular' instance.
, 'com.google.fonts/check/169' # The variable font 'slnt' (Slant) axis coordinate must be zero on the 'Regular' instance.
, 'com.google.fonts/check/170' # The variable font 'ital' (Italic) axis coordinate must be zero on the 'Regular' instance.
, 'com.google.fonts/check/171' # The variable font 'opsz' (Optical Size) axis coordinate should be between 9 and 13 on the 'Regular' instance.
, 'com.google.fonts/check/172' # The variable font 'wght' (Weight) axis coordinate must be 700 on the 'Bold' instance.
, 'com.google.fonts/check/174' # Check a static ttf can be generated from a variable font.
, 'com.google.fonts/check/180' # Does the number of glyphs in the loca table match the maxp table?
, 'com.google.fonts/check/ttx-roundtrip' # Checking with fontTools.ttx
, 'com.google.fonts/check/has_ttfautohint_params' # Font has ttfautohint params
, 'com.google.fonts/check/vttclean' # There must not be VTT Talk sources in the font.
]
specification = spec_factory(default_section=Section("Google Fonts"))
# -------------------------------------------------------------------
@condition
def style(font):
"""Determine font style from canonical filename."""
from fontbakery.constants import STYLE_NAMES
filename = os.path.basename(font)
if '-' in filename:
stylename = os.path.splitext(filename)[0].split('-')[1]
if stylename in [name.replace(' ', '') for name in STYLE_NAMES]:
return stylename
return None
@condition
def expected_os2_weight(style):
"""The weight name and the expected OS/2 usWeightClass value inferred from
  the style part of the font name.
  The Google Fonts API which serves the fonts can only serve
  the following weight values with the corresponding subfamily styles:
250, Thin
275, ExtraLight
300, Light
400, Regular
500, Medium
600, SemiBold
700, Bold
800, ExtraBold
900, Black
Thin is not set to 100 because of legacy Windows GDI issues:
https://www.adobe.com/devnet/opentype/afdko/topic_font_wt_win.html
"""
if not style:
return None
# Weight name to value mapping:
GF_API_WEIGHTS = {
"Thin": 250,
"ExtraLight": 275,
"Light": 300,
"Regular": 400,
"Medium": 500,
"SemiBold": 600,
"Bold": 700,
"ExtraBold": 800,
"Black": 900
}
if style == "Italic":
weight_name = "Regular"
elif style.endswith("Italic"):
weight_name = style.replace("Italic", "")
else:
weight_name = style
expected = GF_API_WEIGHTS[weight_name]
return weight_name, expected
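# For illustration (values follow directly from the mapping above):
#   expected_os2_weight("BoldItalic") -> ("Bold", 700)
#   expected_os2_weight("Thin")       -> ("Thin", 250)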
@check(
id = 'com.google.fonts/check/001',
misc_metadata = {
'priority': CRITICAL
}
)
def com_google_fonts_check_001(font):
"""Checking file is named canonically.
A font's filename must be composed in the following manner:
<familyname>-<stylename>.ttf
e.g. Nunito-Regular.ttf, Oswald-BoldItalic.ttf
  Variable fonts must use the "-VF" suffix, such as:
e.g. Roboto-VF.ttf, Barlow-VF.ttf,
Example-Roman-VF.ttf, Familyname-Italic-VF.ttf
"""
from fontbakery.constants import STYLE_NAMES
from fontTools.ttLib import TTFont
filename = os.path.basename(font)
basename = os.path.splitext(filename)[0]
# remove spaces in style names
valid_style_suffixes = [name.replace(' ', '') for name in STYLE_NAMES]
valid_varfont_suffixes = ["VF",
"Italic-VF",
"Roman-VF"]
suffix = basename.split('-')
suffix.pop(0)
suffix = '-'.join(suffix)
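  # e.g. "Oswald-BoldItalic" -> suffix "BoldItalic"; "Example-Roman-VF" -> suffix "Roman-VF"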
if ('-' in basename and
(suffix in valid_varfont_suffixes
and is_variable_font(TTFont(font)))
or (suffix in valid_style_suffixes
and not is_variable_font(TTFont(font)))):
yield PASS, f"{font} is named canonically."
else:
yield FAIL, ('Style name used in "{}" is not canonical.'
' You should rebuild the font using'
' any of the following'
' style names: "{}".').format(font,
'", "'.join(STYLE_NAMES))
@condition
def family_directory(fonts):
"""Get the path of font project directory."""
if fonts:
return os.path.dirname(fonts[0])
@condition
def descfile(family_directory):
"""Get the path of the DESCRIPTION file of a given font project."""
if family_directory:
descfilepath = os.path.join(family_directory, "DESCRIPTION.en_us.html")
if os.path.exists(descfilepath):
return descfilepath
@condition
def description(descfile):
"""Get the contents of the DESCRIPTION file of a font project."""
if not descfile:
return
import io
return io.open(descfile, "r", encoding="utf-8").read()
@check(
id = 'com.google.fonts/check/003',
conditions = ['description']
)
def com_google_fonts_check_003(description):
"""Does DESCRIPTION file contain broken links?"""
from lxml.html import HTMLParser
import defusedxml.lxml
import requests
doc = defusedxml.lxml.fromstring(description, parser=HTMLParser())
broken_links = []
for link in doc.xpath('//a/@href'):
if link.startswith("mailto:") and \
"@" in link and \
"." in link.split("@")[1]:
yield INFO, (f"Found an email address: {link}")
continue
try:
response = requests.head(link, allow_redirects=True, timeout=10)
code = response.status_code
if code != requests.codes.ok:
broken_links.append(("url: '{}' "
"status code: '{}'").format(link, code))
except requests.exceptions.Timeout:
yield WARN, ("Timedout while attempting to access: '{}'."
" Please verify if that's a broken link.").format(link)
except requests.exceptions.RequestException:
broken_links.append(link)
if len(broken_links) > 0:
yield FAIL, ("The following links are broken"
" in the DESCRIPTION file:"
" '{}'").format("', '".join(broken_links))
else:
yield PASS, "All links in the DESCRIPTION file look good!"
@check(
id = 'com.google.fonts/check/004',
conditions = ['descfile']
)
def com_google_fonts_check_004(descfile, description):
"""Is this a proper HTML snippet?
When packaging families for google/fonts, if there is no
DESCRIPTION.en_us.html file, the add_font.py metageneration tool will
insert a dummy description file which contains invalid html.
This file needs to either be replaced with an existing description file
or edited by hand."""
if "<p>" not in description or "</p>" not in description:
yield FAIL, f"{descfile} does not look like a propper HTML snippet."
else:
yield PASS, f"{descfile} is a propper HTML file."
@check(
id = 'com.google.fonts/check/005',
conditions = ['description']
)
def com_google_fonts_check_005(description):
"""DESCRIPTION.en_us.html must have more than 200 bytes."""
if len(description) <= 200:
yield FAIL, ("DESCRIPTION.en_us.html must"
" have size larger than 200 bytes.")
else:
yield PASS, "DESCRIPTION.en_us.html is larger than 200 bytes."
@check(
id = 'com.google.fonts/check/006',
conditions = ['description']
)
def com_google_fonts_check_006(description):
"""DESCRIPTION.en_us.html must have less than 1000 bytes."""
if len(description) >= 1000:
yield FAIL, ("DESCRIPTION.en_us.html must"
" have size smaller than 1000 bytes.")
else:
yield PASS, "DESCRIPTION.en_us.html is smaller than 1000 bytes."
@condition
def family_metadata(family_directory):
from fontbakery.utils import get_FamilyProto_Message
if family_directory:
pb_file = os.path.join(family_directory, "METADATA.pb")
if os.path.exists(pb_file):
return get_FamilyProto_Message(pb_file)
@check(
id = 'com.google.fonts/check/007',
conditions = ['family_metadata']
)
def com_google_fonts_check_007(family_metadata):
"""Font designer field in METADATA.pb must not be 'unknown'."""
if family_metadata.designer.lower() == 'unknown':
yield FAIL, f"Font designer field is '{family_metadata.designer}'."
else:
yield PASS, "Font designer field is not 'unknown'."
@check(
id = 'com.google.fonts/check/011',
conditions = ['is_ttf']
)
def com_google_fonts_check_011(ttFonts):
"""Fonts have equal numbers of glyphs?"""
fonts = list(ttFonts)
failed = False
max_style = None
max_count = 0
for ttFont in fonts:
fontname = ttFont.reader.file.name
stylename = style(fontname)
this_count = len(ttFont['glyf'].glyphs)
if this_count > max_count:
max_count = this_count
max_style = stylename
for ttFont in fonts:
fontname = ttFont.reader.file.name
stylename = style(fontname)
this_count = len(ttFont['glyf'].glyphs)
if this_count != max_count:
failed = True
yield FAIL, ("{} has {} glyphs while"
" {} has {} glyphs.").format(stylename,
this_count,
max_style,
max_count)
if not failed:
yield PASS, ("All font files in this family have"
" an equal total ammount of glyphs.")
@check(
id = 'com.google.fonts/check/012',
conditions = ['is_ttf']
)
def com_google_fonts_check_012(ttFonts):
"""Fonts have equal glyph names?"""
fonts = list(ttFonts)
all_glyphnames = set()
for ttFont in fonts:
all_glyphnames |= set(ttFont["glyf"].glyphs.keys())
missing = {}
available = {}
for glyphname in all_glyphnames:
missing[glyphname] = []
available[glyphname] = []
failed = False
for ttFont in fonts:
fontname = ttFont.reader.file.name
stylename = style(fontname)
these_ones = set(ttFont["glyf"].glyphs.keys())
for glyphname in all_glyphnames:
if glyphname not in these_ones:
failed = True
missing[glyphname].append(stylename)
else:
available[glyphname].append(stylename)
for gn in missing.keys():
if missing[gn]:
yield FAIL, ("Glyphname '{}' is defined on {}"
" but is missing on"
" {}.").format(gn,
', '.join(missing[gn]),
', '.join(available[gn]))
if not failed:
yield PASS, "All font files have identical glyph names."
@check(
id = 'com.google.fonts/check/016'
)
def com_google_fonts_check_016(ttFont):
"""Checking OS/2 fsType.
Fonts must have their fsType field set to zero.
This setting is known as Installable Embedding, meaning
that none of the DRM restrictions are enabled on the fonts.
More info available at:
https://docs.microsoft.com/en-us/typography/opentype/spec/os2#fstype
"""
value = ttFont['OS/2'].fsType
if value != 0:
FSTYPE_RESTRICTIONS = {
0x0002: ("* The font must not be modified, embedded or exchanged in"
" any manner without first obtaining permission of"
" the legal owner."),
0x0004: ("The font may be embedded, and temporarily loaded on the"
" remote system, but documents that use it must"
" not be editable."),
0x0008: ("The font may be embedded but must only be installed"
" temporarily on other systems."),
0x0100: ("The font may not be subsetted prior to embedding."),
0x0200: ("Only bitmaps contained in the font may be embedded."
" No outline data may be embedded.")
}
restrictions = ""
for bit_mask in FSTYPE_RESTRICTIONS.keys():
if value & bit_mask:
restrictions += FSTYPE_RESTRICTIONS[bit_mask]
if value & 0b1111110011110001:
restrictions += ("* There are reserved bits set,"
" which indicates an invalid setting.")
yield FAIL, ("OS/2 fsType is a legacy DRM-related field.\n"
"In this font it is set to {} meaning that:\n"
"{}\n"
"No such DRM restrictions can be enabled on the"
" Google Fonts collection, so the fsType field"
" must be set to zero (Installable Embedding) instead.\n"
"Fonts with this setting indicate that they may be embedded"
" and permanently installed on the remote system"
" by an application.\n\n"
" More detailed info is available at:\n"
" https://docs.microsoft.com/en-us"
"/typography/opentype/spec/os2#fstype"
"").format(value, restrictions)
else:
yield PASS, ("OS/2 fsType is properly set to zero.")
@condition
def registered_vendor_ids():
"""Get a list of vendor IDs from Microsoft's website."""
from bs4 import BeautifulSoup
from pkg_resources import resource_filename
registered_vendor_ids = {}
CACHED = resource_filename('fontbakery',
'data/fontbakery-microsoft-vendorlist.cache')
content = open(CACHED, encoding='utf-8').read()
soup = BeautifulSoup(content, 'html.parser')
IDs = [chr(c + ord('a')) for c in range(ord('z') - ord('a') + 1)]
IDs.append("vendor-id-and-name-list")
for section_id in IDs:
section = soup.find('h2', {'id': section_id})
table = section.find_next_sibling('table')
if not table: continue
#print ("table: '{}'".format(table))
for row in table.findAll('tr'):
#print("ROW: '{}'".format(row))
cells = row.findAll('td')
# pad the code to make sure it is a 4 char string,
# otherwise eg "CF " will not be matched to "CF"
code = cells[0].string.strip()
code = code + (4 - len(code)) * ' '
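      # e.g. "CF" -> "CF  ", so it can match 4-character OS/2 achVendID values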
labels = [label for label in cells[1].stripped_strings]
registered_vendor_ids[code] = labels[0]
return registered_vendor_ids
@check(
id = 'com.google.fonts/check/018',
conditions = ['registered_vendor_ids']
)
def com_google_fonts_check_018(ttFont, registered_vendor_ids):
"""Checking OS/2 achVendID."""
SUGGEST_MICROSOFT_VENDORLIST_WEBSITE = (
" You should set it to your own 4 character code,"
" and register that code with Microsoft at"
" https://www.microsoft.com"
"/typography/links/vendorlist.aspx")
vid = ttFont['OS/2'].achVendID
bad_vids = ['UKWN', 'ukwn', 'PfEd']
if vid is None:
yield FAIL, Message("not set", "OS/2 VendorID is not set." +
SUGGEST_MICROSOFT_VENDORLIST_WEBSITE)
elif vid in bad_vids:
yield FAIL, Message("bad", ("OS/2 VendorID is '{}',"
" a font editor default.").format(vid) +
SUGGEST_MICROSOFT_VENDORLIST_WEBSITE)
elif vid not in registered_vendor_ids.keys():
yield WARN, Message("unknown", ("OS/2 VendorID value '{}' is not"
" a known registered id.").format(vid) +
SUGGEST_MICROSOFT_VENDORLIST_WEBSITE)
else:
yield PASS, f"OS/2 VendorID '{vid}' looks good!"
@check(
id = 'com.google.fonts/check/019'
)
def com_google_fonts_check_019(ttFont):
"""Substitute copyright, registered and trademark
symbols in name table entries."""
failed = False
replacement_map = [("\u00a9", '(c)'),
("\u00ae", '(r)'),
("\u2122", '(tm)')]
for name in ttFont['name'].names:
string = str(name.string, encoding=name.getEncoding())
for mark, ascii_repl in replacement_map:
new_string = string.replace(mark, ascii_repl)
if string != new_string:
yield FAIL, ("NAMEID #{} contains symbol that should be"
" replaced by '{}'.").format(name.nameID,
ascii_repl)
failed = True
if not failed:
yield PASS, ("No need to substitute copyright, registered and"
" trademark symbols in name table entries of this font.")
@check(
id = 'com.google.fonts/check/020',
conditions=['style']
)
def com_google_fonts_check_020(font, ttFont, style):
"""Checking OS/2 usWeightClass."""
from fontbakery.specifications.shared_conditions import is_ttf
weight_name, expected_value = expected_os2_weight(style)
value = ttFont['OS/2'].usWeightClass
if value != expected_value:
    if is_ttf(ttFont) and \
       ((weight_name == 'Thin' and value == 100) or
        (weight_name == 'ExtraLight' and value == 200)):
      yield WARN, ("{}:{} is OK on TTFs, but OTF files with those values"
                   " will cause blurring on Windows."
                   " GlyphsApp users must set an Instance Custom Parameter"
                   " for the Thin and ExtraLight styles to 250 and 275,"
                   " so that if OTFs are exported then it will not"
                   " blur on Windows.").format(weight_name, value)
else:
yield FAIL, ("OS/2 usWeightClass expected value for"
" '{}' is {} but this font has"
" {}.").format(weight_name, expected_value, value)
else:
yield PASS, "OS/2 usWeightClass value looks good!"
@condition
def licenses(family_directory):
"""Get a list of paths for every license
file found in a font project."""
licenses = []
if family_directory:
for license in ['OFL.txt', 'LICENSE.txt']:
license_path = os.path.join(family_directory, license)
if os.path.exists(license_path):
licenses.append(license_path)
return licenses
@condition
def license_path(licenses):
"""Get license path."""
# return license if there is exactly one license
return licenses[0] if len(licenses) == 1 else None
@condition
def license(license_path):
"""Get license filename."""
if license_path:
return os.path.basename(license_path)
@check(
id = 'com.google.fonts/check/028'
)
def com_google_fonts_check_028(licenses):
"""Check font has a license."""
if len(licenses) > 1:
yield FAIL, Message("multiple",
("More than a single license file found."
" Please review."))
elif not licenses:
yield FAIL, Message("none",
("No license file was found."
" Please add an OFL.txt or a LICENSE.txt file."
" If you are running fontbakery on a Google Fonts"
" upstream repo, which is fine, just make sure"
" there is a temporary license file in"
" the same folder."))
else:
yield PASS, "Found license at '{}'".format(licenses[0])
@check(
id = 'com.google.fonts/check/029',
conditions = ['license'],
misc_metadata = {
'priority': CRITICAL
})
def com_google_fonts_check_029(ttFont, license):
"""Check copyright namerecords match license file."""
from fontbakery.constants import (NAMEID_LICENSE_DESCRIPTION,
# NAMEID_LICENSE_INFO_URL,
PLACEHOLDER_LICENSING_TEXT,
# NAMEID_STR,
PLATID_STR)
from unidecode import unidecode
failed = False
placeholder = PLACEHOLDER_LICENSING_TEXT[license]
entry_found = False
for i, nameRecord in enumerate(ttFont["name"].names):
if nameRecord.nameID == NAMEID_LICENSE_DESCRIPTION:
entry_found = True
value = nameRecord.toUnicode()
if value != placeholder:
failed = True
yield FAIL, Message("wrong", \
("License file {} exists but"
" NameID {} (LICENSE DESCRIPTION) value"
" on platform {} ({})"
" is not specified for that."
" Value was: \"{}\""
" Must be changed to \"{}\""
"").format(license,
NAMEID_LICENSE_DESCRIPTION,
nameRecord.platformID,
PLATID_STR[nameRecord.platformID],
unidecode(value),
unidecode(placeholder)))
if not entry_found:
yield FAIL, Message("missing", \
("Font lacks NameID {} "
"(LICENSE DESCRIPTION). A proper licensing entry"
" must be set.").format(NAMEID_LICENSE_DESCRIPTION))
elif not failed:
yield PASS, "Licensing entry on name table is correctly set."
@condition
def familyname(font):
filename = os.path.basename(font)
filename_base = os.path.splitext(filename)[0]
return filename_base.split('-')[0]
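# e.g. familyname("fonts/Nunito-BoldItalic.ttf") -> "Nunito"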
@check(
id = 'com.google.fonts/check/030',
conditions = ['familyname'],
misc_metadata = {
'priority': CRITICAL
}
)
def com_google_fonts_check_030(ttFont, familyname):
""""License URL matches License text on name table?"""
from fontbakery.constants import (NAMEID_LICENSE_DESCRIPTION,
NAMEID_LICENSE_INFO_URL,
PLACEHOLDER_LICENSING_TEXT)
LEGACY_UFL_FAMILIES = ["Ubuntu", "UbuntuCondensed", "UbuntuMono"]
LICENSE_URL = {
'OFL.txt': 'http://scripts.sil.org/OFL',
'LICENSE.txt': 'http://www.apache.org/licenses/LICENSE-2.0',
'UFL.txt': 'https://www.ubuntu.com/legal/terms-and-policies/font-licence'
}
LICENSE_NAME = {
'OFL.txt': 'Open Font',
'LICENSE.txt': 'Apache',
'UFL.txt': 'Ubuntu Font License'
}
detected_license = False
for license in ['OFL.txt', 'LICENSE.txt', 'UFL.txt']:
placeholder = PLACEHOLDER_LICENSING_TEXT[license]
for nameRecord in ttFont['name'].names:
string = nameRecord.string.decode(nameRecord.getEncoding())
if nameRecord.nameID == NAMEID_LICENSE_DESCRIPTION and\
string == placeholder:
detected_license = license
break
if detected_license == "UFL.txt" and familyname not in LEGACY_UFL_FAMILIES:
yield FAIL, Message("ufl",
("The Ubuntu Font License is only acceptable on"
" the Google Fonts collection for legacy font"
" families that already adopted such license."
" New Families should use eigther Apache or"
" Open Font License."))
else:
found_good_entry = False
if detected_license:
failed = False
expected = LICENSE_URL[detected_license]
for nameRecord in ttFont['name'].names:
if nameRecord.nameID == NAMEID_LICENSE_INFO_URL:
string = nameRecord.string.decode(nameRecord.getEncoding())
if string == expected:
found_good_entry = True
else:
failed = True
yield FAIL, Message("licensing-inconsistency",
("Licensing inconsistency in name table"
" entries! NameID={} (LICENSE DESCRIPTION)"
" indicates {} licensing, but NameID={}"
" (LICENSE URL) has '{}'. Expected: '{}'"
"").format(NAMEID_LICENSE_DESCRIPTION,
LICENSE_NAME[detected_license],
NAMEID_LICENSE_INFO_URL,
string, expected))
if not found_good_entry:
yield FAIL, Message("no-license-found",
("A known license URL must be provided in the"
" NameID {} (LICENSE INFO URL) entry."
" Currently accepted licenses are Apache or"
" Open Font License. For a small set of legacy"
" families the Ubuntu Font License may be"
" acceptable as well."
"").format(NAMEID_LICENSE_INFO_URL))
else:
if failed:
yield FAIL, Message("bad-entries",
("Even though a valid license URL was seen in"
" NAME table, there were also bad entries."
" Please review NameIDs {} (LICENSE DESCRIPTION)"
" and {} (LICENSE INFO URL)."
"").format(NAMEID_LICENSE_DESCRIPTION,
NAMEID_LICENSE_INFO_URL))
else:
yield PASS, "Font has a valid license URL in NAME table."
@check(
id = 'com.google.fonts/check/032',
rationale = """
An old FontLab version had a bug which caused it to store
copyright notices in nameID 10 entries.
In order to detect those and distinguish them from actual
legitimate usage of this name table entry, we expect that
such strings do not exceed a reasonable length of 200 chars.
Longer strings are likely instances of the FontLab bug.
"""
)
def com_google_fonts_check_032(ttFont):
"""Description strings in the name table must not exceed 200 characters."""
from fontbakery.constants import NAMEID_DESCRIPTION
failed = False
for name in ttFont['name'].names:
if (name.nameID == NAMEID_DESCRIPTION and
len(name.string.decode(name.getEncoding())) > 200):
failed = True
break
if failed:
yield WARN, ("A few name table entries with ID={} (NAMEID_DESCRIPTION)"
" are longer than 200 characters."
" Please check whether those entries are copyright notices"
" mistakenly stored in the description string entries by"
" a bug in an old FontLab version."
" If that's the case, then such copyright notices must be"
" removed from these entries."
"").format(NAMEID_DESCRIPTION)
else:
yield PASS, "All description name records have reasonably small lengths."
@condition
def ttfautohint_stats(font):
from ttfautohint import ttfautohint, libttfautohint
from io import BytesIO
from fontTools.ttLib import TTFont
original_buffer = BytesIO()
TTFont(font).save(original_buffer)
dehinted_buffer = ttfautohint(in_buffer=original_buffer.getvalue(),
dehint=True)
return {
"dehinted_size": len(dehinted_buffer),
"hinted_size": os.stat(font).st_size,
"version": libttfautohint.version_string
}
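# ttfautohint_stats returns a dict along these lines (values illustrative only):
#   {"dehinted_size": 81234, "hinted_size": 97321, "version": "1.8.2"}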
@check(
id = 'com.google.fonts/check/054',
conditions = ['ttfautohint_stats',
'is_ttf']
)
def com_google_fonts_check_054(font, ttfautohint_stats):
"""Show hinting filesize impact.
Current implementation simply logs useful info
but there's no fail scenario for this checker."""
hinted = ttfautohint_stats["hinted_size"]
dehinted = ttfautohint_stats["dehinted_size"]
increase = hinted - dehinted
  change = (float(hinted)/dehinted - 1) * 100  # expressed as a percentage
def filesize_formatting(s):
if s < 1024:
return f"{s} bytes"
elif s < 1024*1024:
return "{:.1f}kb".format(s/1024)
else:
return "{:.1f}Mb".format(s/(1024*1024))
hinted_size = filesize_formatting(hinted)
dehinted_size = filesize_formatting(dehinted)
increase = filesize_formatting(increase)
results_table = "Hinting filesize impact:\n\n"
results_table += f"| | {font} |\n"
results_table += "|:--- | ---:|\n"
results_table += f"| Dehinted Size | {dehinted_size} |\n"
results_table += f"| Hinted Size | {hinted_size} |\n"
results_table += f"| Increase | {increase} |\n"
results_table += f"| Change | {change:.1f} % |\n"
yield INFO, results_table
@check(
id = 'com.google.fonts/check/055'
)
def com_google_fonts_check_055(ttFont):
"""Version format is correct in 'name' table?"""
from fontbakery.utils import get_name_entry_strings
from fontbakery.constants import NAMEID_VERSION_STRING
import re
def is_valid_version_format(value):
return re.match(r'Version\s0*[1-9]+\.\d+', value)
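  # e.g. "Version 1.230" is valid; "v1.0" and "Version 0.100" are not.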
failed = False
version_entries = get_name_entry_strings(ttFont, NAMEID_VERSION_STRING)
if len(version_entries) == 0:
failed = True
yield FAIL, Message("no-version-string",
("Font lacks a NAMEID_VERSION_STRING (nameID={})"
" entry").format(NAMEID_VERSION_STRING))
for ventry in version_entries:
if not is_valid_version_format(ventry):
failed = True
yield FAIL, Message("bad-version-strings",
("The NAMEID_VERSION_STRING (nameID={}) value must"
" follow the pattern \"Version X.Y\" with X.Y"
" between 1.000 and 9.999."
" Current version string is:"
" \"{}\"").format(NAMEID_VERSION_STRING,
ventry))
if not failed:
yield PASS, "Version format in NAME table entries is correct."
@check(
id = 'com.google.fonts/check/has_ttfautohint_params',
)
def com_google_fonts_check_has_ttfautohint_params(ttFont):
""" Font has ttfautohint params? """
from fontbakery.utils import get_name_entry_strings
from fontbakery.constants import NAMEID_VERSION_STRING
def ttfautohint_version(value):
# example string:
#'Version 1.000; ttfautohint (v0.93) -l 8 -r 50 -G 200 -x 14 -w "G"
import re
results = re.search(r'ttfautohint \(v(.*)\) ([^;]*)', value)
if results:
return results.group(1), results.group(2)
version_strings = get_name_entry_strings(ttFont, NAMEID_VERSION_STRING)
failed = True
for vstring in version_strings:
values = ttfautohint_version(vstring)
if values:
ttfa_version, params = values
if params:
yield PASS, f"Font has ttfautohint params ({params})"
failed = False
if failed:
yield FAIL, "Font is lacking ttfautohint params on its version strings on the name table."
@check(
id = 'com.google.fonts/check/056',
conditions = ['ttfautohint_stats',
'is_ttf']
)
def com_google_fonts_check_056(ttFont, ttfautohint_stats):
"""Font has old ttfautohint applied?
1. find which version was used, by inspecting name table entries
2. find which version of ttfautohint is installed
"""
from fontbakery.utils import get_name_entry_strings
from fontbakery.constants import NAMEID_VERSION_STRING
def ttfautohint_version(values):
import re
for value in values:
results = re.search(r'ttfautohint \(v(.*)\)', value)
if results:
return results.group(1)
def installed_version_is_newer(installed, used):
installed = list(map(int, installed.split(".")))
used = list(map(int, used.split(".")))
return installed > used
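  # e.g. installed_version_is_newer("1.8.2", "1.6") -> True, since [1, 8, 2] > [1, 6]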
version_strings = get_name_entry_strings(ttFont, NAMEID_VERSION_STRING)
ttfa_version = ttfautohint_version(version_strings)
if len(version_strings) == 0:
yield FAIL, Message("lacks-version-strings",
"This font file lacks mandatory "
"version strings in its name table.")
elif ttfa_version is None:
yield INFO, ("Could not detect which version of"
" ttfautohint was used in this font."
" It is typically specified as a comment"
" in the font version entries of the 'name' table."
" Such font version strings are currently:"
" {}").format(version_strings)
else:
installed_ttfa = ttfautohint_stats["version"]
try:
if installed_version_is_newer(installed_ttfa,
ttfa_version):
yield WARN, ("ttfautohint used in font = {};"
" installed = {}; Need to re-run"
" with the newer version!").format(ttfa_version,
installed_ttfa)
else:
yield PASS, (f"ttfautohint available in the system ({installed_ttfa}) is older"
f" than the one used in the font ({ttfa_version}).")
except ValueError:
yield FAIL, Message("parse-error",
("Failed to parse ttfautohint version values:"
" installed = '{}';"
" used_in_font = '{}'").format(installed_ttfa,
ttfa_version))
@check(
id = 'com.google.fonts/check/061',
rationale = """
The EPAR table is/was a way of expressing common licensing permissions
and restrictions in metadata; while almost nothing supported it,
    Dave Crossland suggested that adding it to everything in Google Fonts
    could help make it more popular.
More info is available at:
https://davelab6.github.io/epar/
""",
misc_metadata = {
'request': 'https://github.com/googlefonts/fontbakery/issues/226'
}
)
def com_google_fonts_check_061(ttFont):
"""EPAR table present in font?"""
if "EPAR" not in ttFont:
yield INFO, ("EPAR table not present in font."
" To learn more see"
" https://github.com/googlefonts/"
"fontbakery/issues/818")
else:
yield PASS, "EPAR table present in font."
@check(
id = 'com.google.fonts/check/062',
conditions = ['is_ttf'],
rationale = """
Traditionally version 0 'gasp' tables were set
so that font sizes below 8 ppem had no grid
fitting but did have antialiasing. From 9-16
ppem, just grid fitting. And fonts above
17ppem had both antialiasing and grid fitting
toggled on. The use of accelerated graphics
cards and higher resolution screens make this
approach obsolete. Microsoft's DirectWrite
pushed this even further with much improved
rendering built into the OS and apps. In this
scenario it makes sense to simply toggle all
4 flags ON for all font sizes.
"""
)
def com_google_fonts_check_062(ttFont):
"""Is 'gasp' table set to optimize rendering?"""
if "gasp" not in ttFont.keys():
yield FAIL, ("Font is missing the 'gasp' table."
" Try exporting the font with autohinting enabled.")
else:
if not isinstance(ttFont["gasp"].gaspRange, dict):
yield FAIL, "'gasp' table has no values."
else:
failed = False
if 0xFFFF not in ttFont["gasp"].gaspRange:
yield WARN, ("'gasp' table does not have an entry for all"
" font sizes (gaspRange 0xFFFF).")
else:
gasp_meaning = {
0x01: "- Use gridfitting",
0x02: "- Use grayscale rendering",
0x04: "- Use gridfitting with ClearType symmetric smoothing",
0x08: "- Use smoothing along multiple axes with ClearType®"
}
table = []
for key in ttFont["gasp"].gaspRange.keys():
value = ttFont["gasp"].gaspRange[key]
meaning = []
for flag, info in gasp_meaning.items():
if value & flag:
meaning.append(info)
meaning = "\n\t".join(meaning)
table.append(f"PPM <= {key}:\n\tflag = 0x{value:02X}\n\t{meaning}")
table = "\n".join(table)
yield INFO, ("These are the ppm ranges declared on the"
f" gasp table:\n\n{table}\n")
for key in ttFont["gasp"].gaspRange.keys():
if key != 0xFFFF:
yield WARN, ("'gasp' table has a gaspRange of {} that"
" may be unneccessary.").format(key)
failed = True
else:
value = ttFont["gasp"].gaspRange[0xFFFF]
if value != 0x0F:
failed = True
yield WARN, (f"gaspRange 0xFFFF value {value:%02X}"
" should be set to 0x0F.")
if not failed:
yield PASS, ("'gasp' table is correctly set, with one "
"gaspRange:value of 0xFFFF:0x0F.")
@check(
id = 'com.google.fonts/check/067'
)
def com_google_fonts_check_067(ttFont):
"""Make sure family name does not begin with a digit.
Font family names which start with a numeral are often not
discoverable in Windows applications.
"""
from fontbakery.utils import get_name_entry_strings
from fontbakery.constants import NAMEID_FONT_FAMILY_NAME
failed = False
for familyname in get_name_entry_strings(ttFont, NAMEID_FONT_FAMILY_NAME):
digits = map(str, range(0, 10))
if familyname[0] in digits:
yield FAIL, ("Font family name '{}'"
" begins with a digit!").format(familyname)
failed = True
if failed is False:
yield PASS, "Font family name first character is not a digit."
# TODO: extend this to check for availability of all required currency symbols.
@check(
id = 'com.google.fonts/check/070'
)
def com_google_fonts_check_070(ttFont):
"""Font has all expected currency sign characters?"""
def font_has_char(ttFont, codepoint):
for subtable in ttFont['cmap'].tables:
if codepoint in subtable.cmap:
return True
#otherwise
return False
failed = False
OPTIONAL = {
#TODO: Do we want to check for this one?
#0x20A0: "EUROPEAN CURRENCY SIGN"
}
MANDATORY = {
0x20AC: "EURO SIGN"
# TODO: extend this list
}
for codepoint, charname in OPTIONAL.items():
if not font_has_char(ttFont, codepoint):
failed = True
yield WARN, f"Font lacks \"{charname}\" character (unicode: 0x{codepoint:04X})"
for codepoint, charname in MANDATORY.items():
if not font_has_char(ttFont, codepoint):
failed = True
yield FAIL, f"Font lacks \"{charname}\" character (unicode: 0x{codepoint:04X})"
if not failed:
yield PASS, "Font has all expected currency sign characters."
@check(
id = 'com.google.fonts/check/074',
rationale = """
The OpenType spec requires ASCII for the POSTSCRIPT_NAME (nameID 6).
For COPYRIGHT_NOTICE (nameID 0) ASCII is required because that
string should be the same in CFF fonts which also have this
requirement in the OpenType spec.
Note:
A common place where we find non-ASCII strings is on name table
entries with NameID > 18, which are expressly for localising
the ASCII-only IDs into Hindi / Arabic / etc.
""",
misc_metadata = {
'request': 'https://github.com/googlefonts/fontbakery/issues/1663'
}
)
def com_google_fonts_check_074(ttFont):
"""Are there non-ASCII characters in ASCII-only NAME table entries?"""
from fontbakery.constants import (NAMEID_COPYRIGHT_NOTICE,
NAMEID_POSTSCRIPT_NAME)
bad_entries = []
for name in ttFont["name"].names:
if name.nameID == NAMEID_COPYRIGHT_NOTICE or \
name.nameID == NAMEID_POSTSCRIPT_NAME:
string = name.string.decode(name.getEncoding())
try:
string.encode('ascii')
except:
bad_entries.append(name)
yield INFO, ("Bad string at"
" [nameID {}, '{}']:"
" '{}'"
"").format(name.nameID,
name.getEncoding(),
string.encode("ascii",
errors='xmlcharrefreplace'))
if len(bad_entries) > 0:
yield FAIL, ("There are {} strings containing"
" non-ASCII characters in the ASCII-only"
" NAME table entries.").format(len(bad_entries))
else:
yield PASS, ("None of the ASCII-only NAME table entries"
" contain non-ASCII characteres.")
@condition
def listed_on_gfonts_api(family_metadata):
if not family_metadata:
return False
import requests
url = ('http://fonts.googleapis.com'
'/css?family={}').format(family_metadata.name.replace(' ', '+'))
r = requests.get(url)
return r.status_code == 200
@check(
id = 'com.google.fonts/check/081',
conditions = ['family_metadata']
)
def com_google_fonts_check_081(listed_on_gfonts_api):
"""METADATA.pb: Fontfamily is listed on Google Fonts API?"""
if not listed_on_gfonts_api:
yield WARN, "Family not found via Google Fonts API."
else:
yield PASS, "Font is properly listed via Google Fonts API."
# Temporarily disabled as requested at
# https://github.com/googlefonts/fontbakery/issues/1728
@disable
@check(
id = 'com.google.fonts/check/082',
conditions = ['family_metadata']
)
def com_google_fonts_check_082(family_metadata):
"""METADATA.pb: Designer exists in Google Fonts profiles.csv?"""
PROFILES_GIT_URL = ("https://github.com/google/"
"fonts/blob/master/designers/profiles.csv")
PROFILES_RAW_URL = ("https://raw.githubusercontent.com/google/"
"fonts/master/designers/profiles.csv")
if family_metadata.designer == "":
yield FAIL, ("METADATA.pb field \"designer\" MUST NOT be empty!")
elif family_metadata.designer == "Multiple Designers":
yield SKIP, ("Found \"Multiple Designers\" at METADATA.pb, which"
" is OK, so we won't look for it at profiles.csv")
else:
from urllib import request
import csv
try:
      handle = request.urlopen(PROFILES_RAW_URL)
      designers = []
      # the raw HTTP response yields bytes; decode so csv.reader gets text lines
      for row in csv.reader(line.decode("utf-8") for line in handle):
        if not row:
          continue
        designers.append(row[0])
if family_metadata.designer not in designers:
yield WARN, ("METADATA.pb: Designer \"{}\" is not listed"
" in profiles.csv"
" (at \"{}\")").format(family_metadata.designer,
PROFILES_GIT_URL)
else:
yield PASS, ("Found designer \"{}\""
" at profiles.csv").format(family_metadata.designer)
except:
yield WARN, f"Failed to fetch \"{PROFILES_RAW_URL}\""
@check(
id = 'com.google.fonts/check/083',
conditions = ['family_metadata']
)
def com_google_fonts_check_083(family_metadata):
"""METADATA.pb: check if fonts field only has
unique "full_name" values.
"""
fonts = {}
for f in family_metadata.fonts:
fonts[f.full_name] = f
if len(set(fonts.keys())) != len(family_metadata.fonts):
yield FAIL, ("Found duplicated \"full_name\" values"
" in METADATA.pb fonts field.")
else:
yield PASS, ("METADATA.pb \"fonts\" field only has"
" unique \"full_name\" values.")
@check(
id = 'com.google.fonts/check/084',
conditions = ['family_metadata']
)
def com_google_fonts_check_084(family_metadata):
"""METADATA.pb: check if fonts field
only contains unique style:weight pairs.
"""
pairs = {}
for f in family_metadata.fonts:
styleweight = f"{f.style}:{f.weight}"
pairs[styleweight] = 1
if len(set(pairs.keys())) != len(family_metadata.fonts):
yield FAIL, ("Found duplicated style:weight pair"
" in METADATA.pb fonts field.")
else:
yield PASS, ("METADATA.pb \"fonts\" field only has"
" unique style:weight pairs.")
@check(
id = 'com.google.fonts/check/085',
conditions = ['family_metadata']
)
def com_google_fonts_check_085(family_metadata):
"""METADATA.pb license is "APACHE2", "UFL" or "OFL"?"""
licenses = ["APACHE2", "OFL", "UFL"]
if family_metadata.license in licenses:
yield PASS, ("Font license is declared"
" in METADATA.pb as \"{}\"").format(family_metadata.license)
else:
yield FAIL, ("METADATA.pb license field (\"{}\")"
" must be one of the following:"
" {}").format(family_metadata.license,
licenses)
@check(
id = 'com.google.fonts/check/086',
conditions = ['family_metadata']
)
def com_google_fonts_check_086(family_metadata):
"""METADATA.pb should contain at least "menu" and "latin" subsets."""
missing = []
for s in ["menu", "latin"]:
if s not in list(family_metadata.subsets):
missing.append(s)
if missing != []:
yield FAIL, ("Subsets \"menu\" and \"latin\" are mandatory,"
" but METADATA.pb is missing"
" \"{}\"").format(" and ".join(missing))
else:
yield PASS, "METADATA.pb contains \"menu\" and \"latin\" subsets."
@check(
id = 'com.google.fonts/check/087',
conditions = ['family_metadata']
)
def com_google_fonts_check_087(family_metadata):
"""METADATA.pb subsets should be alphabetically ordered."""
expected = list(sorted(family_metadata.subsets))
if list(family_metadata.subsets) != expected:
yield FAIL, ("METADATA.pb subsets are not sorted "
"in alphabetical order: Got ['{}']"
" and expected ['{}']").format("', '".join(family_metadata.subsets),
"', '".join(expected))
else:
yield PASS, "METADATA.pb subsets are sorted in alphabetical order."
@check(
id = 'com.google.fonts/check/088',
conditions = ['family_metadata']
)
def com_google_fonts_check_088(family_metadata):
"""METADATA.pb: Copyright notice is the same in all fonts?"""
copyright = None
fail = False
for f in family_metadata.fonts:
if copyright and f.copyright != copyright:
fail = True
copyright = f.copyright
if fail:
yield FAIL, ("METADATA.pb: Copyright field value"
" is inconsistent across family")
else:
yield PASS, "Copyright is consistent across family"
@check(
id = 'com.google.fonts/check/089',
conditions = ['family_metadata']
)
def com_google_fonts_check_089(family_metadata):
"""Check that METADATA.pb family values are all the same."""
name = ""
fail = False
for f in family_metadata.fonts:
if name and f.name != name:
fail = True
name = f.name
if fail:
yield FAIL, ("METADATA.pb: Family name is not the same"
" in all metadata \"fonts\" items.")
else:
yield PASS, ("METADATA.pb: Family name is the same"
" in all metadata \"fonts\" items.")
@condition
def has_regular_style(family_metadata):
fonts = family_metadata.fonts if family_metadata else []
for f in fonts:
if f.weight == 400 and f.style == "normal":
return True
return False
@check(
id = 'com.google.fonts/check/090',
conditions = ['family_metadata']
)
def com_google_fonts_check_090(family_metadata):
"""METADATA.pb: According Google Fonts standards,
families should have a Regular style.
"""
if has_regular_style(family_metadata):
yield PASS, "Family has a Regular style."
else:
yield FAIL, ("This family lacks a Regular"
" (style: normal and weight: 400)"
" as required by Google Fonts standards.")
@check(
id = 'com.google.fonts/check/091',
conditions = ['family_metadata',
'has_regular_style']
)
def com_google_fonts_check_091(family_metadata):
"""METADATA.pb: Regular should be 400."""
badfonts = []
for f in family_metadata.fonts:
if f.full_name.endswith("Regular") and f.weight != 400:
badfonts.append(f"{f.filename} (weight: {f.weight})")
if len(badfonts) > 0:
yield FAIL, ("METADATA.pb: Regular font weight must be 400."
" Please fix these: {}").format(", ".join(badfonts))
else:
yield PASS, "Regular has weight = 400."
@condition
def font_metadata(family_metadata, font):
if not family_metadata:
return
for f in family_metadata.fonts:
if font.endswith(f.filename):
return f
@check(
id = 'com.google.fonts/check/092',
conditions=['font_metadata']
)
def com_google_fonts_check_092(ttFont, font_metadata):
"""Checks METADATA.pb font.name field matches
family name declared on the name table.
"""
from fontbakery.utils import get_name_entry_strings
from fontbakery.constants import NAMEID_FONT_FAMILY_NAME
familynames = get_name_entry_strings(ttFont, NAMEID_FONT_FAMILY_NAME)
if len(familynames) == 0:
yield FAIL, Message("missing",
("This font lacks a FONT_FAMILY_NAME entry"
" (nameID={}) in the name"
" table.").format(NAMEID_FONT_FAMILY_NAME))
else:
if font_metadata.name not in familynames:
yield FAIL, Message("mismatch",
("Unmatched family name in font:"
" TTF has \"{}\" while METADATA.pb"
" has \"{}\"").format(familynames[0],
font_metadata.name))
else:
yield PASS, ("Family name \"{}\" is identical"
" in METADATA.pb and on the"
" TTF file.").format(font_metadata.name)
@check(
id = 'com.google.fonts/check/093',
conditions = ['font_metadata']
)
def com_google_fonts_check_093(ttFont, font_metadata):
"""Checks METADATA.pb font.post_script_name matches
postscript name declared on the name table.
"""
failed = False
from fontbakery.utils import get_name_entry_strings
from fontbakery.constants import NAMEID_POSTSCRIPT_NAME
postscript_names = get_name_entry_strings(ttFont, NAMEID_POSTSCRIPT_NAME)
if len(postscript_names) == 0:
failed = True
yield FAIL, Message("missing",
("This font lacks a POSTSCRIPT_NAME"
" entry (nameID={}) in the "
"name table.").format(NAMEID_POSTSCRIPT_NAME))
else:
for psname in postscript_names:
if psname != font_metadata.post_script_name:
failed = True
yield FAIL, Message("mismatch",
("Unmatched postscript name in font:"
" TTF has \"{}\" while METADATA.pb"
" has \"{}\"."
"").format(psname,
font_metadata.post_script_name))
if not failed:
yield PASS, ("Postscript name \"{}\" is identical"
" in METADATA.pb and on the"
" TTF file.").format(font_metadata.post_script_name)
@check(
id = 'com.google.fonts/check/094',
conditions = ['font_metadata']
)
def com_google_fonts_check_094(ttFont, font_metadata):
"""METADATA.pb font.full_name value matches
fullname declared on the name table?
"""
from fontbakery.utils import get_name_entry_strings
from fontbakery.constants import NAMEID_FULL_FONT_NAME
full_fontnames = get_name_entry_strings(ttFont, NAMEID_FULL_FONT_NAME)
if len(full_fontnames) == 0:
yield FAIL, Message("lacks-entry",
("This font lacks a FULL_FONT_NAME"
" entry (nameID={}) in the"
" name table.").format(NAMEID_FULL_FONT_NAME))
else:
for full_fontname in full_fontnames:
if full_fontname != font_metadata.full_name:
yield FAIL, Message("mismatch",
("Unmatched fullname in font:"
" TTF has \"{}\" while METADATA.pb"
" has \"{}\".").format(full_fontname,
font_metadata.full_name))
else:
yield PASS, ("Font fullname \"{}\" is identical"
" in METADATA.pb and on the"
" TTF file.").format(full_fontname)
@check(
id = 'com.google.fonts/check/095',
conditions=['font_metadata', 'style']
)
def com_google_fonts_check_095(ttFont, style, font_metadata):
"""METADATA.pb font.name value should be same as
the family name declared on the name table.
"""
from fontbakery.utils import get_name_entry_strings
from fontbakery.constants import (RIBBI_STYLE_NAMES,
NAMEID_FONT_FAMILY_NAME,
NAMEID_TYPOGRAPHIC_FAMILY_NAME,
NAMEID_STR)
if style in RIBBI_STYLE_NAMES:
font_familynames = get_name_entry_strings(ttFont, NAMEID_FONT_FAMILY_NAME)
nameid = NAMEID_FONT_FAMILY_NAME
else:
font_familynames = get_name_entry_strings(ttFont, NAMEID_TYPOGRAPHIC_FAMILY_NAME)
nameid = NAMEID_TYPOGRAPHIC_FAMILY_NAME
if len(font_familynames) == 0:
yield FAIL, Message("lacks-entry",
("This font lacks a {} entry"
" (nameID={}) in the"
" name table.").format(NAMEID_STR[nameid],
nameid))
else:
for font_familyname in font_familynames:
if font_familyname != font_metadata.name:
yield FAIL, Message("mismatch",
("Unmatched familyname in font:"
" TTF has \"{}\" while METADATA.pb has"
" name=\"{}\".").format(font_familyname,
font_metadata.name))
else:
yield PASS, ("OK: Family name \"{}\" is identical"
" in METADATA.pb and on the"
" TTF file.").format(font_metadata.name)
@check(
id = 'com.google.fonts/check/096',
conditions = ['font_metadata']
)
def com_google_fonts_check_096(font_metadata):
"""METADATA.pb font.full_name and font.post_script_name
fields have equivalent values ?
"""
import re
regex = re.compile(r"\W")
post_script_name = regex.sub("", font_metadata.post_script_name)
fullname = regex.sub("", font_metadata.full_name)
if fullname != post_script_name:
yield FAIL, ("METADATA.pb font full_name=\"{}\""
" does not match post_script_name ="
" \"{}\"").format(font_metadata.full_name,
font_metadata.post_script_name)
else:
yield PASS, ("METADATA.pb font fields \"full_name\" and"
" \"post_script_name\" have equivalent values.")
@check(
id = 'com.google.fonts/check/097',
conditions = ['font_metadata']
)
def com_google_fonts_check_097(font_metadata):
"""METADATA.pb font.filename and font.post_script_name
fields have equivalent values?
"""
import re
regex = re.compile(r"\W")
post_script_name = regex.sub("", font_metadata.post_script_name)
filename = regex.sub("", os.path.splitext(font_metadata.filename)[0])
if filename != post_script_name:
yield FAIL, ("METADATA.pb font filename=\"{}\" does not match"
" post_script_name=\"{}\"."
"").format(font_metadata.filename,
font_metadata.post_script_name)
else:
yield PASS, ("METADATA.pb font fields \"filename\" and"
" \"post_script_name\" have equivalent values.")
@condition
def font_familynames(ttFont):
from fontbakery.utils import get_name_entry_strings
from fontbakery.constants import NAMEID_FONT_FAMILY_NAME
return get_name_entry_strings(ttFont, NAMEID_FONT_FAMILY_NAME)
@condition
def typographic_familynames(ttFont):
from fontbakery.utils import get_name_entry_strings
from fontbakery.constants import NAMEID_TYPOGRAPHIC_FAMILY_NAME
return get_name_entry_strings(ttFont, NAMEID_TYPOGRAPHIC_FAMILY_NAME)
@condition
def font_familyname(font_familynames):
# This assumes that all familyname
# name table entries are identical.
# FIX-ME: Maybe we should have a check for that.
# Have we ever seen this kind of
# problem in the wild, though ?
return font_familynames[0]
@check(
id = 'com.google.fonts/check/098',
conditions = ['style',
'font_metadata']
)
def com_google_fonts_check_098(style,
font_metadata,
font_familynames,
typographic_familynames):
"""METADATA.pb font.name field contains font name in right format?"""
from fontbakery.constants import RIBBI_STYLE_NAMES
if style in RIBBI_STYLE_NAMES:
familynames = font_familynames
else:
familynames = typographic_familynames
for font_familyname in familynames:
if font_familyname in font_metadata.name:
yield PASS, ("METADATA.pb font.name field contains"
" font name in right format."
" ('{}' in '{}')").format(font_familyname,
font_metadata.name)
else:
yield FAIL, ("METADATA.pb font.name field (\"{}\")"
" does not match correct font name format (\"{}\")."
"").format(font_metadata.name,
font_familyname)
@check(
id = 'com.google.fonts/check/099',
conditions = ['style',
'font_metadata']
)
def com_google_fonts_check_099(style,
font_metadata,
font_familynames,
typographic_familynames):
"""METADATA.pb font.full_name field contains font name in right format?"""
from fontbakery.constants import RIBBI_STYLE_NAMES
if style in RIBBI_STYLE_NAMES:
familynames = font_familynames
if familynames == []:
yield SKIP, "No FONT_FAMILYNAME"
else:
familynames = typographic_familynames
if familynames == []:
yield SKIP, "No TYPOGRAPHIC_FAMILYNAME"
for font_familyname in familynames:
if font_familyname in font_metadata.full_name:
yield PASS, ("METADATA.pb font.full_name field contains"
" font name in right format."
" ('{}' in '{}')").format(font_familyname,
font_metadata.full_name)
else:
yield FAIL, ("METADATA.pb font.full_name field (\"{}\")"
" does not match correct font name format (\"{}\")."
"").format(font_metadata.full_name,
font_familyname)
@check(
id = 'com.google.fonts/check/100',
conditions = ['style', # This means the font filename
# (source of truth here) is good
'font_metadata']
)
def com_google_fonts_check_100(font,
font_metadata):
"""METADATA.pb font.filename field contains font name in right format?"""
expected = os.path.basename(font)
if font_metadata.filename == expected:
yield PASS, ("METADATA.pb filename field contains"
" font name in right format.")
else:
yield FAIL, ("METADATA.pb filename field (\"{}\") does not match"
" correct font name format (\"{}\")."
"").format(font_metadata.filename,
expected)
@check(
id = 'com.google.fonts/check/101',
conditions = ['font_metadata',
'font_familynames']
)
def com_google_fonts_check_101(font_metadata,
font_familynames):
"""METADATA.pb font.post_script_name field
contains font name in right format?
"""
for font_familyname in font_familynames:
psname = "".join(str(font_familyname).split())
if psname in "".join(font_metadata.post_script_name.split("-")):
yield PASS, ("METADATA.pb postScriptName field"
" contains font name in right format.")
else:
yield FAIL, ("METADATA.pb postScriptName (\"{}\")"
" does not match correct font name format (\"{}\")."
"").format(font_metadata.post_script_name,
font_familyname)
@check(
id = 'com.google.fonts/check/102',
conditions = ['font_metadata']
)
def com_google_fonts_check_102(font_metadata):
"""Copyright notice on METADATA.pb matches canonical pattern?"""
import re
from unidecode import unidecode
does_match = re.search(r'Copyright [0-9]{4} The .* Project Authors \([^\@]*\)',
font_metadata.copyright)
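  # e.g. "Copyright 2018 The Foo Project Authors (https://github.com/foo/foo)"
  # would match the pattern above (family name and URL here are illustrative only).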
if does_match:
yield PASS, ("METADATA.pb copyright field '{}'"
" matches canonical pattern.").format(font_metadata.copyright)
else:
yield FAIL, ("METADATA.pb: Copyright notices should match"
" a pattern similar to:"
" 'Copyright 2017 The Familyname"
" Project Authors (git url)'\n"
"But instead we have got:"
" '{}'").format(unidecode(font_metadata.copyright))
@check(
id = 'com.google.fonts/check/103',
conditions = ['font_metadata']
)
def com_google_fonts_check_103(font_metadata):
"""Copyright notice on METADATA.pb should not contain 'Reserved Font Name'."""
from unidecode import unidecode
if "Reserved Font Name" in font_metadata.copyright:
yield WARN, ("METADATA.pb: copyright field (\"{}\")"
" contains \"Reserved Font Name\"."
" This is an error except in a few specific"
" rare cases.").format(unidecode(font_metadata.copyright))
else:
yield PASS, ("METADATA.pb copyright field"
" does not contain \"Reserved Font Name\".")
@check(
id = 'com.google.fonts/check/104',
conditions = ['font_metadata']
)
def com_google_fonts_check_104(font_metadata):
"""METADATA.pb: Copyright notice shouldn't exceed 500 chars."""
if len(font_metadata.copyright) > 500:
yield FAIL, ("METADATA.pb: Copyright notice exceeds"
" maximum allowed lengh of 500 characteres.")
else:
yield PASS, "Copyright notice string is shorter than 500 chars."
@condition
def canonical_filename(font_metadata):
if not font_metadata:
return
style_names = {
"normal": "",
"italic": "Italic"
}
WEIGHT_VALUE_TO_NAME = {
100: "Thin",
200: "ExtraLight",
300: "Light",
400: "",
500: "Medium",
600: "SemiBold",
700: "Bold",
800: "ExtraBold",
900: "Black"
}
familyname = font_metadata.name.replace(" ", "")
style_weight = "{}{}".format(WEIGHT_VALUE_TO_NAME.get(font_metadata.weight),
style_names.get(font_metadata.style))
if style_weight == "":
style_weight = "Regular"
return f"{familyname}-{style_weight}.ttf"
@check(
id = 'com.google.fonts/check/105',
conditions = ['font_metadata',
'canonical_filename']
)
def com_google_fonts_check_105(font_metadata, canonical_filename):
"""METADATA.pb: Filename is set canonically?"""
if canonical_filename != font_metadata.filename:
yield FAIL, ("METADATA.pb: filename field (\"{}\")"
" does not match "
"canonical name \"{}\".".format(font_metadata.filename,
canonical_filename))
else:
yield PASS, "Filename in METADATA.pb is set canonically."
@check(
id = 'com.google.fonts/check/106',
conditions = ['font_metadata']
)
def com_google_fonts_check_106(ttFont, font_metadata):
"""METADATA.pb font.style "italic" matches font internals?"""
from fontbakery.utils import get_name_entry_strings
from fontbakery.constants import (NAMEID_FULL_FONT_NAME,
MACSTYLE_ITALIC)
if font_metadata.style != "italic":
yield SKIP, "This check only applies to italic fonts."
else:
font_fullname = get_name_entry_strings(ttFont, NAMEID_FULL_FONT_NAME)
if len(font_fullname) == 0:
yield SKIP, "Font lacks fullname entries in name table."
# this fail scenario was already checked above
# (passing those previous checks is a prerequisite for this one)
# FIXME: Could we pack this into a condition ?
else:
# FIXME: here we only check the first name entry.
# Should we iterate over them all ? Or should we check
# if they're all the same?
font_fullname = font_fullname[0]
if not bool(ttFont["head"].macStyle & MACSTYLE_ITALIC):
yield FAIL, Message("bad-macstyle",
"METADATA.pb style has been set to italic"
" but font macStyle is improperly set.")
elif not font_fullname.split("-")[-1].endswith("Italic"):
yield FAIL, Message("bad-fullfont-name",
("Font macStyle Italic bit is set"
" but nameID {} (\"{}\") is not ended with"
" \"Italic\"").format(NAMEID_FULL_FONT_NAME,
font_fullname))
else:
yield PASS, ("OK: METADATA.pb font.style \"italic\""
" matches font internals.")
@check(
id = 'com.google.fonts/check/107',
conditions = ['font_metadata']
)
def com_google_fonts_check_107(ttFont, font_metadata):
"""METADATA.pb font.style "normal" matches font internals?"""
from fontbakery.utils import get_name_entry_strings
from fontbakery.constants import (NAMEID_FONT_FAMILY_NAME,
NAMEID_FULL_FONT_NAME,
MACSTYLE_ITALIC)
if font_metadata.style != "normal":
yield SKIP, "This check only applies to normal fonts."
# FIXME: declare a common condition called "normal_style"
else:
font_familyname = get_name_entry_strings(ttFont, NAMEID_FONT_FAMILY_NAME)
font_fullname = get_name_entry_strings(ttFont, NAMEID_FULL_FONT_NAME)
if len(font_familyname) == 0 or len(font_fullname) == 0:
yield SKIP, ("Font lacks familyname and/or"
" fullname entries in name table.")
# FIXME: This is the same SKIP condition as in check/106
# so we definitely need to address them with a common condition!
else:
font_familyname = font_familyname[0]
font_fullname = font_fullname[0]
if bool(ttFont["head"].macStyle & MACSTYLE_ITALIC):
yield FAIL, Message("bad-macstyle",
("METADATA.pb style has been set to normal"
" but font macStyle is improperly set."))
elif font_familyname.split("-")[-1].endswith('Italic'):
yield FAIL, Message("familyname-italic",
("Font macStyle indicates a non-Italic font, but"
" nameID {} (FONT_FAMILY_NAME: \"{}\") ends with"
" \"Italic\".").format(NAMEID_FONT_FAMILY_NAME,
font_familyname))
elif font_fullname.split("-")[-1].endswith("Italic"):
yield FAIL, Message("fullfont-italic",
("Font macStyle indicates a non-Italic font but"
" nameID {} (FULL_FONT_NAME: \"{}\") ends with"
" \"Italic\".").format(NAMEID_FULL_FONT_NAME,
font_fullname))
else:
yield PASS, ("METADATA.pb font.style \"normal\""
" matches font internals.")
@check(
id = 'com.google.fonts/check/108',
conditions = ['font_metadata']
)
def com_google_fonts_check_108(ttFont, font_metadata):
"""METADATA.pb font.name and font.full_name fields match
the values declared on the name table?
"""
from fontbakery.utils import get_name_entry_strings
from fontbakery.constants import (NAMEID_FONT_FAMILY_NAME,
NAMEID_FULL_FONT_NAME)
font_familyname = get_name_entry_strings(ttFont, NAMEID_FONT_FAMILY_NAME)[0]
font_fullname = get_name_entry_strings(ttFont, NAMEID_FULL_FONT_NAME)[0]
# FIXME: common condition/name-id check as in the two previous checks.
if font_fullname != font_metadata.full_name:
yield FAIL, Message("fullname-mismatch",
("METADATA.pb: Fullname (\"{}\")"
" does not match name table"
" entry \"{}\" !").format(font_metadata.full_name,
font_fullname))
elif font_familyname != font_metadata.name:
yield FAIL, Message("familyname-mismatch",
("METADATA.pb Family name \"{}\")"
" does not match name table"
" entry \"{}\" !").format(font_metadata.name,
font_familyname))
else:
yield PASS, ("METADATA.pb familyname and fullName fields"
" match corresponding name table entries.")
# TODO: Design special case handling for whitelists/blacklists
# https://github.com/googlefonts/fontbakery/issues/1540
@condition
def whitelist_camelcased_familyname(font):
familynames = [
"BenchNine",
"FakeFont",
"McLaren",
"MedievalSharp",
"UnifrakturCook",
"UnifrakturMaguntia"
]
for familyname in familynames:
if familyname in font:
return True
@check(
id = 'com.google.fonts/check/109',
conditions = ['font_metadata',
'not whitelist_camelcased_familyname']
)
def com_google_fonts_check_109(font_metadata):
"""METADATA.pb: Check if fontname is not camel cased."""
import re
if bool(re.match(r'([A-Z][a-z]+){2,}', font_metadata.name)):
yield FAIL, ("METADATA.pb: '{}' is a CamelCased name."
" To solve this, simply use spaces"
" instead in the font name.").format(font_metadata.name)
else:
yield PASS, "Font name is not camel-cased."
@check(
id = 'com.google.fonts/check/110',
conditions = ['family_metadata', # that's the family-wide metadata!
'font_metadata'] # and this one's specific to a single file
)
def com_google_fonts_check_110(family_metadata, font_metadata):
"""METADATA.pb: Check font name is the same as family name."""
if font_metadata.name != family_metadata.name:
yield FAIL, ("METADATA.pb: {}: Family name \"{}\""
" does not match"
" font name: \"{}\"").format(font_metadata.filename,
family_metadata.name,
font_metadata.name)
else:
yield PASS, "Font name is the same as family name."
@check(
id = 'com.google.fonts/check/111',
conditions = ['font_metadata']
)
def com_google_fonts_check_111(font_metadata):
"""METADATA.pb: Check that font weight has a canonical value."""
first_digit = font_metadata.weight / 100
if (font_metadata.weight % 100) != 0 or \
(first_digit < 1 or first_digit > 9):
yield FAIL, ("METADATA.pb: The weight is declared"
" as {} which is not a"
" multiple of 100"
" between 100 and 900.").format(font_metadata.weight)
else:
yield PASS, "Font weight has a canonical value."
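# For check/111 above: e.g. weight=400 passes, while weight=350 or weight=1000
# would FAIL, since a canonical value must be a multiple of 100 in the 100..900 range.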
@check(
id = 'com.google.fonts/check/112',
conditions = ['font_metadata']
)
def com_google_fonts_check_112(ttFont,
font_metadata):
"""Checking OS/2 usWeightClass matches weight specified at METADATA.pb."""
# Weight name to value mapping:
GF_API_WEIGHT_NAMES = {250: "Thin",
275: "ExtraLight",
300: "Light",
400: "Regular",
500: "Medium",
600: "SemiBold",
700: "Bold",
800: "ExtraBold",
900: "Black"}
CSS_WEIGHT_NAMES = {
100: "Thin",
200: "ExtraLight",
300: "Light",
400: "Regular",
500: "Medium",
600: "SemiBold",
700: "Bold",
800: "ExtraBold",
900: "Black"
}
gf_weight = GF_API_WEIGHT_NAMES.get(ttFont["OS/2"].usWeightClass,
"bad Google Fonts API weight value")
css_weight = CSS_WEIGHT_NAMES.get(font_metadata.weight,
"bad CSS weight value")
if gf_weight != css_weight:
yield FAIL, ("OS/2 usWeightClass ({}:\"{}\") does not match"
" weight specified at METADATA.pb ({}:\"{}\")."
"").format(ttFont["OS/2"].usWeightClass,
gf_weight,
font_metadata.weight,
css_weight)
else:
yield PASS, ("OS/2 usWeightClass matches"
" weight specified at METADATA.pb")
@check(
id = 'com.google.fonts/check/113',
conditions = ['font_metadata']
)
def com_google_fonts_check_113(font_metadata):
"""METADATA.pb weight matches postScriptName."""
WEIGHTS = {
"Thin": 100,
"ThinItalic": 100,
"ExtraLight": 200,
"ExtraLightItalic": 200,
"Light": 300,
"LightItalic": 300,
"Regular": 400,
"Italic": 400,
"Medium": 500,
"MediumItalic": 500,
"SemiBold": 600,
"SemiBoldItalic": 600,
"Bold": 700,
"BoldItalic": 700,
"ExtraBold": 800,
"ExtraBoldItalic": 800,
"Black": 900,
"BlackItalic": 900
}
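# Note: every canonical weight value above maps to exactly two style names
# (the upright and the italic variant), so a successful lookup below yields a
# two-element `pair` list.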
pair = []
for k, weight in WEIGHTS.items():
if weight == font_metadata.weight:
pair.append((k, weight))
if not pair:
yield FAIL, ("METADATA.pb: Font weight value ({})"
" is invalid.").format(font_metadata.weight)
elif not (font_metadata.post_script_name.endswith('-' + pair[0][0]) or
font_metadata.post_script_name.endswith('-' + pair[1][0])):
yield FAIL, ("METADATA.pb: Mismatch between postScriptName (\"{}\")"
" and weight value ({}). The name must"
" end with \"{}\" or \"{}\"."
"").format(font_metadata.post_script_name,
pair[0][1],
pair[0][0],
pair[1][0])
else:
yield PASS, "Weight value matches postScriptName."
@check(
id = 'com.google.fonts/check/115',
conditions = ['font_metadata']
)
def com_google_fonts_check_115(ttFont, font_metadata):
"""METADATA.pb: Font styles are named canonically?"""
from fontbakery.constants import MACSTYLE_ITALIC
def find_italic_in_name_table():
for entry in ttFont["name"].names:
if "italic" in entry.string.decode(entry.getEncoding()).lower():
return True
return False
def is_italic():
return (ttFont["head"].macStyle & MACSTYLE_ITALIC or
ttFont["post"].italicAngle or
find_italic_in_name_table())
if font_metadata.style not in ["italic", "normal"]:
yield SKIP, ("This check only applies to font styles declared"
" as \"italic\" or \"normal\" on METADATA.pb.")
else:
if is_italic() and font_metadata.style != "italic":
yield FAIL, ("The font style is {}"
" but it should be italic").format(font_metadata.style)
elif not is_italic() and font_metadata.style != "normal":
yield FAIL, ("The font style is {}"
" but it should be normal").format(font_metadata.style)
else:
yield PASS, "Font styles are named canonically."
@check(
id = 'com.google.fonts/check/116'
)
def com_google_fonts_check_116(ttFont):
"""Is font em size (ideally) equal to 1000?"""
upm_height = ttFont["head"].unitsPerEm
if upm_height != 1000:
yield WARN, ("Font em size ({}) is not"
" equal to 1000.").format(upm_height)
else:
yield PASS, "Font em size is equal to 1000."
@condition
def remote_styles(family_metadata):
"""Get a dictionary of TTFont objects of all font files of
a given family as currently hosted at Google Fonts.
"""
def download_family_from_Google_Fonts(family_name):
"""Return a zipfile containing a font family hosted on fonts.google.com"""
from zipfile import ZipFile
from fontbakery.utils import download_file
url_prefix = 'https://fonts.google.com/download?family='
url = '{}{}'.format(url_prefix, family_name.replace(' ', '+'))
return ZipFile(download_file(url))
def fonts_from_zip(zipfile):
'''return a list of fontTools TTFonts'''
from fontTools.ttLib import TTFont
from io import BytesIO
fonts = []
for file_name in zipfile.namelist():
if file_name.lower().endswith(".ttf"):
file_obj = BytesIO(zipfile.open(file_name).read())
fonts.append([file_name, TTFont(file_obj)])
return fonts
if (not family_metadata or
not listed_on_gfonts_api(family_metadata)):
return None
remote_fonts_zip = download_family_from_Google_Fonts(family_metadata.name)
rstyles = {}
for remote_filename, remote_font in fonts_from_zip(remote_fonts_zip):
if '-' in remote_filename[:-4]:
remote_style = remote_filename[:-4].split('-')[1]
rstyles[remote_style] = remote_font
return rstyles
@condition
def api_gfonts_ttFont(style, remote_styles):
"""Get a TTFont object of a font downloaded from Google Fonts
corresponding to the given TTFont object of
a local font being checked.
"""
if remote_styles and style in remote_styles:
return remote_styles[style]
@condition
def github_gfonts_ttFont(ttFont, license):
"""Get a TTFont object of a font downloaded
from Google Fonts git repository.
"""
if not license:
return
from fontbakery.utils import download_file
from fontTools.ttLib import TTFont
from urllib.error import HTTPError
LICENSE_DIRECTORY = {
"OFL.txt": "ofl",
"UFL.txt": "ufl",
"LICENSE.txt": "apache"
}
filename = os.path.basename(ttFont.reader.file.name)
fontname = filename.split('-')[0].lower()
url = ("https://github.com/google/fonts/raw/master"
"/{}/{}/{}").format(LICENSE_DIRECTORY[license],
fontname,
filename)
try:
fontfile = download_file(url)
return TTFont(fontfile)
except HTTPError:
return None
@check(
id = 'com.google.fonts/check/117',
conditions = ['api_gfonts_ttFont',
'github_gfonts_ttFont']
)
def com_google_fonts_check_117(ttFont,
api_gfonts_ttFont,
github_gfonts_ttFont):
"""Version number has increased since previous release on Google Fonts?"""
v_number = ttFont["head"].fontRevision
api_gfonts_v_number = api_gfonts_ttFont["head"].fontRevision
github_gfonts_v_number = github_gfonts_ttFont["head"].fontRevision
failed = False
if v_number == api_gfonts_v_number:
failed = True
yield FAIL, ("Version number {} is equal to"
" version on Google Fonts.").format(v_number)
if v_number < api_gfonts_v_number:
failed = True
yield FAIL, ("Version number {} is less than"
" version on Google Fonts ({})."
"").format(v_number,
api_gfonts_v_number)
if v_number == github_gfonts_v_number:
failed = True
yield FAIL, ("Version number {} is equal to"
" version on Google Fonts GitHub repo."
"").format(v_number)
if v_number < github_gfonts_v_number:
failed = True
yield FAIL, ("Version number {} is less than"
" version on Google Fonts GitHub repo ({})."
"").format(v_number,
github_gfonts_v_number)
if not failed:
yield PASS, ("Version number {} is greater than"
" version on Google Fonts GitHub ({})"
" and production servers ({})."
"").format(v_number,
github_gfonts_v_number,
api_gfonts_v_number)
@check(
id = 'com.google.fonts/check/118',
conditions = ['api_gfonts_ttFont']
)
def com_google_fonts_check_118(ttFont, api_gfonts_ttFont):
"""Glyphs are similar to Google Fonts version?"""
def glyphs_surface_area(ttFont):
"""Calculate the surface area of a glyph's ink"""
from fontTools.pens.areaPen import AreaPen
glyphs = {}
glyph_set = ttFont.getGlyphSet()
area_pen = AreaPen(glyph_set)
for glyph in glyph_set.keys():
glyph_set[glyph].draw(area_pen)
area = area_pen.value
area_pen.value = 0
glyphs[glyph] = area
return glyphs
bad_glyphs = []
these_glyphs = glyphs_surface_area(ttFont)
gfonts_glyphs = glyphs_surface_area(api_gfonts_ttFont)
shared_glyphs = set(these_glyphs) & set(gfonts_glyphs)
this_upm = ttFont['head'].unitsPerEm
gfonts_upm = api_gfonts_ttFont['head'].unitsPerEm
for glyph in shared_glyphs:
# Normalize area difference against comparison's upm
this_glyph_area = (these_glyphs[glyph] / this_upm) * gfonts_upm
gfont_glyph_area = (gfonts_glyphs[glyph] / gfonts_upm) * this_upm
if abs(this_glyph_area - gfont_glyph_area) > 7000:
bad_glyphs.append(glyph)
if bad_glyphs:
yield WARN, ("The following glyphs differ greatly from the"
" Google Fonts version: [{}]").format(", ".join(bad_glyphs))
else:
yield PASS, ("Glyphs are similar in"
" comparison to the Google Fonts version.")
@check(
id = 'com.google.fonts/check/119',
conditions = ['api_gfonts_ttFont']
)
def com_google_fonts_check_119(ttFont, api_gfonts_ttFont):
"""TTFAutohint x-height increase value is the same as in the
previous release on Google Fonts?"""
def ttfauto_fpgm_xheight_rounding(fpgm_tbl, which):
"""Find the value from the fpgm table which controls ttfautohint's
increase xheight parameter, '--increase-x-height'.
This implementation is based on ttfautohint v1.6.
This function has been tested on every font in the fonts/google repo
which has an fpgm table. Results have been stored in a spreadsheet:
http://tinyurl.com/jmlfmh3
For more information regarding the fpgm table read:
http://tinyurl.com/jzekfyx"""
import re
fpgm_tbl = '\n'.join(fpgm_tbl)
xheight_pattern = r'(MPPEM\[ \].*\nPUSHW\[ \].*\n)([0-9]{1,5})'
warning = None
try:
xheight_val = int(re.search(xheight_pattern, fpgm_tbl).group(2))
except AttributeError:
warning = ("No instruction for xheight rounding found"
" on the {} font").format(which)
xheight_val = None
return (warning, xheight_val)
inc_xheight = None
gf_inc_xheight = None
if "fpgm" in ttFont:
fpgm_tbl = ttFont["fpgm"].program.getAssembly()
msg, inc_xheight = \
ttfauto_fpgm_xheight_rounding(fpgm_tbl, "this fontfile")
if msg: yield WARN, msg
if 'fpgm' in api_gfonts_ttFont:
gfonts_fpgm_tbl = api_gfonts_ttFont["fpgm"].program.getAssembly()
warn, gf_inc_xheight = \
ttfauto_fpgm_xheight_rounding(gfonts_fpgm_tbl, "GFonts release")
if warn: yield WARN, warn
if inc_xheight != gf_inc_xheight:
yield FAIL, ("TTFAutohint --increase-x-height is {}. "
"It should match the previous"
" version's value ({}).").format(inc_xheight,
gf_inc_xheight)
else:
yield PASS, ("TTFAutohint --increase-x-height is the same as in"
" the previous Google Fonts release ({}).").format(inc_xheight)
@check(
id = 'com.google.fonts/check/129',
conditions = ['style']
)
def com_google_fonts_check_129(ttFont, style):
"""Checking OS/2 fsSelection value."""
from fontbakery.utils import check_bit_entry
from fontbakery.constants import (STYLE_NAMES,
RIBBI_STYLE_NAMES,
FSSEL_REGULAR,
FSSEL_ITALIC,
FSSEL_BOLD)
# Checking fsSelection REGULAR bit:
expected = "Regular" in style or \
(style in STYLE_NAMES and
style not in RIBBI_STYLE_NAMES and
"Italic" not in style)
yield check_bit_entry(ttFont, "OS/2", "fsSelection",
expected,
bitmask=FSSEL_REGULAR,
bitname="REGULAR")
# Checking fsSelection ITALIC bit:
expected = "Italic" in style
yield check_bit_entry(ttFont, "OS/2", "fsSelection",
expected,
bitmask=FSSEL_ITALIC,
bitname="ITALIC")
# Checking fsSelection BOLD bit:
expected = style in ["Bold", "BoldItalic"]
yield check_bit_entry(ttFont, "OS/2", "fsSelection",
expected,
bitmask=FSSEL_BOLD,
bitname="BOLD")
@check(
id = 'com.google.fonts/check/130',
conditions = ['style']
)
def com_google_fonts_check_130(ttFont, style):
"""Checking post.italicAngle value."""
failed = False
value = ttFont["post"].italicAngle
# Checking that italicAngle <= 0
if value > 0:
failed = True
yield FAIL, Message("positive",
("The value of post.italicAngle must be"
" changed from {} to {}.").format(value, -value))
# Checking that the absolute value of italicAngle does not exceed 20 degrees:
if abs(value) > 20:
failed = True
yield FAIL, Message(">20 degrees",
("The value of post.italicAngle must be"
" changed from {} to -20.").format(value))
# Checking if italicAngle matches font style:
if "Italic" in style:
if ttFont['post'].italicAngle == 0:
failed = True
yield FAIL, Message("zero-italic",
("Font is italic, so post.italicAngle"
" should be non-zero."))
else:
if ttFont["post"].italicAngle != 0:
failed = True
yield FAIL, Message("non-zero-normal",
("Font is not italic, so post.italicAngle"
" should be equal to zero."))
if not failed:
yield PASS, ("Value of post.italicAngle is {}"
" with style='{}'.").format(value, style)
@check(
id = 'com.google.fonts/check/131',
conditions = ['style'],
rationale = """
The values of the flags on the macStyle entry on the 'head' OpenType
table that describe whether a font is bold and/or italic
must be coherent with the actual style of the font as inferred
by its filename.
"""
)
def com_google_fonts_check_131(ttFont, style):
"""Checking head.macStyle value."""
from fontbakery.utils import check_bit_entry
from fontbakery.constants import (MACSTYLE_ITALIC,
MACSTYLE_BOLD)
# Checking macStyle ITALIC bit:
expected = "Italic" in style
yield check_bit_entry(ttFont, "head", "macStyle",
expected,
bitmask=MACSTYLE_ITALIC,
bitname="ITALIC")
# Checking macStyle BOLD bit:
expected = style in ["Bold", "BoldItalic"]
yield check_bit_entry(ttFont, "head", "macStyle",
expected,
bitmask=MACSTYLE_BOLD,
bitname="BOLD")
@check(
id = 'com.google.fonts/check/153',
conditions = ['is_ttf'],
rationale = """
Visually QAing thousands of glyphs by hand is tiring. Most glyphs can only
be constructed in a handful of ways. This means a glyph's contour count
will only differ slightly amongst different fonts, e.g. a 'g' could have either
2 or 3 contours, depending on whether it is double-story or single-story.
However, a quotedbl should have 2 contours, unless the font belongs to a
display family.
"""
)
def com_google_fonts_check_153(ttFont):
"""Check if each glyph has the recommended amount of contours.
This check is useful to assure glyphs aren't incorrectly constructed.
The desired_glyph_data module contains the 'recommended' contour count
for encoded glyphs. The contour counts are derived from fonts which were
chosen for their quality and unique design decisions for particular glyphs.
In the future, additional glyph data can be included. A good addition would
be the 'recommended' anchor counts for each glyph.
"""
from fontbakery.glyphdata import desired_glyph_data as glyph_data
from fontbakery.utils import (get_font_glyph_data,
pretty_print_list)
from fontbakery.constants import (PLATFORM_ID__WINDOWS,
PLAT_ENC_ID__UCS2)
# rearrange data structure:
desired_glyph_data = {}
for glyph in glyph_data:
desired_glyph_data[glyph['unicode']] = glyph
bad_glyphs = []
desired_glyph_contours = {f: desired_glyph_data[f]['contours']
for f in desired_glyph_data}
font_glyph_data = get_font_glyph_data(ttFont)
if font_glyph_data is None:
yield FAIL, "This font lacks cmap data."
else:
font_glyph_contours = {f['unicode']: list(f['contours'])[0]
for f in font_glyph_data}
shared_glyphs = set(desired_glyph_contours) & set(font_glyph_contours)
for glyph in shared_glyphs:
if font_glyph_contours[glyph] not in desired_glyph_contours[glyph]:
bad_glyphs.append([glyph,
font_glyph_contours[glyph],
desired_glyph_contours[glyph]])
if len(bad_glyphs) > 0:
cmap = ttFont['cmap'].getcmap(PLATFORM_ID__WINDOWS,
PLAT_ENC_ID__UCS2).cmap
bad_glyphs_name = [("Glyph name: {}\t"
"Contours detected: {}\t"
"Expected: {}").format(cmap[name],
count,
pretty_print_list(expected))
for name, count, expected in bad_glyphs]
yield WARN, (("This check inspects the glyph outlines and detects the"
" total number of contours in each of them. The expected"
" values are inferred from the typical number of"
" contours observed in a large collection of reference"
" font families. The divergences listed below may simply"
" indicate a significantly different design on some of"
" your glyphs. On the other hand, some of these may flag"
" actual bugs in the font such as glyphs mapped to an"
" incorrect codepoint. Please consider reviewing"
" the design and codepoint assignment of these to make"
" sure they are correct.\n"
"\n"
"The following glyphs do not have the recommended"
" number of contours:\n"
"\n{}").format('\n'.join(bad_glyphs_name)))
else:
yield PASS, "All glyphs have the recommended amount of contours"
@check(
id = 'com.google.fonts/check/154',
conditions = ['api_gfonts_ttFont']
)
def com_google_fonts_check_154(ttFont, api_gfonts_ttFont):
"""Check font has same encoded glyphs as version hosted on
fonts.google.com"""
cmap = ttFont['cmap'].getcmap(3, 1).cmap
gf_cmap = api_gfonts_ttFont['cmap'].getcmap(3, 1).cmap
missing_codepoints = set(gf_cmap.keys()) - set(cmap.keys())
if missing_codepoints:
hex_codepoints = ['0x' + hex(c).upper()[2:].zfill(4) for c
in missing_codepoints]
yield FAIL, ("Font is missing the following glyphs"
" from the previous release"
" [{}]").format(', '.join(hex_codepoints))
else:
yield PASS, ('Font has all the glyphs from the previous release')
@check(
id = 'com.google.fonts/check/155',
conditions = ['font_metadata']
)
def com_google_fonts_check_155(ttFont, font_metadata):
"""Copyright field for this font on METADATA.pb matches
all copyright notice entries on the name table ?"""
from fontbakery.constants import NAMEID_COPYRIGHT_NOTICE
from unidecode import unidecode
failed = False
for nameRecord in ttFont['name'].names:
string = nameRecord.string.decode(nameRecord.getEncoding())
if nameRecord.nameID == NAMEID_COPYRIGHT_NOTICE and\
string != font_metadata.copyright:
failed = True
yield FAIL, ("Copyright field for this font on METADATA.pb ('{}')"
" differs from a copyright notice entry"
" on the name table:"
" '{}'").format(font_metadata.copyright,
unidecode(string))
if not failed:
yield PASS, ("Copyright field for this font on METADATA.pb matches"
" copyright notice entries on the name table.")
@condition
def familyname_with_spaces(familyname):
FAMILY_WITH_SPACES_EXCEPTIONS = {'VT323': 'VT323',
'K2D': 'K2D',
'PressStart2P': 'Press Start 2P',
'ABeeZee': 'ABeeZee',
'IBMPlexMono': 'IBM Plex Mono',
'IBMPlexSans': 'IBM Plex Sans',
'IBMPlexSerif': 'IBM Plex Serif'}
if familyname in FAMILY_WITH_SPACES_EXCEPTIONS.keys():
return FAMILY_WITH_SPACES_EXCEPTIONS[familyname]
result = []
for c in familyname:
if c.isupper():
result.append(" ")
result.append(c)
result = ''.join(result).strip()
def of_special_case(s):
"""Special case for family names such as
MountainsofChristmas which would need to
have the "of" split apart from "Mountains".
See also: https://github.com/googlefonts/fontbakery/issues/1489
"Failure to handle font family with 3 words in it"
"""
if s[-2:] == "of":
return s[:-2] + " of"
else:
return s
result = " ".join(map(of_special_case, result.split(" ")))
if result[-3:] == "S C":
return result[:-3] + "SC"
else:
return result
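# A few illustrative mappings (hypothetical inputs) for the rules above:
#   "PressStart2P"         -> "Press Start 2P"          (explicit exception)
#   "MountainsofChristmas" -> "Mountains of Christmas"  (the "of" special case)
#   "PlayfairDisplaySC"    -> "Playfair Display SC"     (the trailing "SC" rule)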
def get_only_weight(value):
onlyWeight = {"BlackItalic": "Black",
"BoldItalic": "",
"ExtraBold": "ExtraBold",
"ExtraBoldItalic": "ExtraBold",
"ExtraLightItalic": "ExtraLight",
"LightItalic": "Light",
"MediumItalic": "Medium",
"SemiBoldItalic": "SemiBold",
"ThinItalic": "Thin"}
if value in onlyWeight.keys():
return onlyWeight[value]
else:
return value
@check(
id = 'com.google.fonts/check/156',
conditions = ['style'],
misc_metadata = {
'priority': IMPORTANT
})
def com_google_fonts_check_156(ttFont, style):
"""Font has all mandatory 'name' table entries ?"""
from fontbakery.utils import get_name_entry_strings
from fontbakery.constants import (RIBBI_STYLE_NAMES,
NAMEID_STR,
NAMEID_FONT_FAMILY_NAME,
NAMEID_FONT_SUBFAMILY_NAME,
NAMEID_FULL_FONT_NAME,
NAMEID_POSTSCRIPT_NAME,
NAMEID_TYPOGRAPHIC_FAMILY_NAME,
NAMEID_TYPOGRAPHIC_SUBFAMILY_NAME)
required_nameIDs = [NAMEID_FONT_FAMILY_NAME,
NAMEID_FONT_SUBFAMILY_NAME,
NAMEID_FULL_FONT_NAME,
NAMEID_POSTSCRIPT_NAME]
if style not in RIBBI_STYLE_NAMES:
required_nameIDs += [NAMEID_TYPOGRAPHIC_FAMILY_NAME,
NAMEID_TYPOGRAPHIC_SUBFAMILY_NAME]
failed = False
# The font must have at least these name IDs:
for nameId in required_nameIDs:
if len(get_name_entry_strings(ttFont, nameId)) == 0:
failed = True
yield FAIL, ("Font lacks entry with"
" nameId={} ({})").format(nameId,
NAMEID_STR[nameId])
if not failed:
yield PASS, "Font contains values for all mandatory name table entries."
@check(
id = 'com.google.fonts/check/157',
conditions = ['style'],
misc_metadata = {
'priority': IMPORTANT
})
def com_google_fonts_check_157(ttFont, style, familyname_with_spaces):
""" Check name table: FONT_FAMILY_NAME entries. """
from fontbakery.utils import name_entry_id
from fontbakery.constants import (NAMEID_FONT_FAMILY_NAME,
PLATFORM_ID__MACINTOSH,
PLATFORM_ID__WINDOWS)
failed = False
only_weight = get_only_weight(style)
for name in ttFont['name'].names:
if name.nameID == NAMEID_FONT_FAMILY_NAME:
if name.platformID == PLATFORM_ID__MACINTOSH:
expected_value = familyname_with_spaces
elif name.platformID == PLATFORM_ID__WINDOWS:
if style in ['Regular',
'Italic',
'Bold',
'Bold Italic']:
expected_value = familyname_with_spaces
else:
expected_value = " ".join([familyname_with_spaces,
only_weight]).strip()
else:
failed = True
yield FAIL, ("Font should not have a "
"{} entry!").format(name_entry_id(name))
continue
string = name.string.decode(name.getEncoding()).strip()
if string != expected_value:
yield FAIL, ("Entry {} on the 'name' table: "
"Expected '{}' "
"but got '{}'.").format(name_entry_id(name),
expected_value,
string)
if not failed:
yield PASS, "FONT_FAMILY_NAME entries are all good."
@check(
id = 'com.google.fonts/check/158',
conditions = ['style'],
misc_metadata = {
'priority': IMPORTANT
})
def com_google_fonts_check_158(ttFont, style, familyname_with_spaces):
""" Check name table: FONT_SUBFAMILY_NAME entries. """
from fontbakery.utils import name_entry_id
from fontbakery.constants import (NAMEID_FONT_SUBFAMILY_NAME,
PLATFORM_ID__MACINTOSH,
PLATFORM_ID__WINDOWS,
STYLE_NAMES)
failed = False
style_with_spaces = style.replace('Italic',
' Italic').strip()
for name in ttFont['name'].names:
if name.nameID == NAMEID_FONT_SUBFAMILY_NAME:
if style_with_spaces not in STYLE_NAMES:
yield FAIL, ("Style name '{}' inferred from filename"
" is not canonical."
" Valid options are: {}").format(style_with_spaces,
STYLE_NAMES)
failed = True
continue
if name.platformID == PLATFORM_ID__MACINTOSH:
expected_value = style_with_spaces
elif name.platformID == PLATFORM_ID__WINDOWS:
if style_with_spaces in ["Bold", "Bold Italic"]:
expected_value = style_with_spaces
else:
if "Italic" in style:
expected_value = "Italic"
else:
expected_value = "Regular"
else:
yield FAIL, ("Font should not have a "
"{} entry!").format(name_entry_id(name))
failed = True
continue
string = name.string.decode(name.getEncoding()).strip()
if string != expected_value:
yield FAIL, ("Entry {} on the 'name' table: "
"Expected '{}' "
"but got '{}'.").format(name_entry_id(name),
expected_value,
string)
if not failed:
yield PASS, "FONT_SUBFAMILY_NAME entries are all good."
@check(
id = 'com.google.fonts/check/159',
conditions = ['style'],
misc_metadata = {
'priority': IMPORTANT
})
def com_google_fonts_check_159(ttFont, style, familyname_with_spaces):
""" Check name table: FULL_FONT_NAME entries. """
from unidecode import unidecode
from fontbakery.utils import name_entry_id
from fontbakery.constants import NAMEID_FULL_FONT_NAME
failed = False
style_with_spaces = style.replace('Italic',
' Italic').strip()
for name in ttFont['name'].names:
if name.nameID == NAMEID_FULL_FONT_NAME:
expected_value = "{} {}".format(familyname_with_spaces,
style_with_spaces)
string = name.string.decode(name.getEncoding()).strip()
if string != expected_value:
failed = True
# special case
# see https://github.com/googlefonts/fontbakery/issues/1436
if style == "Regular" \
and string == familyname_with_spaces:
yield WARN, ("Entry {} on the 'name' table:"
" Got '{}' which lacks 'Regular',"
" but it is probably OK in this case."
"").format(name_entry_id(name),
unidecode(string))
else:
yield FAIL, ("Entry {} on the 'name' table: "
"Expected '{}' "
"but got '{}'.").format(name_entry_id(name),
expected_value,
unidecode(string))
if not failed:
yield PASS, "FULL_FONT_NAME entries are all good."
@check(
id = 'com.google.fonts/check/160',
conditions = ['style'],
misc_metadata = {
'priority': IMPORTANT
})
def com_google_fonts_check_160(ttFont, style, familyname):
""" Check name table: POSTSCRIPT_NAME entries. """
from unidecode import unidecode
from fontbakery.utils import name_entry_id
from fontbakery.constants import NAMEID_POSTSCRIPT_NAME
failed = False
for name in ttFont['name'].names:
if name.nameID == NAMEID_POSTSCRIPT_NAME:
expected_value = f"{familyname}-{style}"
string = name.string.decode(name.getEncoding()).strip()
if string != expected_value:
failed = True
yield FAIL, ("Entry {} on the 'name' table: "
"Expected '{}' "
"but got '{}'.").format(name_entry_id(name),
expected_value,
unidecode(string))
if not failed:
yield PASS, "POSTSCRIPT_NAME entries are all good."
@check(
id = 'com.google.fonts/check/161',
conditions = ['style'],
misc_metadata = {
'priority': IMPORTANT
})
def com_google_fonts_check_161(ttFont, style, familyname_with_spaces):
""" Check name table: TYPOGRAPHIC_FAMILY_NAME entries. """
from unidecode import unidecode
from fontbakery.utils import name_entry_id
from fontbakery.constants import NAMEID_TYPOGRAPHIC_FAMILY_NAME
failed = False
for name in ttFont['name'].names:
if name.nameID == NAMEID_TYPOGRAPHIC_FAMILY_NAME:
if style in ['Regular',
'Italic',
'Bold',
'Bold Italic']:
yield WARN, ("Font style is '{}' and, for that reason,"
" it is not expected to have a "
"{} entry!").format(style,
name_entry_id(name))
else:
expected_value = familyname_with_spaces
string = name.string.decode(name.getEncoding()).strip()
if string != expected_value:
failed = True
yield FAIL, ("Entry {} on the 'name' table: "
"Expected '{}' "
"but got '{}'.").format(name_entry_id(name),
expected_value,
unidecode(string))
if not failed:
yield PASS, "TYPOGRAPHIC_FAMILY_NAME entries are all good."
@check(
id = 'com.google.fonts/check/162',
conditions=['style'],
misc_metadata = {
'priority': IMPORTANT
})
def com_google_fonts_check_162(ttFont, style):
""" Check name table: TYPOGRAPHIC_SUBFAMILY_NAME entries. """
from unidecode import unidecode
from fontbakery.utils import name_entry_id
from fontbakery.constants import NAMEID_TYPOGRAPHIC_SUBFAMILY_NAME
failed = False
style_with_spaces = style.replace('Italic',
' Italic').strip()
for name in ttFont['name'].names:
if name.nameID == NAMEID_TYPOGRAPHIC_SUBFAMILY_NAME:
if style in ['Regular',
'Italic',
'Bold',
'Bold Italic']:
yield WARN, ("Font style is '{}' and, for that reason,"
" it is not expected to have a "
"{} entry!").format(style,
name_entry_id(name))
else:
expected_value = style_with_spaces
string = name.string.decode(name.getEncoding()).strip()
if string != expected_value:
failed = True
yield FAIL, ("Entry {} on the 'name' table: "
"Expected '{}' "
"but got '{}'.").format(name_entry_id(name),
expected_value,
unidecode(string))
if not failed:
yield PASS, "TYPOGRAPHIC_SUBFAMILY_NAME entries are all good."
@check(
id = 'com.google.fonts/check/164',
rationale = """
This is an arbitrary maximum length for the copyright notice field
of the name table. We simply don't want such notices to be too long.
Typically such notices are actually much shorter than this, with
a length of roughly 70 or 80 characters.
""",
misc_metadata = {
'request': 'https://github.com/googlefonts/fontbakery/issues/1603'
})
def com_google_fonts_check_164(ttFont):
""" Length of copyright notice must not exceed 500 characters. """
from unidecode import unidecode
from fontbakery.utils import get_name_entries
from fontbakery.constants import NAMEID_COPYRIGHT_NOTICE
failed = False
for notice in get_name_entries(ttFont, NAMEID_COPYRIGHT_NOTICE):
notice_str = notice.string.decode(notice.getEncoding())
if len(notice_str) > 500:
failed = True
yield FAIL, ("The length of the following copyright notice ({})"
" exceeds 500 chars: '{}'"
"").format(len(notice_str),
unidecode(notice_str))
if not failed:
yield PASS, ("All copyright notice name entries on the"
" 'name' table are shorter than 500 characters.")
@check(
id = 'com.google.fonts/check/165',
rationale = """
We need to check names are not already used, and today the best
place to check that is http://namecheck.fontdata.com
""",
conditions = ["familyname"],
misc_metadata = {
'request': 'https://github.com/googlefonts/fontbakery/issues/494'
})
def com_google_fonts_check_165(ttFont, familyname):
""" Familyname must be unique according to namecheck.fontdata.com """
FB_ISSUE_TRACKER = "https://github.com/googlefonts/fontbakery/issues"
import requests
url = f"http://namecheck.fontdata.com/?q={familyname}"
try:
response = requests.get(url, timeout=10)
data = response.content.decode("utf-8")
if "fonts by that exact name" in data:
yield INFO, ("The family name '{}' seems to be already in use.\n"
"Please visit {} for more info.").format(familyname, url)
else:
yield PASS, "Font familyname seems to be unique."
except Exception:
yield ERROR, ("Failed to access: '{}'.\n"
"Please report this issue at:\n{}").format(url,
FB_ISSUE_TRACKER)
@check(
id = 'com.google.fonts/check/166',
rationale = """
The git sha1 tagging and dev/release features of the Source Foundry font-v
tool are awesome and we would love to consider upstreaming the approach
into fontmake someday. For now we only emit a WARN if a given font does
not yet follow the experimental versioning style, but at some point we
may start enforcing it.
""",
misc_metadata = {
'request': 'https://github.com/googlefonts/fontbakery/issues/1563'
})
def com_google_fonts_check_166(ttFont):
""" Check for font-v versioning """
from fontv.libfv import FontVersion
fv = FontVersion(ttFont)
if fv.version and (fv.is_development or fv.is_release):
yield PASS, "Font version string looks GREAT!"
else:
yield INFO, ("Version string is: \"{}\"\n"
"The version string must ideally include a git commit hash"
" and either a 'dev' or a 'release' suffix such as in the"
" example below:\n"
"\"Version 1.3; git-0d08353-release\""
"").format(fv.get_name_id5_version_string())
# Disabling this check since the previous implementation was
# bogus due to the way fonttools encodes the data into the TTF
# files and the new attempt at targetting the real problem is
# still not quite right.
# FIXME: reimplement this addressing the actual root cause of the issue.
# See also ongoing discussion at:
# https://github.com/googlefonts/fontbakery/issues/1727
@disable
@check(
id = 'com.google.fonts/check/173',
rationale = """
Advance width values in the Horizontal Metrics (hmtx)
table cannot be negative since they are encoded as unsigned
16-bit values. But some programs may infer and report
a negative advance by looking up the x-coordinates of
the glyphs directly on the glyf table.
There are reports of broken versions of Glyphs.app causing
this kind of problem as reported at
https://github.com/googlefonts/fontbakery/issues/1720 and
https://github.com/fonttools/fonttools/pull/1198
This check detects and reports such malformed
glyf table entries.
""",
conditions = ['is_ttf'],
misc_metadata = {
'request': 'https://github.com/googlefonts/fontbakery/issues/1720'
})
def com_google_fonts_check_173(ttFont):
""" Check that advance widths cannot be inferred as negative. """
failed = False
for glyphName in ttFont["glyf"].glyphs:
coords = ttFont["glyf"][glyphName].coordinates
rightX = coords[-3][0]
leftX = coords[-4][0]
advwidth = rightX - leftX
if advwidth < 0:
failed = True
yield FAIL, ("glyph '{}' has bad coordinates on the glyf table,"
" which may lead to the advance width to be"
" interpreted as a negative"
" value ({}).").format(glyphName,
advwidth)
if not failed:
yield PASS, "The x-coordinates of all glyphs look good."
@check(
id = 'com.google.fonts/check/174',
rationale = """
Google Fonts may serve static fonts which have been generated
from variable fonts.
This test will attempt to generate a static ttf using fontTools'
varLib mutator.
The target font will be the mean of each axis e.g:
VF font axes:
min weight, max weight = 400, 800
min width, max width = 50, 100
Target Instance:
weight = 600,
width = 75
""",
conditions = ['is_variable_font'],
misc_metadata = {
'request': 'https://github.com/googlefonts/fontbakery/issues/1727'
})
def com_google_fonts_check_174(ttFont):
""" Check a static ttf can be generated from a variable font. """
import tempfile
from fontTools.varLib import mutator
try:
loc = {k.axisTag: float((k.maxValue + k.minValue) / 2)
for k in ttFont['fvar'].axes}
with tempfile.TemporaryFile() as instance:
font = mutator.instantiateVariableFont(ttFont, loc)
font.save(instance)
yield PASS, ("fontTools.varLib.mutator generated a static font "
"instance")
except Exception as e:
yield FAIL, ("fontTools.varLib.mutator failed to generate a static font "
"instance\n{}".format(repr(e)))
@check(
id = 'com.google.fonts/check/040',
conditions = ['vmetrics']
)
def com_google_fonts_check_040(ttFont, vmetrics):
"""Checking OS/2 usWinAscent & usWinDescent.
A font's winAscent and winDescent values should be greater than the
head table's yMax, abs(yMin) values. If they are less than these
values, clipping can occur on Windows platforms,
https://github.com/RedHatBrand/Overpass/issues/33
If the font includes tall/deep writing systems such as Arabic or
Devanagari, the winAscent and winDescent can be greater than the yMax and
abs(yMin) to accommodate vowel marks.
When the win Metrics are significantly greater than the upm, the
linespacing can appear too loose. To counteract this, enabling the
OS/2 fsSelection bit 7 (Use_Typo_Metrics) will force Windows to use the
OS/2 typo values instead. This means the font developer can control the
linespacing with the typo values, whilst avoiding clipping by setting
the win values to values greater than the yMax and abs(yMin).
"""
failed = False
# OS/2 usWinAscent:
if ttFont['OS/2'].usWinAscent < vmetrics['ymax']:
failed = True
yield FAIL, Message("ascent",
("OS/2.usWinAscent value"
" should be equal or greater than {}, but got"
" {} instead").format(vmetrics['ymax'],
ttFont['OS/2'].usWinAscent))
# OS/2 usWinDescent:
if ttFont['OS/2'].usWinDescent < abs(vmetrics['ymin']):
failed = True
yield FAIL, Message(
"descent", ("OS/2.usWinDescent value"
" should be equal or greater than {}, but got"
" {} instead").format(
abs(vmetrics['ymin']), ttFont['OS/2'].usWinDescent))
if not failed:
yield PASS, "OS/2 usWinAscent & usWinDescent values look good!"
@check(
id = 'com.google.fonts/check/042'
)
def com_google_fonts_check_042(ttFont):
"""Checking OS/2 Metrics match hhea Metrics.
OS/2 and hhea vertical metric values should match. This will produce
the same linespacing on Mac, GNU/Linux and Windows.
Mac OS X uses the hhea values.
Windows uses OS/2 or Win, depending on the OS or fsSelection bit value.
"""
# OS/2 sTypoAscender and sTypoDescender match hhea ascent and descent
if ttFont["OS/2"].sTypoAscender != ttFont["hhea"].ascent:
yield FAIL, Message("ascender",
"OS/2 sTypoAscender and hhea ascent must be equal.")
elif ttFont["OS/2"].sTypoDescender != ttFont["hhea"].descent:
yield FAIL, Message("descender",
"OS/2 sTypoDescender and hhea descent must be equal.")
else:
yield PASS, ("OS/2.sTypoAscender/Descender" " match hhea.ascent/descent.")
@check(
id = 'com.google.fonts/check/072',
conditions = ['is_ttf']
)
def com_google_fonts_check_072(ttFont):
"""Font enables smart dropout control in "prep" table instructions?
B8 01 FF PUSHW 0x01FF
85 SCANCTRL (unconditionally turn on
dropout control mode)
B0 04 PUSHB 0x04
8D SCANTYPE (enable smart dropout control)
Smart dropout control means activating rules 1, 2 and 5:
Rule 1: If a pixel's center falls within the glyph outline,
that pixel is turned on.
Rule 2: If a contour falls exactly on a pixel's center,
that pixel is turned on.
Rule 5: If a scan line between two adjacent pixel centers
(either vertical or horizontal) is intersected
by both an on-Transition contour and an off-Transition
contour and neither of the pixels was already turned on
by rules 1 and 2, turn on the pixel which is closer to
the midpoint between the on-Transition contour and
off-Transition contour. This is "Smart" dropout control.
"""
INSTRUCTIONS = b"\xb8\x01\xff\x85\xb0\x04\x8d"
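# For reference, the prep-table bytecode can be inspected with fontTools to see
# whether the instruction sequence above is present (a sketch, with a
# hypothetical font path):
#
#   from fontTools.ttLib import TTFont
#   print(TTFont("MyFont.ttf")["prep"].program.getAssembly())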
if ("prep" in ttFont and
INSTRUCTIONS in ttFont["prep"].program.getBytecode()):
yield PASS, ("'prep' table contains instructions"
" enabling smart dropout control.")
else:
yield FAIL, ("'prep' table does not contain TrueType"
" instructions enabling smart dropout control."
" To fix, export the font with autohinting enabled,"
" or run ttfautohint on the font, or run the"
" `gftools fix-nonhinting` script.")
@condition
def vtt_talk_sources(ttFont):
VTT_TALK_TABLES = {
'TSI0', 'TSI1', 'TSI2', 'TSI3', 'TSI5'}
tables_found = []
for table in ttFont.keys():
if table in VTT_TALK_TABLES:
tables_found.append(table)
return tables_found
@check(
id = 'com.google.fonts/check/vttclean'
)
def com_google_fonts_check_vtt_clean(ttFont, vtt_talk_sources):
"""There must not be VTT Talk sources in the font."""
if vtt_talk_sources:
yield FAIL, ("Some tables containing VTT Talk (hinting) sources"
" were found in the font and should be removed in order"
" to reduce total filesize:"
" {}").format(", ".join(vtt_talk_sources))
else:
yield PASS, "There are no tables with VTT Talk sources embedded in the font."
def is_librebarcode(font):
font_filenames = [
"LibreBarcode39-Regular.ttf",
"LibreBarcode39Text-Regular.ttf",
"LibreBarcode128-Regular.ttf",
"LibreBarcode128Text-Regular.ttf",
"LibreBarcode39Extended-Regular.ttf",
"LibreBarcode39ExtendedText-Regular.ttf"
]
for font_filename in font_filenames:
if font_filename in font:
return True
@condition(force=True)
def fontforge_skip_checks(font):
"""Skip by fontforge reported issues for google fonts specific fonts."""
if is_librebarcode(font):
# see https://github.com/graphicore/librebarcode/issues/3
# 0x20: Glyphs have points at extremas
# 0x200: Font doesn't have invalid glyph names
return 0x20 + 0x200
return None
def check_skip_filter(checkid, font=None, **iterargs):
if font and is_librebarcode(font) and checkid in (
# See: https://github.com/graphicore/librebarcode/issues/3
'com.google.fonts/check/033' # Checking correctness of monospaced metadata.
, 'com.google.fonts/check/063' # Does GPOS table have kerning information?
, 'com.google.fonts/check/070' # Font has all expected currency sign characters?
, 'com.google.fonts/check/049' # Whitespace glyphs have ink?
):
return False, ('LibreBarcode is blacklisted for this check, see '
'https://github.com/graphicore/librebarcode/issues/3')
return True, None
specification.check_skip_filter = check_skip_filter
specification.auto_register(globals())
specification.test_expected_checks(expected_check_ids, exclusive=True)
# FIXME: use logging.info or remove?
for section_name, section in specification._sections.items():
print ("{} checks on {}".format(len(section._checks), section_name))
|
import os
import random
import numpy as np
import zipfile
import collections
from mxnet import nd, gluon
from mxnet.gluon import utils as gutils, data as gdata
def data_iter_consecutive(corpus_indices, batch_size, num_steps, ctx=None):
"""Sample mini-batches in a consecutive order from sequential data."""
# Offset for the iterator over the data for uniform starts
offset = int(random.uniform(0, num_steps))
# Slice out data so it splits evenly into batch_size rows; any leftover tail is discarded
num_indices = ((len(corpus_indices) - offset) // batch_size) * batch_size
indices = nd.array(corpus_indices[offset:(offset + num_indices)], ctx=ctx)
indices = indices.reshape((batch_size, -1))
# Need to leave one last token since targets are shifted by 1
num_batches = ((num_indices // batch_size) - 1) // num_steps
for i in range(0, num_batches * num_steps, num_steps):
X = indices[:, i:(i+num_steps)]
Y = indices[:, (i+1):(i+1+num_steps)]
yield X, Y
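# A minimal usage sketch (toy corpus of integer token ids); the shapes follow
# directly from the reshaping and slicing above:
#
#   corpus = list(range(30))
#   for X, Y in data_iter_consecutive(corpus, batch_size=2, num_steps=5):
#       print(X.shape, Y.shape)  # both (2, 5); Y is X shifted by one token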
def data_iter_random(corpus_indices, batch_size, num_steps, ctx=None):
"""Sample mini-batches in a random order from sequential data."""
# Offset for the iterator over the data
offset = int(random.uniform(0, num_steps))
# Subtract 1 extra since we need to account for the sequence length
num_examples = ((len(corpus_indices) - offset - 1) // num_steps) - 1
# Discard half empty batches
num_batches = num_examples // batch_size
example_indices = list(
range(offset, offset + num_examples * num_steps, num_steps))
random.shuffle(example_indices)
# This returns a sequence of the length num_steps starting from pos.
def _data(pos):
return corpus_indices[pos: pos + num_steps]
for i in range(0, batch_size * num_batches, batch_size):
# batch_size indicates the random examples read each time.
batch_indices = example_indices[i:(i+batch_size)]
X = [_data(j) for j in batch_indices]
Y = [_data(j + 1) for j in batch_indices]
yield nd.array(X, ctx), nd.array(Y, ctx)
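# Usage mirrors data_iter_consecutive, but examples are drawn in random order
# (a sketch with the same toy corpus):
#
#   for X, Y in data_iter_random(list(range(30)), batch_size=2, num_steps=5):
#       print(X.shape, Y.shape)  # both (2, 5)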
def load_data_time_machine(num_examples=10000):
"""Load the time machine data set (available in the English book)."""
with open('../data/timemachine.txt') as f:
raw_text = f.read()
lines = raw_text.split('\n')
text = ' '.join(' '.join(lines).lower().split())[:num_examples]
vocab = Vocab(text)
corpus_indices = [vocab[char] for char in text]
return corpus_indices, vocab
def mkdir_if_not_exist(path):
"""Make a directory if it does not exist."""
if not os.path.exists(os.path.join(*path)):
os.makedirs(os.path.join(*path))
class Vocab(object):
def __init__(self, tokens, min_freq=0, use_special_tokens=False):
counter = collections.Counter(tokens)
token_freqs = sorted(counter.items(), key=lambda x: x[0])
token_freqs.sort(key=lambda x: x[1], reverse=True)
if use_special_tokens:
self.pad, self.bos, self.eos, self.unk = (0, 1, 2, 3)
special_tokens = ['<pad>', '<bos>', '<eos>', '<unk>']
else:
self.unk = 0
special_tokens = ['<unk>']
tokens = [token for token, freq in token_freqs
if freq >= min_freq and token not in special_tokens]
self.idx_to_token = []
self.token_to_idx = dict()
for token in special_tokens + tokens:
self.idx_to_token.append(token)
self.token_to_idx[token] = len(self.idx_to_token) - 1
def __len__(self):
return len(self.idx_to_token)
def __getitem__(self, tokens):
if not isinstance(tokens, (list, tuple)):
return self.token_to_idx.get(tokens, self.unk)
else:
return [self.__getitem__(token) for token in tokens]
def to_tokens(self, indices):
if not isinstance(indices, (list, tuple)):
return self.idx_to_token[indices]
else:
return [self.idx_to_token[index] for index in indices]
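# A minimal usage sketch (hypothetical character tokens), illustrating the
# token/index round-trip provided by the class above:
#
#   vocab = Vocab(list("hello world"))
#   ids = vocab[list("hello")]     # list of integer indices; unknown tokens map to vocab.unk
#   chars = vocab.to_tokens(ids)   # back to the original characters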
|
"""
This module contains all the possible random distribution names
that can be set in each of the Space variables.
"""
import enum
class ExtendedEnum(enum.Enum):
@classmethod
def list(cls):
return list(map(lambda c: c.value, cls))
class IntegerDistributions(ExtendedEnum):
uniform = "uniform"
class ContinuousDistributions(ExtendedEnum):
uniform = "uniform"
log_uniform = "log-uniform"
class CategoricalDistributions(ExtendedEnum):
choice = "choice"
|
import logging
from typing import List, Union
import click
import numpy as np
from vpype import (
LayerType,
LengthType,
LineCollection,
LineIndex,
VectorData,
global_processor,
layer_processor,
multiple_to_layer_ids,
)
from .cli import cli
@cli.command(group="Operations")
@click.argument("x", type=LengthType(), required=True)
@click.argument("y", type=LengthType(), required=True)
@click.argument("width", type=LengthType(), required=True)
@click.argument("height", type=LengthType(), required=True)
@layer_processor
def crop(lines: LineCollection, x: float, y: float, width: float, height: float):
"""Crop the geometries.
The crop area is defined by the (X, Y) top-left corner and the WIDTH and HEIGHT arguments.
All arguments understand supported units.
"""
lines.crop(x, y, x + width, y + height)
return lines
@cli.command(group="Operations")
@click.argument("margin_x", type=LengthType(), required=True)
@click.argument("margin_y", type=LengthType(), required=True)
@click.option(
"-l",
"--layer",
type=LayerType(accept_multiple=True),
default="all",
help="Target layer(s).",
)
@global_processor
def trim(
vector_data: VectorData, margin_x: float, margin_y: float, layer: Union[int, List[int]]
) -> VectorData:
"""Trim the geometries by some margin.
This command trims the geometries by the provided X and Y margins with respect to the
current bounding box.
By default, `trim` acts on all layers. If one or more layer IDs are provided with the
`--layer` option, only these layers will be affected. In this case, the bounding box is
that of the listed layers.
"""
layer_ids = multiple_to_layer_ids(layer, vector_data)
bounds = vector_data.bounds(layer_ids)
if not bounds:
return vector_data
min_x = bounds[0] + margin_x
max_x = bounds[2] - margin_x
min_y = bounds[1] + margin_y
max_y = bounds[3] - margin_y
if min_x > max_x:
min_x = max_x = 0.5 * (min_x + max_x)
if min_y > max_y:
min_y = max_y = 0.5 * (min_y + max_y)
for vid in layer_ids:
lc = vector_data[vid]
lc.crop(min_x, min_y, max_x, max_y)
return vector_data
@cli.command(group="Operations")
@click.option(
"-t",
"--tolerance",
type=LengthType(),
default="0.05mm",
help="Maximum distance between two line endings that should be merged.",
)
@click.option(
"-f", "--no-flip", is_flag=True, help="Disable reversing stroke direction for merging."
)
@layer_processor
def linemerge(lines: LineCollection, tolerance: float, no_flip: bool = True):
"""
Merge lines whose endings overlap or are very close.
Stroke direction may be reversed as required to further the merge, so `linemerge` can join
a line's end with either the start or the end of another line. Pass the `--no-flip` option
to preserve stroke direction and disable this behaviour.
By default, gaps of maximum 0.05mm are considered for merging. This can be controlled with
the `--tolerance` option.
"""
lines.merge(tolerance=tolerance, flip=not no_flip)
return lines
@cli.command(group="Operations")
@click.option(
"-f",
"--no-flip",
is_flag=True,
help="Disable reversing stroke direction for optimization.",
)
@layer_processor
def linesort(lines: LineCollection, no_flip: bool = True):
"""
Sort lines to minimize the pen-up travel distance.
Note: this process can be lengthy depending on the total number of lines. Consider using
`linemerge` before `linesort` to reduce the total number of lines and thus significantly
improve the overall plotting time.
"""
if len(lines) < 2:
return lines
index = LineIndex(lines[1:], reverse=not no_flip)
new_lines = LineCollection([lines[0]])
while len(index) > 0:
idx, reverse = index.find_nearest(new_lines[-1][-1])
line = index.pop(idx)
if reverse:
line = np.flip(line)
new_lines.append(line)
logging.info(
f"optimize: reduced pen-up (distance, mean, median) from {lines.pen_up_length()} to "
f"{new_lines.pen_up_length()}"
)
return new_lines
@cli.command(group="Operations")
@click.option(
"-t",
"--tolerance",
type=LengthType(),
default="0.05mm",
help="Controls how far from the original geometry simplified points may lie.",
)
@layer_processor
def linesimplify(lines: LineCollection, tolerance):
"""
Reduce the number of segments in the geometries.
The resulting geometries' points will be at a maximum distance from the original controlled
by the `--tolerance` parameter (0.05mm by default).
"""
if len(lines) < 2:
return lines
# Note: preserve_topology must be False, otherwise non-simple (ie intersecting) MLS will
# not be simplified (see https://github.com/Toblerity/Shapely/issues/911)
mls = lines.as_mls().simplify(tolerance=tolerance, preserve_topology=False)
new_lines = LineCollection(mls)
logging.info(
f"simplify: reduced segment count from {lines.segment_count()} to "
f"{new_lines.segment_count()}"
)
return new_lines
@cli.command(group="Operations")
@click.option(
"-t",
"--tolerance",
type=LengthType(),
default="0.05mm",
help="Controls how close the path beginning and end must be to consider it closed ("
"default: 0.05mm).",
)
@layer_processor
def reloop(lines: LineCollection, tolerance):
"""Randomize the seam location of closed paths.
When plotted, closed paths may exhibit a visible mark at the seam, i.e. the location where
the pen begins and ends the stroke. This command randomizes the seam location in order to
help reduce the visual effect of this in plots with regular patterns.
Paths are considered closed when their beginning and end points are closer than some
tolerance, which can be set with the `--tolerance` option.
"""
lines.reloop(tolerance=tolerance)
return lines
@cli.command(group="Operations")
@click.option(
"-n", "--count", type=int, default=2, help="Number of passes for each line (default: 2).",
)
@layer_processor
def multipass(lines: LineCollection, count: int):
"""
Add multiple passes to each line.
Each line is extended with a mirrored copy of itself, optionally multiple times. This is
useful for pens that need several passes to ensure good quality.
"""
if count < 2:
return lines
new_lines = LineCollection()
for line in lines:
new_lines.append(
np.hstack(
[line] + [line[-2::-1] if i % 2 == 0 else line[1:] for i in range(count - 1)]
)
)
return new_lines
@cli.command(group="Operations")
@layer_processor
def splitall(lines: LineCollection) -> LineCollection:
"""
Split all paths into their constituent segments.
This command may be used together with `linemerge` for cases such as densely-connected
meshes where the latter cannot optimize well enough by itself.
Note that since some paths (especially curved ones) can be made of a large number of
segments, this command may significantly increase the processing time of the pipeline.
"""
new_lines = LineCollection()
for line in lines:
new_lines.extend([line[i : i + 2] for i in range(len(line) - 1)])
return new_lines
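# A typical pipeline combining the commands above might look like this
# (hypothetical file names; the `read` and `write` commands are assumed to be
# provided elsewhere by the vpype CLI):
#
#   vpype read input.svg linemerge --tolerance 0.1mm linesort linesimplify write output.svg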
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 12 08:44:56 2016
@author: chyam
Purpose: Re-label classes for text classification.
"""
import argparse
import pandas as pd
import preputils as pu
def main():
argparser = argparse.ArgumentParser(description='This script will re-label classes of a text classification dataset.')
argparser.add_argument('-i', '--inFile', help='Input filename', required=True)
argparser.add_argument('-o', '--outFile', help='Output filename', required=True)
args = argparser.parse_args()
### Load data
df = pd.read_csv(args.inFile, delimiter='\t'); print(df.shape)
### change labels:
#==============================================================================
#['attractiveness', 0 -> 0
# 'curiosity', 1 -> discard
# 'disgust', 2 -> 2
# 'fear', 3 -> 2
# 'germanangst', 4 -> 2
# 'happiness', 5 -> 0
# 'indulgence', 6 -> 0
# 'neutral', 7 -> 1
# 'sadness', 8 -> 2
# 'surprise' 9] -> discard
#==============================================================================
sub_df = df[(df.Label != 'curiosity') & (df.Label != 'surprise')].copy(); print(sub_df.shape)  # .copy() avoids pandas SettingWithCopyWarning on the .loc assignments below
sub_df.loc[(sub_df.Label == 'attractiveness') | (sub_df.Label == 'happiness') | (sub_df.Label == 'indulgence'), 'Label'] = 'happy'
sub_df.loc[(sub_df.Label == 'disgust') | (sub_df.Label == 'fear') | (sub_df.Label == 'germanangst') | (sub_df.Label == 'sadness'), 'Label'] = 'unhappy'
pu.save_data(sub_df, args.outFile)
# Just a message
print("Re-labelled and saved............")
if __name__ == '__main__':
main()
|
from keckdrpframework.primitives.base_primitive import BasePrimitive
import math
class CalcPrelimDisp(BasePrimitive):
"""Calculate dispersion based on configuration parameters.
The parameters of the grating equation are calculated as:
alpha = grating_angle - 13 - adjustment_angle (adjustment_angle is 180 for BH, RH
and 0 for all other gratings)
beta = camera_angle - alpha
dispersion = cos(beta) / rho / focal_length * (pixel_scale * binning) * 1.e4
"""
def __init__(self, action, context):
BasePrimitive.__init__(self, action, context)
self.logger = context.pipeline_logger
def _perform(self):
# get binning
y_binning = self.action.args.ybinsize
# 0 - compute alpha
preliminary_alpha = self.action.args.grangle - 13.0 - \
self.action.args.adjang
# 1 - compute preliminary angle of diffraction
preliminary_beta = self.action.args.camangle - preliminary_alpha
# 2 - compute preliminary dispersion
preliminary_dispersion = math.cos(preliminary_beta/math.degrees(1.)) / \
self.action.args.rho / self.config.instrument.FCAM * \
(self.config.instrument.PIX*y_binning) * 1.e4
preliminary_dispersion *= math.cos(
self.config.instrument.GAMMA/math.degrees(1.))
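# Illustrative (hypothetical) numbers for the computation above, ignoring the
# cos(GAMMA) factor: with grangle=30.0, adjang=0.0, camangle=50.0, rho=1.45,
# FCAM=305.0, PIX=0.0150 and ybinsize=2 (all made-up values, in whatever units
# the instrument config uses), alpha = 17.0, beta = 33.0 and
#   dispersion ~ cos(33 deg) / 1.45 / 305.0 * (0.0150 * 2) * 1.e4 ~ 0.57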
self.logger.info("Initial alpha, beta (deg): %.3f, %.3f" %
(preliminary_alpha, preliminary_beta))
self.logger.info("Initial calculated dispersion (A/binned pix): %.3f" %
preliminary_dispersion)
self.context.prelim_disp = preliminary_dispersion
log_string = CalcPrelimDisp.__module__
self.action.args.ccddata.header['HISTORY'] = log_string
self.logger.info(log_string)
return self.action.args
# END: class CalcPrelimDisp()
|
#import Scientific_numerics_package_id
#package = Scientific_numerics_package_id.getNumericsPackageName()
#del Scientific_numerics_package_id
#if package == "Numeric":
# from LinearAlgebra import *
#elif package == "NumPy":
from numpy.oldnumeric.linear_algebra import *
#elif package == "Numarray":
# from numarray.linear_algebra import *
#else:
# raise ImportError("Unknown numerics package " + package)
|
from .models import Collector
|
# Copyright 2021 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from copy import deepcopy
from cortex_internal.lib import util
from cortex_internal.lib.api import get_spec, TaskAPI
from cortex_internal.lib.log import configure_logger
logger = configure_logger("cortex", os.environ["CORTEX_LOG_CONFIG_FILE"])
def start():
cache_dir = os.environ["CORTEX_CACHE_DIR"]
provider = os.environ["CORTEX_PROVIDER"]
project_dir = os.environ["CORTEX_PROJECT_DIR"]
region = os.getenv("AWS_REGION")
api_spec_path = os.environ["CORTEX_API_SPEC"]
task_spec_path = os.environ["CORTEX_TASK_SPEC"]
_, api_spec = get_spec(provider, api_spec_path, cache_dir, region)
_, task_spec = get_spec(provider, task_spec_path, cache_dir, region, spec_name="task-spec.json")
logger.info("loading the task definition from {}".format(api_spec["definition"]["path"]))
task_api = TaskAPI(provider, api_spec)
logger.info("executing the task definition from {}".format(api_spec["definition"]["path"]))
callable_fn = task_api.get_callable(project_dir)
config = deepcopy(api_spec["definition"]["config"])
if task_spec is not None and task_spec.get("config") is not None:
util.merge_dicts_in_place_overwrite(config, task_spec["config"])
callable_fn(config)
if __name__ == "__main__":
start()
|
names = ["shakira", "ndagire", "seruwagi"]
for name in names:
if len(name) >= 10:
print("That name is so long cheiiii")
else:
print("That name is not so long")
|
def pig_latin(word):
a={'a','e','i','o','u'}
#make vowels case insensitive
vowels=a|set(b.upper() for b in a)
if word[0].isalpha():
if any(i in vowels for i in word):
if word.isalnum():
if word[0] in vowels:
pig_version=word+'way'
else:
first_vowel=min(word.find(vowel) for vowel in vowels if vowel in word)
# alternative way to locate the first vowel in word
#first_vowel = next((i for i, ch in enumerate(word) if ch in vowels),None)
pig_version = word[first_vowel:]+word[:first_vowel]+'ay'
return pig_version
return 'word not english'
return 'no vowel in word'
return 'word must start with alphabet'
#for word in ['wIll', 'dog', 'Category', 'chatter', 'trash','andela', 'mo$es', 'electrician', '2twa']:
# print(pig_latin(word))
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import argparse
import subprocess
import os
import errno
import getpass
import re
import codecs
import shutil
import platform
"""Utils prepared for makefile
This module will include:
sphinx
offline distribution
...
Example:
None
Attributes:
module_level_variable (int): description
"""
here = os.path.abspath(os.path.dirname(__file__))
# folder
DOC_FOLDER = 'docs'
HTML_FOLDER = os.path.join(DOC_FOLDER, 'html')
DEP_FOLDER = 'deps'
MAC_DEP_FOLDER = os.path.join(DEP_FOLDER, 'macosx')
LINUX_DEP_FOLDER = os.path.join(DEP_FOLDER, 'linux')
SRC_FOLDER = 'src'
PROJECT_NAME = 'udserver'
WORK_DIR_ABPATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
os.chdir(WORK_DIR_ABPATH)
# system
CURRENT_SYSTEM = platform.system()
def mkdir_exist(directory):
"""TODO: Docstring for mkdir_exist.
Args:
directory (str): TODO
"""
if not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
class Editor(object):
"""an editor used regex to replace certain lines """
def __init__(self, fpath):
"""TODO: to be defined1. """
self._fpath = fpath
self._swp_lines = []
# unit test mock doesn't support `for line in f`
with open(fpath) as f:
self._swp_lines = [s.rstrip() for s in f.read().splitlines()]
@property
def fpath(self):
return self._fpath
def editline_with_regex(self, regex_tgtline, to_replace):
"""find the first matched line, then replace
Args:
regex_tgtline (str): regular expression used to match the target line
to_replace (str): line you wanna use to replace
"""
for idx, line in enumerate(self._swp_lines):
mobj = re.match(regex_tgtline, line)
if mobj:
self._swp_lines[idx] = to_replace
return
def finish_writing(self):
content = list(map(lambda x: x+'\n', self._swp_lines))
with open(self.fpath, 'w') as f:
f.write(''.join(content))
class ProjectInfo(object):
"""Common information for the project"""
def __init__(self, **kwinfo):
"""init project info
Args:
author_fakename (str): TODO
author_truename (str): TODO
email (str): TODO
project_name (str): TODO
project_version (str): TODO
"""
self._author_fakename = getpass.getuser()
        self._author_truename = ProjectInfo.find_package_info('author', SRC_FOLDER, PROJECT_NAME, '__init__.py')
        self._email = ProjectInfo.find_package_info('email', SRC_FOLDER, PROJECT_NAME, '__init__.py')
        self._project_name = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
        self._project_version = ProjectInfo.find_package_info('version', SRC_FOLDER, PROJECT_NAME, '__init__.py')
for key, info in kwinfo.items():
key = '_' + key
setattr(self, key, info)
@property
def author_fakename(self):
return self._author_fakename
@property
def author_truename(self):
return self._author_truename
@property
def email(self):
return self._email
@property
def project_name(self):
return self._project_name
@property
def project_version(self):
return self._project_version
@classmethod
def _read(cls, *parts):
with codecs.open(os.path.join(here, *parts), 'r') as fp:
return fp.read()
@classmethod
    def find_package_info(cls, info, *file_paths):
info_file = ProjectInfo._read(*file_paths)
match = re.search(r"^__" + re.escape(info) + r"__ = ['\"]([^'\"]*)['\"]", info_file, re.M)
if match:
return match.group(1)
raise RuntimeError("Unable to find {} string.".format(info))
proj_info = ProjectInfo()
author_fakename = proj_info.author_fakename
author_truename = proj_info.author_truename
email = proj_info.email
project_name = proj_info.project_name
project_version = proj_info.project_version
########################################################################################################################
# exceptions #
########################################################################################################################
class PJUtilsError(Exception):
"""Base pjutils exception"""
class PlatformNotSupportedError(PJUtilsError):
"""General exception in PyOfflineDist"""
class PythonVersionNotSupportedError(PJUtilsError):
"""General exception in PyOfflineDist"""
########################################################################################################################
# sphinx #
########################################################################################################################
class Sphinx(object):
"""Docstring for Sphinx. """
def __init__(self, proj_info):
"""TODO: to be defined1.
Args:
proj_info (ProjectInfo): TODO
"""
self._proj_info = proj_info
self.__docfolder = DOC_FOLDER
self.__htmlfolder = HTML_FOLDER
self.conf_fpath = os.path.abspath(os.path.join(self.__docfolder, 'conf.py'))
self.code_fdpath = os.path.abspath(os.path.join(SRC_FOLDER, self.proj_info.project_name))
self._sphinx_quickstart_cmd = ['sphinx-quickstart', self.__docfolder,
'-p', self.proj_info.project_name,
'-a', self.proj_info.author_fakename,
'-v', self.proj_info.project_version,
'-r', self.proj_info.project_version,
'-l', 'en',
'--ext-autodoc',
'--makefile',
'--quiet']
self._sphinx_apidoc_cmd = ['sphinx-apidoc', self.code_fdpath,
'-o', self.__docfolder,
'-M',
'--force']
# sphinx-build -b html docs html
self._sphinx_buildhtml_cmd = ['sphinx-build',
'-b', 'html',
self.__docfolder, self.__htmlfolder]
# make sure directories exist
mkdir_exist(self.__docfolder)
mkdir_exist(self.__htmlfolder)
@property
def proj_info(self):
return self._proj_info
@property
def sphinx_quickstart_cmd(self):
return self._sphinx_quickstart_cmd
def quickstart(self):
"""TODO: Docstring for quickstart. """
subprocess.call(self.sphinx_quickstart_cmd)
pass
def gen_code_api(self):
"""TODO: Docstring for gen_code_api."""
# edit config file
conf_editor = Editor(self.conf_fpath)
# insert code path for searching
conf_editor.editline_with_regex(r'^# import os', 'import os')
conf_editor.editline_with_regex(r'^# import sys', 'import sys')
conf_editor.editline_with_regex(r'^# sys\.path\.insert', 'sys.path.insert(0, "{}")'.format(self.code_fdpath))
        conf_editor.editline_with_regex(
            r"""html_theme = 'alabaster'""",
            "html_theme = 'default'")
conf_editor.finish_writing()
# sphinx-apidoc to generate rst from source code
# force regenerate
subprocess.call(self._sphinx_apidoc_cmd)
pass
def rst2html(self):
subprocess.call(self._sphinx_buildhtml_cmd)
pass
########################################################################################################################
# offline distribution #
########################################################################################################################
class PyOfflineDist(object):
"""offline distribution for python project"""
def __init__(self, req_fpath='requirements.txt'):
"""TODO: to be defined1. """
self.__dep_folder = DEP_FOLDER
self.__req_fpath = req_fpath
self._srcpj_abfdpath = os.path.abspath(os.path.join(SRC_FOLDER, PROJECT_NAME))
pass
def freeze_deps(self):
with open(self.__req_fpath, 'w') as f:
self._freeze_deps_cmd = ['pip', 'freeze']
p = subprocess.Popen(self._freeze_deps_cmd, stdout=f)
p.wait()
pass
def __get_dep_folder(self):
if CURRENT_SYSTEM == 'Darwin':
self.__dep_folder = os.path.join(DEP_FOLDER, 'macosx')
elif CURRENT_SYSTEM == 'Linux':
self.__dep_folder = os.path.join(DEP_FOLDER, 'linux')
else:
raise PlatformNotSupportedError('only support for mac or linux')
def download_deps(self):
self.__get_dep_folder()
mkdir_exist(self.__dep_folder)
self._download_deps_cmd = ['pip', 'download',
'-r', self.__req_fpath,
'-d', self.__dep_folder]
subprocess.call(self._download_deps_cmd)
def install_deps(self):
self.__get_dep_folder()
self._install_deps_cmd = ['pip', 'install',
'--no-index',
'--find-links', self.__dep_folder,
'-r', self.__req_fpath]
subprocess.call(self._install_deps_cmd)
def clean_deps(self):
self.__get_dep_folder()
shutil.rmtree(self.__dep_folder, ignore_errors=True)
def pyinstaller_mkbinary(self, script_name):
self._pyinstaller_mkbinary_cmd = ['pyinstaller', '--onefile', script_name]
# enter the source code directory
os.chdir(self._srcpj_abfdpath)
# execute pyinstaller
# generate dist and build folders
subprocess.call(self._pyinstaller_mkbinary_cmd)
# return to previous directory
os.chdir(WORK_DIR_ABPATH)
pass
def clean_binary(self):
# enter the source code directory
os.chdir(self._srcpj_abfdpath)
shutil.rmtree('dist', ignore_errors=True)
shutil.rmtree('build', ignore_errors=True)
# return to previous directory
os.chdir(WORK_DIR_ABPATH)
pass
########################################################################################################################
# Commandline Interface #
########################################################################################################################
def add_subcommands(subparsers):
# sphinx
parser_sphinx = subparsers.add_parser('sphinx', help='sphinx help')
parser_sphinx.add_argument('--quickstart', action='store_true')
parser_sphinx.add_argument('--gen-code-api', action='store_true')
parser_sphinx.add_argument('--rst2html', action='store_true')
# offline_dist
parser_offline_dist = subparsers.add_parser('offline_dist', help='offline_dist help')
parser_offline_dist.add_argument('--freeze-deps', action='store_true')
parser_offline_dist.add_argument('--download-deps', action='store_true')
parser_offline_dist.add_argument('--install-deps', action='store_true')
parser_offline_dist.add_argument('--clean-deps', action='store_true')
parser_offline_dist.add_argument('--mkbinary')
parser_offline_dist.add_argument('--clean-binary', action='store_true')
# subparsers_offline_dist = parser_offline_dist.add_subparsers(dest='offline_dist_command')
#
# parser_download = subparsers_offline_dist.add_parser('download')
# parser_install = subparsers_offline_dist.add_parser('install')
# parser_mkbinary = subparsers_offline_dist.add_parser('mkbinary')
# download
# parser_download.add_argument('--platform', action='store', choices=['macosx-10_10_x86_64', 'linux_x86_64'])
# parser_download.add_argument('--python-version', action='store', choices=['27', '3'])
# install
# mkbinary
pass
def execute_by_options(args):
"""execute by argument dictionary
Args:
args (dict): command line argument dictionary
"""
if args['subcommand'] == 'sphinx':
s = Sphinx(proj_info)
if args['quickstart']:
s.quickstart()
elif args['gen_code_api']:
s.gen_code_api()
elif args['rst2html']:
s.rst2html()
pass
elif args['subcommand'] == 'offline_dist':
pod = PyOfflineDist()
if args['freeze_deps']:
pod.freeze_deps()
elif args['download_deps']:
pod.download_deps()
elif args['install_deps']:
pod.install_deps()
elif args['clean_deps']:
pod.clean_deps()
elif args['mkbinary']:
pod.pyinstaller_mkbinary(args['mkbinary'])
elif args['clean_binary']:
pod.clean_binary()
pass
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help='sub-command help', dest='subcommand')
add_subcommands(subparsers)
args = vars(parser.parse_args())
execute_by_options(args)
# if args['sphinx'] ==
if __name__ == '__main__':
main()
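# Example invocations (the script's file name is an assumption; adjust to the real name):
#   python pjutils.py sphinx --quickstart
#   python pjutils.py sphinx --gen-code-api
#   python pjutils.py sphinx --rst2html
#   python pjutils.py offline_dist --freeze-deps
#   python pjutils.py offline_dist --download-deps
#   python pjutils.py offline_dist --mkbinary main.py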
|
# Copyright 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pya
from time import sleep
import os
try:
if output_layout == "":
raise NameError
_output_layout = output_layout
except NameError:
_output_layout = input_layout
print("Warning: output_layout was not provided; will do the modifications in place!")
print("Hit CTRL-C to cancel...")
sleep(3)
print("Starting...")
app = pya.Application.instance()
win = app.main_window()
# Load technology file
tech = pya.Technology()
tech.load(tech_file)
layoutOptions = tech.load_layout_options
# Load def/gds file in the main window
cell_view = win.load_layout(input_layout, layoutOptions, 0)
layout_view = cell_view.view()
layout_view.load_layer_props(os.path.splitext(tech_file)[0]+'.lyp')
layout_view.max_hier_levels = 1
layout_view.min_hier_levels = 1
# gets the corresponding layout object
layout = cell_view.layout()
# gets the cell to modify
# cell = layout.cell("Active_area")
cell = cell_view.cell
# finds source layer
layer, purpose = source_layer.split('/')
assert layer and purpose
_source_layer = layout.layer(int(layer), int(purpose))
# finds (or creates) target layer
layer, purpose = target_layer.split('/')
assert layer and purpose
_target_layer = layout.layer(int(layer), int(purpose))
layout.copy_layer(_source_layer, _target_layer)
layout.write(_output_layout)
print("Successfully wrote", _output_layout)
app.exit(0)
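# Example invocation (a sketch; the script relies on input_layout, tech_file, source_layer,
# target_layer and optionally output_layout being injected, e.g. via KLayout's -rd switches;
# the script file name below is an assumption):
#   klayout -b -rd input_layout=design.gds -rd output_layout=design_fixed.gds \
#           -rd tech_file=tech.lyt -rd source_layer=66/20 -rd target_layer=67/20 \
#           -r copy_layer.py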
|
import torch.nn.functional as F
import torch.nn as nn
from . import BaseTask, register_task
from ..dataset import build_dataset
from ..utils import Evaluator
@register_task("node_classification")
class NodeClassification(BaseTask):
r"""
Node classification tasks.
Attributes
-----------
dataset : NodeClassificationDataset
Task-related dataset
evaluator : Evaluator
offer evaluation metric
Methods
---------
get_graph :
return a graph
get_loss_fn :
return a loss function
"""
def __init__(self, args):
super(NodeClassification, self).__init__()
self.dataset = build_dataset(args.dataset, 'node_classification')
# self.evaluator = Evaluator()
self.logger = args.logger
if hasattr(args, 'validation'):
self.train_idx, self.val_idx, self.test_idx = self.dataset.get_idx(args.validation)
else:
self.train_idx, self.val_idx, self.test_idx = self.dataset.get_idx()
self.evaluator = Evaluator(args.seed)
self.labels = self.dataset.get_labels()
self.multi_label = self.dataset.multi_label
if hasattr(args, 'evaluation_metric'):
self.evaluation_metric = args.evaluation_metric
else:
if args.dataset in ['aifb', 'mutag', 'bgs', 'am']:
self.evaluation_metric = 'acc'
else:
self.evaluation_metric = 'f1'
def get_graph(self):
return self.dataset.g
def get_loss_fn(self):
if self.multi_label:
return nn.BCEWithLogitsLoss()
return F.cross_entropy
def get_evaluator(self, name):
if name == 'acc':
return self.evaluator.cal_acc
elif name == 'f1_lr':
return self.evaluator.nc_with_LR
elif name == 'f1':
return self.evaluator.f1_node_classification
def evaluate(self, logits, mode='test', info=True):
if mode == 'test':
mask = self.test_idx
elif mode == 'valid':
mask = self.val_idx
elif mode == 'train':
mask = self.train_idx
if self.multi_label:
pred = (logits[mask].cpu().numpy() > 0).astype(int)
else:
pred = logits[mask].argmax(dim=1).to('cpu')
if self.evaluation_metric == 'acc':
acc = self.evaluator.cal_acc(self.labels[mask], pred)
return dict(Accuracy=acc)
elif self.evaluation_metric == 'acc-ogbn-mag':
from ogb.nodeproppred import Evaluator
evaluator = Evaluator(name='ogbn-mag')
logits = logits.unsqueeze(dim=1)
input_dict = {"y_true": logits, "y_pred": self.labels[self.test_idx]}
result_dict = evaluator.eval(input_dict)
return result_dict
elif self.evaluation_metric == 'f1':
f1_dict = self.evaluator.f1_node_classification(self.labels[mask], pred)
return f1_dict
else:
raise ValueError('The evaluation metric is not supported!')
def downstream_evaluate(self, logits, evaluation_metric):
if evaluation_metric == 'f1_lr':
micro_f1, macro_f1 = self.evaluator.nc_with_LR(logits, self.labels, self.train_idx, self.test_idx)
            return dict(Macro_f1=macro_f1, Micro_f1=micro_f1)
def get_idx(self):
return self.train_idx, self.val_idx, self.test_idx
def get_labels(self):
return self.labels
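# A minimal driving sketch (the attributes of `args` and the source of `logits` are assumed):
#   task = NodeClassification(args)
#   g = task.get_graph()
#   loss_fn = task.get_loss_fn()
#   train_idx, val_idx, test_idx = task.get_idx()
#   results = task.evaluate(logits, mode='valid')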
|
import datetime
from urllib.parse import quote
from django.conf import settings
from django.contrib.auth import user_logged_in
from django.dispatch import receiver
from django.http import HttpResponse
from django.utils import translation
from django.utils.translation import LANGUAGE_SESSION_KEY, get_language
def is_external_email(email):
return not any([email.endswith("@" + domain) for domain in settings.INSTITUTION_EMAIL_DOMAINS])
def sort_formset(request, formset):
if request.POST: # if not, there will be no cleaned_data and the models should already be sorted anyways
formset.is_valid() # make sure all forms have cleaned_data
formset.forms.sort(key=lambda f: f.cleaned_data.get("order", 9001))
def date_to_datetime(date):
return datetime.datetime(year=date.year, month=date.month, day=date.day)
@receiver(user_logged_in)
def set_or_get_language(user, request, **_kwargs):
if user.language:
translation.activate(user.language)
else:
user.language = get_language()
user.save()
request.session[LANGUAGE_SESSION_KEY] = user.language
def get_parameter_from_url_or_session(request, parameter, default=False):
result = request.GET.get(parameter, None)
if result is None: # if no parameter is given take session value
result = request.session.get(parameter, default)
else:
result = {'true': True, 'false': False}.get(result.lower()) # convert parameter to boolean
request.session[parameter] = result # store value for session
return result
def translate(**kwargs):
# get_language may return None if there is no session (e.g. during management commands)
return property(lambda self: getattr(self, kwargs[get_language() or 'en']))
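# A sketch of how translate() might be used on a model (field names are assumptions):
#   class Course(models.Model):
#       name_de = models.CharField(max_length=255)
#       name_en = models.CharField(max_length=255)
#       name = translate(en='name_en', de='name_de')
#   # course.name then resolves to name_en or name_de depending on the active language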
def clean_email(email):
if email:
email = email.strip().lower()
# Replace email domains in case there are multiple alias domains used in the organisation and all emails should
# have the same domain on EvaP.
for original_domain, replaced_domain in settings.INSTITUTION_EMAIL_REPLACEMENTS:
if email.endswith(original_domain):
return email[:-len(original_domain)] + replaced_domain
return email
class FileResponse(HttpResponse):
def __init__(self, filename, content_type=None, **kwargs):
super().__init__(content_type=content_type, **kwargs)
self.set_content_disposition(filename)
def set_content_disposition(self, filename):
try:
filename.encode("ascii")
self["Content-Disposition"] = f"attachment; filename=\"{filename}\""
except UnicodeEncodeError:
self["Content-Disposition"] = f"attachment; filename*=utf-8''{quote(filename)}"
|
"""A simple densely connected baseline model."""
import typing
import torch
from mzcn.engine.base_model import BaseModel
from mzcn.engine.param_table import ParamTable
from mzcn.engine import hyper_spaces
class DenseBaseline(BaseModel):
"""
A simple densely connected baseline model.
Examples:
>>> model = DenseBaseline()
>>> model.params['mlp_num_layers'] = 2
>>> model.params['mlp_num_units'] = 300
>>> model.params['mlp_num_fan_out'] = 128
>>> model.params['mlp_activation_func'] = 'relu'
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params(
with_embedding=True,
with_multi_layer_perceptron=True
)
params['mlp_num_units'] = 256
params.get('mlp_num_units').hyper_space = \
hyper_spaces.quniform(16, 512)
params.get('mlp_num_layers').hyper_space = \
hyper_spaces.quniform(1, 5)
return params
def build(self):
"""Build."""
        self.embedding = self._make_default_embedding_layer()
self.mlp = self._make_multi_layer_perceptron_layer(
2 * self._params['embedding_output_dim']
)
self.out = self._make_output_layer(
self._params['mlp_num_fan_out']
)
def forward(self, inputs):
"""Forward."""
input_left, input_right = inputs['text_left'], inputs['text_right']
        input_left = self.embedding(input_left.long()).sum(1)
        input_right = self.embedding(input_right.long()).sum(1)
x = torch.cat((input_left, input_right), dim=1)
return self.out(self.mlp(x))
|
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import config
from install_package import InstallPackage
import os
import shutil
import utils
BASENAME = "gdcm"
GIT_REPO = "git://git.code.sf.net/p/gdcm/gdcm"
GIT_TAG = "v2.0.17"
dependencies = ['SWIG', 'VTK']
class GDCM(InstallPackage):
def __init__(self):
self.source_dir = os.path.join(config.archive_dir, BASENAME)
self.build_dir = os.path.join(config.build_dir, '%s-build' %
(BASENAME,))
self.inst_dir = os.path.join(config.inst_dir, BASENAME)
def get(self):
if os.path.exists(self.source_dir):
utils.output("gdcm already checked out, skipping step.")
else:
os.chdir(config.archive_dir)
ret = os.system("git clone %s" % (GIT_REPO,))
if ret != 0:
utils.error("Could not clone GDCM repo. Fix and try again.")
os.chdir(self.source_dir)
ret = os.system("git checkout %s" % (GIT_TAG,))
if ret != 0:
utils.error("Could not checkout GDCM %s. Fix and try again." % (GIT_TAG,))
def unpack(self):
# no unpack step
pass
def configure(self):
if os.path.exists(
os.path.join(self.build_dir, 'CMakeFiles/cmake.check_cache')):
utils.output("gdcm build already configured.")
return
if not os.path.exists(self.build_dir):
os.mkdir(self.build_dir)
cmake_params = \
"-DGDCM_BUILD_APPLICATIONS=OFF " \
"-DGDCM_BUILD_EXAMPLES=OFF " \
"-DGDCM_BUILD_SHARED_LIBS=ON " \
"-DGDCM_BUILD_TESTING=OFF " \
"-DGDCM_USE_ITK=OFF " \
"-DGDCM_USE_VTK=ON " \
"-DGDCM_USE_WXWIDGETS=OFF " \
"-DGDCM_WRAP_JAVA=OFF " \
"-DGDCM_WRAP_PHP=OFF " \
"-DGDCM_WRAP_PYTHON=ON " \
"-DCMAKE_BUILD_TYPE=RelWithDebInfo " \
"-DCMAKE_INSTALL_PREFIX=%s " \
"-DSWIG_DIR=%s " \
"-DSWIG_EXECUTABLE=%s " \
"-DVTK_DIR=%s " \
"-DPYTHON_EXECUTABLE=%s " \
"-DPYTHON_LIBRARY=%s " \
"-DPYTHON_INCLUDE_PATH=%s " % \
(self.inst_dir, config.SWIG_DIR,
config.SWIG_EXECUTABLE, config.VTK_DIR,
config.PYTHON_EXECUTABLE,
config.PYTHON_LIBRARY,
config.PYTHON_INCLUDE_PATH)
ret = utils.cmake_command(self.build_dir, self.source_dir,
cmake_params)
if ret != 0:
utils.error("Could not configure GDCM. Fix and try again.")
def build(self):
posix_file = os.path.join(self.build_dir,
'bin/libvtkgdcmPython.so')
nt_file = os.path.join(self.build_dir, 'bin',
config.BUILD_TARGET, 'vtkgdcmPythonD.dll')
if utils.file_exists(posix_file, nt_file):
utils.output("GDCM already built. Skipping build step.")
else:
os.chdir(self.build_dir)
ret = utils.make_command('GDCM.sln')
if ret != 0:
utils.error("Could not build GDCM. Fix and try again.")
def install(self):
if os.name == 'nt':
config.GDCM_LIB = os.path.join(
self.inst_dir, 'bin')
else:
config.GDCM_LIB = os.path.join(self.inst_dir, 'lib')
config.GDCM_PYTHON = os.path.join(self.inst_dir, 'lib')
test_file = os.path.join(config.GDCM_PYTHON, 'gdcm.py')
if os.path.exists(test_file):
utils.output("gdcm already installed, skipping step.")
else:
os.chdir(self.build_dir)
ret = utils.make_command('GDCM.sln', install=True)
if ret != 0:
utils.error(
"Could not install gdcm. Fix and try again.")
def clean_build(self):
utils.output("Removing build and installation directories.")
if os.path.exists(self.inst_dir):
shutil.rmtree(self.inst_dir)
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
def clean_install(self):
utils.output("Removing installation directory.")
if os.path.exists(self.inst_dir):
shutil.rmtree(self.inst_dir)
def get_installed_version(self):
import gdcm
return gdcm.Version.GetVersion()
|
from pathlib import Path
import re
p = Path(
r"C:\Program Files (x86)\Steam\steamapps\common\Spelunky 2\Mods\Extracted\Data\Levels"
)
tile_code_re = re.compile(
r"^\\\?(?P<name>\w+)(%(?P<pct>\d{2})(?P<second_name>\w+)?)?\s+(?P<code>.)"
)
codes = set()
for lvl_file in p.glob("*lvl"):
for line in lvl_file.read_text().splitlines():
m = tile_code_re.match(line)
if m:
mdict = m.groupdict()
codes.add(mdict.get("name"))
if mdict.get("pct") and mdict.get("second_name"):
codes.add(mdict.get("second_name"))
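# Example of a line the pattern is meant to match (the lvl syntax shown is inferred from the regex):
#   \?floor%50minewood_floor 1
#   -> name='floor', pct='50', second_name='minewood_floor', code='1'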
|
import os
import sys
import subprocess
import time
toolkit_directory = "Toolkit"
toolkit_repo = "https://github.com/milos85vasic/Apache-Factory-Toolkit.git"
if __name__ == '__main__':
exists = True
steps = []
if not os.path.exists(toolkit_directory):
exists = False
steps.extend(
[
"mkdir " + toolkit_directory,
"git clone --recurse-submodules " + toolkit_repo + " ./" + toolkit_directory,
]
)
for cmd in steps:
os.system(cmd)
branch = "master"
what = sys.argv[1]
if len(sys.argv) >= 3:
branch = sys.argv[2]
from Toolkit.commands import get_python_cmd
python_cmd = get_python_cmd()
setup = python_cmd + " ./" + toolkit_directory + "/websetup_run.py " + what
    if branch != "master":
setup += " " + branch
steps = [
setup
]
if not exists:
steps.extend(
[
"rm -rf ./" + toolkit_directory,
"rm -f " + os.path.basename(__file__)
]
)
for cmd in steps:
os.system(cmd)
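# Example invocation (this bootstrap script's file name is an assumption):
#   python bootstrap.py mysite           # uses the master branch
#   python bootstrap.py mysite develop   # uses a specific branch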
|
import csv
from datetime import date
import itertools
import logging
from django.conf import settings
from django.core.management import BaseCommand
from django.utils import timezone
from api.barriers.models import PublicBarrier
from api.barriers.public_data import public_release_to_s3
from api.metadata.constants import BarrierStatus, PublicBarrierStatus, TRADING_BLOCS
from api.metadata.utils import get_countries, get_sectors
logger = logging.getLogger(__name__)
class Metadata:
def __init__(self):
self.statuses = {name: id for id, name in BarrierStatus.choices}
self.countries = {
country["name"]: country["id"]
for country in get_countries()
if country["disabled_on"] is None
}
self.trading_blocs = {
trading_bloc["name"]: trading_bloc["code"]
for trading_bloc in TRADING_BLOCS.values()
}
self.sectors = {}
for sector in get_sectors():
if sector["disabled_on"] is None:
if sector["level"] == 0:
self.sectors[sector["name"].lower()] = sector["id"]
elif sector["level"] == 1:
self.sectors[sector["name"].lower()] = sector["parent"]["id"]
def get_country(self, location_text):
if location_text:
clean_location_text = location_text.split("(")[0].strip()
return self.countries.get(clean_location_text)
def get_caused_by_trading_bloc(self, location_text):
return "(European Union)" in location_text
def get_trading_bloc(self, location_text):
return self.trading_blocs.get(location_text)
def get_status(self, status_text):
clean_status_text = status_text.split("(")[0].strip()
status = self.statuses.get(clean_status_text)
if status is None:
logger.info(f"Status not found: {status_text}")
return status
def get_sectors(self, sectors_text):
sectors_text = sectors_text.strip()
if not sectors_text or sectors_text in ("All", "N/A", "Multisector"):
return []
for sector_name in sectors_text.split(";"):
sector = self.sectors.get(sector_name.lower().strip())
if sector is None:
logger.info(f"Sector not found: {sector_name}")
yield sector
def get_all_sectors(self, sectors_text):
return sectors_text in ("All", "Multisector")
def create_public_barriers_from_csv(csv_file):
metadata = Metadata()
published_date = timezone.now()
id_generator = itertools.count(1).__next__
with open(csv_file, 'r') as file:
reader = csv.DictReader(file)
for row in reader:
public_barrier_id = id_generator()
public_barrier = PublicBarrier(
id=public_barrier_id,
_title=row["Public title"],
_summary=row["Public summary"],
status=metadata.get_status(row["Status"]),
status_date=date.fromisoformat(row["Status date"]),
country=metadata.get_country(row["Location"]),
caused_by_trading_bloc=metadata.get_caused_by_trading_bloc(row["Location"]),
trading_bloc=metadata.get_trading_bloc(row["Location"]),
                sectors=list(metadata.get_sectors(row["Sectors"])),
all_sectors=metadata.get_all_sectors(row["Sectors"]),
_public_view_status=PublicBarrierStatus.PUBLISHED,
first_published_on=published_date,
last_published_on=published_date,
)
if row["Location"] and not public_barrier.country and not public_barrier.trading_bloc:
logger.info(f"Location not found: {row['Location']}")
yield public_barrier
class Command(BaseCommand):
help = "Publish barriers from csv file"
def add_arguments(self, parser):
parser.add_argument("file", type=str, help="CSV file for importing public barriers")
def handle(self, *args, **options):
logger.setLevel(logging.DEBUG)
if settings.DJANGO_ENV in ["local", "dev"]:
csv_file = options["file"]
logger.info("Reading barriers from csv file...")
public_barriers = create_public_barriers_from_csv(csv_file)
logger.info("Publishing barriers...")
public_release_to_s3(public_barriers, force_publish=True)
else:
logger.info(f"Publishing from csv is disabled on {settings.DJANGO_ENV}")
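# Example invocation (local/dev only; the management command name is whatever this file is called):
#   python manage.py <command_name> public_barriers.csv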
|
from dataclasses import dataclass
from typing import Tuple
from wai.annotations.domain.image import Image
from opex import ObjectPrediction
@dataclass
class OPEXObject:
"""
Internal representation of an OPEX annotation.
"""
prediction: ObjectPrediction
label: str
@classmethod
def from_string(cls, string: str):
return ObjectPrediction.from_json_string(string)
def __str__(self):
return self.prediction.to_json_string()
OPEXODFormat = Tuple[Image, Tuple[OPEXObject]]
|
from pathlib import Path
import numpy
from matplotlib import pyplot
from neodroidvision.regression.denoise.spectral_denoise import fft_im_denoise
if __name__ == "__main__":
def plot_spectrum(im_fft):
"""
:param im_fft:
:type im_fft:
"""
from matplotlib.colors import LogNorm
# A logarithmic colormap
pyplot.imshow(numpy.abs(im_fft), norm=LogNorm(vmin=5))
pyplot.colorbar()
def blur_im(im):
"""
:param im:
:type im:
"""
############################################################
# Easier and better: :func:`scipy.ndimage.gaussian_filter`
############################################################
#
# Implementing filtering directly with FFTs is tricky and time consuming.
# We can use the Gaussian filter from :mod:`scipy.ndimage`
from scipy import ndimage
im_blur = ndimage.gaussian_filter(im, 4)
pyplot.figure()
pyplot.imshow(im_blur, pyplot.cm.gray)
pyplot.title("Blurred image")
def main(im_raw):
"""
:param im_raw:
:type im_raw:
"""
pyplot.figure()
pyplot.imshow(im_raw, pyplot.cm.gray)
pyplot.title("Original image")
im_denoised = fft_im_denoise(im_raw)
pyplot.figure()
pyplot.imshow(im_denoised, pyplot.cm.gray)
pyplot.title("Reconstructed Image")
im22 = pyplot.imread(
str(Path.home() / "Data" / "Datasets" / "Denoise" / "moonlanding.png")
).astype(float)
main(im22)
pyplot.show()
|
import tweepy
import inspect
from bottle import PluginError
class TweepyPlugin(object):
name = 'tweepy'
api = 2
def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret, keyword='api'):
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.access_token = access_token
self.access_token_secret = access_token_secret
self.keyword = keyword
def setup(self, app):
for other in app.plugins:
if not isinstance(other, TweepyPlugin): continue
if other.keyword == self.keyword:
raise PluginError("Found another tweepy plugin with "\
"conflicting settings (non-unique keyword).")
def apply(self, callback, context):
conf = context.config.get('tweepy') or {}
consumer_key = conf.get('consumer_key', self.consumer_key)
consumer_secret = conf.get('consumer_secret', self.consumer_secret)
access_token = conf.get('access_token', self.access_token)
access_token_secret = conf.get('access_token_secret', self.access_token_secret)
keyword = conf.get('keyword', self.keyword)
        args = inspect.getfullargspec(context.callback)[0]
if keyword not in args:
return callback
def wrapper(*args, **kwargs):
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
kwargs[self.keyword] = tweepy.API(auth)
rv = callback(*args, **kwargs)
return rv
return wrapper
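# A minimal sketch of installing the plugin on a Bottle app (credentials are placeholders):
#   from bottle import Bottle
#   app = Bottle()
#   app.install(TweepyPlugin('ckey', 'csecret', 'atoken', 'asecret'))
#
#   @app.route('/timeline')
#   def timeline(api):  # 'api' is injected by the plugin keyword
#       return {'tweets': [s.text for s in api.home_timeline()]}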
|
#!/usr/bin/python
# coding:utf-8
import pymysql
from managehtml import *
from md5 import *
import os
sqlservername='localhost'
sqluser='simpledrive'
sqlpasswd='simpledrive'
sqldatabase='simpledrive'
def manageUserandServer(username):
isadmin=''
signupstate = ''
invitecode = ''
with open('static/signupstate', 'r', encoding='utf-8') as f:
signupstate = f.read()
with open('static/invitecode', 'r', encoding='utf-8') as f:
invitecode = f.read()
content = cloudhead_manage+username+afterusername_manage+invitecode+aftercurrentinvitecode+signupstate+afteronregister
tmp=''
    db = pymysql.connect(host=sqlservername, user=sqluser, password=sqlpasswd, database=sqldatabase)
check = db.cursor()
sql = 'select username,isadmin from '+ sqldatabase+'.user'
check.execute(sql)
result=check.fetchall()
for i in result:
if i[1]=='yes':
isadmin='是'
else:
isadmin='不是'
tmp = tmp + filedownloadurla_manage + i[0]+ filedownloadurlb_manage + '#'
tmp = tmp + beforefilename_manage + i[0] + afterfilename_manage
tmp = tmp + '#' + aftersize_manage + isadmin + afterfiletime_manage
content = content + (tmp) + cloudreset_manage
return content
def deleteuser(username):
if username=='admin':
return 0
    #### The system default account cannot be removed.
else:
        db = pymysql.connect(host=sqlservername, user=sqluser, password=sqlpasswd, database=sqldatabase)
check = db.cursor()
sql = 'delete from '+sqldatabase+'.user where username=' + "'" + username + "'"
check.execute(sql)
db.commit()
db.close()
return 1
def adduser(username,passwd):
    db = pymysql.connect(host=sqlservername, user=sqluser, password=sqlpasswd, database=sqldatabase)
check = db.cursor()
sql='select username from '+sqldatabase+'.user where username ='+"'"+username+"'"
check.execute(sql)
a=check.fetchone()
a=list(str(a).replace('(','').replace(')','').replace("'","").split(','))
if a[0]==username:
print (a[0])
return 0
else:
print (a[0])
        # By default the new user is not an admin; an admin choice could be added to adduser in a later step.
sql = "insert into simpledrive.user (username,passwd,md5,isadmin) VALUES (" + "'" + username + "'" + "," + "'" + passwd + "'" + "," + "'" + md5(passwd) + "'" + "," + "'" + 'no' "'" + ")"
check.execute(sql)
db.commit()
db.close()
try:
os.mkdir(os.getcwd() + '/cloud/' + username)
except:
return 2
return 1
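# A minimal usage sketch (assumes the MySQL service and the static/* state files are in place):
#   if adduser('alice', 's3cret') == 1:
#       print(manageUserandServer('admin'))
#   deleteuser('alice')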
|
from typing import Dict
from .rules import *
from .util import *
import copy
consumes_amount_prefix = "consumes_amount_"
allocates_amount_prefix = "allocates_amount_"
deallocates_amount_prefix = "deallocates_amount_"
def parse(filename: str, include_path, definitions, extra_args):
index = Index.create(True)
includes = [s
for p in include_path
for s in ["-I", p]]
definitions = [s
for d in definitions
for s in ["-D", d]]
cplusplus = filename.endswith(("cpp", "C", "cc"))
nightwatch_parser_c_header_fullname = str(resource_directory / nightwatch_parser_c_header)
llvm_args = includes + extra_args + clang_flags + definitions + \
["-include", nightwatch_parser_c_header_fullname,
f"-D__AVA_PREFIX={NIGHTWATCH_PREFIX}",
"-x", "c++" if cplusplus else "c",
filename]
unit = index.parse(
None,
args=llvm_args,
options=TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD)
errors = []
severity_table = {
Diagnostic.Ignored: (None, parse_expects),
Diagnostic.Note: (info, parse_expects),
Diagnostic.Warning: (warning, parse_expects),
Diagnostic.Error: (error, parse_requires),
Diagnostic.Fatal: (error, parse_requires)
}
for d in unit.diagnostics:
if d.spelling == "incomplete definition of type 'struct __ava_unknown'" \
or d.spelling.startswith("incompatible pointer") \
and d.spelling.endswith("with an expression of type 'struct __ava_unknown *'"):
continue
with location("Clang Parser", report_continue=errors):
kind, func = severity_table[d.severity]
func(not kind, d.spelling, loc=convert_location(d.location), kind=kind)
primary_include_files: Dict[str, File] = {}
primary_include_extents = []
utility_extents = []
replacement_extents = []
type_extents = []
global_config = {}
functions: Dict[str, Function] = {}
include_functions = {}
replaced_functions = {}
metadata_type = None
rules = []
default_rules = []
final_rules = []
def apply_rules(c, annotations, *, name=None):
if name:
annotations["name"] = name
def do(rules):
for rule in rules:
rule.apply(c, annotations)
do(rules)
# print(c.spelling, annotations)
if not annotations or (len(annotations) == 1 and "name" in annotations):
do(default_rules)
do(final_rules)
if name:
del annotations["name"]
def convert_type(tpe, name, annotations, containing_types):
parse_requires(tpe.get_canonical().spelling not in containing_types or "void" in tpe.get_canonical().spelling, "Recursive types don't work.")
original_containing_types = containing_types
containing_types = copy.copy(original_containing_types)
containing_types.add(tpe.get_canonical().spelling)
parse_assert(tpe.spelling, "Element requires valid and complete type.")
apply_rules(tpe, annotations, name=name)
with location(f"in type {term.yellow(tpe.spelling)}"):
allocates_resources, deallocates_resources = {}, {}
for annotation_name, annotation_value in annotations.direct().flatten().items():
if annotation_name.startswith(allocates_amount_prefix):
resource = strip_prefix(allocates_amount_prefix, annotation_name)
allocates_resources[resource] = annotation_value
elif annotation_name.startswith(deallocates_amount_prefix):
resource = strip_prefix(deallocates_amount_prefix, annotation_name)
deallocates_resources[resource] = annotation_value
parse_expects(allocates_resources.keys().isdisjoint(deallocates_resources.keys()),
"The same argument is allocating and deallocating the same resource.")
our_annotations = annotations.direct(type_annotations).flatten()
our_annotations.update(
allocates_resources=allocates_resources,
deallocates_resources=deallocates_resources)
if annotations["type_cast"]:
new_type = annotations["type_cast"]
# annotations = copy.copy(annotations)
annotations.pop("type_cast")
if isinstance(new_type, Conditional):
ret = ConditionalType(
new_type.predicate,
convert_type(new_type.then_branch or tpe, name, annotations, containing_types),
convert_type(new_type.else_branch or tpe, name, annotations, containing_types),
convert_type(tpe, name, annotations, containing_types))
return ret
else:
parse_assert(new_type is not None, "ava_type_cast must provide a new type")
# Attach the original type and then perform conversion using the new type.
our_annotations["original_type"] = convert_type(tpe, name, annotation_set(), original_containing_types)
tpe = new_type
if tpe.is_function_pointer():
pointee = tpe.get_pointee()
if pointee.kind == TypeKind.FUNCTIONNOPROTO:
args = []
else:
args = [convert_type(t, "", annotation_set(), containing_types) for t in pointee.argument_types()]
ret = FunctionPointer(tpe.spelling, Type(f"*{name}", **our_annotations),
return_type=convert_type(pointee.get_result(), "ret",
annotation_set(), containing_types),
argument_types=args,
**our_annotations)
elif tpe.kind in (TypeKind.FUNCTIONPROTO, TypeKind.FUNCTIONNOPROTO):
if tpe.kind == TypeKind.FUNCTIONNOPROTO:
args = []
else:
args = [convert_type(t, "", annotation_set(), containing_types) for t in tpe.argument_types()]
ret = FunctionPointer(tpe.spelling, Type(tpe.spelling, **our_annotations),
return_type=convert_type(tpe.get_result(), "ret",
annotation_set(), containing_types),
argument_types=args,
**our_annotations)
elif tpe.is_static_array():
pointee = tpe.get_pointee()
pointee_annotations = annotations.subelement("element")
pointee_name = f"{name}[{buffer_index_spelling}]"
our_annotations["buffer"] = Expr(tpe.get_array_size())
ret = StaticArray(tpe.spelling, pointee=convert_type(pointee, pointee_name,
pointee_annotations, containing_types),
**our_annotations)
elif tpe.is_pointer():
pointee = tpe.get_pointee()
pointee_annotations = annotations.subelement("element")
pointee_name = f"{name}[{buffer_index_spelling}]"
if tpe.kind in (TypeKind.VARIABLEARRAY, TypeKind.INCOMPLETEARRAY):
sp: str = tpe.spelling
sp = sp.replace("[]", "*")
ret = Type(sp, pointee=convert_type(tpe.element_type, pointee_name,
pointee_annotations, containing_types),
**our_annotations)
else:
ret = Type(tpe.spelling, pointee=convert_type(pointee, pointee_name,
pointee_annotations, containing_types),
**our_annotations)
elif tpe.get_canonical().kind == TypeKind.RECORD:
def expand_field(f: Cursor, prefix):
f_tpe = f.type
decl = f_tpe.get_declaration()
if decl.is_anonymous():
if decl.kind == CursorKind.UNION_DECL:
# FIXME: This assumes the first field is as large or larger than any other field.
first_field = sorted(f_tpe.get_fields(), key=lambda f: f.type.get_size())[0]
return expand_field(first_field, f"{prefix}.{first_field.displayname}")
else:
parse_requires(False, "The only supported anonymous member type is unions.")
return [(f.displayname,
convert_type(f.type, f"{prefix}.{f.displayname}",
annotations.subelement(Field(f.displayname)), containing_types)
)]
field_types = dict(ff
for field in tpe.get_canonical().get_fields()
for ff in expand_field(field, name))
ret = Type(tpe.spelling, fields=field_types, **our_annotations)
else:
ret = Type(tpe.spelling, **our_annotations)
return ret
def convert_argument(i, arg, annotations, *, type=None, is_ret=False):
name = arg.displayname if not is_ret else RET_ARGUMENT_NAME
if not name:
name = "__arg{}".format(i)
annotations["depends_on"].discard(name)
apply_rules(arg, annotations, name=name)
with location(f"argument {term.yellow(name)}", convert_location(arg.location)):
if not is_ret:
expressions = list(arg.find_descendants(lambda c: c.kind.is_expression()))
parse_assert(len(expressions) <= 1,
"There must only be one expression child in argument declarations.")
value = expressions[0].source if expressions else None
else:
value = None
type = type or arg.type
return Argument(name,
convert_type(type, name, annotations, set()),
value=value,
location=convert_location(arg.location),
**annotations.direct(argument_annotations).flatten())
def convert_function(cursor, supported=True):
with location(f"at {term.yellow(cursor.displayname)}", convert_location(cursor.location),
report_continue=errors):
# TODO: Capture tokens here and then search them while processing arguments to find commented argument
# names.
body = None
for c in cursor.get_children():
if c.kind == CursorKind.COMPOUND_STMT:
body = c
break
prologue = []
epilogue = []
declarations = []
implicit_arguments = []
annotations = annotation_set()
annotations.update(extract_attr_annotations(cursor))
if body:
annotations.update(extract_annotations(body))
output_list = prologue
for c in body.get_children():
c_annotations = extract_annotations(c)
c_attr_annotations = extract_attr_annotations(c)
if "implicit_argument" in c_attr_annotations:
# FIXME: The [0] should be replaced with code to select the actual correct var decl
implicit_arguments.append(c.children[0])
continue
if len(c_annotations) and list(c_annotations.keys()) != ["depends_on"]:
continue
found_variables = False
if c.kind.is_declaration:
for cc in c.find_descendants(lambda cc: cc.kind == CursorKind.VAR_DECL):
if not cc.displayname.startswith(NIGHTWATCH_PREFIX) and cc.displayname != "ret":
parse_expects(len(cc.children) == 0,
"Declarations in prologue and epilogue code may not be initialized. "
"(This is currently not checked fully.)")
declarations.append(convert_argument(-2, cc, annotation_set()))
found_variables = True
if list(c.find_descendants(lambda cc: cc.displayname == "ava_execute")):
parse_requires(c.kind != CursorKind.DECL_STMT or c.children[0].displayname == "ret",
"The result of ava_execute() must be named 'ret'.")
output_list = epilogue
elif not found_variables:
src = c.source
output_list.append(src + ("" if src.endswith(";") else ";"))
apply_rules(cursor, annotations, name=cursor.mangled_name)
args = []
for i, arg in enumerate(list(cursor.get_arguments()) + implicit_arguments):
args.append(convert_argument(i, arg, annotations.subelement(arg.displayname)))
resources = {}
for annotation_name, annotation_value in annotations.direct().flatten().items():
if annotation_name.startswith(consumes_amount_prefix):
resource = strip_prefix(consumes_amount_prefix, annotation_name)
resources[resource] = annotation_value
return_value = convert_argument(-1, cursor, annotations.subelement("return_value"),
is_ret=True, type=cursor.result_type)
if "unsupported" in annotations:
supported = not bool(annotations["unsupported"])
disable_native = False
if "disable_native" in annotations:
disable_native = bool(annotations["disable_native"])
return Function(
cursor.mangled_name,
return_value,
args,
location=convert_location(cursor.location),
logue_declarations=declarations,
prologue=prologue,
epilogue=epilogue,
consumes_resources=resources,
supported=supported,
disable_native=disable_native,
type=convert_type(cursor.type, cursor.mangled_name, annotation_set(), set()),
**annotations.direct(function_annotations).flatten())
utility_mode = False
utility_mode_start = None
replacement_mode = False
replacement_mode_start = None
def convert_decl(c: Cursor):
nonlocal utility_mode, utility_mode_start, replacement_mode, replacement_mode_start, metadata_type
assert not (replacement_mode and utility_mode)
if c.kind in ignored_cursor_kinds:
return
normal_mode = not replacement_mode and not utility_mode
# not (c.kind == CursorKind.VAR_DECL and c.displayname.startswith(
# NIGHTWATCH_PREFIX)) and (utility_mode or replacement_mode):
included_extent = True
if normal_mode and c.kind == CursorKind.FUNCTION_DECL and c.location.file.name == filename and c.spelling == "ava_metadata":
metadata_type = convert_type(c.result_type.get_pointee(), "ava_metadata", annotation_set(), set())
elif normal_mode and c.kind == CursorKind.FUNCTION_DECL and c.displayname.startswith(NIGHTWATCH_PREFIX + "category_"):
name = strip_unique_suffix(strip_prefix(NIGHTWATCH_PREFIX + "category_", c.displayname))
annotations = extract_annotations(c)
attr_annotations = extract_attr_annotations(c)
# print(annotations)
rule_list = default_rules if "default" in attr_annotations else rules
annotations.pop("default", None)
# print(name, annotations)
if name == "type":
rule_list.append(Types(c.result_type.get_pointee(), annotations))
elif name == "functions":
rule_list.append(Functions(annotations))
elif name == "pointer_types":
rule_list.append(PointerTypes(annotations))
elif name == "const_pointer_types":
rule_list.append(ConstPointerTypes(annotations))
elif name == "nonconst_pointer_types":
rule_list.append(NonconstPointerTypes(annotations))
elif name == "non_transferable_types":
rule_list.append(NonTransferableTypes(annotations))
elif normal_mode and c.kind == CursorKind.VAR_DECL and c.storage_class == StorageClass.STATIC:
# This is a utility function for the API forwarding code.
parse_expects(
c.linkage == LinkageKind.INTERNAL,
f"at {term.yellow(c.displayname)}",
"API utility functions should be static (or similar) since they are included in header files.",
loc=convert_location(c.location))
utility_extents.append((c.extent.start.line, c.extent.end.line))
elif c.kind == CursorKind.VAR_DECL and c.displayname.startswith(NIGHTWATCH_PREFIX):
name = strip_unique_suffix(strip_nw(c.displayname))
if name == "begin_utility":
parse_requires(not utility_mode, "ava_begin_utility can only be used outside utility mode to enter that mode.")
utility_mode = True
utility_mode_start = c.extent.start.line
elif name == "end_utility":
parse_requires(utility_mode, "ava_end_utility can only be used inside utility mode to exit that mode.")
utility_mode = False
parse_assert(utility_mode_start is not None, "Should be unreachable.")
utility_extents.append((utility_mode_start, c.extent.end.line))
elif name == "begin_replacement":
parse_requires(not replacement_mode,
"ava_begin_replacement can only be used outside replacement mode to enter that mode.")
replacement_mode = True
replacement_mode_start = c.extent.start.line
elif name == "end_replacement":
parse_requires(replacement_mode,
"ava_end_replacement can only be used inside replacement mode to exit that mode.")
replacement_mode = False
parse_assert(replacement_mode_start is not None, "Should be unreachable.")
replacement_extents.append((replacement_mode_start, c.extent.end.line))
else:
global_config[name] = get_string_literal(c)
elif normal_mode and c.kind == CursorKind.VAR_DECL and c.type.spelling.endswith("_resource") \
and c.type.spelling.startswith("ava_"):
# TODO: Use the resource declarations to check resource usage.
pass
elif c.kind == CursorKind.FUNCTION_DECL and c.location.file.name == filename:
if normal_mode and c.is_definition() and c.storage_class == StorageClass.STATIC:
# This is a utility function for the API forwarding code.
parse_expects(
c.linkage == LinkageKind.INTERNAL,
f"at {term.yellow(c.displayname)}",
"API utility functions should be static (or similar) since they are included in header files.",
loc=convert_location(c.location))
utility_extents.append((c.extent.start.line, c.extent.end.line))
elif normal_mode:
# This is an API function.
f = convert_function(c)
if f:
functions[c.mangled_name] = f
elif replacement_mode:
# Remove the function from the list because it is replaced
replaced_functions[c.mangled_name] = c
elif normal_mode and c.kind == CursorKind.FUNCTION_DECL and c.location.file.name in [f.name for f in primary_include_files.values()]:
included_extent = False
f = convert_function(c, supported=False)
if f:
include_functions[c.mangled_name] = f
elif normal_mode and c.kind == CursorKind.INCLUSION_DIRECTIVE \
and not c.displayname.endswith(nightwatch_parser_c_header) and c.location.file.name == filename:
try:
primary_include_files[c.displayname] = c.get_included_file()
except AssertionError as e:
parse_assert(not e, str(e), loc=convert_location(c.location))
# elif normal_mode and c.kind == CursorKind.INCLUSION_DIRECTIVE and c.tokens[-1].spelling == '"' \
# and not c.displayname.endswith(nightwatch_parser_c_header):
# parse_assert(False, "Including AvA specifications in other specifications is not yet supported. Ask amp to do it.")
elif normal_mode and c.kind in (CursorKind.MACRO_DEFINITION, CursorKind.STRUCT_DECL, CursorKind.TYPEDEF_DECL) \
and c.location.file and c.location.file.name == filename:
# This is a utility macro for the API forwarding code.
type_extents.append((c.extent.start.line, c.extent.end.line))
elif (normal_mode or replacement_mode) and c.kind in (CursorKind.UNEXPOSED_DECL,) and len(c.tokens) and c.tokens[0].spelling == "extern" \
and c.location.file in primary_include_files.values():
for cc in c.get_children():
convert_decl(cc)
return # Skip the extents processing below
elif normal_mode:
# Default case for normal mode.
is_semicolon = len(c.tokens) == 1 and c.tokens[0].spelling == ";"
if c.location.file and not is_semicolon:
parse_expects(c.location.file.name != filename, f"Ignoring unsupported: {c.kind} {c.spelling}",
loc=convert_location(c.location))
# if len(c.tokens) >= 1 and c.tokens[0].spelling == "extern" and c.kind == CursorKind.UNEXPOSED_DECL:
# print(c.kind, c.tokens[0].spelling)
else:
# Default case for non-normal modes
return # Skip the extents processing below
if c.location.file in primary_include_files.values():
primary_include_extents.append((c.location.file, c.extent.start.line, c.extent.end.line, included_extent))
for c in unit.cursor.get_children():
convert_decl(c)
parse_expects(primary_include_files, "Expected at least one API include file.")
extra_functions = {}
if errors:
raise MultipleError(*errors)
for name, function in functions.items():
if name in include_functions:
del include_functions[name]
elif not function.callback_decl:
extra_functions[name] = function
for name, cursor in replaced_functions.items():
if name in include_functions:
del include_functions[name]
else:
parse_requires(name not in functions, "Replacing forwarded functions is not allowed.",
loc=convert_location(cursor.location))
if extra_functions:
function_str = ", ".join(str(f.name) for f in extra_functions.values())
parse_expects(False, f"""
Functions appear in {filename}, but are not in {", ".join(primary_include_files.keys())}:
{function_str}""".strip(),
loc=Location(filename, None, None, None))
# We use binary mode because clang operates in bytes not characters.
# TODO: If the source files have "\r\n" and clang uses text mode then this will cause incorrect removals.
# TODO: There could be functions in the header which are not processed with the current configuration. That will
# mess things up.
c_types_header_code = bytearray()
for name, file in primary_include_files.items():
with open(file.name, "rb") as fi:
# content = fi.read()
# primary_include_extents.sort(key=lambda r: r(0).start.offset)
def find_modes(i):
modes = set()
for in_name, start, end, mode in primary_include_extents:
if in_name == file and start <= i <= end:
modes.add(mode)
return modes
error_reported = False
i = None
for i, line in enumerate(fi):
modes = find_modes(i+1)
# print(i, modes, line)
keep_line = True in modes or not modes
error_line = keep_line and False in modes
parse_expects(not error_line or error_reported,
"Line both needed and excluded. Incorrect types header may be generated.",
loc=Location(file.name, i, None, None))
error_reported = error_reported or error_line
if keep_line:
c_types_header_code.extend(line)
else:
c_types_header_code.extend(b"/* NWR: " +
line.replace(b"/*", b"@*").replace(b"*/", b"*@").rstrip() +
b" */\n")
def load_extents(extents):
with open(filename, "rb") as fi:
def utility_line(i):
for start, end in extents:
if start <= i <= end:
return True
return False
c_code = bytearray()
last_emitted_line = None
for i, line in enumerate(fi):
if utility_line(i+1):
if last_emitted_line != i-1:
c_code.extend("""#line {} "{}"\n""".format(i+1, filename).encode("utf-8"))
c_code.extend(line)
last_emitted_line = i
return bytes(c_code).decode("utf-8")
c_utility_code = load_extents(utility_extents)
c_replacement_code = load_extents(replacement_extents)
c_type_code = load_extents(type_extents)
return API(functions=list(functions.values()) + list(include_functions.values()),
includes=list(primary_include_files.keys()),
c_types_header_code=bytes(c_types_header_code).decode("utf-8"),
c_type_code=c_type_code,
c_utility_code=c_utility_code,
c_replacement_code=c_replacement_code,
metadata_type=metadata_type,
missing_functions=list(include_functions.values()),
cplusplus=cplusplus,
**global_config)
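# A minimal usage sketch (paths, flags and the returned API object's attribute names are assumptions):
#   api = parse("spec.nw.c", include_path=["include"], definitions=[], extra_args=[])
#   print(api.includes, len(api.functions))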
|
# The SVM class uses the package CVXOPT (Python Software for Convex Optimization), which is not part of the
# standard library but is included in Anaconda, for example.
import numpy as np
import warnings
#Everything else is imported locally when needed
class SVMClass:
##########################################################################################################################
# Linear only support vector machine #
# #
# Louis-Francois Arsenault, Columbia University la2518@columbia.edu (2016) #
##########################################################################################################################
# #
# INPUTS: #
# #
# typeL : 'norm2' (default) is the standard svm using ||w||_2^2 while 'norm1' is a 1-norm svm using ||w||_1 #
    #           with the objective function as in: http://papers.nips.cc/paper/2450-1-norm-support-vector-machines.pdf  #
# #
# X : A ndarray array([x_11,x_12,...,x_1p],[x_21,x_22,...,x_2p],...,[x_Nlearn1,x_Nlearn2,...,x_Nlearnp]) #
# representing a matrix with Nlearn lines with all known examples of the training set and the columns #
# are the dimensions p of one x. #
# #
# y : A 1d array numpy.array([y_1,y_2,...,y_Nlearn]) representing a vector of size Nlearn with values for #
# each examples of the training set in the form -1 and 1. #
# #
# C : Value of the slack variable to be used. The default value is set to 0, meaning a separable problem #
# #
# tolsvm : In the 2-norm case with non-zero C, what tolerance to define below which we have a support vector. #
# The final tolerance is tolsvm = tolsvm*C. #
# #
# OUTPUTS: #
# #
# self.Nlearn : Number of instances in training #
# #
# self.pdim : How many dimensions for the linear model given by vector w; equal to p+1 (constant term included) #
# #
# self.w : This is the linear model. A vector with p+1 elements. A prediction is sign( w[0:-1]^T*Xt + w[-1] ). #
# #
# In the 2-norm case with non-zero C: #
# #
# self.alpha : The alpha vector for the dual model #
# #
# self.indsv : The indices of the support vectors #
##########################################################################################################################
def __init__(self,typeL='norm2'):
self.typeL = typeL
def train(self,X,y,C=0,tolsvm=1e-5):
        if self.typeL == 'norm2':  # 2-norm svm
#The 2-norm svm is the standard approach where the min. approach has the ||w||_2^2 term
from NumMethods import quadprog
if C==0:
#Solving the linearly separable problem with quad. prog.
#We treat the constant term as part of the coeffs. w and thus
#need to add 1 at the end of every input
OneVec = np.ones((X.shape[0],1))
XX = np.concatenate((X, OneVec), axis=1)
#Number of instances in the training set as well as the dimension of final w
SizeXX = XX.shape
pdim = SizeXX[1]
Nlearn = SizeXX[0]
self.Nlearn = Nlearn
self.pdim = pdim
#Building the matrices for the quadratic prog. problem
H1 = np.identity(pdim)
H1[pdim-1,pdim-1] = 0.
bineq1 = -np.ones((Nlearn,1))
Aineq1 = -np.diag(y).dot(XX)
q1 = np.zeros((1,pdim))[0]
#Solve the quad. prog.
self.w = quadprog(H1,q1,Aineq=Aineq1,bineq=bineq1)
self.w.shape = (self.pdim,)
else:
#Solving the dual with quad. prog.
#The constant term is included in w at the end as the last value
#Tolerance for the choice of support vectors
self.tolsvm=tolsvm*C
#Number of instances in the training set as well as the dimension of final w
SizeX = X.shape
pdim = SizeX[1]
Nlearn = SizeX[0]
self.Nlearn = Nlearn
self.pdim = pdim+1
#Building the matrices for the quadratic prog. problem
Hinter = np.dot(X,X.transpose())
H1 = np.diag(y).dot( Hinter.dot( np.diag(y) ) )
q1 = -np.ones(Nlearn)
Aeq1 = y.copy()
beq1 = np.array([0.])
lb1 = np.zeros(Nlearn)
ub1 = C*np.ones(Nlearn)
#Solve the quad. prog.
alpha=quadprog(H1,q1,Aeq=Aeq1,beq=beq1,lb=lb1,ub=ub1)
self.alpha = alpha.transpose()[0]
#Find the support vectors and only use them
indsv = np.where((alpha > self.tolsvm) & (alpha < (C-self.tolsvm)))[0].astype(int)
self.indsv = indsv
#The linear model is completely specified by the vector w
self.w = np.zeros(pdim+1)
#self.w[0:-1] = self.alpha[indsv].dot(np.diag(y[indsv]).dot(X[indsv]))
self.w[0:-1] = np.einsum("i,i,ij",alpha[indsv],y[indsv],X[indsv])
#The constant term (bias) is computed from each support vector and the results are averaged
bias = np.mean(y[indsv]- X[indsv,:].dot(self.w[0:-1]))
self.w[-1] = bias
elif self.typeL == 'norm1': #1-norm svm
#The 1-norm svm uses ||w||_1 rather than ||w||_2^2
#For details, see http://papers.nips.cc/paper/2450-1-norm-support-vector-machines.pdf
from cvxopt.modeling import variable as cvxvar, op, sum as cvxsum, max as cvxmax
from cvxopt.solvers import options as cvxopt
from cvxopt import matrix as cvxmat
cvxopt['show_progress'] = False
#Number of instances in the training set as well as the dimension of final w
self.Nlearn = X.shape[0]
self.pdim = X.shape[1]+1
#Necessary matrices for the unconstrained problem
AL1 = np.diag(y).dot(X)
BL1 = cvxmat(C*y)
ALL1 = cvxmat(C*AL1)
#The variables to be found
WW = cvxvar(ALL1.size[1],'WW')
bb = cvxvar(1,'bb')
#Calling the solver
op( cvxsum(abs(WW)) + cvxsum(cvxmax(0,1-(ALL1*WW + BL1*bb)))).solve()
self.w = np.zeros(self.pdim)
self.w[0:-1] = np.array(WW.value).reshape((self.pdim-1,))
self.w[-1] = np.array(bb.value).reshape((1,))
def query(self,Xt):
#Prediction for matrix Xt
OneVec = np.ones((Xt.shape[0],1))
XXt = np.concatenate((Xt, OneVec), axis=1)
return np.sign(XXt.dot(self.w))
def score(self,X,ytrue):
#Return the fraction of correct predictions (accuracy in [0,1])
ypredic = self.query(X)
return 1-0.5*np.sum(np.absolute(ypredic-ytrue))/len(ytrue)
##########################################################################################################################
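#Illustrative usage sketch; assumes NumMethods.quadprog (and CVXOPT for the 1-norm case) is importable.
#Trains the hard-margin 2-norm svm on a tiny linearly separable toy set and checks the training accuracy.
if __name__ == '__main__':
Xdemo = np.array([[2., 2.], [1., 3.], [2., 3.], [-2., -2.], [-1., -3.], [-2., -3.]])
ydemo = np.array([1., 1., 1., -1., -1., -1.])
clf = SVMClass(typeL='norm2')
clf.train(Xdemo, ydemo) #C=0 -> hard-margin (separable) problem
print(clf.score(Xdemo, ydemo)) #expected 1.0 on this toy set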
|
import json
import os
import warnings
import random
import string
import csv
import time
import datetime
import io
import hashlib
from flask import (
Blueprint, flash, Flask, g, redirect, render_template, request, url_for, jsonify, Response
)
from survey.figure_eight import FigureEight
from core.models.metrics import MAX_GAIN
from survey._app import app, csrf_protect
from survey.admin import get_job_config
from survey.db import get_db, table_exists
from survey.figure_eight import RowState
from survey.utils import get_table, increase_worker_bonus, pay_worker_bonus, get_resp_worker_id, is_worker_available, generate_completion_code
from .prop import BASE as prop_BASE, finalize_round, JUDGING_TIMEOUT_SEC, LAST_MODIFIED_KEY, STATUS_KEY, WORKER_KEY
from .resp import BASE as resp_BASE
from survey.txx.helpers import finalize_resp
BASE = "txx"
NEXT_IS_RESPONDER = 0
NEXT_IS_PROPOSER = 1
NEXT_IS_PROPOSER_WAITING = 2
NEXT_IS_WAITING = 2
def check_is_proposer_next(job_id, worker_id, treatment, max_judgments=None, resp_only=None, prop_only=None):
app.logger.debug("check_is_proposer_next")
resp_table = get_table(resp_BASE, job_id=job_id, schema="result", treatment=treatment)
prop_table = get_table(prop_BASE, job_id=job_id, schema="result", treatment=treatment)
prop_table_data = get_table(prop_BASE, job_id=job_id, schema="data", treatment=treatment)
job_config = get_job_config(get_db("DB"), job_id)
con = get_db("DATA")
nb_resp = 0
nb_prop = 0
nb_prop_open = 0
if table_exists(con, resp_table):
with con:
tmp = con.execute(f"SELECT COUNT(*) as count from {resp_table} where job_id=?", (job_id, )).fetchone()
if tmp:
nb_resp = tmp["count"]
if table_exists(con, prop_table_data):
with con:
judging_timeout = time.time() - JUDGING_TIMEOUT_SEC
tmp = con.execute(f"SELECT COUNT(*) as count from {prop_table_data} where (job_id=? OR job_id like 'REF%') and ({STATUS_KEY}=? OR ({STATUS_KEY}=? and {LAST_MODIFIED_KEY}<?) OR ({WORKER_KEY}=?))", (job_id, RowState.JUDGEABLE, RowState.JUDGING, judging_timeout, worker_id)).fetchone()
if tmp:
nb_prop_open = tmp["count"]
if table_exists(con, prop_table):
with con:
tmp = con.execute(f"SELECT COUNT(*) as count from {prop_table} where (job_id=? OR job_id like 'REF%')", (job_id, )).fetchone()
if tmp:
nb_prop = tmp["count"]
#TODO: if nb_resp >= expected row/2, should only take props
if max_judgments is None or max_judgments==0:
max_judgments = job_config["expected_judgments"]
max_resp = (max_judgments // 2)
max_prop = (max_judgments // 2)
if resp_only:
max_resp = max_judgments
elif prop_only:
max_prop = max_judgments
if max_judgments > 0:
#if (max_judgments // 2) <= nb_resp and (max_judgments // 2) > nb_prop:
if max_resp <= nb_resp and max_prop > nb_prop:
if nb_prop_open > 0:
is_proposer = NEXT_IS_PROPOSER
else:
is_proposer = NEXT_IS_WAITING
elif nb_prop_open > 0:
is_proposer = NEXT_IS_PROPOSER
else:
if resp_only or prop_only:
is_proposer = NEXT_IS_WAITING
else:
is_proposer = NEXT_IS_RESPONDER
if resp_only:
if max_judgments > nb_resp:
is_proposer = NEXT_IS_RESPONDER
else:
is_proposer = NEXT_IS_WAITING
elif prop_only:
if max_judgments > nb_prop:
is_proposer = NEXT_IS_PROPOSER
else:
is_proposer = NEXT_IS_WAITING
elif nb_prop_open > 0:
is_proposer = NEXT_IS_PROPOSER
else:
is_proposer = NEXT_IS_RESPONDER
app.logger.debug(f"max_judgments: {max_judgments}, nb_prop: {nb_prop}, nb_resp: {nb_resp}, nb_prop_open: {nb_prop_open}, is_proposer: {is_proposer}")
return is_proposer
def get_previous_worker_code(job_id, worker_id, treatment):
"""
Generate a completion code for the worker in case they have already taken the main task
"""
resp_table = get_table(resp_BASE, job_id=job_id, schema="result", treatment=treatment)
prop_table = get_table(prop_BASE, job_id=job_id, schema="result", treatment=treatment)
worker_code = None
if is_worker_available(worker_id, resp_table):
worker_code = get_db().execute(f"SELECT completion_code from {resp_table} where worker_id=?", [worker_id]).fetchone()[0]
if not worker_code:
worker_code = generate_completion_code(resp_BASE, job_id)
if is_worker_available(worker_id, prop_table):
worker_code = get_db().execute(f"SELECT completion_code from {prop_table} where worker_id=?", [worker_id]).fetchone()[0]
if not worker_code:
worker_code = generate_completion_code(prop_BASE, job_id)
return worker_code
def handle_index(treatment, resp_only=None, prop_only=None):
job_id = request.args.get("job_id", "na")
worker_id = request.args.get("worker_id", "na")
max_judgments = None
try:
max_judgments = int(request.args.get("max_judgments", "0"))
except ValueError:
pass
previous_worker_code = get_previous_worker_code(job_id, worker_id, treatment)
app.logger.debug(f"handle_index: job_id: {job_id}, worker_id: {worker_id}")
is_proposer = check_is_proposer_next(job_id, worker_id, treatment, max_judgments=max_judgments, resp_only=resp_only, prop_only=prop_only)
table_all = get_table(BASE, "all", schema=None)
con = get_db()
if previous_worker_code is None:
if resp_only:
return redirect(url_for(f"{treatment}.resp.index", **request.args))
elif prop_only:
return redirect(url_for(f"{treatment}.prop.index", **request.args))
else:
if is_proposer:
return redirect(url_for(f"{treatment}.prop.index", **request.args))
else:
return redirect(url_for(f"{treatment}.resp.index", **request.args))
else:
flash("You already completed the main task!")
if resp_BASE in previous_worker_code:
return render_template("txx/resp.done.html", worker_code=previous_worker_code)
else:
return render_template("txx/prop.done.html", worker_code=previous_worker_code)
def handle_index_feedback(treatment, base_treatment):
job_id = request.args.get("job_id", "na")
worker_id = request.args.get("worker_id", "na")
max_judgments = None
try:
max_judgments = int(request.args.get("max_judgments", "0"))
except ValueError:
pass
previous_worker_code = get_previous_worker_code(job_id, worker_id, base_treatment)
app.logger.debug(f"handle_index: job_id: {job_id}, worker_id: {worker_id}")
is_proposer = check_is_proposer_next(job_id, worker_id, treatment, max_judgments=max_judgments)
table_all = get_table(BASE, "all", schema=None)
con = get_db()
if table_exists(con, table_all):
with con:
res = con.execute(f"SELECT * from {table_all} WHERE worker_id=?", (worker_id,)).fetchone()
# if res:
# flash(f"You already took part on this survey. Thank you for your participation")
# return render_template("error.html")
if previous_worker_code is None:
if is_proposer:
return redirect(url_for(f"{treatment}.prop.index", **request.args))
else:
return redirect(url_for(f"{treatment}.resp.index", **request.args))
else:
if prop_BASE in previous_worker_code:
return redirect(url_for(f"{treatment}.prop.index", **request.args))
else:
return redirect(url_for(f"{treatment}.resp.index", **request.args))
def _process_judgments(signal, payload, job_id, job_config, treatment, auto_finalize=False):
"""
:param signal: (str)
:param payload: (dict)
:param job_id: (int|str)
:param job_config: (JobConfig)
:param auto_finalize (bool)
"""
error_happened = False
app.logger.debug(f"_process_judgments: {signal}, job_id: {job_id}, auto_finalize: {auto_finalize}")
with app.app_context():
try:
if signal == "new_judgments":
judgments_count = payload['judgments_count']
fig8 = FigureEight(job_id, job_config["api_key"])
for idx in range(judgments_count):
if auto_finalize == True:
try:
con = get_db("RESULT")
worker_judgment = payload['results']['judgments'][idx]
worker_id = worker_judgment["worker_id"]
app.logger.debug(f"_process_judgments: {signal}, job_id: {job_id}, worker_id: {worker_id}")
is_responder = False
is_proposer = False
table_resp = get_table(resp_BASE, job_id=job_id, schema="result", treatment=treatment)
table_prop = get_table(prop_BASE, job_id=job_id, schema="result", treatment=treatment)
with con:
if table_exists(con, table_resp):
res = con.execute(f"SELECT * from {table_resp} WHERE job_id=? and worker_id=?", (job_id, worker_id)).fetchone()
if res:
is_responder = True
if not is_responder and table_exists(con, table_prop):
res = con.execute(f"SELECT * from {table_prop} WHERE job_id=? and worker_id=?", (job_id, worker_id)).fetchone()
if res:
is_proposer= True
if is_responder:
finalize_resp(job_id=job_id, worker_id=worker_id, treatment=treatment)
elif is_proposer:
finalize_round(job_id=job_id, prop_worker_id=worker_id, treatment=treatment)
else:
app.logger.error(f"Error: unknown worker_id: {worker_id} for job_id: {job_id}")
except Exception as err:
if not error_happened:
app.log_exception(err)
error_happened = True
else:
worker_judgment = payload['results']['judgments'][idx]
worker_id = worker_judgment["worker_id"]
pay_worker_bonus(job_id, worker_id, fig8)
elif signal == "unit_complete":
judgments_count = payload['judgments_count']
fig8 = FigureEight(job_id, job_config["api_key"])
for idx in range(judgments_count):
if auto_finalize == False:
worker_judgment = payload['results']['judgments'][idx]
worker_id = worker_judgment["worker_id"]
# PAY_WORKER won't pay someone twice.
pay_worker_bonus(job_id, worker_id, fig8)
#TODO: may process the whole unit here
pass
except Exception as err:
app.log_exception(err)
app.logger.debug(f"_process_judgments: {signal}, job_id: {job_id} - done")
def handle_webhook(treatment):
"""
request.args:
- job_id: job's id
- worker_id: worker's id
- synchron: Directly process data without putting it in a queue for another thread
"""
app.logger.debug("handle_webhook")
sync_process = False
auto_finalize = False
sync_process = request.args.get("synchron", False)
form = request.form.to_dict()
if "signal" in form:
signal = form['signal']
if signal in {'unit_complete', 'new_judgments'}:
payload_raw = form['payload']
signature = form['signature']
payload = json.loads(payload_raw)
job_id = payload['job_id']
job_config = get_job_config(get_db("DB"), job_id)
payload_ext = payload_raw + job_config["api_key"]
verif_signature = hashlib.sha1(payload_ext.encode()).hexdigest()
if signature == verif_signature:
args = (signal, payload, job_id, job_config, treatment, auto_finalize)
if sync_process:
_process_judgments(*args)
else:
app.config["THREADS_POOL"].starmap_async(_process_judgments, [args])
else:
job_id = request.args.get("job_id")
worker_id = request.args.get("worker_id")
job_config = get_job_config(get_db("DB"), job_id)
auto_finalize = True
payload = {
"judgments_count": 1,
"job_id": job_id,
"results": {
"judgments": [
{
"job_id": job_id,
"worker_id": worker_id
}
]
}
}
args = ("new_judgments", payload, job_id, job_config, treatment, auto_finalize)
if sync_process:
_process_judgments(*args)
else:
app.config["THREADS_POOL"].starmap_async(_process_judgments, [args])
# flash("You may close this tab now and continue with the survey.")
# return render_template("info.html", job_id=job_id, webhook=True)
return Response(status=200)
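# Minimal sketch for exercising handle_webhook locally: the verification above expects
# sha1(raw payload + api_key); this hypothetical helper reproduces that signature so a
# hand-crafted request can be signed the same way.
def make_example_webhook_signature(payload_raw, api_key):
"""Return the hex signature handle_webhook expects for the given raw payload."""
return hashlib.sha1((payload_raw + api_key).encode()).hexdigest()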
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'IPGroup'
db.create_table('iprestrict_ipgroup', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('iprestrict', ['IPGroup'])
# Adding model 'IPRange'
db.create_table('iprestrict_iprange', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('ip_group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['iprestrict.IPGroup'])),
('first_ip', self.gf('django.db.models.fields.GenericIPAddressField')(max_length=39)),
('cidr_prefix_length', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
('last_ip', self.gf('django.db.models.fields.GenericIPAddressField')(max_length=39, null=True, blank=True)),
))
db.send_create_signal('iprestrict', ['IPRange'])
# Adding model 'Rule'
db.create_table('iprestrict_rule', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url_pattern', self.gf('django.db.models.fields.CharField')(max_length=500)),
('ip_group', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['iprestrict.IPGroup'])),
('action', self.gf('django.db.models.fields.CharField')(default='D', max_length=1)),
('rank', self.gf('django.db.models.fields.IntegerField')(blank=True)),
))
db.send_create_signal('iprestrict', ['Rule'])
# Adding model 'ReloadRulesRequest'
db.create_table('iprestrict_reloadrulesrequest', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('iprestrict', ['ReloadRulesRequest'])
def backwards(self, orm):
# Deleting model 'IPGroup'
db.delete_table('iprestrict_ipgroup')
# Deleting model 'IPRange'
db.delete_table('iprestrict_iprange')
# Deleting model 'Rule'
db.delete_table('iprestrict_rule')
# Deleting model 'ReloadRulesRequest'
db.delete_table('iprestrict_reloadrulesrequest')
models = {
'iprestrict.ipgroup': {
'Meta': {'object_name': 'IPGroup'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'iprestrict.iprange': {
'Meta': {'object_name': 'IPRange'},
'cidr_prefix_length': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'first_ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['iprestrict.IPGroup']"}),
'last_ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True', 'blank': 'True'})
},
'iprestrict.reloadrulesrequest': {
'Meta': {'object_name': 'ReloadRulesRequest'},
'at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'iprestrict.rule': {
'Meta': {'ordering': "['rank', 'id']", 'object_name': 'Rule'},
'action': ('django.db.models.fields.CharField', [], {'default': "'D'", 'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_group': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['iprestrict.IPGroup']"}),
'rank': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'url_pattern': ('django.db.models.fields.CharField', [], {'max_length': '500'})
}
}
complete_apps = ['iprestrict']
|
data_stacked = data.stack()
|
"""
htmlgen
Kind of like HTMLGen, only much simpler. Like stan, only not. The
only important symbol that is exported is ``html``.
You create tags with attribute access. I.e., the ``A`` anchor tag is
``html.a``. The attributes of the HTML tag are done with keyword
arguments. The contents of the tag are the non-keyword arguments
(concatenated). You can also use the special ``c`` keyword, passing a
list, tuple, or single tag, and it will make up the contents (this is
useful because keywords have to come after all non-keyword arguments,
which is non-intuitive).
If the value of an attribute is None, then no attribute will be
inserted. So::
>>> html.a(href='http://www.yahoo.com', name=None, c='Click Here')
'<a href=\"http://www.yahoo.com\">Click Here</a>'
If a non-string is passed in, then ``webhelpers.escapes.html_escape``
is called on the value.
``html`` can also be called, and it will concatenate the string
representations of its arguments.
``html.comment`` will generate an HTML comment, like
``html.comment('comment text', 'and some more text')`` -- note that it
cannot take keyword arguments (because they wouldn't mean anything).
For cases where you cannot use a name (e.g., for the ``class``
attribute) you can append an underscore to the name, like
``html.span(class_='alert')``.
Examples::
>>> print html.html(
... html.head(html.title(\"Page Title\")),
... html.body(
... bgcolor='#000066',
... text='#ffffff',
... c=[html.h1('Page Title'),
... html.p('Hello world!')],
... ))
<html>
<head>
<title>Page Title</title>
</head>
<body text=\"#ffffff\" bgcolor=\"#000066\">
<h1>Page Title</h1><p>
Hello world!
</p>
</body>
</html>
>>> html.a(href='#top', c='return to top')
'<a href=\"#top\">return to top</a>'
>>> 1.4
1.4
.. note::
Should this return objects instead of strings? That would allow
things like ``html.a(href='foo')('title')``. Also, the objects
could have a method that shows that they are truly HTML, and thus
should not be further quoted.
However, in some contexts you can't use objects, you need actual
strings. But maybe we can just make sure those contexts don't
happen in webhelpers.
"""
from util import html_escape
__all__ = ['html']
def strify(s):
if s is None:
return ''
if not isinstance(s, basestring):
s = unicode(s)
if isinstance(s, unicode):
s = s.encode('ascii', 'xmlcharrefreplace')
return s
class UnfinishedComment:
def __call__(self, *args):
return '<!--%s-->' % '\n'.join(map(strify, args))
class Base:
comment = UnfinishedComment()
def __getattr__(self, attr):
if attr.startswith('__'):
raise AttributeError
attr = attr.lower()
return UnfinishedTag(attr)
def __call__(self, *args):
return ''.join(map(str, args))
def escape(self, *args):
return ''.join(map(html_escape, args))
def str(self, arg):
return strify(arg)
class UnfinishedTag:
def __init__(self, tag):
self._tag = tag
def __call__(self, *args, **kw):
return tag(self._tag, *args, **kw)
def __str__(self):
if self._tag in empty_tags:
return '<%s />' % self._tag
else:
return '<%s></%s>' % (self._tag, self._tag)
def tag(tag, *args, **kw):
if kw.has_key("c"):
if args:
raise TypeError(
"The special 'c' keyword argument cannot be used in "
"conjunction with non-keyword arguments")
args = kw["c"]
del kw["c"]
attrargs = []
for attr, value in kw.items():
if value is None:
continue
if attr.endswith('_'):
attr = attr[:-1]
attrargs.append(' %s="%s"' % (attr, html_escape(value)))
if not args and tag in empty_tags:
return '<%s%s />' % (tag, ''.join(attrargs))
else:
return '<%s%s>%s</%s>' % (
tag, ''.join(attrargs), ''.join(map(strify, args)),
tag)
# Taken from: http://www.w3.org/TR/REC-html40/index/elements.html
empty_tags = {}
for _t in ("area base basefont br col frame hr img input isindex "
"link meta param".split()):
empty_tags[_t] = None
block_level_tags = {}
for _t in ("applet blockquote body br dd div dl dt fieldset "
"form frameset head hr html iframe map menu noframes "
"noscript object ol optgroup p param script select "
"table tbody tfoot thead tr ul var"):
block_level_tags[_t] = None
html = Base()
|
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2015-2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
from __future__ import absolute_import
__authors__ = ["D. Naudet"]
__license__ = "MIT"
__date__ = "15/09/2016"
from collections import namedtuple
import numpy as np
from silx.gui import qt as Qt
from .PlotModel import PlotTree
from ...model.TreeView import TreeView
from ...model.ModelDef import ModelRoles
from ...model.Model import Model, RootNode, Node
from ...project.Hdf5Nodes import H5GroupNode
from ...project.IntensityGroup import IntensityGroup
from ...widgets.Containers import GroupBox
from ...widgets.Input import StyledLineEdit
from ...widgets.XsocsPlot2D import XsocsPlot2D
from ...widgets.Buttons import FixedSizePushButon
try:
from silx.gui.plot.ImageRois import ImageRoiManager
except ImportError:
# TODO remove this import once the ROIs are added to the silx release.
from ...silx_imports.ImageRois import ImageRoiManager
IntensityViewEvent = namedtuple('IntensityViewEvent', ['roi', 'entries'])
class RectRoiWidget(Qt.QWidget):
sigRoiApplied = Qt.Signal(object)
def __init__(self, roiManager, parent=None):
# TODO :
# support multiple ROIs then batch them
super(RectRoiWidget, self).__init__(parent)
self.__roiToolBar = roiToolBar = roiManager.toolBar(rois=['rectangle'],
options=['show'])
roiToolBar.setMovable(False)
topLayout = Qt.QVBoxLayout(self)
grpBox = GroupBox('ROI')
layout = Qt.QGridLayout(grpBox)
row = 0
layout.addWidget(roiToolBar, row, 0, 1, 2, Qt.Qt.AlignTop)
row += 1
self._xEdit = edit = StyledLineEdit(nChar=6)
edit.setReadOnly(True)
layout.addWidget(Qt.QLabel('x='), row, 0, Qt.Qt.AlignTop)
layout.addWidget(edit, row, 1, Qt.Qt.AlignTop)
row += 1
self._yEdit = edit = StyledLineEdit(nChar=6)
edit.setReadOnly(True)
layout.addWidget(Qt.QLabel('y='), row, 0, Qt.Qt.AlignTop)
layout.addWidget(edit, row, 1, Qt.Qt.AlignTop)
row += 1
self._wEdit = edit = StyledLineEdit(nChar=6)
edit.setReadOnly(True)
layout.addWidget(Qt.QLabel('w='), row, 0, Qt.Qt.AlignTop)
layout.addWidget(edit, row, 1, Qt.Qt.AlignTop)
row += 1
self._hEdit = edit = StyledLineEdit(nChar=6)
edit.setReadOnly(True)
layout.addWidget(Qt.QLabel('h='), row, 0, Qt.Qt.AlignTop)
layout.addWidget(edit, row, 1, Qt.Qt.AlignTop)
row += 1
hLayout = Qt.QHBoxLayout()
style = Qt.QApplication.style()
icon = style.standardIcon(Qt.QStyle.SP_DialogApplyButton)
self.__applyBn = applyBn = Qt.QToolButton()
applyBn.setToolTip('Apply ROI')
applyBn.setStatusTip('Apply ROI')
applyBn.setIcon(icon)
applyBn.setToolButtonStyle(Qt.Qt.ToolButtonTextBesideIcon)
applyBn.setText('To Q Space')
applyBn.setEnabled(False)
hLayout.addWidget(applyBn)
applyBn.clicked.connect(self.__applyRoi)
icon = style.standardIcon(Qt.QStyle.SP_DialogCloseButton)
self.__discardBn = discardBn = Qt.QToolButton()
discardBn.setToolTip('Discard ROI')
discardBn.setStatusTip('Discard ROI')
discardBn.setIcon(icon)
discardBn.setEnabled(False)
hLayout.addWidget(discardBn, Qt.Qt.AlignRight)
discardBn.clicked.connect(self.__discardRoi)
layout.addLayout(hLayout, row, 0, 1, 2, Qt.Qt.AlignCenter)
# topLayout.setSizeConstraint(Qt.QLayout.SetMinimumSize)
topLayout.addWidget(grpBox)
topLayout.addStretch(100)
# TODO : weakref
self.__roiManager = roiManager
roiManager.sigRoiDrawingFinished.connect(self.__roiDrawingFinished,
Qt.Qt.QueuedConnection)
roiManager.sigRoiRemoved.connect(self.__roiRemoved,
Qt.Qt.QueuedConnection)
roiManager.sigRoiMoved.connect(self.__roiMoved,
Qt.Qt.QueuedConnection)
def sizeHint(self):
return Qt.QSize(self.__roiToolBar.sizeHint().width() + 10, 0)
def __discardRoi(self, checked):
self.__roiManager.clear()
def __applyRoi(self, checked):
# At the moment we only support one roi at a time.
roi = self.__roiManager.rois
roiItem = self.__roiManager.roiItem(roi[0])
xMin = roiItem.pos[0]
xMax = xMin + roiItem.width
yMin = roiItem.pos[1]
yMax = yMin + roiItem.height
self.sigRoiApplied.emit([xMin, xMax, yMin, yMax])
def __roiDrawingFinished(self, event):
self.__display(event['xdata'], event['ydata'])
self.__discardBn.setEnabled(True)
self.__applyBn.setEnabled(True)
def __clear(self):
self._xEdit.clear()
self._yEdit.clear()
self._wEdit.clear()
self._hEdit.clear()
def __display(self, xData, yData):
xMin, xMax = xData[0], xData[2]
if xMax < xMin:
xMin, xMax = xMax, xMin
yMin, yMax = yData[0], yData[1]
if yMax < yMin:
yMin, yMax = yMax, yMin
self._xEdit.setText(str(xMin))
self._yEdit.setText(str(yMin))
self._wEdit.setText(str(xMax - xMin))
self._hEdit.setText(str(yMax - yMin))
def __roiRemoved(self, name):
self.__clear()
self.__discardBn.setEnabled(False)
self.__applyBn.setEnabled(False)
def __roiMoved(self, event):
self.__display(event['xdata'], event['ydata'])
class IntensityTotalNode(H5GroupNode):
"""
Node displaying info about the number of entries selected.
"""
total = property(lambda self: self.__total)
samplePos = property(lambda self: self.__samplePos)
icons = None
def __init__(self, **kwargs):
super(IntensityTotalNode, self).__init__(**kwargs)
# TODO : check item type
self.nodeName = 'Total'
self.__total = None
self.__samplePos = None
self.__notifyModel = True
def _loadChildren(self):
iGroup = IntensityGroup(self.h5File, self.h5Path)
iItems = iGroup.getIntensityItems()
children = []
for iItem in iItems:
if iItem.entry == 'Total':
continue
iNode = IntensityViewItemNode(iItem)
children.append(iNode)
self.nodeName = 'Total {0} / {0}'.format(len(children))
return children
def _childInternalDataChanged(self, sender, *args):
super(IntensityTotalNode, self)._childInternalDataChanged(sender,
*args)
if sender.parent() != self:
return
if self.__notifyModel:
self.__getTotal()
self.sigInternalDataChanged.emit([0])
def __getTotal(self):
total = None
samplePos = None
nSelected = 0
childCount = self.childCount()
for childIdx in range(childCount):
child = self.child(childIdx)
if child.checkState == Qt.Qt.Unchecked:
continue
nSelected += 1
intensity, pos = child.item.getScatterData()
if total is None:
total = intensity
samplePos = pos
else:
total += intensity
blocked = self.blockSignals(True)
self.nodeName = 'Total {0} / {1}'.format(nSelected, childCount)
self.__total = total
self.__samplePos = samplePos
self.blockSignals(blocked)
def scatterData(self):
if self.total is None:
self.__getTotal()
return self.total, self.samplePos
def getSelectedEntries(self):
"""
Returns the list of entries, the list of selected entries indices,
and the list of unselected entries indices.
:return:
"""
selected = []
unselected = []
entries = []
for childIdx in range(self.childCount()):
child = self.child(childIdx)
entries.append(child.item.entry)
if child.checkState == Qt.Qt.Unchecked:
unselected.append(childIdx)
else:
selected.append(childIdx)
return entries, selected, unselected
def selectAll(self):
"""
Selects all entries.
:return:
"""
# blocked = self.blockSignals(True)
self.__notifyModel = False
for childIdx in range(self.childCount()):
child = self.child(childIdx)
child.setCheckState(Qt.Qt.Checked)
# self.blockSignals(blocked)
self.__getTotal()
self.__notifyModel = True
self._notifyDataChange()
def unselectAll(self):
"""
Unselects all entries.
:return:
"""
# blocked = self.blockSignals(True)
self.__notifyModel = False
for childIdx in range(self.childCount()):
child = self.child(childIdx)
child.setCheckState(Qt.Qt.Unchecked)
# self.blockSignals(blocked)
self.__getTotal()
self.__notifyModel = True
self._notifyDataChange()
class IntensityViewItemNode(Node):
checkable = True
item = property(lambda self: self.__item)
def __init__(self, iItem, **kwargs):
super(IntensityViewItemNode, self).__init__(**kwargs)
# TODO : check item type
self.__item = iItem
self.nodeName = str(iItem.projectRoot().shortName(iItem.entry))
self.setCheckState(Qt.Qt.Checked)
self.setData(IntensityModelColumns.AngleColumn,
iItem.entry,
Qt.Qt.ToolTipRole)
def scatterData(self):
return self.item.getScatterData()
class IntensityModelColumns(object):
AngleColumn = 0
ColumnNames = ['Angle']
class IntensityRootNode(RootNode):
"""
Root node for the FitModel
"""
ColumnNames = IntensityModelColumns.ColumnNames
class IntensityModel(Model):
"""
Model displaying a FitH5 file contents.
"""
RootNode = IntensityRootNode
ModelColumns = IntensityModelColumns
ColumnsWithDelegates = None
def __iTotalNode(self):
iTotalIndex = self.index(0, 0)
return iTotalIndex.data(ModelRoles.InternalDataRole)
def getSelectedEntries(self):
iTotalNode = self.__iTotalNode()
if iTotalNode is None:
return []
return iTotalNode.getSelectedEntries()
def selectAll(self):
iTotalNode = self.__iTotalNode()
if iTotalNode is None:
return []
iTotalNode.selectAll()
def unselectAll(self):
iTotalNode = self.__iTotalNode()
if iTotalNode is None:
return []
iTotalNode.unselectAll()
class IntensityTree(TreeView):
sigCurrentChanged = Qt.Signal(object)
def __init__(self, intensityGroupItem, **kwargs):
super(IntensityTree, self).__init__(**kwargs)
model = IntensityModel()
iGroupNode = IntensityTotalNode(h5File=intensityGroupItem.filename,
h5Path=intensityGroupItem.path)
model.appendGroup(iGroupNode)
self.setModel(model)
self.setShowUniqueGroup(True)
self.setExpanded(self.model().index(0, 0), True)
model.startModel()
def currentChanged(self, current, previous):
super(IntensityTree, self).currentChanged(current, previous)
node = current.data(ModelRoles.InternalDataRole)
if not node:
return
self.sigCurrentChanged.emit(node)
class IntensityView(Qt.QMainWindow):
sigProcessApplied = Qt.Signal(object)
plot = property(lambda self: self.__plotWindow)
def __init__(self,
intensityGroup,
parent=None,
**kwargs):
super(IntensityView, self).__init__(parent, **kwargs)
self.setWindowTitle('[XSOCS] {0}:{1}'.format(intensityGroup.filename,
intensityGroup.path))
self.__displayedNode = None
self.__selectedPoint = None
self.__plotWindow = plotWindow = XsocsPlot2D()
plotWindow.setShowMousePosition(True)
plotWindow.setShowSelectedCoordinates(True)
plotWindow.sigPointSelected.connect(self.__slotPointSelected)
selector = Qt.QWidget()
layout = Qt.QVBoxLayout(selector)
# TODO : check item type
self.__iGroup = intensityGroup
self.__tree = tree = IntensityTree(intensityGroup, parent=self)
tree.model().dataChanged.connect(self.__slotModelDataChanged)
tree.sigCurrentChanged.connect(self.__slotItemSelected)
layout.addWidget(tree)
bnLayout = Qt.QHBoxLayout()
selAllBn = FixedSizePushButon('Select All')
selNoneBn = FixedSizePushButon('Clear')
selAllBn.clicked.connect(tree.model().selectAll)
selNoneBn.clicked.connect(tree.model().unselectAll)
bnLayout.addWidget(selAllBn)
bnLayout.addWidget(selNoneBn)
layout.addLayout(bnLayout)
dock = Qt.QDockWidget(self)
dock.setWidget(selector)
features = dock.features() ^ Qt.QDockWidget.DockWidgetClosable
dock.setFeatures(features)
self.addDockWidget(Qt.Qt.LeftDockWidgetArea, dock)
self.__roiManager = roiManager = ImageRoiManager(plotWindow)
rectRoiWidget = RectRoiWidget(roiManager)
rectRoiWidget.sigRoiApplied.connect(self.__slotRoiApplied)
dock = Qt.QDockWidget(self)
dock.setWidget(rectRoiWidget)
features = dock.features() ^ Qt.QDockWidget.DockWidgetClosable
dock.setFeatures(features)
self.addDockWidget(Qt.Qt.RightDockWidgetArea, dock)
profileWid = Qt.QWidget()
profileLayout = Qt.QHBoxLayout(profileWid)
self.__profilePlot = profilePlot = XsocsPlot2D()
profilePlot.setKeepDataAspectRatio(False)
profileLayout.addWidget(profilePlot, 10)
plotTree = PlotTree(profilePlot)
profileLayout.addWidget(plotTree)
dock = Qt.QDockWidget(self)
dock.setWidget(profileWid)
features = dock.features() ^ Qt.QDockWidget.DockWidgetClosable
dock.setFeatures(features)
self.addDockWidget(Qt.Qt.BottomDockWidgetArea, dock)
self.setCentralWidget(plotWindow)
def __slotModelDataChanged(self, topLeft, bottomRight, roles=None):
nodeL = topLeft.data(ModelRoles.InternalDataRole)
nodeR = bottomRight.data(ModelRoles.InternalDataRole)
if nodeL != nodeR:
print('Multiple selection not supported yet.')
return
if nodeL is None:
return
if not isinstance(nodeL, IntensityTotalNode):
return
# else: the total intensity has changed.
self.__slotPointSelected(self.__selectedPoint)
if nodeL == self.__displayedNode:
self.__slotItemSelected(nodeL)
def __slotItemSelected(self, node):
"""
Slot called when an item is selected in the tree view. Updates the
scatter plot accordingly.
:param node:
:return:
"""
self.__displayedNode = node
intensity, positions = node.scatterData()
title = node.nodeName
if intensity is None:
self.__plotWindow.clear()
self.__plotWindow.setGraphTitle(title)
return
self.setPlotData(positions.pos_0, positions.pos_1, intensity, title)
def setPlotData(self, x, y, data, title=None):
"""
Displays the scatter plot.
:param x:
:param y:
:param data:
:return:
"""
plot = self.__plotWindow
plot.setPlotData(x, y, data)
plot.setGraphTitle(title)
def __slotPointSelected(self, point):
"""
Slot called when a point is selected on the intensity map.
:param point:
:return:
"""
plot = self.__profilePlot
plot.setGraphTitle(None)
plot.clear()
if point is None:
return
iGroup = self.__iGroup
entries, selected, unselected =\
self.__tree.model().getSelectedEntries()
nEntries = len(entries)
xsocsH5 = iGroup.projectRoot().xsocsH5
angles = np.ndarray(shape=(nEntries, ))
intensities = np.ndarray(shape=(nEntries,))
# TODO : error if selected not in iItems (isnt supposed to happen...)
# TODO : make sure the angles are sorted?
for entryIdx, entry in enumerate(entries):
item = iGroup.getIntensityItem(entry)
intensities[entryIdx] = item.getPointValue(point.xIdx)
angles[entryIdx] = xsocsH5.scan_angle(entry)
title = 'Profile @ ({0:.7g}, {1:.7g})'.format(point.x, point.y)
plot.addCurve(angles, intensities, legend='All')
plot.setGraphTitle(title)
if selected:
plot.addCurve(angles[selected],
intensities[selected],
legend='Selected ({0}/{1})'.format(len(selected),
len(entries)))
for unselIdx in unselected:
plot.addXMarker(angles[unselIdx],
legend=entries[unselIdx],
color='red')
self.__selectedPoint = point
def __slotRoiApplied(self, roi):
"""
Slot called when the ROI is applied.
:param roi:
:return:
"""
entries, selected, _ =\
self.__tree.model().getSelectedEntries()
selEntries = [entries[idx] for idx in selected]
event = IntensityViewEvent(roi=roi, entries=selEntries)
self.sigProcessApplied.emit(event)
if __name__ == '__main__':
pass
|
import time
import sys
import array
import struct
import serial
from ga144 import GA144
class FlashReader(GA144):
def __init__(self, port, dumpfile, length):
du = open(dumpfile, "wb")
length = int(length, 0)
print 'length', length
GA144.__init__(self)
self.loadprogram("flashread.ga")
ser = self.download(port, 460800, listen = False)
s = None
while s != chr(0xa5):
s = ser.read(1)
print 'synced'
d = s + ser.read(4 * (length/2 + 1))
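# 4 serial bytes arrive per 16-bit flash word; keep only bytes 1 and 2 of each
# group of 4 to recover the raw flash data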
d = "".join([c for i,c in enumerate(d) if i%4 in (1,2)])
print "Manufacturer: %02x" % ord(d[0])
print "Device ID: %02x" % ord(d[1])
du.write(d[2:])
class FlashWriter(GA144):
def __init__(self, port, args):
GA144.__init__(self)
writername = "flashwrite.ga"
if "--winbond" in args:
writername = "flashwrite-winbond.ga"
args.remove("--winbond")
flashfile = args[0]
self.loadprogram(writername)
# print "\n".join(self.node['705'].listing)
im = open(flashfile, "rb")
offset = 0
ser = serial.Serial(port, 460800)
while True:
sector = im.read(4096)
if len(sector) == 0:
break
print "%4dK " % (offset / 1024),
payload = [offset / 64] + array.array('H', sector).tolist()
self.stow(payload)
self.send(ser)
ser.read(8)
print "OK"
offset += 4096
def recites(self):
# Return the recite nodes, in head-to-tail order
return [n for n in self.order if 'recite' in self.node[n].attr][::-1]
def stow(self, payload):
C = 61
assert (61 * len(self.recites())) >= len(payload)
for i,n in zip(range(0, len(payload), C), self.recites()):
self.node[n].load_pgm[3:] = payload[i:i + C]
if __name__ == '__main__':
port = sys.argv[1]
op = sys.argv[2]
if op == 'read':
FlashReader(port, *sys.argv[3:])
elif op == 'write':
FlashWriter(port, sys.argv[3:])
else:
print "unknown operation '%s'" % op
sys.exit(1)
|
# -*- coding: utf-8 -*-
"""Veil domain entity."""
import sys
from enum import Enum, IntEnum
from typing import List, Optional
from uuid import uuid4
try:
from aiohttp.client_reqrep import ClientResponse
except ImportError: # pragma: no cover
ClientResponse = None
from ..base import (VeilApiObject, VeilCacheConfiguration,
VeilRestPaginator, VeilRetryConfiguration)
from ..base.utils import (BoolType, NullableBoolType, NullableStringType,
NullableUuidStringType, StringType,
UuidStringType, VeilConfiguration,
argument_type_checker_decorator)
class DomainConfiguration(VeilConfiguration):
"""Simplified Veil domain description.
Structure for VeiL domain copy.
(resource_pool) and (node + datapool) are mutually exclusive parameters -> see mutually_check method # noqa: E501
Attributes:
verbose_name: domain verbose name.
resource_pool: VeiL resource pool id(uuid).
node: VeiL node id(uuid).
datapool: VeiL data-pool id(uuid).
parent: VeiL parent domain id(uuid).
thin: created domain should be a thin clone of parent domain.
"""
verbose_name = StringType('verbose_name')
resource_pool = NullableUuidStringType('resource_pool')
node = NullableUuidStringType('node')
datapool = NullableUuidStringType('datapool')
parent = UuidStringType('parent')
thin = BoolType('thin')
def __init__(self, verbose_name: str,
parent: str,
resource_pool: Optional[str] = None,
node: Optional[str] = None,
datapool: Optional[str] = None,
thin: bool = True,
count: int = 1
) -> None:
"""Please see help(DomainConfiguration) for more info."""
self.resource_pool = resource_pool
self.node = node
self.datapool = datapool
self.mutually_check()
self.verbose_name = verbose_name
self.parent = parent
self.thin = thin
self.count = count
self.domains_ids = [str(uuid4()) for i in range(count)]
def mutually_check(self):
"""Validate mutually exclusive parameters.
(resource_pool) and (node + datapool) are mutually exclusive parameters, so only one pair can be filled. # noqa: E501
"""
if self.resource_pool and (self.node or self.datapool):
raise ValueError(
'{} and ({} + {}) are mutually exclusive parameters, so only one pair can be filled.'.format( # noqa: E501
self.resource_pool, self.node, self.datapool))
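# Illustrative sketch (uuid values are placeholders): a thin clone of a parent template
# placed on a resource pool; node and datapool are omitted because they are mutually
# exclusive with resource_pool.
# conf = DomainConfiguration(verbose_name='demo-vm',
#                            parent='11111111-2222-3333-4444-555555555555',
#                            resource_pool='66666666-7777-8888-9999-000000000000',
#                            thin=True, count=2)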
class DomainUpdateConfiguration(VeilConfiguration):
"""Simplified Veil DomainUpdate description.
Attributes:
verbose_name: domain verbose name.
description: domain description.
os_type: domain os_type.
os_version: domain os_version.
tablet: tablet switcher.
start_on_boot: start on boot switcher.
spice_stream: spice stream switcher.
"""
verbose_name = NullableStringType('verbose_name')
description = NullableStringType('description')
os_type = NullableStringType('os_type')
os_version = NullableStringType('os_version')
tablet = NullableBoolType('tablet')
start_on_boot = NullableBoolType('start_on_boot')
spice_stream = NullableBoolType('spice_stream')
def __init__(self,
verbose_name: Optional[str] = None,
description: Optional[str] = None,
os_type: Optional[str] = None,
os_version: Optional[str] = None,
tablet: Optional[bool] = None,
start_on_boot: Optional[bool] = None,
spice_stream: Optional[bool] = None):
"""Please see help(DomainUpdateConfiguration) for more info."""
self.verbose_name = verbose_name
self.description = description
self.os_type = os_type
self.os_version = os_version
self.tablet = tablet
self.start_on_boot = start_on_boot
self.spice_stream = spice_stream
self.no_empty_check()
def no_empty_check(self):
"""Checking that there is something to change.""" # noqa: D401
if not self.notnull_attrs:
raise ValueError(
'All attributes are empty. Choose at least something.')
class MultiManagerAction(Enum):
"""Possible options for VeiL multi-manager."""
START = 'start'
SHUTDOWN = 'shutdown'
SUSPEND = 'suspend'
REBOOT = 'reboot'
RESUME = 'resume'
DELETE = 'delete'
MIGRATE = 'migrate'
class DomainPowerState(IntEnum):
"""Veil domain power states."""
UNDEFINED = 0
OFF = 1
SUSPENDED = 2
ON = 3
class DomainOsType:
"""Possible domain os types."""
WIN = 'Windows'
LINUX = 'Linux'
OTHER = 'Other'
class DomainTcpUsb:
"""Domain TCP USB device.
Attributes:
host: IP address of the client from which the device will be forwarded.
service: port of the client from which the device will be forwarded.
"""
def __init__(self, host: str, service: int):
"""Please see help(DomainTcpUsb) for more info."""
self.host = host
self.service = service
class VeilGuestAgentCmd(Enum):
"""Veil guest agent commands."""
LOCK_SCREEN = 'lock_screen'
LOGOFF = 'logoff'
SHUTDOWN = 'shutdown'
LOGIN = 'login'
HIBERNATE = 'hibernate'
SET_NUMBER_OF_CPUS = 'set_number_of_cpus'
ECHO = 'echo'
LIFECYCLE_EVENT = 'lifecycle_event'
MEMORY_STATS = 'memory_stats'
DISK_USAGES = 'disk_usages'
FREE_RAM = 'free_ram'
API_VERSION = 'api_version'
FQDN = 'fqdn'
USER_INFO = 'user_info'
TIMEZONE = 'timezone'
INFO = 'info'
OS_INFO = 'os_info'
APP_LIST = 'app_list'
class DomainGuestUtils:
"""Guest utils attributes."""
def __init__(self,
veil_state: bool = False,
qemu_state: bool = False,
version: Optional[str] = None,
hostname: Optional[str] = None,
ipv4: Optional[list] = None,
**_) -> None:
"""Please see help(DomainGuestUtils) for more info."""
self.veil_state = veil_state
self.qemu_state = qemu_state
self.version = version
self.hostname = hostname
self.ipv4 = ipv4
@property
def first_ipv4_ip(self):
"""First ipv4 address in list."""
if not isinstance(self.ipv4, list):
return None
if len(self.ipv4) > 0:
return self.ipv4[0]
@property
def apipa_problem(self):
"""All ipv4 addresses in 169.254.*.*.
Regex expression works dramatically slower.
"""
apipa_case = '169.254.'
if not isinstance(self.ipv4, list):
return None
if len(self.ipv4) == 0:
return None
return all(isinstance(ip, str) and apipa_case in ip for ip in self.ipv4)
class DomainBackupConfiguration(VeilConfiguration):
"""Domain backup options.
Attributes:
datapool: datapool_uid on which the backup will be created.
backup: backup_uid of previous backup file with activated can_be_incremental.
can_be_incremental: create a backup with the possibility of further increment.
increment: increments the last of the available backups with the ability to increment,
if there are none, then creates a new incremental backup.
exclude_iso: exclude attached to Domain (VM) ISO from backup.
Note:
1. for thin-clones backup mechanism prohibited on VeiL ECP.
2. datapool may be set only if backup is empty
"""
datapool = NullableUuidStringType('datapool')
backup = NullableUuidStringType('backup')
can_be_incremental = BoolType('can_be_incremental')
increment = BoolType('increment')
exclude_iso = BoolType('exclude_iso')
def __init__(self,
datapool: Optional[str] = None,
backup: Optional[str] = None,
can_be_incremental: Optional[bool] = False,
increment: Optional[bool] = False,
exclude_iso: Optional[bool] = True,
**_) -> None:
"""Please see help(VeilDomain) for more info.
:param _: may be save_files, limit_days - only for scheduled jobs. Ignored in client.
"""
self.datapool = datapool
self.backup = backup
self.can_be_incremental = can_be_incremental
self.increment = increment
self.exclude_iso = exclude_iso
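# Illustrative sketch (uuid is a placeholder): request a new backup on a given datapool
# that can later be incremented; per note 2 above, backup is left empty when datapool is set.
# backup_conf = DomainBackupConfiguration(datapool='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
#                                         can_be_incremental=True)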
class VeilDomain(VeilApiObject):
"""Veil domain entity.
Attributes:
client: https_client instance.
api_object_id: VeiL domain id(uuid).
resource_pool: Veil resource-pool id(uuid) for extra filtering.
cluster_id: VeiL cluster id(uuid) for extra filtering.
node_id: VeiL node id(uuid) for extra filtering.
data_pool_id: VeiL data-pool id(uuid) for extra filtering.
template: VeiL template sign. Int because of ujson limitations (only str or int).
"""
__API_OBJECT_PREFIX = 'domains/'
def __init__(self, client,
api_object_id: Optional[str] = None,
resource_pool: Optional[str] = None,
cluster_id: Optional[str] = None,
node_id: Optional[str] = None,
data_pool_id: Optional[str] = None,
template: Optional[int] = None,
retry_opts: Optional[VeilRetryConfiguration] = None,
cache_opts: Optional[VeilCacheConfiguration] = None) -> None:
"""Please see help(VeilDomain) for more info.
Arguments:
template: Boolean int (0|1).
"""
super().__init__(client,
api_object_id=api_object_id,
api_object_prefix=self.__API_OBJECT_PREFIX,
retry_opts=retry_opts,
cache_opts=cache_opts)
self.remote_access = None
self.verbose_name = None
self.node = None
self.parent = None
self.remote_access_port = None
self.real_remote_access_port = None
self.graphics_password = None
self.template = template
self.os_type = None
self.os_version = None
self.user_power_state = 0
self.guest_utils = None
self.cpu_topology = None
self.cpu_count = None
self.tags = None
self.owners = None
self.sound = None
self.cpu_type = None
self.memory_count = None
self.vdisks = None
self.video = None
self.is_ova = None
self.cdroms = None
# resource_pool, cluster_id, node_id or data_pool_id can be UUID.
self.resource_pool = str(resource_pool) if resource_pool else None
self.cluster_id = str(cluster_id) if cluster_id else None
self.node_id = str(node_id) if node_id else None
self.data_pool_id = str(data_pool_id) if data_pool_id else None
@property
def cpu_count_prop(self):
"""Get cpu_count from cpu_topology dict or self.cpu_count."""
if isinstance(self.cpu_topology, dict) and self.cpu_topology.get('cpu_count'):
return self.cpu_topology['cpu_count']
return self.cpu_count
@property
def parent_name(self):
"""Get parent Domain name."""
if isinstance(self.parent, dict):
return self.parent.get('verbose_name')
@property
def parent_uuid(self):
"""Get parent Domain UUID."""
if isinstance(self.parent, dict):
return self.parent.get('id')
@property
def guest_agent(self):
"""Verbose domain guest utils."""
return DomainGuestUtils(**self.guest_utils) if self.guest_utils else None
@property
def qemu_state(self):
"""Guest agent qemu state."""
return self.guest_agent.qemu_state if self.guest_agent else None
@property
def first_ipv4(self):
"""First ipv4 address."""
return self.guest_agent.first_ipv4_ip if self.guest_agent else None
@property
def power_state(self):
"""Verbose domain power state."""
return DomainPowerState(self.user_power_state)
@property
def powered(self):
"""Domain is power state is ON."""
return self.power_state == DomainPowerState.ON
@property
def os_windows(self):
"""Domain (VM) has Windows OS."""
return self.os_type == DomainOsType.WIN
@property
def hostname(self):
"""Guest utils hostname value."""
return self.guest_agent.hostname if self.guest_agent else None
@property
def apipa_problem(self):
"""Guest utils apipa_problem value."""
return self.guest_agent.apipa_problem if self.guest_agent else None
@property
async def in_ad(self):
"""Windows domain (VM) already in AD."""
print(
'\nWARNING: in_ad scheduled for removal in 2.2.2 '
'use is_in_ad method.\n',
file=sys.stderr,
)
return await self.is_in_ad()
async def is_in_ad(self) -> bool:
"""Windows domain (VM) already in AD."""
qemu_guest_command = {'path': 'powershell.exe',
'arg': ['(Get-WmiObject Win32_ComputerSystem).PartOfDomain']}
response = await self.guest_command(qemu_cmd='guest-exec', f_args=qemu_guest_command)
if response.status_code == 200 and response.value:
guest_exec_response = response.value.get('guest-exec', dict()).get('out-data', 'False') # noqa: E501
if isinstance(guest_exec_response, str):
guest_exec_response = guest_exec_response.strip()
return guest_exec_response == 'True'
return False
def action_url(self, action: str) -> str:
"""Build domain action full url."""
return self.api_object_url + action
@argument_type_checker_decorator
async def guest_command(self, veil_cmd: VeilGuestAgentCmd = None,
qemu_cmd: str = None,
f_args: dict = None,
timeout: int = 5):
"""Guest agent commands endpoint."""
url = self.api_object_url + 'guest-command/'
body = dict()
if veil_cmd:
body['veil_cmd'] = veil_cmd.value
if qemu_cmd:
body['qemu_cmd'] = qemu_cmd
if f_args:
body['fargs'] = f_args
if timeout:
body['timeout'] = timeout
response = await self._post(url=url, json_data=body)
return response
async def set_hostname(self, hostname: str = None):
"""Set domain hostname on VeiL ECP."""
url = self.api_object_url + 'set-hostname/'
domain_hostname = hostname if hostname else self.verbose_name
body = dict(hostname=domain_hostname)
response = await self._post(url=url, json_data=body)
return response
async def add_to_ad(self,
domain_name: str,
login: str,
password: str,
restart: bool = True,
new_name: Optional[str] = None) -> 'ClientResponse':
"""Add domain (VM) to AD.
domain_name: str Specifies the domain to which the computers are added
login: str AD user which can add VM to domain
password: str AD user password
restart: bool restart VM that was added to the domain or workgroup
new_name: str Specifies a new name for the computer in the new domain
"""
url = self.api_object_url + 'add-to-ad/'
body = dict(domainname=domain_name, login=login, password=password)
if restart:
body['restart'] = 1
if new_name:
body['newname'] = new_name
response = await self._post(url=url, json_data=body)
return response
async def rm_from_ad(self,
login: str,
password: str,
restart: bool = True) -> 'ClientResponse':
"""Remove domain (VM) from AD."""
url = self.api_object_url + 'rm-from-ad/'
body = dict(login=login, password=password)
if restart:
body['restart'] = 1
response = await self._post(url=url, json_data=body)
return response
async def add_to_ad_group(self, computer_name: str,
domain_username: str,
domain_password: str,
cn_pattern: str):
"""Add a domain to one or more Active Directory groups."""
print(
'\nWARNING: add_to_ad_group method scheduled for removal in 2.3 '
'use your own LDAP command, like '
'extend.microsoft.add_members_to_groups\n',
file=sys.stderr,
)
credential_value = '$(New-Object System.Management.Automation.PsCredential("{}", \
$(ConvertTo-SecureString -String "{}" -AsPlainText -Force)))'.format( # noqa: E501
domain_username, domain_password)
get_computer_filter = "Get-ADComputer -Identity '{}' -Properties 'SID' -Credential {}".format(computer_name, # noqa: E501
credential_value) # noqa: E501
add_group_filter = "Add-ADPrincipalGroupMembership -MemberOf '{}' -Credential {}".format(cn_pattern, # noqa: E501
credential_value) # noqa: E501
qemu_guest_command = {'path': 'powershell.exe', 'arg': [get_computer_filter, '|', add_group_filter]} # noqa: E501
return await self.guest_command(qemu_cmd='guest-exec', f_args=qemu_guest_command)
async def attach_usb(self, action_type: Optional[str] = None,
usb: Optional[dict] = None,
usb_controller: Optional[dict] = None,
tcp_usb: Optional[DomainTcpUsb] = None,
no_task: bool = False):
"""Attach usb devices to VM."""
if action_type is None:
action_type = 'tcp_usb_device'
url = self.api_object_url + 'attach-usb/'
body = dict(action_type=action_type)
if usb and isinstance(usb, dict):
body['usb'] = usb
if usb_controller and isinstance(usb_controller, dict):
body['usb_controller'] = usb_controller
if tcp_usb:
body['tcp_usb'] = tcp_usb.__dict__
extra_params = {'async': 0} if no_task else None
response = await self._post(url=url, json_data=body, extra_params=extra_params)
return response
async def detach_usb(self, action_type: Optional[str] = None,
controller_order: Optional[int] = None,
usb: Optional[str] = None,
remove_all: bool = True):
"""Detach usb devices from VM."""
if action_type is None:
action_type = 'tcp_usb_device'
url = self.api_object_url + 'detach-usb/'
body = dict(action_type=action_type)
if controller_order and isinstance(controller_order, int):
body['controller_order'] = controller_order
if usb and isinstance(usb, str):
body['usb'] = usb
if remove_all:
body['remove_all'] = 1
response = await self._post(url=url, json_data=body)
return response
async def start(self, force: bool = False) -> 'ClientResponse':
"""Send domain action 'start'."""
url = self.action_url('start/')
body = dict(force=force)
response = await self._post(url=url, json_data=body)
return response
async def reboot(self, force: bool = False) -> 'ClientResponse':
"""Send domain action 'reboot'."""
url = self.action_url('reboot/')
body = dict(force=force)
response = await self._post(url=url, json_data=body)
return response
async def suspend(self, force: bool = False) -> 'ClientResponse':
"""Send domain action 'suspend'."""
url = self.action_url('suspend/')
body = dict(force=force)
response = await self._post(url=url, json_data=body)
return response
async def reset(self, force: bool = False) -> 'ClientResponse':
"""Send domain action 'reset'."""
url = self.action_url('reset/')
body = dict(force=force)
response = await self._post(url=url, json_data=body)
return response
async def shutdown(self, force: bool = False) -> 'ClientResponse':
"""Send domain action 'shutdown'."""
url = self.action_url('shutdown/')
body = dict(force=force)
response = await self._post(url=url, json_data=body)
return response
async def resume(self, force: bool = False) -> 'ClientResponse':
"""Send domain action 'resume'."""
url = self.action_url('resume/')
body = dict(force=force)
response = await self._post(url=url, json_data=body)
return response
async def remote_access_action(self, enable: bool = True) -> 'ClientResponse':
"""Send domain action 'remote-action'."""
url = self.api_object_url + 'remote-access/'
body = dict(remote_access=enable)
response = await self._post(url, json_data=body)
return response
async def enable_remote_access(self) -> 'ClientResponse':
"""Enable domain remote-access."""
return await self.remote_access_action(enable=True)
async def disable_remote_access(self) -> 'ClientResponse':
"""Disable domain remote-access."""
return await self.remote_access_action(enable=False)
@argument_type_checker_decorator
async def create(self, domain_configuration: DomainConfiguration) -> 'ClientResponse':
"""Run multi-create-domain on VeiL ECP."""
url = self.base_url + 'multi-create-domain/'
response = await self._post(url=url, json_data=domain_configuration.notnull_attrs)
return response
@argument_type_checker_decorator
async def update(self,
domain_update_configuration: DomainUpdateConfiguration) -> 'ClientResponse': # noqa: E501
"""Run VeiL ECP domain update endpoint."""
url = self.api_object_url
response = await self._put(url=url,
json_data=domain_update_configuration.notnull_attrs)
return response
async def update_verbose_name(self, verbose_name: str):
"""Interface for Veil ECP domain update endpoint."""
conf = DomainUpdateConfiguration(verbose_name=verbose_name)
return await self.update(domain_update_configuration=conf)
async def remove(self, full: bool = True, force: bool = False) -> 'ClientResponse':
"""Remove domain instance on VeiL ECP."""
url = self.action_url('remove/')
body = dict(full=full, force=force)
response = await self._post(url=url, json_data=body)
return response
async def list(self, with_vdisks: bool = True, # noqa: A003
paginator: VeilRestPaginator = None,
fields: List[str] = None,
params: dict = None) -> 'ClientResponse':
"""Get list of data_pools with node_id filter.
By default get only domains with vdisks.
"""
extra_params = dict(with_vdisks=int(with_vdisks))
if self.resource_pool:
extra_params['resource_pool'] = self.resource_pool
if self.cluster_id:
extra_params['cluster'] = self.cluster_id
if self.node_id:
extra_params['node'] = self.node_id
if self.data_pool_id:
extra_params['datapool'] = self.data_pool_id
if isinstance(self.template, bool):
# ujson can't serialize booleans
extra_params['template'] = int(self.template)
elif isinstance(self.template, int):
extra_params['template'] = self.template
# Additional request parameters
if fields and isinstance(fields, list):
extra_params['fields'] = ','.join(fields)
if params:
extra_params.update(params)
return await super().list(paginator=paginator, extra_params=extra_params)
async def __multi_manager(self, action: MultiManagerAction,
entity_ids: List[str],
full: bool,
force: bool) -> 'ClientResponse':
"""Multi manager with action.
Possible actions:
start
shutdown
suspend
reboot
resume
delete
migrate
"""
url = self.base_url + 'multi-manager/'
body = dict(full=full, force=force, entity_ids=entity_ids, action=action.value)
response = await self._post(url=url, json_data=body)
return response
async def multi_start(self, entity_ids: List[str],
full: bool = True,
force: bool = False) -> 'ClientResponse':
"""Multi start domain instance on VeiL ECP."""
return await self.__multi_manager(action=MultiManagerAction.START,
entity_ids=entity_ids,
full=full,
force=force)
async def multi_shutdown(self, entity_ids: List[str],
full: bool = True,
force: bool = False) -> 'ClientResponse':
"""Multi shutdown domain instance on VeiL ECP."""
return await self.__multi_manager(action=MultiManagerAction.SHUTDOWN,
entity_ids=entity_ids,
full=full,
force=force)
async def multi_suspend(self, entity_ids: List[str],
full: bool = True,
force: bool = False) -> 'ClientResponse':
"""Multi suspend domain instance on VeiL ECP."""
return await self.__multi_manager(action=MultiManagerAction.SUSPEND,
entity_ids=entity_ids,
full=full,
force=force)
async def multi_reboot(self, entity_ids: List[str],
full: bool = True,
force: bool = False) -> 'ClientResponse':
"""Multi reboot domain instance on VeiL ECP."""
return await self.__multi_manager(action=MultiManagerAction.REBOOT,
entity_ids=entity_ids,
full=full,
force=force)
async def multi_resume(self, entity_ids: List[str],
full: bool = True,
force: bool = False) -> 'ClientResponse': # noqa: E501
"""Multi resume domain instance on VeiL ECP."""
return await self.__multi_manager(action=MultiManagerAction.RESUME,
entity_ids=entity_ids,
full=full,
force=force)
async def multi_remove(self, entity_ids: List[str],
full: bool = True,
force: bool = False) -> 'ClientResponse':
"""Multi remove domain instance on VeiL ECP."""
return await self.__multi_manager(action=MultiManagerAction.DELETE,
entity_ids=entity_ids,
full=full,
force=force)
async def multi_migrate(self, entity_ids: List[str],
full: bool = True,
force: bool = False) -> 'ClientResponse':
"""Multi migrate domain instance on VeiL ECP."""
return await self.__multi_manager(action=MultiManagerAction.MIGRATE,
entity_ids=entity_ids,
full=full,
force=force)
async def backup(self, configuration: DomainBackupConfiguration):
"""Create domain backup."""
url = ''.join([self.base_url, 'backup/'])
data = configuration.notnull_attrs
data['domain'] = self.api_object_id
return await self._post(url=url, json_data=data)
async def show_backup(self, file_id: str):
"""A serialized VM representation from a VeiL ECP or OVA backup.""" # noqa: D401
url = ''.join([self.base_url, 'show-backup/'])
data = {'file': file_id}
return await self._post(url=url, json_data=data)
async def automated_restore(self, file_id: str, node_id: str, datapool_id: str = None):
"""Automatically restore VM from backup."""
url = ''.join([self.base_url, 'automated-restore/'])
data = {'file': file_id, 'node': node_id}
if datapool_id:
data['datapool'] = datapool_id
return await self._post(url=url, json_data=data)
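# --- Hypothetical usage sketch (not part of the original client) ---
# Assumes `domain_client` is an already-constructed instance of this domain API
# class with valid connection settings; all identifiers below are illustrative.
#
# async def power_cycle_example(domain_client):
#     await domain_client.start()
#     await domain_client.reboot(force=True)
#     # bulk operations take a list of domain ids
#     await domain_client.multi_start(entity_ids=['<domain-id-1>', '<domain-id-2>'])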
|
#!/usr/bin/env python
#
# Jiao Lin <jiao.lin@gmail.com>
#
from mcvine.applications.InstrumentBuilder import build
components = ['source', 'sample', 'monitor']
App = build(components)
name = "sqekernel-test"
if __name__ == '__main__': App(name).run()
# End of file
|
import unittest
from centralized_pre_commit_conf.update_gitignore import get_updated_gitignore_content
GITIGNORE_INFO_TEXT = "# fervpierpvjepvjpvjepvjperjverpovpeorvpor"
class TestUpdateGitignore(unittest.TestCase):
def test_nothing(self):
text, mode = get_updated_gitignore_content("", set(["a", "b", "c"]), GITIGNORE_INFO_TEXT)
self.assertEqual(mode, "a")
self.assertEqual(
text,
f"""{GITIGNORE_INFO_TEXT}
a
b
c
""",
)
def test_something(self):
text, mode = get_updated_gitignore_content("d\ne\n", set(["a", "b", "c"]), GITIGNORE_INFO_TEXT)
self.assertEqual(mode, "a")
self.assertEqual(
text,
f"""
{GITIGNORE_INFO_TEXT}
a
b
c
""",
)
def test_old_cppc_data(self):
text, mode = get_updated_gitignore_content(
f"d\ne\n{GITIGNORE_INFO_TEXT}\nf\ng\n", set(["a", "b", "c"]), GITIGNORE_INFO_TEXT
)
self.assertEqual(mode, "w")
self.assertEqual(
text,
f"""d
e
{GITIGNORE_INFO_TEXT}
a
b
c
f
g
""",
)
def test_duplicated_old_cppc_data(self):
text, mode = get_updated_gitignore_content(
f"""d
e
{GITIGNORE_INFO_TEXT}
f
g
{GITIGNORE_INFO_TEXT}
h
i
""",
set(["a", "b", "c"]),
GITIGNORE_INFO_TEXT,
)
self.assertEqual(mode, "w")
self.assertEqual(
text,
f"""d
e
{GITIGNORE_INFO_TEXT}
a
b
c
f
g
h
i
""",
)
def test_real_data(self):
text, mode = get_updated_gitignore_content(
f""".idea/
*.egg-info/
{GITIGNORE_INFO_TEXT}
.isort.cfg
.pylintrc
.flake8
.pre-commit-config.yaml
build/
dist/
""",
set([".clang-format", ".clang-tidy", ".csslintrc"]),
GITIGNORE_INFO_TEXT,
)
self.assertEqual(mode, "w")
self.assertEqual(
text,
f""".idea/
*.egg-info/
{GITIGNORE_INFO_TEXT}
.clang-format
.clang-tidy
.csslintrc
.isort.cfg
.pylintrc
.flake8
.pre-commit-config.yaml
build/
dist/
""",
)
|
#!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import time
import tornado.escape
import utils
import config
from utils import *
from tornado.options import options
from mongohandler import MongoHandler
api_logger = config.getlog()
stormuiapi = config.getStormUIAPI()
class RunTopology(BaseHandler):
"""
Launch topology on cluster.
Params:
name: string
xml: string
reload: bool
"""
def prepare(self):
content_type = self.request.headers.get("Content-Type", "")
if content_type.startswith("application/json"):
self.arguments = tornado.escape.json_decode(self.request.body)
else:
self.arguments = None
def post(self):
if self.arguments:
api_logger.info("HEADERS: " + str(self.request))
# Parse each param
data = self.arguments
if 'name' not in list(data.keys()) or 'xml' not in list(data.keys()) or 'reload' not in list(data.keys()):
api_logger.error("Error requests params")
self.finish('{"result":"error","description":"Error requests params"}')
else:
try:
t_name = data['name']
t_xml = data['xml']
api_logger.info("XML: " + str(t_xml))
t_reload = data['reload']
except Exception as e:
api_logger.error("Error requests params" + str(e))
self.write('{"result":"error","description":"Error requests params","debug":"' + str(e) + '"}')
u = Utils()
try:
# Create folder (named as topology name) if not exists
u.checkAndcreate(options.storm_topology_path + str(t_name), "storm", "storm")
try:
# Create folder /var/storm/topologies/{name}/config if not exists
u.checkAndcreate(options.storm_topology_path + str(t_name) + options.storm_topology_config_path,
"storm", "storm")
try:
# Create folder /data/storm/topologies/{name}/jar if not exists
u.checkAndcreate(
options.storm_topology_data_path + str(t_name) + options.storm_topology_jar_path, "storm",
"storm")
except Exception as e:
api_logger.error("Error on Create folder " + options.storm_topology_data_path + str(
t_name) + options.storm_topology_jar_path + " if not exists. " + str(e))
except Exception as e:
api_logger.error("Error on Create folder " + options.storm_topology_path + str(
t_name) + options.storm_topology_config_path + " if not exists. " + str(e))
except Exception as e:
api_logger.error("Error on Create folder (named as topology name) if not exists. " + str(e))
if t_reload:
# Use global jar /var/storm/lastjar/file.jar
jar_path = options.storm_global_jar_path + options.storm_global_jar_bin
else:
# Use last jar /var/storm/topologies/{name}/jar/file.jar
jar_path = options.storm_topology_data_path + str(
t_name) + options.storm_topology_jar_path + options.storm_global_jar_bin
#
# Create xml file
# /var/storm/{topologyname}/config/
try:
with open(options.storm_topology_path + str(t_name) + options.storm_topology_config_path + str(
t_name) + ".xml", "w") as builder_file:
builder_file.write(str(t_xml))
except Exception as e:
api_logger.error("Error creating config file. " + str(e))
self.finish('{"result":"error","description":"Error creating config file."}')
# Launch topology
# <STORM_BINARY_PATH>/storm
# jar /var/storm/lastjar/sinfonier-community-1.0.0.jar com.sinfonier.DynamicTopology
# /var/storm/{topologyname}/config/TOPOLOGY_NAME.xml TOPOLOGY_NAME
cmd_launch = options.storm_binary + " jar " + jar_path + " com.sinfonier.DynamicTopology " + \
options.storm_topology_path + str(t_name) + options.storm_topology_config_path + str(t_name) + ".xml " + str(t_name)
cmd = execCommand(cmd_launch)
# Get output and error
(output, err) = cmd.execute()
if err:
try:
error = err.split("\n")[0].replace('"', '#')
error = error.split("\n")[0].replace('`', '#')
except Exception as e:
api_logger.error("Error getting exception from JAVA.")
error = "Unkown error"
api_logger.error("Error executing launching topology command. " + str(err))
self.finish(
'{"result":"error","description":"Error launching topology", "detail":"' + str(error) + '"}')
else:
api_logger.debug("Command output: " + str(output))
errorDebug = False
warDebug = False
debugStack = ""
warStack = ""
fDate = ""
for line in output.splitlines():
if "ERROR" in line:
errorDebug = True
api_logger.error("Found ERROR TAG: Error launching topology. " + str(dedugStack))
dedugStack = line
break
if errorDebug:
self.finish('{"result":"error","description":"Error launching topology", "detail":"' + str(
dedugStack) + '"}')
else:
# Change state on mongo
moncon = MongoHandler()
moncon.mongoStartCon()
moncon.updateRunningState(t_name)
moncon.mongoStopCon()
self.finish('{"result":"success","description":"Topology running"}')
else:
api_logger.error("Content-Type:application/json missing")
self.finish('{"result":"error","description":"Content-Type:application/json missing"}')
class StopTopology(BaseHandler):
"""
Stop topology on cluster.
Params:
name
"""
def prepare(self):
content_type = self.request.headers.get("Content-Type", "")
if content_type.startswith("application/json"):
self.arguments = tornado.escape.json_decode(self.request.body)
else:
self.arguments = None
def post(self):
if self.arguments:
# Parse each param
data = self.arguments
if 'name' not in list(data.keys()):
self.finish('{"result":"error","description":"Error requests params"}')
else:
t_name = ""
try:
t_name = data['name']
except Exception as e:
api_logger.error("Error requests params: " + str(e))
self.write('{"result":"error","description":"Error requests params","debug":"' + str(e) + '"}')
## Get Topology ID
topo_info = stormuiapi.getTopologySummaryByName(t_name)
if topo_info:
topo_id = topo_info["id"]
api_logger.debug("Stopping topology " + t_name)
response = stormuiapi.killTopology(topo_id, "0")
if response["status"] == "KILLED":
# Change state on mongo
moncon = MongoHandler()
moncon.mongoStartCon()
moncon.updateStoppedState(t_name)
moncon.mongoStopCon()
# Delete worker file
try:
os.remove(options.storm_topology_path + t_name + "/worker")
except:
api_logger.debug("Error removing worker file of topology " + t_name + ". Continuing...")
self.finish('{"result":"success","description":"Topology stopped"}')
else:
api_logger.error("Topology '" + t_name + "' couldn't be stopped")
api_logger.error(json.dumps(response))
self.write(
'{"result":"error","description":"Topology ' + t_name + ' could not be stopped",'
'"debug":""}')
else:
api_logger.error("Topology '" + t_name + "' not found")
self.write('{"result":"error","description":"Topology '"+t_name+"' not found","debug":""}')
else:
self.finish('{"result":"error","description":"Content-Type:application/json missing"}')
class UpdateTopology(BaseHandler):
"""
Update topology running.
Params:
name: string
xml: string
reload: bool
"""
def prepare(self):
content_type = self.request.headers.get("Content-Type", "")
if content_type.startswith("application/json"):
self.arguments = tornado.escape.json_decode(self.request.body)
else:
self.arguments = None
def post(self):
if self.arguments:
# Parse each param
data = self.arguments
if 'name' not in list(data.keys()) or 'xml' not in list(data.keys()) or 'reload' not in list(data.keys()):
self.finish('{"result":"error","description":"Error requests params"}')
else:
try:
t_name = data['name']
t_xml = data['xml']
t_reload = data['reload']
except Exception as e:
api_logger.error("Error requests params: " + str(e))
self.write('{"result":"error","description":"Error requests params","debug":"' + str(e) + '"}')
## STOP TOPOLOGY
topo_info = stormuiapi.getTopologySummaryByName(t_name)
api_logger.debug("Stopping topology " + t_name)
if topo_info:
topo_id = topo_info["id"]
response = stormuiapi.killTopology(topo_id, "0")
if response["status"] == "KILLED":
## Topology Killed
# Change state on mongo
moncon = MongoHandler()
moncon.mongoStartCon()
moncon.updateState(t_name, "hold")
for i in range(4):
response = stormuiapi.getTopologySummaryByName(t_name)
if not response:
api_logger.info("Topology " + t_name + " killed.")
break
time.sleep(5)
else:
api_logger.debug("Topology " + t_name + " couldn't be stopped on time. Waiting 5 sec more...")
time.sleep(5)
if t_reload == "true":
# Use global jar /var/storm/lastjar/file.jar
jar_path = options.storm_global_jar_path + options.storm_global_jar_bin
else:
# Use last jar /var/storm/topologies/{name}/jar/file.jar
jar_path = options.storm_topology_data_path + str(
t_name) + options.storm_topology_jar_path + options.storm_global_jar_bin
#
# Create xml file
# /var/storm/{topologyname}/config/
try:
with open(options.storm_topology_path + str(t_name) + options.storm_topology_config_path + str(
t_name) + ".xml", "w") as builder_file:
builder_file.write(str(t_xml))
except Exception:
self.finish('{"result":"error","description":"Internal error"}')
# Launch topology
# /opt/sinfonier/storm/bin/storm
# jar /var/storm/lastjar/sinfonier-community-1.0.0.jar com.sinfonier.DynamicTopology
# /var/storm/{topologyname}/config/TOPOLOGY_NAME.xml TOPOLOGY_NAME
api_logger.debug("Restarting topology " + t_name + ".")
cmd_launch = options.storm_binary + " jar " + jar_path + " com.sinfonier.DynamicTopology " + \
options.storm_topology_path + str(
t_name) + options.storm_topology_config_path + str(t_name) + ".xml " + str(t_name)
cmd = execCommand(cmd_launch)
# Get output and error
(output, err) = cmd.execute()
if err:
# Change state on mongo
moncon = MongoHandler()
moncon.mongoStartCon()
moncon.updateStoppedState(t_name)
moncon.mongoStopCon()
self.finish(
'{"result":"error","description":"Error launching topology","detail":"' + str(err) + '"}')
else:
api_logger.info("Topology " + t_name + " updated successfully")
# Change state on mongo
moncon = MongoHandler()
moncon.mongoStartCon()
moncon.updateRunningState(t_name)
moncon.mongoStopCon()
self.write('{"result":"success","description":"Topology restarted."}')
else:
# TOPOLOGY NOT KILLED
self.finish('{"result":"error","description":"Error stopping topology","detail":"Not killed"}')
else:
self.finish('{"result":"error","description":"Content-Type:application/json missing"}')
|
""" User Management Module
This module reads the 'users.conf' file and gets all users' info.
"""
__all__ = ["UserMgr"]
import ConfigParser
class UserMgr:
"""User Manager
The format of the user_info is:
user_info = {
"username": "maple",
"password": "valley",
"ethernet_interface": "eth0",
"dhcp_command": "dhcpcd",
"daemon": True,
# the following have not been implemented yet
"carry_version_info": True,
"broadcast_logoff": False,
"packet_type": "unicast"
}
"""
def __init__(self, path=None):
if path is None:
self.users_cfg_path = '/etc/yah3c.conf'
else:
self.users_cfg_path = path
self.config = ConfigParser.ConfigParser()
self.config.read(self.users_cfg_path)
def save_and_reload(self):
fp = open(self.users_cfg_path, 'w')
self.config.write(fp)
fp.close()
self.config.read(self.users_cfg_path)
def get_user_number(self):
return len(self.config.sections())
def get_all_users_info(self):
users_info = []
for username in self.config.sections():
user_info = dict(self.config.items(username))
user_info['username'] = username
users_info.append(user_info)
return users_info
def get_user_info(self, username):
user_info = dict(self.config.items(username))
user_info['username'] = username
return user_info
def add_user(self, user_info):
self.config.add_section(user_info['username'])
self.update_user_info(user_info)
def remove_user(self, username):
self.config.remove_section(username)
self.save_and_reload()
def update_user_info(self, user_info):
self.config.set(user_info['username'], 'password',
user_info['password'])
self.config.set(user_info['username'], 'ethernet_interface',
user_info['ethernet_interface'])
self.config.set(user_info['username'], 'dhcp_command',
user_info['dhcp_command'])
self.config.set(user_info['username'], 'daemon',
user_info['daemon'])
self.save_and_reload()
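# --- Hypothetical usage sketch (assumes a writable config path; Python 2 module) ---
# mgr = UserMgr(path='/tmp/yah3c_demo.conf')
# mgr.add_user({'username': 'maple',
#               'password': 'valley',
#               'ethernet_interface': 'eth0',
#               'dhcp_command': 'dhcpcd',
#               'daemon': True})
# print mgr.get_user_info('maple')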
|
"""Units of measure module."""
from pathlib import Path
import json
from functools import lru_cache
# Bifrost weights and measures module
# Temporary version based on pagoda code
# todo: Replace with RESQML uom based version at a later date
version = '5th May 2021'
# physical constants
feet_to_metres = 0.3048
metres_to_feet = 1.0 / feet_to_metres
ft3_to_m3 = 0.028316846592
m3_to_ft3 = 1.0 / ft3_to_m3
bbl_to_m3 = 0.158987294928
m3_to_bbl = 1.0 / bbl_to_m3
psi_to_kPa = 44.482216152605 / 6.4516
kPa_to_psi = 1.0 / psi_to_kPa
def rq_uom(units):
"""Returns RESQML uom string equivalent to units, or 'Euc' if not determined."""
if not isinstance(units, str): return 'Euc'
if units == '' or units == 'Euc': return 'Euc'
ul = units.lower()
if ul in ['m', 'ft', 'm3', 'ft3', 'm3/m3', 'ft3/ft3', 'bbl', 'bar', 'psi', 'm3/d', 'bbl/d']: return ul
if ul in ['m', 'metre', 'metres', 'meter']: return 'm'
if ul in ['ft', 'foot', 'feet', 'ft[us]']: return 'ft' # NB. treating different foot sizes as identical
if units == 'd' or ul in ['days', 'day']: return 'd'
if units in ['kPa', 'Pa', 'mD']: return units
if ul in ['psi', 'psia']: return 'psi'
if ul in ['1000 bbl', 'mstb', 'mbbl']: return '1000 bbl'
if ul in ['bbl', 'stb']: return 'bbl'
if ul in ['1e6 bbl', 'mmstb', 'mmbbl']: return '1E6 bbl'
if ul in ['1e6 ft3', 'mmscf']: return '1E6 ft3'
if ul in ['1000 ft3', 'mscf']: return '1000 ft3'
if ul in ['ft3', 'scf']: return 'ft3'
if ul in ['bbl/d', 'stb/d', 'bbl/day', 'stb/day']: return 'bbl/d'
if ul in ['1000 bbl/d', 'mstb/day', 'mbbl/day', 'mstb/d', 'mbbl/d']: return '1000 bbl/d'
if ul in ['1e6 bbl/d', 'mmstb/day', 'mmbbl/day', 'mmstb/d', 'mmbbl/d']: return '1E6 bbl/d'
if ul in ['1000 ft3/d', 'mscf/day', 'mscf/d']: return '1000 ft3/d'
if ul in ['ft3/d', 'scf/day', 'scf/d']: return 'ft3/d'
if ul in ['1e6 ft3/d', 'mmscf/day', 'mmscf/d']: return '1E6 ft3/d'
if ul in ['ft3/bbl', 'scf/bbl', 'ft3/stb', 'scf/stb']: return 'ft3/bbl'
if ul in ['1000 ft3/bbl', 'mscf/bbl', 'mscf/stb']: return '1000 ft3/bbl'
if ul in ['m3', 'sm3']: return 'm3'
if ul in ['m3/d', 'm3/day', 'sm3/d', 'sm3/day']: return 'm3/d'
if ul == '1000 m3': return '1000 m3'
if ul in ['1000 m3/d', '1000 m3/day']: return '1000 m3/d'
if ul == '1e6 m3': return '1E6 m3'
if ul in ['1e6 m3/d', '1e6 m3/day']: return '1E6 m3/d'
if units in ['mD.m', 'mD.ft']: return units
if ul == 'count': return 'Euc'
uom_list = properties_data()['uoms']
if units in uom_list: return units
if ul in uom_list: return ul # dangerous! for example, 'D' means D'Arcy and 'd' means day
return 'Euc'
def rq_uom_list(units_list):
"""Returns a list of RESQML uom equivalents for units in list."""
rq_list = []
for u in units_list: rq_list.append(rq_uom(u))
return rq_list
def p_length_unit(units):
"""Returns length units string as expected by pagoda weights and measures module."""
# NB: other length units are supported by resqml
if units.lower() in ['m', 'metre', 'metres']: return 'metres'
if units.lower() in ['ft', 'foot', 'feet', 'ft[us]']: return 'feet'
assert(False) # unrecognised length units
def rq_length_unit(units):
"""Returns length units string as expected by resqml."""
# NB: other length units are supported by resqml
if units.lower() in ['m', 'metre', 'metres']: return 'm'
if units.lower() in ['ft', 'foot', 'feet', 'ft[us]']: return 'ft' # NB. treating different foot sizes as identical
raise ValueError(f'unrecognised length units {units}')
def p_time_unit(units):
"""Returns human readable version of time units string."""
# NB: other time units are supported by resqml
if units.lower() in ['s', 'sec', 'secs', 'second', 'seconds']: return 'seconds'
if units.lower() in ['ms', 'msec', 'millisecs', 'millisecond', 'milliseconds']: return 'milliseconds'
if units.lower() in ['min', 'mins', 'minute', 'minutes']: return 'minutes'
if units.lower() in ['h', 'hr', 'hour', 'hours']: return 'hours'
if units.lower() in ['wk', 'week', 'weeks']: return 'weeks'
if units.lower() in ['a', 'yr', 'year', 'years']: return 'years'
assert(False) # unrecognised time units
def rq_time_unit(units):
"""Returns time units string as expected by resqml."""
# NB: other time units are supported by resqml
if units.lower() in ['s', 'sec', 'secs', 'second', 'seconds']: return 's'
if units.lower() in ['ms', 'msec', 'millisecs', 'millisecond', 'milliseconds']: return 'ms'
if units.lower() in ['min', 'mins', 'minute', 'minutes']: return 'min'
if units.lower() in ['h', 'hr', 'hour', 'hours']: return 'h'
if units.lower() in ['wk', 'week', 'weeks']: return 'wk'
if units.lower() in ['a', 'yr', 'year', 'years']: return 'a'
assert(False) # unrecognised time units
def convert_lengths(a, from_units, to_units):
"""Converts values in numpy array (or a scalar) from one length unit to another, in situ if array.
arguments:
a (numpy float array, or float): array of length values to undergo unit conversion in situ, or a scalar
from_units (string): 'm', 'metres', 'ft' or 'feet' being the units of the data before conversion
to_units (string): 'm', 'metres', 'ft' or 'feet' being the required units
returns:
a after unit conversion
"""
from_units = rq_length_unit(from_units)
to_units = rq_length_unit(to_units)
if from_units == to_units: return a
if from_units == 'ft' and to_units == 'm': a *= feet_to_metres
elif from_units == 'm' and to_units == 'ft': a *= metres_to_feet
else: raise ValueError('unsupported length unit conversion')
return a
def convert_pressures(a, from_units, to_units):
"""Converts values in numpy array (or a scalar) from one pressure unit to another, in situ if array.
arguments:
a (numpy float array, or float): array of pressure values to undergo unit conversion in situ, or a scalar
from_units (string): 'kPa', 'Pa', 'bar' or 'psi' being the units of the data before conversion
to_units (string): 'kPa', 'Pa', 'bar' or 'psi' being the required units
returns:
a after unit conversion
"""
from_units = rq_uom(from_units)
to_units = rq_uom(to_units)
assert from_units in ['kPa', 'Pa', 'bar', 'psi'] and to_units in ['kPa', 'Pa', 'bar', 'psi']
if from_units == to_units: return a
if from_units in ['kPa', 'Pa', 'bar'] and to_units == 'psi': factor = kPa_to_psi
elif from_units == 'psi' and to_units in ['kPa', 'Pa', 'bar']: factor = psi_to_kPa
else: factor = 1.0
if from_units == 'Pa': factor *= 0.001
elif from_units == 'bar': factor *= 100.0
if to_units == 'Pa': factor *= 1000.0
elif to_units == 'bar': factor *= 0.01
a *= factor
return a
def convert_volumes(a, from_units, to_units):
"""Converts values in numpy array (or a scalar) from one volume unit to another, in situ if array.
arguments:
a (numpy float array, or float): array of volume values to undergo unit conversion in situ, or a scalar
from_units (string): 'm3', 'ft3' or 'bbl' being the units of the data before conversion
to_units (string): 'm3', 'ft3' or 'bbl' being the required units
returns:
a after unit conversion
"""
from_units = rq_uom(from_units)
to_units = rq_uom(to_units)
assert from_units in ['m3', 'ft3', 'bbl'] and to_units in ['m3', 'ft3', 'bbl']
if from_units == to_units: return a
if from_units == 'm3':
if to_units == 'ft3': a *= m3_to_ft3
else: a *= m3_to_bbl
elif from_units == 'ft3':
if to_units == 'm3': a *= ft3_to_m3
else: a *= ft3_to_m3 * m3_to_bbl
else: # from_units == 'bbl'
if to_units == 'm3': a *= bbl_to_m3
else: a *= bbl_to_m3 * m3_to_ft3
return a
@lru_cache(maxsize=None)
def properties_data():
""" Return valid resqml uoms and property kinds.
Returns a dict with keys:
- "uoms" : list of valid units of measure
- "property_kinds" : dict mapping valid property kinds to their description
"""
json_path = Path(__file__).parent / 'data' / 'properties.json'
with open(json_path) as f:
data = json.load(f)
return data
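# --- Usage sketch (values follow from the conversion constants defined above) ---
# rq_uom('metres') -> 'm'
# rq_uom('mmscf/day') -> '1E6 ft3/d'
# convert_lengths(10.0, 'ft', 'm') -> 3.048
# convert_pressures(1.0, 'psi', 'kPa') -> approximately 6.8948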
|
# -*- coding: utf-8 -*-
__author__ = "Ariel Rodrigues"
__version__ = "0.1.0"
__license__ = ""
"""
Luigi task that runs the CNN step via utils.main on previously joined data and embeddings.
params: {
join_data: string,
embeddings: string
}
"""
import luigi
import logging
import datetime
import utils
log = logging.getLogger(__name__)
class CNN(luigi.Task):
params = luigi.DictParameter(default=None)
def are_valid_params(self):
return self.params and \
type(self.params["join_data"]) is str and \
type(self.params["embeddings"]) is str
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.are_valid_params():
raise Exception('CNN: wrong params type')
self.constants = {
'join_data': f'../outputs/{self.params["join_data"]}',
'embeddings': f'../outputs/{self.params["embeddings"]}'
}
def run(self):
self.emite_log(f'starting task with params {str(self.constants)}')
RESULTS = utils.main(self.constants['join_data'], self.constants['embeddings'])
self.write_result(RESULTS)
self.emite_log('task has finished')
def emite_log(self, message):
formated_datetime = datetime.datetime.now().strftime('%d-%m-%Y-%H-%M-%S')
log.info(f'{formated_datetime}: {message}')
def output(self):
return luigi.LocalTarget(f"../outputs/results/result_{self.constants['join_data']}_{self.constants['embeddings']}")
def write_result(self, result):
with self.output().open('w') as out_file:
for line in result:
out_file.write(line)
if __name__ == '__main__':
luigi.run()
|
import hashlib
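# Builds a deterministic cache key for latest-rates lookups: the literal key string
# is repeated 100 times and md5-hashed, so equal (source, currency) pairs map to the same key.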
def genetere_rate_cache_key(source: int, currency: int) -> str:
key = (f'latest-rates-{source}-{currency}' * 100).encode()
return hashlib.md5(key).hexdigest()
# return f'latest-rates-{source}-{currency}'
|
from collections import Counter
def find_repeated_dna_sequences(s: str) -> list[str]:
if len(s) < 10:
return []
counter = Counter(s[i: i + 10] for i in range(len(s) - 10 + 1))
return [key for key in counter if counter[key] > 1]
if __name__ == "__main__":
print(find_repeated_dna_sequences("AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT"))
print(find_repeated_dna_sequences("AAAAAAAAAAAAA"))
|
import os
import operator, functools
from typing import List, Tuple
from concurrent.futures import ProcessPoolExecutor
def make_double(n):
print('PID[{}]:\t{}'.format(os.getpid(), n))
return n*2
def multi_processing(func, inputs, n_max_workers=None):
p_executor = ProcessPoolExecutor(max_workers=n_max_workers)
with p_executor as executor:
outputs = executor.map(func, inputs)
# Flatten the mapped results if they are lists, otherwise materialise the generator
outputs = list(outputs)
if outputs and isinstance(outputs[0], list):
outputs = functools.reduce(operator.iconcat, outputs, [])
return outputs
if __name__=='__main__':
with open('../../sample/zero_to_hundred.txt') as reader:
lines = reader.readlines()
lines = list(map(lambda x: int(x), lines))
outputs = multi_processing(func=make_double, inputs=lines)
print(len(outputs), outputs)
|
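# Reads a non-negative integer and prints its decimal digits in reverse order
# (e.g. 1205 -> 5021) by repeatedly peeling off the last digit with n % 10.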
aux=0
n=int(input())
while n!=0:
aux=10*aux+n%10
n=int(n/10)
print(aux)
|
import argparse
import base64
import logging
import os
import sys
from typing import (
Any,
Dict,
)
import requests
import yaml
DESCRIPTION = """Load a Galaxy model store into a running Galaxy instance.
See the corresponding galaxy-build-objects script for one possible way to
create a model store to use with this script.
This script creates all datasets in "discarded"/"deferred" states (depending on if
source URIs are available). To actually load the datasets into a Galaxy instance's
object store the underlying libraries need to be used directly and fed your Galaxy's
database configuration and objectstore setup.
"""
logging.basicConfig()
log = logging.getLogger(__name__)
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
args = _arg_parser().parse_args(argv)
galaxy_url = args.galaxy_url
api_url = f"{galaxy_url.rstrip('/')}/api"
api_key = args.key
assert api_key
history_id = args.history_id
if history_id:
create_url = f"{api_url}/histories/{history_id}/contents_from_store?key={api_key}"
else:
create_url = f"{api_url}/histories/from_store?key={api_key}"
store_path = args.store
assert os.path.exists(store_path)
is_json = False
for json_ext in [".yml", ".yaml", ".json"]:
if store_path.endswith(json_ext):
is_json = True
data: Dict[str, Any] = {}
if is_json:
with open(store_path, "r") as f:
store_dict = yaml.safe_load(f)
data["store_dict"] = store_dict
else:
with open(store_path, "rb") as fb:
store_contents = fb.read()
data["store_content_base64"] = base64.b64encode(store_contents)
response = requests.post(create_url, json=data)
response.raise_for_status()
def _arg_parser():
parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("store", metavar="STORE", help="file or directory containing the model store to connect to")
# copied from test script in galaxy-tool-util...
parser.add_argument("-u", "--galaxy-url", default="http://localhost:8080", help="Galaxy URL")
parser.add_argument("-k", "--key", default=None, help="Galaxy User API Key")
parser.add_argument("-t", "--history-id", default=None, help="Encoded history ID to load model store into")
return parser
if __name__ == "__main__":
main()
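# --- Hypothetical invocation (script filename and API key are illustrative) ---
#   python load_model_store.py my_model_store.tar.gz \
#       --galaxy-url http://localhost:8080 --key <galaxy-api-key>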
|
#! /usr/bin/env python3
import logging
from ops.charm import CharmBase
from ops.main import main
from ops.model import ActiveStatus, MaintenanceStatus, BlockedStatus
import subprocess
import os
logger = logging.getLogger(__name__)
class SuricataCharm(CharmBase):
"""Class representing this Operator charm."""
def __init__(self, *args):
"""Initialize charm and configure states and events to observe."""
super().__init__(*args)
self.framework.observe(self.on.config_changed, self.configure_pod)
self.framework.observe(self.on.add_rule_action, self._on_add_rule_action)
self.framework.observe(self.on.update_rules_action, self._on_update_rules_action)
self.framework.observe(self.on.start_service_action, self._on_start_service_action)
self.framework.observe(self.on.stop_service_action, self._on_stop_service_action)
self.framework.observe(self.on.run_action, self._on_run_action)
self.framework.observe(self.on.health_check_action, self._on_health_check_action)
self.framework.observe(self.on.touch_action, self._on_touch_action)
def _on_touch_action(self, event):
"""Create an empty file receiving the path and filename as input"""
filename = event.params["filename"]
try:
subprocess.run(["touch", filename])
event.set_results({
"output": f"File {filename} created successfully"
})
except Exception as e:
event.fail(f"Touch action failed with the following exception: {e}")
def _on_run_action(self, event):
"""Execute command receiving the command as input"""
cmd = event.params["cmd"]
try:
os.system(cmd)
event.set_results({
"output": f"Command: {cmd} executed successfully"
})
except Exception as e:
event.fail(f"Command: {cmd} failed with the following exception: {e}")
def _on_health_check_action(self, event):
"""Check if Suricata service is running"""
try:
healthcheck = subprocess.run(["service","suricata","status"], check=True, capture_output=True, text=True)
output = healthcheck.stdout.split(' ')
if "not" in output:
event.set_results({
"output": f"Status: Suricata is not running"
})
else:
event.set_results({
"output": f"Status: Suricata is running"
})
except Exception as e:
event.fail(f"Command: Health-check failed with the following exception: {e}")
def _on_add_rule_action(self, event):
"""Add rule to Suricata config"""
pass
def _on_update_rules_action(self, event):
"""Update default rules to Suricata config"""
try:
os.system("suricata-update")
event.set_results({
"output": f"Command: suricata-update executed successfully"
})
except Exception as e:
event.fail(f"Command: suricata-update failed with the following exception: {e}")
def _on_start_service_action(self, event):
"""Start Suricata service"""
try:
subprocess.run(["service","suricata","restart"], check=True, capture_output=True, text=True)
event.set_results({
"output": f"Start: Suricata service started successfully"
})
except Exception as e:
event.fail(f"Start: Suricata service starting failed with the following exception: {e}")
def _on_stop_service_action(self, event):
"""Stop Suricata service"""
try:
subprocess.run(["service","suricata","stop"], check=True, capture_output=True, text=True)
event.set_results({
"output": f"Start: Suricata service stopped successfully"
})
except Exception as e:
event.fail(f"Start: Suricata service stopping failed with the following exception: {e}")
def configure_pod(self, event):
if not self.unit.is_leader():
self.unit.status = ActiveStatus()
return
self.unit.status = MaintenanceStatus("Applying pod spec")
containers = [
{
"name": self.framework.model.app.name,
"image": "lopeez97/suricata:latest",
"ports": [
{
"name": "suricata",
"containerPort": 22,
"protocol": "TCP",
}
],
"command": ["/bin/bash","-ce","tail -f /dev/null",],
"kubernetes": { "securityContext": { "privileged": True}}
}
]
self.model.pod.set_spec({"version": 3, "containers": containers})
self.unit.status = ActiveStatus()
self.app.status = ActiveStatus()
if __name__ == "__main__":
main(SuricataCharm)
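# --- Hypothetical operator usage (unit name is illustrative; actions correspond to the
# --- handlers observed above) ---
#   juju run-action suricata/0 health-check --wait
#   juju run-action suricata/0 run cmd="suricata --build-info" --wait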
|
"""
This script evaluates the performance of the
AKKEffProofOfRetrievability Proof of
retrievability implemented in
koppercoin.crypto.AKKEffProofOfRetrievability.py
"""
from koppercoin.crypto.AKKEffProofOfRetrievability import *
from koppercoin.crypto.AKKProofOfRetrievability import GQProofOfRetrievability as SlowGQProofOfRetrievability
import time
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import os
import jsonpickle
from pympler import asizeof
import gc
# which timings should we compute?
# set to 1 if we should compute it, otherwise 0
(gq, schnorr, okamoto, shoup, sw) = (1,0,0,1,1)
# The messagesizes which will be tested
messagesizes = range(250, 4001, 250)
# messagesizes = range(300, 501, 50)
# The number of runs per messagesize
runs = 10
if gq:
gqtimings = {'keygen': [],
'encode': [],
'genchallenge': [],
'genproof': [],
'verify': []}
if schnorr:
schnorrtimings = {'keygen': [],
'encode': [],
'genchallenge': [],
'genproof': [],
'verify': []}
if okamoto:
okamototimings = {'keygen': [],
'encode': [],
'genchallenge': [],
'genproof': [],
'verify': []}
if shoup:
shouptimings = {'keygen': [],
'encode': [],
'genchallenge': [],
'genproof': [],
'verify': []}
if sw:
swtimings = {'keygen': [],
'encode': [],
'genchallenge': [],
'genproof': [],
'verify': []}
for i in range(len(messagesizes)):
print("Running PoR on messagesize " + str(messagesizes[i]) + " from " + str(list(messagesizes)))
if gq:
gqtimings['keygen'].append([])
gqtimings['encode'].append([])
gqtimings['genchallenge'].append([])
gqtimings['genproof'].append([])
gqtimings['verify'].append([])
if schnorr:
schnorrtimings['keygen'].append([])
schnorrtimings['encode'].append([])
schnorrtimings['genchallenge'].append([])
schnorrtimings['genproof'].append([])
schnorrtimings['verify'].append([])
if okamoto:
okamototimings['keygen'].append([])
okamototimings['encode'].append([])
okamototimings['genchallenge'].append([])
okamototimings['genproof'].append([])
okamototimings['verify'].append([])
if shoup:
shouptimings['keygen'].append([])
shouptimings['encode'].append([])
shouptimings['genchallenge'].append([])
shouptimings['genproof'].append([])
shouptimings['verify'].append([])
if sw:
swtimings['keygen'].append([])
swtimings['encode'].append([])
swtimings['genchallenge'].append([])
swtimings['genproof'].append([])
swtimings['verify'].append([])
for run in range(runs):
data = os.urandom(messagesizes[i]*1024)
# GQProofOfRetrievability
if gq:
print("Computing GQProofOfRetrievability")
time_pre = time.time()
(pk, sk) = GQProofOfRetrievability.keygen()
time_post = time.time()
duration = time_post - time_pre
gqtimings['keygen'][i].append(duration)
time_pre = time.time()
(mij, authenticators, filehandle) = GQProofOfRetrievability.encode(sk, pk, data)
time_post = time.time()
duration = time_post - time_pre
gqtimings['encode'][i].append(duration)
time_pre = time.time()
challenge = os.urandom(32)
time_post = time.time()
duration = time_post - time_pre
gqtimings['genchallenge'][i].append(duration)
time_pre = time.time()
proof = GQProofOfRetrievability.genproof(pk, data, authenticators, challenge)
time_post = time.time()
duration = time_post - time_pre
gqtimings['genproof'][i].append(duration)
time_pre = time.time()
a = GQProofOfRetrievability.verify(proof, pk, challenge, filehandle)
time_post = time.time()
duration = time_post - time_pre
gqtimings['verify'][i].append(duration)
# SchnorrProofOfRetrievability
if schnorr:
print("Computing SchnorrProofOfRetrievability")
time_pre = time.time()
(pk, sk) = SchnorrProofOfRetrievability.keygen()
time_post = time.time()
duration = time_post - time_pre
schnorrtimings['keygen'][i].append(duration)
time_pre = time.time()
(mij, authenticators, filehandle) = SchnorrProofOfRetrievability.encode(sk, pk, data)
time_post = time.time()
duration = time_post - time_pre
schnorrtimings['encode'][i].append(duration)
time_pre = time.time()
challenge = os.urandom(32)
time_post = time.time()
duration = time_post - time_pre
schnorrtimings['genchallenge'][i].append(duration)
time_pre = time.time()
proof = SchnorrProofOfRetrievability.genproof(pk, data, authenticators, challenge)
time_post = time.time()
duration = time_post - time_pre
schnorrtimings['genproof'][i].append(duration)
time_pre = time.time()
a = SchnorrProofOfRetrievability.verify(proof, pk, challenge, filehandle)
time_post = time.time()
duration = time_post - time_pre
schnorrtimings['verify'][i].append(duration)
# OkamotoProofOfRetrievability
if okamoto:
print("Computing OkamotoProofOfRetrievability")
time_pre = time.time()
(pk, sk) = OkamotoProofOfRetrievability.keygen()
time_post = time.time()
duration = time_post - time_pre
okamototimings['keygen'][i].append(duration)
time_pre = time.time()
(mij, authenticators, filehandle) = OkamotoProofOfRetrievability.encode(sk, pk, data)
time_post = time.time()
duration = time_post - time_pre
okamototimings['encode'][i].append(duration)
time_pre = time.time()
challenge = os.urandom(32)
time_post = time.time()
duration = time_post - time_pre
okamototimings['genchallenge'][i].append(duration)
time_pre = time.time()
proof = OkamotoProofOfRetrievability.genproof(pk, data, authenticators, challenge)
time_post = time.time()
duration = time_post - time_pre
okamototimings['genproof'][i].append(duration)
time_pre = time.time()
a = OkamotoProofOfRetrievability.verify(proof, pk, challenge, filehandle)
time_post = time.time()
duration = time_post - time_pre
okamototimings['verify'][i].append(duration)
# ShoupProofOfRetrievability
if shoup:
print("Computing ShoupProofOfRetrievability")
time_pre = time.time()
(pk, sk) = ShoupProofOfRetrievability.keygen()
time_post = time.time()
duration = time_post - time_pre
shouptimings['keygen'][i].append(duration)
time_pre = time.time()
(mij, authenticators, filehandle) = ShoupProofOfRetrievability.encode(sk, pk, data)
time_post = time.time()
duration = time_post - time_pre
shouptimings['encode'][i].append(duration)
time_pre = time.time()
challenge = os.urandom(32)
time_post = time.time()
duration = time_post - time_pre
shouptimings['genchallenge'][i].append(duration)
time_pre = time.time()
proof = ShoupProofOfRetrievability.genproof(pk, data, authenticators, challenge)
time_post = time.time()
duration = time_post - time_pre
shouptimings['genproof'][i].append(duration)
time_pre = time.time()
a = ShoupProofOfRetrievability.verify(proof, pk, challenge, filehandle)
time_post = time.time()
duration = time_post - time_pre
shouptimings['verify'][i].append(duration)
# SWProofOfRetrievability
if sw:
print("Computing SWProofOfRetrievability")
time_pre = time.time()
(pk, sk) = SWProofOfRetrievability.keygen()
time_post = time.time()
duration = time_post - time_pre
swtimings['keygen'][i].append(duration)
time_pre = time.time()
(mij, authenticators, filehandle) = SWProofOfRetrievability.encode(sk, pk, data)
time_post = time.time()
duration = time_post - time_pre
swtimings['encode'][i].append(duration)
time_pre = time.time()
challenge = os.urandom(32)
time_post = time.time()
duration = time_post - time_pre
swtimings['genchallenge'][i].append(duration)
time_pre = time.time()
proof = SWProofOfRetrievability.genproof(pk, data, authenticators, challenge)
time_post = time.time()
duration = time_post - time_pre
swtimings['genproof'][i].append(duration)
time_pre = time.time()
a = SWProofOfRetrievability.verify(proof, pk, challenge, filehandle)
time_post = time.time()
duration = time_post - time_pre
swtimings['verify'][i].append(duration)
# Save the raw timing data as JSON (via jsonpickle)
if gq:
with open('timings_GQPoR.json', 'w') as f:
json_obj = jsonpickle.encode(gqtimings)
f.write(json_obj)
if schnorr:
with open('timings_SchnorrPoR.json', 'w') as f:
json_obj = jsonpickle.encode(schnorrtimings)
f.write(json_obj)
if okamoto:
with open('timings_OkamotoPoR.json', 'w') as f:
json_obj = jsonpickle.encode(okamototimings)
f.write(json_obj)
if shoup:
with open('timings_ShoupPoR.json', 'w') as f:
json_obj = jsonpickle.encode(shouptimings)
f.write(json_obj)
if sw:
with open('timings_SWPoR.json', 'w') as f:
json_obj = jsonpickle.encode(swtimings)
f.write(json_obj)
print("Running postprocessing steps")
# Transform to handy Dataframes
for algo in ['keygen', 'encode', 'genchallenge', 'genproof', 'verify']:
if gq:
gqtimings[algo] = pd.DataFrame(gqtimings[algo]).T
gqtimings[algo].columns = messagesizes
if schnorr:
schnorrtimings[algo] = pd.DataFrame(schnorrtimings[algo]).T
schnorrtimings[algo].columns = messagesizes
if okamoto:
okamototimings[algo] = pd.DataFrame(okamototimings[algo]).T
okamototimings[algo].columns = messagesizes
if shoup:
shouptimings[algo] = pd.DataFrame(shouptimings[algo]).T
shouptimings[algo].columns = messagesizes
if sw:
swtimings[algo] = pd.DataFrame(swtimings[algo]).T
swtimings[algo].columns = messagesizes
for algo in ['keygen', 'encode', 'genchallenge', 'genproof', 'verify']:
# Save the data in handy .csv
if gq:
gqtimings[algo].to_csv('timings_GQPoR_'+str(algo)+'.csv')
if schnorr:
schnorrtimings[algo].to_csv('timings_SchnorrPoR_'+str(algo)+'.csv')
if okamoto:
okamototimings[algo].to_csv('timings_OkamotoPoR_'+str(algo)+'.csv')
if shoup:
shouptimings[algo].to_csv('timings_ShoupPoR_'+str(algo)+'.csv')
if sw:
swtimings[algo].to_csv('timings_SWPoR_'+str(algo)+'.csv')
# GQ
if gq:
# Set up the plot
plt.figure()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel('Filesize in Kb')
plt.ylabel('Time in sec')
plt.title('Time Measurements for ' + str(algo))
# Plot the values
gqtimings[algo].boxplot()
plt.legend(loc='upper left')
# Save the figure
plt.savefig('timings_GQPoR_'+str(algo)+'.png')
# Schnorr
if schnorr:
# Set up the plot
plt.figure()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel('Filesize in Kb')
plt.ylabel('Time in sec')
plt.title('Time Measurements for ' + str(algo))
# Plot the values
schnorrtimings[algo].boxplot()
plt.legend(loc='upper left')
# Save the figure
plt.savefig('timings_SchnorrPoR_'+str(algo)+'.png')
# Okamoto
if okamoto:
# Set up the plot
plt.figure()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel('Filesize in Kb')
plt.ylabel('Time in sec')
plt.title('Time Measurements for ' + str(algo))
# Plot the values
okamototimings[algo].boxplot()
plt.legend(loc='upper left')
# Save the figure
plt.savefig('timings_OkamotoPoR_'+str(algo)+'.png')
# Shoup
if shoup:
# Set up the plot
plt.figure()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel('Filesize in Kb')
plt.ylabel('Time in sec')
plt.title('Time Measurements for ' + str(algo))
# Plot the values
shouptimings[algo].boxplot()
plt.legend(loc='upper left')
# Save the figure
plt.savefig('timings_ShoupPoR_'+str(algo)+'.png')
# SW
if sw:
# Set up the plot
plt.figure()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel('Filesize in Kb')
plt.ylabel('Time in sec')
plt.title('Time Measurements for ' + str(algo))
# Plot the values
swtimings[algo].boxplot()
plt.legend(loc='upper left')
# Save the figure
plt.savefig('timings_SWPoR_'+str(algo)+'.png')
# GQ
if gq:
# Comparison between the average times of all of them
plt.figure()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel('Filesize in Kb')
plt.ylabel('Time in sec')
plt.title('Time Measurements for the Proof of Retrievability')
# Build a DataFrame with all the timing data
df = pd.DataFrame([gqtimings[key].mean() for key in gqtimings.keys()], index=gqtimings.keys())
df = df.transpose()
# plot it
df.keygen.plot(style='bo')
df.verify.plot(style='gv')
df.genchallenge.plot(style='r^')
df.genproof.plot(style='c>')
df.encode.plot(style='m<')
plt.legend(loc='upper left')
plt.savefig('timings_GQPoR_all.png')
# Schnorr
if schnorr:
# Comparison between the average times of all of them
plt.figure()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel('Filesize in Kb')
plt.ylabel('Time in sec')
plt.title('Time Measurements for the Proof of Retrievability')
# Build a DataFrame with all the timing data
df = pd.DataFrame([schnorrtimings[key].mean() for key in schnorrtimings.keys()], index=schnorrtimings.keys())
df = df.transpose()
# plot it
df.keygen.plot(style='bo')
df.verify.plot(style='gv')
df.genchallenge.plot(style='r^')
df.genproof.plot(style='c>')
df.encode.plot(style='m<')
plt.legend(loc='upper left')
plt.savefig('timings_SchnorrPoR_all.png')
# Okamoto
if okamoto:
# Comparison between the average times of all of them
plt.figure()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel('Filesize in Kb')
plt.ylabel('Time in sec')
plt.title('Time Measurements for the Proof of Retrievability')
# Build a DataFrame with all the timing data
df = pd.DataFrame([okamototimings[key].mean() for key in okamototimings.keys()], index=okamototimings.keys())
df = df.transpose()
# plot it
df.keygen.plot(style='bo')
df.verify.plot(style='gv')
df.genchallenge.plot(style='r^')
df.genproof.plot(style='c>')
df.encode.plot(style='m<')
plt.legend(loc='upper left')
plt.savefig('timings_OkamotoPoR_all.png')
# Shoup
if shoup:
# Comparison between the average times of all of them
plt.figure()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel('Filesize in Kb')
plt.ylabel('Time in sec')
plt.title('Time Measurements for the Proof of Retrievability')
# Build a DataFrame with all the timing data
df = pd.DataFrame([shouptimings[key].mean() for key in shouptimings.keys()], index=shouptimings.keys())
df = df.transpose()
# plot it
df.keygen.plot(style='bo')
df.verify.plot(style='gv')
df.genchallenge.plot(style='r^')
df.genproof.plot(style='c>')
df.encode.plot(style='m<')
plt.legend(loc='upper left')
plt.savefig('timings_ShoupPoR_all.png')
# SW
if sw:
# Comparison between the average times of all of them
plt.figure()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel('Filesize in Kb')
plt.ylabel('Time in sec')
plt.title('Time Measurements for the Proof of Retrievability')
# Build a DataFrame with all the timing data
df = pd.DataFrame([swtimings[key].mean() for key in swtimings.keys()], index=swtimings.keys())
df = df.transpose()
# plot it
df.keygen.plot(style='bo')
df.verify.plot(style='gv')
df.genchallenge.plot(style='r^')
df.genproof.plot(style='c>')
df.encode.plot(style='m<')
plt.legend(loc='upper left')
plt.savefig('timings_SWPoR_all.png')
|
from google_api_python_tools.dataproc.job import DataProcJob
from google_api_python_tools.dataproc.operation import DataProcOperation
from google_api_python_tools.dataproc.constants import DataprocImageVersion
class DataProcCluster(object):
class ClusterJob(object):
def __init__(self, cluster):
self.cluster = cluster
def get_last_state(self):
return self.translate_state(self.cluster.description['status']['state'])
def get_details_message(self):
status = self.cluster.description['status']
return status['detail'] if 'detail' in status else None
def get_current_state(self):
self.cluster.update()
return self.get_last_state()
def translate_state(self, state):
raise NotImplementedError
class StartJob(ClusterJob):
translations = {
"UNKNOWN": "ERROR",
"CREATING": "RUNNING",
"RUNNING": "DONE",
"ERROR": "ERROR",
"DELETING": "ERROR",
"UPDATING": "RUNNING"
}
@classmethod
def from_id(cls, client, project, name, region="global"):
return DataProcCluster.StartJob(DataProcCluster(client, project, name, region).update())
def translate_state(self, state):
return self.translations[state]
DEFAULT_SCOPES = ['https://www.googleapis.com/auth/monitoring',
'https://www.googleapis.com/auth/devstorage.full_control',
'https://www.googleapis.com/auth/compute',
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/bigquery',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/logging.admin']
def __init__(self, client, project, name, region):
self.client = client
self.project = project
self.name = name
self.region = region
self.description = None
def update(self):
self.description = self.client.execute(
lambda x: x.projects().regions().clusters().get(projectId=self.project,
clusterName=self.name,
region=self.region))
return self
def create(self, master_machine_type, worker_machine_type, worker_instance_count,
image=DataprocImageVersion.V_1_0, zone="europe-west1-b", network="default", scopes=None, uid=None):
master_configuration = {
"numInstances": 1,
"machineTypeUri": "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/machineTypes/%s" % (
self.project, zone, master_machine_type),
"diskConfig": {},
"isPreemptible": False
}
worker_configuration = {
"numInstances": worker_instance_count,
"machineTypeUri": "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/machineTypes/%s" % (
self.project, zone, worker_machine_type),
"diskConfig": {},
"isPreemptible": False
}
self.description = {
"projectId": self.project,
"clusterName": self.name,
"config": {
"gceClusterConfig": {
"zoneUri": "https://www.googleapis.com/compute/v1/projects/%s/zones/%s" % (self.project, zone),
"networkUri": "https://www.googleapis.com/compute/v1/projects/%s/global/networks/%s" %
(self.project, network),
"serviceAccountScopes": scopes or self.DEFAULT_SCOPES,
"metadata": {
"uid": uid
}
},
"masterConfig": master_configuration,
"workerConfig": worker_configuration,
"softwareConfig": {
"imageVersion": image
}
}
}
response = self.client.execute(lambda x: x.projects().regions().clusters().create(projectId=self.project,
region=self.region,
body=self.description))
return DataProcOperation(self.client, response)
def delete(self):
"""
Delete cluster
"""
response = self.client.execute(lambda x: x.projects().regions().clusters().delete(projectId=self.project,
region=self.region,
clusterName=self.name))
return DataProcOperation(self.client, response)
def exists(self):
"""
Check that cluster exists
"""
response = self.client.execute(lambda x: x.projects().regions().clusters().list(projectId=self.project,
region=self.region))
return 'clusters' in response and self.name in [b['clusterName'] for b in response['clusters']]
def get_default_logging_config(self):
return {"driverLogLevels": {"com.ocado": "INFO", "root": "FATAL", "org.apache": "INFO"}}
def submit_spark_sql_job(self, queries, job_id=None):
return self.submit_job('sparkSqlJob', {
'loggingConfig': self.get_default_logging_config(),
'queryList': {
'queries': queries
}
}, job_id)
def submit_spark_job(self, job_body, job_id=None):
if 'loggingConfig' not in job_body.keys():
job_body['loggingConfig'] = self.get_default_logging_config()
return self.submit_job('sparkJob', job_body, job_id)
def submit_job(self, job_name, job_body, job_id=None):
request_body = {
'job': {
'placement': {
'clusterName': self.name
},
job_name: job_body
}
}
if job_id:
request_body['job']['reference'] = {
'projectId': self.project,
'jobId': job_id
}
response = self.client.execute(lambda x: x.projects().regions().jobs().submit(projectId=self.project,
region=self.region,
body=request_body))
return DataProcJob(self.client, self.project, self.region, response)
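# --- Hypothetical usage sketch (assumes an authorised Dataproc API client wrapper) ---
# cluster = DataProcCluster(client, project='my-project', name='my-cluster', region='global')
# if not cluster.exists():
#     operation = cluster.create(master_machine_type='n1-standard-4',
#                                worker_machine_type='n1-standard-4',
#                                worker_instance_count=2)  # returns a DataProcOperation
# job = cluster.submit_spark_sql_job(["SHOW DATABASES"], job_id='demo-job-1')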
|
__version__ = '1.1.51'
|
#!/usr/bin/python2.6
import os, sys
import numpy
import networkx # http://networkx.lanl.gov/
import cPickle
'''Parse some html pages and build an adjacency matrix.
Written by Eric Brochu and Nando de Freitas.
Modified by Kevin Murphy, 20 Feb 2011.
'''
def parseFiles(folder):
'''Make a dictionary, keys are filenames, value is list of files that are pointed to'''
fnames = os.listdir(folder)
links = {}
for file in fnames:
links[file] = []
filename = os.path.join(folder, file)
print 'processing ', filename
f = open(filename, 'r')
for line in f.readlines():
while True:
p = line.partition('<a href="http://')[2]
if p=='':
break
(url, _, line) = p.partition('\">')
links[file].append(url)
print "file %s links to %s" % (file, url)
f.close()
return links
def mkGraph(mydict):
'''Convert dictionary into weighted digraph'''
DG = networkx.DiGraph()
DG.add_nodes_from(mydict.keys())
edges = []
for key, values in mydict.iteritems():
eweight = {}
# for each node on our list of values, increment a counter
for v in values:
if v in eweight:
eweight[v] += 1
else:
eweight[v] = 1
# for each unique target we connect to, create a weighted edge
for succ, weight in eweight.iteritems():
edges.append([key, succ, {'weight':weight}])
DG.add_edges_from(edges)
return DG
def plotGraph(DG):
'''Visualize network'''
pmtkFigureFolder = os.environ['PMTKFIGUREFOLDER']
import matplotlib.pyplot as plt
plt.figure(figsize=(9,9))
pos=networkx.spring_layout(DG,iterations=10)
#networkx.draw(DG,pos,node_size=0,alpha=0.4,edge_color='r', font_size=16)
networkx.draw_circular(DG)
plt.savefig(os.path.join(pmtkFigureFolder, "link_graph.pdf"))
plt.show()
#def pmtkInit():
# pmtkFolder = os.environ['PMTKPYTHONHOME']
# execfile(os.path.join(pmtkFolder, 'pmtk3PythonInit.py'))
def DGtoAdjMat(DG):
'''Convert weighted digraph into a dense adjacency matrix of edge weights'''
NX = DG.number_of_nodes()
fnames = DG.nodes()
T = numpy.matrix(numpy.zeros((NX, NX)))
# Map from names to numbers
f2i = dict((fn, i) for i, fn in enumerate(fnames))
for predecessor, successors in DG.adj.iteritems():
for s, edata in successors.iteritems():
T[f2i[predecessor], f2i[s]] = edata['weight']
return T
def main():
#pmtkInit()
pmtkDataFolder = os.environ['PMTKDATAFOLDER']
mydict = parseFiles(os.path.join(pmtkDataFolder, 'smallWeb'))
fnames = mydict.keys()
DG = mkGraph(mydict)
plotGraph(DG)
#pmtkTmpFolder = os.environ['PMTKTMPFOLDER']
# Save file
tmpName = os.path.join(pmtkDataFolder, 'smallWeb', 'DG.pkl')
cPickle.dump(DG, open(tmpName, 'w'))
# DG = cPickle.load(fname)
DGtoAdjMat(DG)
if __name__ == '__main__':
main()
|
from typing import Any
import cv2
from .image_binary import ImageBinary
class ImageGray:
def __init__(self, image_bytes) -> None:
self.image_bytes = image_bytes
def apply_clahe(self, clip_limit, tile_size) -> Any:
clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_size)
return ImageGray(clahe.apply(self.image_bytes))
def to_binary(
self,
) -> ImageBinary:
_, image_binary = cv2.threshold(
self.image_bytes, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU
)
return ImageBinary(image_binary)
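# --- Hypothetical usage sketch (the image path is illustrative) ---
# import cv2
# gray = ImageGray(cv2.imread('scan.png', cv2.IMREAD_GRAYSCALE))
# equalised = gray.apply_clahe(clip_limit=2.0, tile_size=(8, 8))
# binary = equalised.to_binary()  # Otsu-thresholded ImageBinary wrapper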
|
# -*- coding: utf-8 -*-
__all__ = ['Reverse', 'UrlBuildingError']
from .url import URL
from .url_templates import UrlBuildingError
from ..utils import cached_property
class Location(object):
'''
Class representing an endpoint in the reverse url map.
'''
# XXX For backward compatibility, some subclasses override constructor
fragment_builder = None
def __init__(self, *builders, **kwargs):
self.builders = list(builders)
self.subdomains = kwargs.get('subdomains', [])
self.fragment_builder = kwargs.get('fragment_builder', None)
@property
def all_builders(self):
builders = self.builders
if self.fragment_builder is not None:
# attention! not +=
builders = builders + [self.fragment_builder]
return builders
@property
def need_arguments(self):
for b in self.all_builders:
if b._url_params:
return True
return False
def build_path(self, reverse, **kwargs):
result = []
for b in self.builders:
result.append(b(**kwargs))
return ''.join(result)
    def build_subdomains(self, reverse):
subdomains = [getattr(x, 'primary', x)
for x in self.subdomains
if getattr(x, 'primary', x)]
return u'.'.join(subdomains)
def build_fragment(self, reverse, **kwargs):
if self.fragment_builder is None:
return None
return self.fragment_builder(**kwargs)
@property
def url_arguments(self):
result = set()
for builder in self.all_builders:
result |= set(builder._url_params)
return result
def __eq__(self, other):
return isinstance(other, self.__class__) and \
self.builders == other.builders and \
self.subdomains == other.subdomains and \
self.fragment_builder == other.fragment_builder
def __repr__(self):
args = '*{!r}'.format(self.builders)
if self.subdomains:
args += ', subdomains={!r}'.format(self.subdomains)
if self.fragment_builder:
args += ', fragment={!r}'.format(self.fragment_builder)
return '{}({})'.format(self.__class__.__name__, args)
class Reverse(object):
'''
    Object encapsulating reverse url map and methods needed to build urls
by their names, namespaces and parameters.
Usually an instance of `Reverse` can be found in `env.root`.
'''
def __init__(self, scope, location=None, path='', host='',
ready=False, need_arguments=False, bound_env=None, parent=None,
finalize_params=None, pending_args=None, fragment=None):
# location is stuff containing builders for current reverse step
# (builds url part for particular namespace or endpoint)
self._location = location
# scope is a dict having nested namespace and endpoint names as key and
# (location, nested scope) tuple as values for the current namespace
self._scope = scope
self._path = path
self._host = host
self._fragment = fragment
# ready means that self._location path and subdomain have been already
# added to self._path and self._host
self._ready = ready
# in the case it is endpoint and
# default rule accepts arguments, it is still callable
self._callable = not ready or (
'' in scope and scope[''][0].need_arguments)
self._need_arguments = need_arguments
self._is_endpoint = (not self._scope) or ('' in self._scope)
self._is_scope = bool(self._scope)
self._bound_env = bound_env
self._parent = parent
self._finalize_params = finalize_params or {}
self._pending_args = pending_args or {}
def _attach_subdomain(self, host, location):
        subdomain = location.build_subdomains(self)
if not host:
return subdomain
if subdomain:
return subdomain + '.' + host
return host
def __call__(self, **kwargs):
'''
        Get a copy of the `Reverse` with the same namespace and url name,
        but with arguments attached.
'''
if not self._callable:
            raise UrlBuildingError('Endpoint does not accept arguments')
if self._finalize_params:
raise UrlBuildingError('Calling a reverse object multiple times is not allowed')
if self._is_endpoint or self._need_arguments:
finalize_params = {}
path, host = self._path, self._host
location = self._location
fragment = self._fragment
if location and not self._ready:
kwargs.update(self._pending_args)
host = self._attach_subdomain(host, location)
path += location.build_path(self, **kwargs)
# a little bit verbose to ease test coverage usage
loc_fragment = location.build_fragment(self, **kwargs)
if loc_fragment is not None:
fragment = loc_fragment
if '' in self._scope:
finalize_params = kwargs
return self.__class__(self._scope, location, path=path, host=host,
fragment=fragment,
bound_env=self._bound_env,
ready=self._is_endpoint,
parent=self._parent,
finalize_params=finalize_params)
# XXX is this code reachable?
raise UrlBuildingError('Not an endpoint {}'.format(repr(self)))
def __getattr__(self, name):
'''
Get subreverse, a reverse in current namespace with the name, equal
to the attribute name::
env.root.index # getattr(env.root, 'index')
'''
if self._is_scope and name in self._scope:
if self._need_arguments:
return getattr(self(), name)
location, scope = self._scope[name]
path = self._path
host = self._host
fragment = self._fragment
ready = not location.need_arguments
if ready:
path += location.build_path(self)
loc_fragment = location.build_fragment(self)
if loc_fragment is not None:
fragment = loc_fragment
host = self._attach_subdomain(host, location)
pending_args = dict(self._finalize_params)
return self.__class__(scope, location, path, host, ready,
fragment=fragment,
bound_env=self._bound_env,
parent=self,
need_arguments=location.need_arguments,
pending_args=pending_args)
raise UrlBuildingError('Namespace or endpoint "{}" does not exist'
' in {!r}'.format(name, self))
def _finalize(self):
# deferred build of the last part of url for endpoints that
# also have nested scopes
# i.e. finalization of __call__ for as_url
if self._need_arguments:
self = self()
path, host = self._path, self._host
location = self._scope[''][0]
host = self._attach_subdomain(host, location)
path += location.build_path(self, **self._finalize_params)
loc_fragment = location.build_fragment(self, **self._finalize_params)
if loc_fragment is not None:
fragment = loc_fragment
else:
fragment = self._fragment
return self.__class__({}, self._location, path=path, host=host,
fragment=fragment,
bound_env=self._bound_env,
parent=self._parent,
ready=self._is_endpoint)
@cached_property
def url_arguments(self):
args = set()
if self._is_endpoint or self._need_arguments:
if self._location:
args |= self._location.url_arguments
if self._is_endpoint and self._scope:
args |= self._scope[''][0].url_arguments
return args
def _build_url_silent(self, _name, **kwargs):
subreverse = self
used_args = set()
for part in _name.split('.'):
if not subreverse._ready and subreverse._need_arguments:
used_args |= subreverse.url_arguments
subreverse = subreverse(**kwargs)
subreverse = getattr(subreverse, part)
if not subreverse._ready and subreverse._is_endpoint:
used_args |= subreverse.url_arguments
subreverse = subreverse(**kwargs)
return used_args, subreverse
def build_subreverse(self, _name, **kwargs):
'''
String-based reverse API. Returns subreverse object::
env.root.build_subreverse('user', user_id=1).profile
'''
_, subreverse = self._build_url_silent(_name, **kwargs)
return subreverse
def build_url(self, _name, **kwargs):
'''
String-based reverse API. Returns URL object::
env.root.build_url('user.profile', user_id=1)
Checks that all necessary arguments are provided and all
provided arguments are used.
'''
used_args, subreverse = self._build_url_silent(_name, **kwargs)
if set(kwargs).difference(used_args):
raise UrlBuildingError(
'Not all arguments are used during URL building: {}'\
.format(', '.join(set(kwargs).difference(used_args))))
return subreverse.as_url
@property
def as_url(self):
'''
Reverse object converted to `web.URL`.
If Reverse is bound to env:
* try to build relative URL,
* use current domain name, port and scheme as default
'''
if '' in self._scope:
return self._finalize().as_url
if not self._is_endpoint:
raise UrlBuildingError('Not an endpoint {}'.format(repr(self)))
if self._ready:
path, host = self._path, self._host
else:
return self().as_url
# XXX there is a little mess with `domain` and `host` terms
if ':' in host:
domain, port = host.split(':')
else:
domain = host
port = None
if self._bound_env:
request = self._bound_env.request
scheme_port = {'http': '80',
'https': '443'}.get(request.scheme, '80')
# Domain to compare with the result of build.
# If both values are equal, domain part can be hidden from result.
# Take it from route_state, not from env.request, because
            # route_state contains domain values with aliases replaced by their
# primary value
primary_domain = self._bound_env._route_state.primary_domain
host_split = request.host.split(':')
request_domain = host_split[0]
request_port = host_split[1] if len(host_split) > 1 else scheme_port
port = port or request_port
return URL(path, host=domain or request_domain,
port=port if port != scheme_port else None,
scheme=request.scheme, fragment=self._fragment,
show_host=host and (domain != primary_domain \
or port != request_port))
return URL(path, host=domain, port=port,
fragment=self._fragment, show_host=True)
def __str__(self):
'''URLencoded representation of the URL'''
return str(self.as_url)
@classmethod
def from_handler(cls, handler):
'''
Get unbound instance of the class related to given handler::
app = web.cases(..)
Reverse.from_handler(app)
'''
return cls(handler._locations())
def bind_to_env(self, bound_env):
'''
Get a copy of the reverse, bound to `env` object.
Can be found in env.root attribute::
# done in iktomi.web.app.Application
env.root = Reverse.from_handler(app).bind_to_env(env)
'''
return self.__class__(self._scope, self._location,
path=self._path, host=self._host,
fragment=self._fragment,
ready=self._ready,
need_arguments=self._need_arguments,
finalize_params=self._finalize_params,
parent=self._parent,
bound_env=bound_env)
def __repr__(self):
return '{}(path=\'{}\', host=\'{}\')'.format(
self.__class__.__name__, self._path, self._host)
|
import matplotlib.pyplot as plt
plt.plot([1, 2, 3, 4])
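# With a single list argument, pyplot treats the values as y and uses
# range(len(y)) as x, so this draws the points (0, 1), (1, 2), (2, 3), (3, 4).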
plt.ylabel('some numbers')
plt.show()
|
import sys, datetime
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QTableWidget, QTableWidgetItem
from PyQt5.QtCore import QStringListModel
from PyQt5.QtGui import QIcon, QStandardItemModel
from PyQt5 import uic
form_class = uic.loadUiType("GUI/Qt/MoreInfo.ui")[0]
'''
Variable name                          Function                                                 Widget type
more_info                              "detailed info" label + stock name                       QLabel
type_fi_stat:                          selects the type of financial statement                  QComboBox
stat_tab                               shows the tables with financial statement detail items   QTabWidget
tab_comprehensive_income_statement:    comprehensive income statement tab                       QWidget
tab_financial_statement:               statement of financial position tab                      QWidget
tab_cash_flow_statement:               cash flow statement tab                                  QWidget
table_comprehensive_income_statement:  comprehensive income statement table
table_financial_statement:             statement of financial position table
table_cash_flow_statement:             cash flow statement table
month_stat:                            annual radio button
year_stat:                             quarterly radio button
search:                                search button                                            QPushButton
menubar:                               menu bar                                                 QMenuBar
menu_help:                             'Help' menu                                              QComboBox
menu_file:                             'File' menu                                              QTableWidget
statusBar:                             status bar                                               QStatusBar
'''
class InfoWindow(QMainWindow, form_class):
def __init__(self, parent=None, code=None):
super(InfoWindow, self).__init__(parent)
self.setupUi(self)
        if code is None:
            QMessageBox.critical(self, 'Warning', "종목정보를 가져올 수 없습니다! 다시 시도해주세요!", QMessageBox.Ok)
            self.close()
            return
self.more_info.setText("상세정보 항목("+code+")")
self.setWindowIcon(QIcon("Images/favicon.png"))
self.setFixedSize(self.size())
self.search_stat_event()
self.search.clicked.connect(self.search_stat_event)
# Events
def search_stat_event(self):
        # type1: quarterly-0, annual-1
        # type2: primary statement / IFRS(consolidated)-0, IFRS(separate)-1, GAAP(consolidated)-2, GAAP(separate)-3
        if self.month_stat.isChecked():
            type1 = 0
        elif self.year_stat.isChecked():
            type1 = 1
        else:
            type1 = 1
        if self.type_fi_stat.currentText() == "주재무제표" or self.type_fi_stat.currentText() == "IFRS(연결)":
type2 = 0
elif self.type_fi_stat.currentText() == "IFRS(별도)":
type2 = 1
elif self.type_fi_stat.currentText() == "GAAP(연결)":
type2 = 2
elif self.type_fi_stat.currentText() == "GAAP(별도)":
type2 = 3
else:
type2 = 0
        # TODO: hook this up to the method that sets the table headers for the selected options
print(type1, type2)
# Setting Table
def table_year_setting(self):
now = datetime.date.today().year
column_header = [str(now-4)+"/12 (IFRS연결)", str(now-3)+"/12 (IFRS연결)", str(now-2)+"/12 (IFRS연결)", str(now-1)+"/12 (IFRS연결)", str(now)+"/12 (IFRS연결)"]
self.table_comprehensive_income_statement.setHorizontalHeaderLabels(column_header)
self.table_financial_statement.setHorizontalHeaderLabels(column_header)
self.table_cash_flow_statement.setHorizontalHeaderLabels(column_header)
def table_month_setting(self):
pass
if __name__ == "__main__":
app = QApplication(sys.argv)
myWindow = InfoWindow(code="삼성전자(005930)")
myWindow.show()
sys.exit(app.exec_())
|
from .cllexer import COOL_LEXER
from .cllexer import tokens as COOL_TOKENS
|
import threading
import unittest
from peewee import *
from playhouse.kv import PickledKeyStore
from playhouse.kv import KeyStore
class KeyStoreTestCase(unittest.TestCase):
def setUp(self):
self.kv = KeyStore(CharField())
self.ordered_kv = KeyStore(CharField(), ordered=True)
self.pickled_kv = PickledKeyStore(ordered=True)
self.kv.clear()
def test_storage(self):
self.kv['a'] = 'A'
self.kv['b'] = 1
self.assertEqual(self.kv['a'], 'A')
self.assertEqual(self.kv['b'], '1')
self.assertRaises(KeyError, self.kv.__getitem__, 'c')
del(self.kv['a'])
self.assertRaises(KeyError, self.kv.__getitem__, 'a')
self.kv['a'] = 'A'
self.kv['c'] = 'C'
self.assertEqual(self.kv[self.kv.key << ('a', 'c')], ['A', 'C'])
self.kv[self.kv.key << ('a', 'c')] = 'X'
self.assertEqual(self.kv['a'], 'X')
self.assertEqual(self.kv['b'], '1')
self.assertEqual(self.kv['c'], 'X')
del(self.kv[self.kv.key << ('a', 'c')])
self.assertRaises(KeyError, self.kv.__getitem__, 'a')
self.assertRaises(KeyError, self.kv.__getitem__, 'c')
self.assertEqual(self.kv['b'], '1')
self.pickled_kv['a'] = 'A'
self.pickled_kv['b'] = 1.1
self.assertEqual(self.pickled_kv['a'], 'A')
self.assertEqual(self.pickled_kv['b'], 1.1)
def test_container_properties(self):
self.kv['x'] = 'X'
self.kv['y'] = 'Y'
self.assertEqual(len(self.kv), 2)
self.assertTrue('x' in self.kv)
self.assertFalse('a' in self.kv)
def test_dict_methods(self):
for kv in (self.ordered_kv, self.pickled_kv):
kv['a'] = 'A'
kv['c'] = 'C'
kv['b'] = 'B'
self.assertEqual(list(kv.keys()), ['a', 'b', 'c'])
self.assertEqual(list(kv.values()), ['A', 'B', 'C'])
self.assertEqual(list(kv.items()), [
('a', 'A'),
('b', 'B'),
('c', 'C'),
])
def test_iteration(self):
for kv in (self.ordered_kv, self.pickled_kv):
kv['a'] = 'A'
kv['c'] = 'C'
kv['b'] = 'B'
items = list(kv)
self.assertEqual(items, [
('a', 'A'),
('b', 'B'),
('c', 'C'),
])
def test_shared_mem(self):
self.kv['a'] = 'xxx'
self.assertEqual(self.ordered_kv['a'], 'xxx')
def set_k():
kv_t = KeyStore(CharField())
kv_t['b'] = 'yyy'
t = threading.Thread(target=set_k)
t.start()
t.join()
self.assertEqual(self.kv['b'], 'yyy')
def test_get(self):
self.kv['a'] = 'A'
self.kv['b'] = 'B'
self.assertEqual(self.kv.get('a'), 'A')
self.assertEqual(self.kv.get('x'), None)
self.assertEqual(self.kv.get('x', 'y'), 'y')
self.assertEqual(
list(self.kv.get(self.kv.key << ('a', 'b'))),
['A', 'B'])
self.assertEqual(
list(self.kv.get(self.kv.key << ('x', 'y'))),
[])
def test_pop(self):
self.ordered_kv['a'] = 'A'
self.ordered_kv['b'] = 'B'
self.ordered_kv['c'] = 'C'
self.assertEqual(self.ordered_kv.pop('a'), 'A')
self.assertEqual(list(self.ordered_kv.keys()), ['b', 'c'])
self.assertRaises(KeyError, self.ordered_kv.pop, 'x')
self.assertEqual(self.ordered_kv.pop('x', 'y'), 'y')
self.assertEqual(
list(self.ordered_kv.pop(self.ordered_kv.key << ['b', 'c'])),
['B', 'C'])
self.assertEqual(list(self.ordered_kv.keys()), [])
try:
import psycopg2
except ImportError:
psycopg2 = None
if psycopg2 is not None:
db = PostgresqlDatabase('peewee_test')
class PostgresqlKeyStoreTestCase(unittest.TestCase):
def setUp(self):
self.kv = KeyStore(CharField(), ordered=True, database=db)
self.kv.clear()
def test_non_native_upsert(self):
self.kv['a'] = 'A'
self.kv['b'] = 'B'
self.assertEqual(self.kv['a'], 'A')
self.kv['a'] = 'C'
self.assertEqual(self.kv['a'], 'C')
|
# Generated by Django 2.2.11 on 2020-03-24 09:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wiki', '0003_auto_20200324_0906'),
]
operations = [
migrations.AlterModelOptions(
name='wikientry',
options={'ordering': ['row', 'column', 'timestamp'], 'verbose_name': 'WikiEntry', 'verbose_name_plural': 'WikiEntries'},
),
migrations.RemoveField(
model_name='wikientry',
name='position',
),
migrations.AddField(
model_name='wikientry',
name='timestamp',
field=models.DateTimeField(blank=True, null=True),
),
]
|
"""
Base class for OceanMonkey commands
"""
import abc
class MonkeyCommand(abc.ABC):
@abc.abstractmethod
def execute(self):
""" """
@abc.abstractmethod
def print_help(self):
""""""
CommandType = {
"startproject": 1,
"run": 2
}
class Commands:
STARTPROJECT = CommandType["startproject"]
RUN = CommandType["run"]
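# --- A minimal sketch (not part of OceanMonkey itself) of how a concrete command
# could implement the abstract interface above; the class name and messages are
# hypothetical.
class RunCommand(MonkeyCommand):
    """Hypothetical command used only to illustrate the MonkeyCommand interface."""
    def execute(self):
        print("running the project ...")
    def print_help(self):
        print("usage: oceanmonkey run")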
|
import xml.etree.ElementTree as ET
import urllib.request
import sys
import os
import time
from datetime import datetime
rss_feeds = []
assert len(sys.argv) <= 3, "Too many arguments"
if len(sys.argv) == 1:
rss_path = "links.txt"
else:
rss_path = sys.argv[1]
if len(sys.argv) <= 2:
res_folder = "res"
else:
res_folder = sys.argv[2]
cache_path = res_folder + "/cache.txt"
cache_expiry = 604800 # One week in seconds
cache = []
links = []
current_time = int(time.time())
current_date = datetime.today().strftime('%Y-%m-%d')
headers = {"user-agent": "Mozilla/5.0"}
warning = lambda text: print("--> Warning:",text)
error = lambda errtype,text: print("--> ERROR ("+ errtype +"):",text)
def load_rss_feeds ():
with open (rss_path,"r") as rss_list_file:
for line in rss_list_file.read().split("\n"):
line = line.rstrip()
if len(line) > 1 and line[0] != "#":
arr = line.split("\t")
if len(arr) == 1: rss_feeds.append([None,arr[0]])
elif len(arr) == 2: rss_feeds.append(arr)
else: raise ValueError ("Too many tabs in line that contains \"" + line + "\"")
def load_cache ():
if os.path.isdir(res_folder):
if os.path.isfile(cache_path):
with open(cache_path,"r") as cache_file:
for line in cache_file.read().split("\n")[2:]:
line_list = line.split("\t")
if len(line_list) == 2:
line_list[0] = int(line_list[0])
if current_time - line_list[0] < cache_expiry:
cache.append(line_list)
links.append(line_list[1])
else:
os.mkdir(res_folder)
def unload_cache ():
cache_string = "# This is a file that stores links to all articles accessed for a week to avoid repetition\n"
cache_string += "# Please do not edit this file in any way\n"
if not os.path.isdir(res_folder): os.mkdir(res_folder)
    for timestamp, link in cache: cache_string += str(timestamp) + "\t" + link + "\n"
with open(cache_path,"w") as cache_file:
cache_file.write(cache_string)
def access_page (url,content_type_wanted,werr = False):
req = urllib.request.Request(url, headers = headers)
try:
with urllib.request.urlopen(req) as page:
if page.status < 400:
raw_text = page.read()
content_type = page.getheader("Content-Type")
if content_type_wanted not in content_type:
                    if not werr: warning("Webpage may not be properly formatted")
else: return None
else:
error("HTTP Error",page.status)
return None
except urllib.error.URLError as e:
error ("URLError",e.reason)
return None
return raw_text
def file_title (title, feed_title):
# Characters not allowed in Windows filenames
replace = [ ("/"," "), ("\\"," "), (">"," "), ('"',"'"), (":","- "),
("*"," "), ("|","- "), ("<"," "), ("?","") ]
for old,new in replace:
title = title.replace(old,new)
folder = res_folder + "/" + current_date + "/" + feed_title
if not os.path.isdir(folder): os.makedirs(folder)
return folder + "/" + title + ".html"
def do_item (item,feed_title):
title = item.find("title")
link = item.find("link")
if not (title is None or link is None) and link.text not in links:
print("Saving Article:",title.text)
text = access_page(link.text,"text/html", werr = True)
cache.append([current_time,link.text])
links.append(link.text)
if text is not None:
with open(file_title(title.text,feed_title),"wb") as file:
file.write(text)
return True
return False
def find_links (root, feed_url, title):
if title is None:
title = root.find("title")
if title is None: title = feed_url
else: title = title.text
print("Getting articles from",title)
count = 0
for item in root.findall("item"):
count += int(do_item(item,title))
    print(count, "new articles fetched\n")
return count
# MAIN
load_rss_feeds()
load_cache()
for feed_title, feed_url in rss_feeds:
if feed_title is None: print("Fetching RSS feed",feed_url)
else: print("Fetching RSS feed",feed_title)
raw_text = access_page(feed_url,"text/xml")
if raw_text is not None:
find_links(ET.fromstring(raw_text).find("channel"),feed_url,feed_title)
unload_cache()
|
import requests
# NOTE: the original snippet referenced an undefined `headers` dict; the values
# below are placeholders only (a real bearer token is required by this endpoint).
headers = {
    'authorization': 'Bearer <YOUR_TOKEN_HERE>',
    'user-agent': 'Mozilla/5.0',
}
params = (
('include_profile_interstitial_type', '1'),
('include_blocking', '1'),
('include_blocked_by', '1'),
('include_followed_by', '1'),
('include_want_retweets', '1'),
('include_mute_edge', '1'),
('include_can_dm', '1'),
('include_can_media_tag', '1'),
('skip_status', '1'),
('cards_platform', 'Web-12'),
('include_cards', '1'),
('include_ext_alt_text', 'true'),
('include_quote_count', 'true'),
('include_reply_count', '1'),
('tweet_mode', 'extended'),
('include_entities', 'true'),
('include_user_entities', 'true'),
('include_ext_media_color', 'true'),
('include_ext_media_availability', 'true'),
('send_error_codes', 'true'),
('simple_quoted_tweet', 'true'),
('q', '#MCF'),
('count', '20'),
('query_source', 'hashtag_click'),
('cursor', ''),
('pc', '1'),
('spelling_corrections', '1'),
('ext', 'mediaStats,highlightedLabel'),
)
response = requests.get('https://api.twitter.com/2/search/adaptive.json', headers=headers, params=params)
#NB. Original query string below. It seems impossible to parse and
#reproduce query strings 100% accurately so the one below is given
#in case the reproduced version is not "correct".
# response = requests.get('https://api.twitter.com/2/search/adaptive.json?include_profile_interstitial_type=1&include_blocking=1&include_blocked_by=1&include_followed_by=1&include_want_retweets=1&include_mute_edge=1&include_can_dm=1&include_can_media_tag=1&skip_status=1&cards_platform=Web-12&include_cards=1&include_ext_alt_text=true&include_quote_count=true&include_reply_count=1&tweet_mode=extended&include_entities=true&include_user_entities=true&include_ext_media_color=true&include_ext_media_availability=true&send_error_codes=true&simple_quoted_tweet=true&q=%23MCF&count=20&query_source=hashtag_click&cursor=scroll%3AthGAVUV0VFVBYBFoCs09Sk-aeYIxIYzAESY8LrAAAB9D-AYk3S8an8AAAAFxJxrIQXlhABEgXlF9ZWkAoSWDcN-5dQABIke-nPVrAFEdbw8z5WkAURuGh64leQABJAkzkAFqABEk9pzBZXEAESK-khM9fQBRHrvGzAVgABEbMS6F3X0AASMbSavxeQAQ-XX7DY1-AAEm3UMrrWMAAR4DtVI1YAABJtl9bX1xABEgVsJx6XYAER9YF9gZeABRHi9NCD12ACEm3EWUiXMAASdCSN2FbwARI0C8qVFpAJEeU-QoNU0AMlABUAJQARFciFehWAiXoYB0RFRkFVTFQVABUAFS4VABUAAA%3D%3D&pc=1&spelling_corrections=1&ext=mediaStats%2ChighlightedLabel', headers=headers)
print(response.content)
#NB. Original query string below. It seems impossible to parse and
#reproduce query strings 100% accurately so the one below is given
#in case the reproduced version is not "correct".
# response = requests.get('https://api.twitter.com/2/search/adaptive.json?include_profile_interstitial_type=1&include_blocking=1&include_blocked_by=1&include_followed_by=1&include_want_retweets=1&include_mute_edge=1&include_can_dm=1&include_can_media_tag=1&skip_status=1&cards_platform=Web-12&include_cards=1&include_ext_alt_text=true&include_quote_count=true&include_reply_count=1&tweet_mode=extended&include_entities=true&include_user_entities=true&include_ext_media_color=true&include_ext_media_availability=true&send_error_codes=true&simple_quoted_tweet=true&q=%23MCF&count=20&query_source=hashtag_click&pc=1&spelling_corrections=1&ext=mediaStats%2ChighlightedLabel', headers=headers)
|
#!/usr/bin/env python3
#
# This example shows how to run a combined fluid-kinetic simulation
# with both the hot-tail and runaway electron grids.
#
# Run as
#
# $ ./basic.py
# $ ../../build/iface/dreami dream_settings.h5
#
# ###################################################################
import numpy as np
import sys
sys.path.append('../../py/')
from DREAM.DREAMSettings import DREAMSettings
import DREAM.Settings.Equations.IonSpecies as Ions
import DREAM.Settings.Solver as Solver
import DREAM.Settings.CollisionHandler as Collisions
import DREAM.Settings.Equations.DistributionFunction as DistFunc
import DREAM.Settings.Equations.RunawayElectrons as Runaways
import DREAM.Settings.TransportSettings as Transport
ds = DREAMSettings()
E = 0.6 # Electric field strength (V/m)
n = 5e19 # Electron density (m^-3)
T = 1e3 # Temperature (eV)
pstar=0.5
Nt = 3
Nr = 11; a0=0.22
Np = 60
Nxi= 45
t_data = np.linspace(0,1e-2,Nt)
r_data = np.linspace(0,a0,Nr)
p_data = np.linspace(0.0,1.5,Np)
xi_data = np.linspace(-1.0,1.0,Nxi)
Ar = 1.0 * np.ones((Nt,Nr,Nxi,Np));
Drr = 1.0e-2 * np.ones((Nt,Nr,Nxi,Np))
## Tests with differently set coefficients.
Ar[:,r_data<0.05,:,:] = 0.0
Drr[:,r_data<0.05,:,:] = 0.0
# Enable runaways
re_enabled = True
# Set E_field
ds.eqsys.E_field.setPrescribedData(E)
# Set temperature
ds.eqsys.T_cold.setPrescribedData(T)
# Set ions
ds.eqsys.n_i.addIon(name='D', Z=1, iontype=Ions.IONS_PRESCRIBED_FULLY_IONIZED, n=n)
# Disable hot-tail grid
ds.hottailgrid.setEnabled(False)
# Set initial hot electron Maxwellian
ds.eqsys.f_hot.setInitialProfiles(n0=2*n, T0=T)
# Set up momentum grid
ds.hottailgrid.setNp(15)
ds.hottailgrid.setNxi(5)
ds.hottailgrid.setPmax(1.5)
#ds.collisions.collfreq_mode = Collisions.COLLFREQ_MODE_ULTRA_RELATIVISTIC
#ds.collisions.collfreq_mode = Collisions.COLLFREQ_MODE_SUPERTHERMAL
# Include Dreicer and avalanche
ds.eqsys.n_re.setAvalanche(Runaways.AVALANCHE_MODE_FLUID)
ds.eqsys.n_re.setDreicer(Runaways.DREICER_RATE_NEURAL_NETWORK)
# Disable runaway grid
pmax_re = 0.5
ds.runawaygrid.setEnabled(False)
# Set up radial grid
ds.radialgrid.setB0(5)
ds.radialgrid.setMinorRadius(a0)
ds.radialgrid.setNr(50)
ds.radialgrid.setWallRadius(a0*1.1)
# Set Svensson transport coefficients
ds.eqsys.n_re.transport.setSvenssonPstar(pstar)
ds.eqsys.n_re.transport.setSvenssonInterp1dParam(Transport.SVENSSON_INTERP1D_PARAM_IP)
ds.eqsys.n_re.transport.setSvenssonAdvection(Ar ,t=t_data,r=r_data,p=p_data,xi=xi_data)
ds.eqsys.n_re.transport.setSvenssonDiffusion(Drr,t=t_data,r=r_data,p=p_data,xi=xi_data,
#interp3d=Transport.INTERP3D_NEAREST,
interp1d=Transport.INTERP1D_LINEAR)
# Use the linear solver
#ds.solver.setType(Solver.LINEAR_IMPLICIT)
ds.solver.setType(Solver.NONLINEAR)
ds.solver.setVerbose(False)
ds.other.include('fluid')
# Set time stepper
ds.timestep.setTmax(1e-3)
ds.timestep.setNt(500)
# Save settings to HDF5 file
ds.save('dream_settings.h5')
print()
print("Done!")
|
from moeda import *
from dado import *
preco = leia_dinheiro('Digite o preço: R$')
resumo(preco, 35, 22)
|
# Generated by Django 2.0.5 on 2018-06-07 07:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hrmsapp', '0009_auto_20180606_0959'),
]
operations = [
migrations.RemoveField(
model_name='educational_info',
name='add_skill',
),
migrations.AddField(
model_name='auth_user_extended',
name='add_skill',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
|
# vim: ts=4:sw=4:expandtabs
__authors__ = "Zach Mott, David Fox, Jason Dunn"
from django.views import generic
from django.shortcuts import redirect
from django.utils.text import slugify
from quizard.forms import AssignmentSearchForm
from NavLocationMixin import NavLocationMixin
class Index(NavLocationMixin, generic.FormView):
location = 'index'
form_class = AssignmentSearchForm
template_name = 'quizard/index.html'
def form_valid(self, form):
code = form.cleaned_data.get('code', None)
school = form.cleaned_data.get('school', None)
teacher = form.cleaned_data.get('teacher', None)
if code:
return redirect('assignment', code=code)
elif school and teacher:
return redirect('assignments', school=slugify(school), teacher=teacher.lower())
else:
return redirect('index')
|
"""
Python library to fetch trending repositories/users using github-trending-api
Made by Hedy Li,
Code on GitHub
"""
from typing import Optional
import requests
def fetch_repos(
language: str = "",
spoken_language_code: str = "",
since: str = "daily",
) -> list:
"""Fetch trending repositories on GitHub
Parameters:
language (str, optional): Filtering by language, eg: python
spoken_language_code (str, optional): The spoken language, eg: en for english
since (str, optional): The time range, choose from: [daily, weekly, monthly]. Defaults to "daily"
Returns:
A list of dicts containing information for the trending repositories found
"""
if language and not check_language(language):
raise ValueError(f"Invalid language argument: {language}")
if spoken_language_code and not check_spoken_language(spoken_language_code):
raise ValueError(
f"Invalid spoken_language_code argument: {spoken_language_code}"
)
if since and not check_since(since):
raise ValueError(
f"Invalid since argument (must be 'daily', 'weekly' or 'monthly'): {since}"
)
url: str = f"https://gtrend.yapie.me/repositories?language={language}&since={since}&spoken_language_code={spoken_language_code}"
res = requests.get(url).json()
repos = []
for repo in res:
repo["fullname"] = f"{repo['author']}/{repo['name']}"
repo_language = repo.get("language")
if language:
if not repo_language or repo_language.lower() != language.lower():
continue
repos.append(repo)
return repos
def fetch_developers(language: str = "", since: str = "daily") -> list:
"""Fetch trending developers on GitHub
Parameters:
language (str, optional): The programming language, eg: python
since (str, optional): The time range, choose from [daily, weekly, monthly]. Defaults to "daily"
Returns:
A list of dicts containing information for the trending developers found
"""
if language and not check_language(language):
raise ValueError("Language value does not exist.")
if since and not check_since(since):
raise ValueError("Since value is not correct.")
url: str = f"https://gtrend.yapie.me/developers?language={language}&since={since}"
res = requests.get(url).json()
return res
def languages_list() -> list:
"""Fetch languages
Returns:
A list of dictionaries containing languages
"""
url: str = "https://gtrend.yapie.me/languages"
response = requests.get(url).json()
return response
def spoken_languages_list() -> list:
"""Fetch spoken languages.
Returns:
A list of spoken languages
"""
url: str = "https://gtrend.yapie.me/spoken_languages"
response = requests.get(url).json()
return response
def check_language(language: str = "") -> bool:
"""Check if the language exists.
Parameters:
language (str): The language, eg: python.
Returns:
A boolean value. True for valid language, False otherwise.
"""
languages = languages_list()
language = language.lower()
for name in languages:
if language == name["name"].lower():
return True
return False
def check_spoken_language(spoken_language_code: str = "") -> bool:
"""Check if the spoken language exists.
Parameters:
spoken_language_code (str): The spoken language, eg: en for english.
Returns:
A boolean value. True for valid spoken language, False otherwise.
"""
spoken_languages = spoken_languages_list()
spoken_language_code = spoken_language_code.lower()
for name in spoken_languages:
if spoken_language_code == name["urlParam"].lower():
return True
return False
def check_since(since: str = "") -> bool:
"""Check if the time range value is correct.
Parameters:
since (str): The time range.
Returns:
A boolean value. True for valid parameter, False otherwise.
"""
return since.lower() in ["daily", "weekly", "monthly"]
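# --- A minimal usage sketch (illustrative, not part of the library above). It
# assumes the gtrend.yapie.me endpoints used by this module are reachable;
# "python" / "weekly" are just example arguments.
if __name__ == "__main__":
    for repo in fetch_repos(language="python", since="weekly")[:5]:
        print(repo["fullname"])
    print(len(fetch_developers(language="python")), "trending developers today")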
|
from django.conf.urls import include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from zentral.conf import saml2_idp_metadata_file, settings as zentral_settings
# base
urlpatterns = [
url(r'^', include('base.urls', namespace='base')),
url(r'^admin/users/', include('accounts.urls', namespace='users')),
url(r'^accounts/', include('django.contrib.auth.urls')),
]
# zentral apps
for app_name in zentral_settings.get('apps', []):
app_shortname = app_name.rsplit('.', 1)[-1]
url_module = "{}.urls".format(app_name)
try:
urlpatterns.append(url(r'^{}/'.format(app_shortname), include(url_module, namespace=app_shortname)))
except ImportError:
# TODO use ModuleNotFoundError for python >= 3.6
pass
# saml2
if saml2_idp_metadata_file:
urlpatterns.append(url(r'^saml2/', include('accounts.saml2_urls', namespace='saml2')))
# static files
urlpatterns += staticfiles_urlpatterns()
|
from owoify import Owoifator
owoifator = Owoifator()
owoifator.owoify("foldr") # Hewwo fwiend (*^ω^)
owoifator.owoify("foldl") # Hewwo fwiend (*^ω^)
owoifator.owoify("loss") # Hewwo fwiend (*^ω^)
owoifator.owoify("losses") # Hewwo fwiend (*^ω^)
owoifator.owoify("nan") # Hewwo fwiend (*^ω^)
owoifator.owoify("tf.random.stateless_gamma") # Hewwo fwiend (*^ω^)
owoifator.owoify("softmax") # Hewwo fwiend (*^ω^)
owoifator.owoify("convert_to_tensor") # Hewwo fwiend (*^ω^)
#Also remember to make an alias for MLP which is MyLittlePony
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
load("//antlir/bzl:constants.bzl", "REPO_CFG")
load("//antlir/bzl:oss_shim.bzl", "python_unittest")
load("//antlir/bzl/image/feature:new.bzl", "PRIVATE_DO_NOT_USE_feature_target_name")
TEST_IMAGE_PREFIX = "//antlir/compiler/test_images:"
def READ_MY_DOC_image_feature_target(name):
"""
DANGER: If you depend on a feature target for testing, you MUST manually
add any `feature` targets that it depends on to your `deps = []`.
    If you fail to do this, Buck will not know to rebuild the test if
one of its indirect `feature` dependencies changes. See
`feature/new.bzl` for an explanation.
"""
return PRIVATE_DO_NOT_USE_feature_target_name(name)
def image_feature_python_unittest(test_image_feature_transitive_deps, deps = None, env = None, **kwargs):
env = env or {}
env.update({
"test_image_feature_path_to_" + t: "$(location {})".format(
TEST_IMAGE_PREFIX + t,
)
for t in test_image_feature_transitive_deps
})
env["test_image_feature_built_artifacts_require_repo"] = \
str(int(REPO_CFG.artifacts_require_repo))
deps = (deps or []) + [":sample_items"]
# For now cpp_deps is raw buck deps for python_ targets
cpp_deps = [
TEST_IMAGE_PREFIX + t
for t in test_image_feature_transitive_deps
]
return python_unittest(
env = env,
deps = deps,
cpp_deps = cpp_deps,
**kwargs
)
|
import pandas as pd
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
def weights(data):
#df = pd.read_csv("complete_new.csv",parse_dates=[0], index_col=0,infer_datetime_format=True)
df = data
fund = df.iloc[0:,0:5]
mu = expected_returns.mean_historical_return(fund)
S = risk_models.sample_cov(fund)
### Method 1: Markowitz Mean-Variance Model
ef = EfficientFrontier(mu, S,weight_bounds=(0.05,0.4))
raw_weights = ef.max_sharpe()
cleaned_weights = ef.clean_weights()
#print(cleaned_weights)
#ef.save_weights_to_file("weights.csv") # saves to file
#ef.portfolio_performance(verbose=True)
weights = pd.DataFrame(cleaned_weights.values(),index =cleaned_weights.keys(),columns=["weights"])
weights_T = pd.DataFrame(weights.values.T,index= weights.columns, columns = weights.index)
#print(weights_T)
### Method 2: Black-litterman Model
### Output
df = df.append(weights_T,sort=False)
#print(df[-10:])
data_index = pd.DataFrame(df.index,index = df.index)
return_data = pd.concat([data_index,df],axis = 1)
#print(return_data)
return return_data
data = pd.read_csv("complete_new.csv",parse_dates=[0], index_col=0,infer_datetime_format=True)
print(type(data.index[0]))
data = weights(data)
#print(data[-5:])
|
import sys
from telethon.network.connection.tcpabridged import ConnectionTcpAbridged
from telethon.sessions import StringSession
from ..Config import Config
from .client import udyclient
__version__ = "0.02"
loop = None
if Config.STRING_SESSION:
session = StringSession(str(Config.STRING_SESSION))
else:
session = "audynesia"
try:
udy = udyclient(
session=session,
api_id=Config.APP_ID,
api_hash=Config.API_HASH,
loop=loop,
app_version=__version__,
connection=ConnectionTcpAbridged,
auto_reconnect=True,
connection_retries=None,
)
except Exception as e:
print(f"STRING_SESSION - {str(e)}")
sys.exit()
udy.tgbot = tgbot = udyclient(
session="CodTgbot",
api_id=Config.APP_ID,
api_hash=Config.API_HASH,
loop=loop,
app_version=__version__,
connection=ConnectionTcpAbridged,
auto_reconnect=True,
connection_retries=None,
).start(bot_token=Config.TG_BOT_TOKEN)
|
from django.conf.urls import url
from django.contrib import admin
from django.views.generic.base import TemplateView
from api.views import *
from django.urls import path
from .views import movie_times
from .views import movie_list
urlpatterns = [
url(r'^admin/', admin.site.urls),
path('movietimes/', movie_times, name='movie_times'),
path('movielist/', movie_list, name='movie_list'),
]
|
import os
import shutil
import urllib
import urllib2
import json
from subprocess import call, Popen
from time import sleep
from nosuch.oscutil import *
UseLoopMIDI = False
def killtask(nm):
call(["c:/windows/system32/taskkill","/f","/im",nm])
def mmtt_action(meth):
url = 'http://127.0.0.1:4444/dojo.txt'
params = '{}'
id = '12345'
data = '{ "jsonrpc": "2.0", "method": "'+meth+'", "params": "'+params+'", "id":"'+id+'" }\n'
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
r = response.read()
j = json.loads(r)
if "result" in j:
return j["result"]
else:
print "No result in JSON response!? r="+r
return -1
call(["c:/python27/python.exe","c:/local/manifold/bin/killall.py"])
call(["c:/python27/python.exe","c:/local/manifold/bin/debugcycle.py"])
mmtt_exe = "mmtt_kinetic.exe"
mmtt_exe = "mmtt_pcx.exe"
mmtt_exe = "mmtt_depth.exe"
mmtt_depth = Popen([mmtt_exe])
sleep(2) # let it get running
while True:
if mmtt_action("align_isdone") == 1:
break
sleep(1)
print "MMTT has finished aligning."
if UseLoopMIDI:
loopmidi = Popen(["/Program Files (x86)/Tobias Erichsen/loopMIDI/loopMIDI.exe"])
bidulepatch = "\\local\\manifold\\patches\\bidule\\Palette_Alchemy_Burn.bidule"
print "loopMIDI has been started."
else:
print "NOTE: loopMIDI has NOT been started - this machine uses LoopBe30."
bidulepatch = "\\local\\manifold\\patches\\bidule\\Palette_Alchemy_LoopBe.bidule"
sleep(1)
bidule = Popen([
"C:\\Program Files\\Plogue\\Bidule\\PlogueBidule_x64.exe",
bidulepatch])
# Wait for Bidule to completely load all the Alchemy instances
sleep(50)
### patches="c:\\local\\manifold\\bin\\config\\palette"
### shutil.copy(patches+"\\default_burn.mnf",patches+"\\default.mnf")
fromconfig="c:\\local\\manifold\\bin\\config\\resolume_config.xml"
toconfig="c:\\users\\tjt\\documents\\resolume avenue 4\\preferences\\config.xml"
shutil.copy(fromconfig,toconfig)
arena = Popen(["C:\\Program Files (x86)\\Resolume Avenue 4.1.11\\Avenue.exe"])
## cd \local\python\nosuch_oscutil
global resolume
resolume = OscRecipient("127.0.0.1",7000)
# Activate the clips in Resolume.
# IMPORTANT!! The last clip activated MUST be layer1, so that the
# OSC enabling/disabling of FFGL plugins works as intended.
sleep(12)
print "Sending OSC to activate Resolume."
resolume.sendosc("/layer2/clip1/connect",[1])
resolume.sendosc("/layer1/clip1/connect",[1])
# call(["c:/local/bin/nircmd.exe","win","settopmost","title","MMTT","1"])
# call(["c:/local/bin/nircmd.exe","win","max","title","MMTT"])
# Keep sending - Resolume might not be up yet
for i in range(5):
sleep(2)
resolume.sendosc("/layer2/clip1/connect",[1])
resolume.sendosc("/layer1/clip1/connect",[1])
call(["c:/local/bin/nircmd.exe","win","setsize","title","MMTT","250","250","500","400"])
call(["c:/local/bin/nircmd.exe","win","settopmost","title","MMTT","1"])
# call(["c:/local/bin/nircmd.exe","win","max","title","MMTT","1"])
call(["c:/local/bin/nircmd.exe","win","min","stitle","Plogue"])
print "DONE!"
|
# pylint: skip-file
"""Compute deuteron lowest energy state in 3s1 3d1 coupled channel (bound).
Computes lowest energy eigenvalue for a set of reference SRG evolved potentials
as well as the manually SRG evolved potential to show the implementation is
numerically equivalent to the reference implementation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from math import sqrt
import numpy as np
from numpy.linalg import eigh
# Check if module is installed, otherwise load locally
try:
import srg3d.potential as potential
import srg3d.srg as srg
except ImportError:
from context import srg3d
potential = srg3d.potential
srg = srg3d.srg
def _get_coupled_channel_hamiltonian(channels):
chan_dim = channels[0].dim
dim = int(sqrt(len(channels)))
hamiltonian = np.zeros((dim * chan_dim, dim * chan_dim))
for i in range(dim):
for j in range(dim):
chan = channels[i * dim + j]
if i == j:
chan_hamiltonian = chan.with_weights() + chan.kinetic_energy()
else:
chan_hamiltonian = chan.with_weights()
r_s = i * chan_dim
r_e = (i + 1) * chan_dim
c_s = j * chan_dim
c_e = (j + 1) * chan_dim
hamiltonian[r_s:r_e, c_s:c_e] = chan_hamiltonian
return hamiltonian
# Physical constants
hbarc = 197.327
proton_mass = 938.272
neutron_mass = 939.565
red_mass = proton_mass * neutron_mass / (proton_mass + neutron_mass)
# Load unevolved potential
chan_3s1 = potential.load(2, 3, 'EM420new', '10010', 50, 'np')
chan_3s1_3d1 = potential.load(2, 3, 'EM420new', '10210', 50, 'np')
chan_3d1_3s1 = potential.load(2, 3, 'EM420new', '12010', 50, 'np')
chan_3d1 = potential.load(2, 3, 'EM420new', '12210', 50, 'np')
# Compute reference Hamiltonian
hamiltonian_ref = _get_coupled_channel_hamiltonian([chan_3s1, chan_3s1_3d1,
chan_3d1_3s1, chan_3d1])
# Create coupled channel potential from channels
c_potential = potential.CoupledPotential([chan_3s1, chan_3s1_3d1, chan_3d1_3s1,
chan_3d1])
# Compute alternate Hamiltonian
hamiltonian_coupled = c_potential.with_weights() + c_potential.kinetic_energy()
# Compute bound state eigenvalues
ev_ref = np.amin(eigh(hamiltonian_ref)[0])
ev = np.amin(eigh(hamiltonian_coupled)[0])
# Print unevolved results
print('Unevolved')
print('E_ref = E_srg = {} MeV'.format(hbarc**2 / (2 * red_mass) * ev_ref))
print('E_alt = {} MeV\n'.format(hbarc**2 / (2 * red_mass) * ev))
# Set up SRG evolution
v_mask = np.array([[0 for _ in range(len(c_potential.nodes))] for _ in
range(len(c_potential.nodes))])
k_mask = np.array([[1 for _ in range(len(c_potential.nodes))] for _ in
range(len(c_potential.nodes))])
srg_obj = srg.SRG(c_potential, v_mask, k_mask)
# Create list of lambdas to which to evolve
lambdas = [25] + list(range(10, 4, -1)) + list(np.arange(4, 3.1, -0.5)) \
+ list(np.arange(3.0, 2.5, -0.2)) + list(np.arange(2.4, 1.38, -0.05))
num_pts = [100]*9 + [83] + [79] + [76] + [73] + [72]*2 + [71] + [70]*2 + [69] \
+ [68]*2 + [67]*2 + [66] + [65]*2 + [64]*2 + [63] + [62]*2 + [61]*2
for l, n in zip(lambdas, num_pts):
# Evolve potential
srg_obj.evolve(l, verbose=False, integrator='dopri5', atol=10**(-6),
rtol=10**(-6), nsteps=10**(5))
# Extract evolved potential
c_potential = srg_obj.get_potential()
# Reduce dimension if necessary
if n < c_potential.dim:
c_potential = c_potential.reduce_dim(n)
v_mask = np.array([[0 for _ in range(len(c_potential.nodes))] for _ in
range(len(c_potential.nodes))])
k_mask = np.array([[1 for _ in range(len(c_potential.nodes))] for _ in
range(len(c_potential.nodes))])
srg_obj.replace_potential(c_potential, v_mask, k_mask)
# Compute Hamiltonian
hamiltonian = c_potential.with_weights() + c_potential.kinetic_energy()
# Load reference potential (calculated by different code)
chan_3s1 = potential.load(2, 3, 'EM420new', '10010', l, 'np')
chan_3s1_3d1 = potential.load(2, 3, 'EM420new', '10210', l, 'np')
chan_3d1_3s1 = potential.load(2, 3, 'EM420new', '12010', l, 'np')
chan_3d1 = potential.load(2, 3, 'EM420new', '12210', l, 'np')
# Compute reference Hamiltonian
hamiltonian_ref = _get_coupled_channel_hamiltonian([chan_3s1, chan_3s1_3d1,
chan_3d1_3s1,
chan_3d1])
# Get lowest eigenvalues
ev_ref = np.amin(eigh(hamiltonian_ref)[0])
ev = np.amin(eigh(hamiltonian)[0])
# Output values
print('Lambda: {}'.format(l))
print('E_ref = {} MeV'.format(hbarc**2 / (2 * red_mass) * ev_ref))
print('E_srg = {} MeV\n'.format(hbarc**2 / (2 * red_mass) * ev))
|
from math import exp, factorial as fat
def poisson(lamb, k):
return exp(-lamb) * lamb**k / fat(k)
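# Quick sanity check of the formula above (not part of the original script):
# poisson(3, 2) = exp(-3) * 3**2 / 2! ≈ 0.049787 * 4.5 ≈ 0.2240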
while True:
    print('*** Poisson Distribution Calculator ***')
    lb = float(input('Lambda (average rate of the event in the interval) .....: '))
    x = input("X (number of events [X] or [X1 X2] (for an interval) ...: ").split()
    if len(x) == 1:
        x1 = x2 = int(x[0])
    elif len(x) == 2:
        x1, x2 = sorted([int(y) for y in x])
    else:
        print('Invalid input for X, please try again.\n')
        continue
    soma = 0
    for xi in range(x1, x2+1):
        soma += poisson(lb, xi)
    print(f'Result: {soma:.20f}')
    print()
|
import hashlib
import json
from http.cookies import SimpleCookie
import requests
import urllib3
TIMEOUT = 5
class QuantumGatewayScanner:
def __init__(self, host, password, use_https=True):
self.verify = False
if use_https:
self.scheme = 'https'
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
else:
self.scheme = 'http'
self.host = self.scheme + '://' + host
self.password = password
self.connected_devices = {}
self.session = requests.Session()
self.success_init = self._check_auth()
def scan_devices(self):
self.connected_devices = {}
if self._check_auth():
self._get_connected_devices()
return self.connected_devices.keys()
def get_device_name(self, device):
return self.connected_devices.get(device)
def _get_connected_devices(self):
devices_raw = self.session.get(self.host + '/api/devices', timeout=TIMEOUT, verify=self.verify)
devices = json.loads(devices_raw.text)
self.connected_devices = {device['mac']: device['name'] for device in devices if device['status']}
def _check_auth(self):
res = self.session.get(self.host + '/api/devices', timeout=TIMEOUT, verify=self.verify)
if res.status_code == 200:
return True
getLogin = self.session.get(self.host + '/api/login', timeout=TIMEOUT, verify=self.verify)
salt = getLogin.json()['passwordSalt']
encodedPassword = hashlib.sha512()
encodedPassword.update((self.password + salt).encode('ascii'))
payload = json.dumps({"password": encodedPassword.hexdigest()})
postLogin = self.session.post(self.host + '/api/login', data=payload, timeout=TIMEOUT, verify=self.verify)
token = SimpleCookie(postLogin.headers.get('set-cookie'))['XSRF-TOKEN'].value
self.session.headers.update({'X-XSRF-TOKEN': token})
res = self.session.get(self.host + '/api/devices', timeout=TIMEOUT, verify=self.verify)
if res.status_code == 200:
return True
return False
def _log_out(self):
self.session.get(self.host + '/api/logout', timeout=TIMEOUT, verify=self.verify)
self.session.close()
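# --- A minimal usage sketch (illustrative, not part of the module above). The
# host and password are placeholders; note that the constructor immediately
# tries to authenticate against the gateway's /api endpoints.
if __name__ == "__main__":
    scanner = QuantumGatewayScanner("192.168.1.1", "admin-password-here", use_https=True)
    if scanner.success_init:
        for mac in scanner.scan_devices():
            print(mac, scanner.get_device_name(mac))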
|
#
# (C) 2014-2017 Seiji Matsuoka
# Licensed under the MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import pickle
import unittest
from kiwiii.stats import graphgen
data = {
"nodes": {"records": [
{"id": 1, "value": "a"},
{"id": 2, "value": "b"},
{"id": 3, "value": "c"}
]},
"edges": {"records": [
{"source": 1, "target": 2, "weight": 0.5},
{"source": 2, "target": 3, "weight": 0.8},
{"source": 1, "target": 3, "weight": 1}
]}
}
class TestGraphGen(unittest.TestCase):
def test_graph_loader(self):
G = graphgen.graph_loader(data, "id")
self.assertEqual(len(G), 3)
self.assertEqual(G.node[0]["value"], "a")
self.assertEqual(G.edge[1][2]["weight"], 0.8)
def test_threshold_network(self):
G = graphgen.graph_loader(data, "id")
H = graphgen.threshold_network(G, 0.6)
self.assertEqual(G.edge[0][1]["weight"], 0.5)
self.assertEqual(H.number_of_edges(), 2)
def test_load_mapping(self):
G = graphgen.graph_loader(data, "id")
mapping = {
"column": {"key": "additional"},
"key": "id",
"mapping": {1: "a2", 2: "b2", 3: "c2"}
}
graphgen.load_mapping(G, mapping)
self.assertEqual(G.node[2]["additional"], "c2")
mapping2 = {
"column": {"key": "multi"},
"key": "id",
"mapping": {1: "a,b,c", 2: "d,e,f", 3: "g,h,i"}
}
graphgen.load_multilabel_mapping(G, mapping2, delimiter=",")
self.assertEqual(G.node[2]["multi"][2], "i")
def test_group_records(self):
data2 = pickle.loads(pickle.dumps(data))
data2["nodes"]["records"].append({"id": 4, "value": "b"})
data2["edges"]["records"].append(
{"source": 3, "target": 4, "weight": 1})
G = graphgen.graph_loader(data2, "id")
rcds = graphgen.group_records(G, "value")
self.assertEqual(len(rcds), 3)
mapping = {
"column": {"key": "multi"},
"key": "id",
"mapping": {1: "a,b,c", 2: "a", 3: "a,b,d,e", 4: "c,f"}
}
graphgen.load_multilabel_mapping(G, mapping, delimiter=",")
rcds = graphgen.group_records(G, "multi")
self.assertEqual(len(rcds), 6)
def test_assign_partition(self):
data2 = pickle.loads(pickle.dumps(data))
data2["nodes"]["records"].append({"id": 4, "value": "b"})
G = graphgen.graph_loader(data2, "id")
graphgen.assign_partition(G)
self.assertEqual(G.node[3]["partition"], 1)
def test_assign_weight(self):
G = graphgen.graph_loader(data, "id")
mapping = {
"column": {"key": "multi"},
"key": "id",
"mapping": {1: "a,b,c", 2: "a", 3: "a,b,d,e"}
}
graphgen.load_multilabel_mapping(G, mapping, delimiter=",")
graphgen.assign_weight(G, "multi")
self.assertEqual(G.node[1]["weight"], 1)
self.assertEqual(G.node[2]["weight"], 0.25)
def test_random_graph(self):
G = graphgen.random_graph(8, 0.25)
self.assertIn("partition", G.node[0])
|
from tornado import ioloop, web
import os
class WebServer:
"""Creates the webserver used as the control interface for dummyRDM
"""
def __init__(self):
pass
class MainHandler(web.RequestHandler):
def get(self):
self.render("templates/index.html")
def make_app(self):
settings = {
"static_path": os.path.join(os.path.dirname(__file__), "static")
}
return web.Application([
(r"/", WebServer.MainHandler),
], **settings)
def run(self):
app = WebServer.make_app(self)
app.listen(8080)
print("Webserver running on port 8080")
ioloop.IOLoop.current().start()
|
# Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gapit_test_framework import gapit_test, require, require_equal
from gapit_test_framework import require_not_equal, little_endian_bytes_to_int
from gapit_test_framework import GapitTest, get_read_offset_function
import gapit_test_framework
from struct_offsets import VulkanStruct, UINT32_T, SIZE_T, POINTER
from struct_offsets import HANDLE, FLOAT, CHAR, ARRAY, DEVICE_SIZE
from vulkan_constants import *
@gapit_test("vkCmdBindVertexBuffers_test")
class SingleBuffer(GapitTest):
def expect(self):
architecture = self.architecture
cmd_bind_vertex_buffers = require(
self.next_call_of("vkCmdBindVertexBuffers"))
require_not_equal(0, cmd_bind_vertex_buffers.int_commandBuffer)
require_equal(0, cmd_bind_vertex_buffers.int_firstBinding)
require_equal(1, cmd_bind_vertex_buffers.int_bindingCount)
require_not_equal(0, cmd_bind_vertex_buffers.hex_pBuffers)
require_not_equal(0, cmd_bind_vertex_buffers.hex_pOffsets)
sent_buffer = little_endian_bytes_to_int(
require(
cmd_bind_vertex_buffers.get_read_data(
cmd_bind_vertex_buffers.hex_pBuffers,
NON_DISPATCHABLE_HANDLE_SIZE)))
require_not_equal(sent_buffer, 0)
sent_offset = little_endian_bytes_to_int(
require(
cmd_bind_vertex_buffers.get_read_data(
cmd_bind_vertex_buffers.hex_pOffsets,
8)))
require_equal(0, sent_offset)
BUFFER_COPY = [
("srcOffset", DEVICE_SIZE),
("dstOffset", DEVICE_SIZE),
("size", DEVICE_SIZE)
]
@gapit_test("vkCmdBindVertexBuffers_test")
class CopyBuffer(GapitTest):
def expect(self):
architecture = self.architecture
cmd_copy_buffer = require(
self.next_call_of("vkCmdCopyBuffer"))
require_not_equal(0, cmd_copy_buffer.int_commandBuffer)
require_not_equal(0, cmd_copy_buffer.int_srcBuffer)
require_not_equal(0, cmd_copy_buffer.int_dstBuffer)
require_equal(1, cmd_copy_buffer.int_regionCount)
require_not_equal(0, cmd_copy_buffer.hex_pRegions)
copy = VulkanStruct(
architecture, BUFFER_COPY,
get_read_offset_function(cmd_copy_buffer,
cmd_copy_buffer.hex_pRegions))
require_equal(0, copy.srcOffset)
require_equal(0, copy.dstOffset)
require_equal(1024, copy.size)
|
# Copyright 2012 Google Inc. All Rights Reserved.
__author__ = 'benvanik@google.com (Ben Vanik)'
class DebuggerProtocol(object):
"""An abstract debugger protocol.
Protocols implement asynchronous command channels for controlling remote
debuggers. The debugging interface has been normalized (somewhat) and the
exact transmission mechanism (TCP/pipes/etc) can be implemented however it is
required.
"""
def __init__(self, uri, *args, **kwargs):
"""Initializes a debugger protocol.
Args:
uri: Target instance URI.
"""
self._uri = uri
self._detach_callback = None
self._break_callback = None
self._exception_callback = None
def uri(self):
return self._uri
def set_detach_callback(self, value):
self._detach_callback = value
def set_break_callback(self, value):
self._break_callback = value
def set_exception_callback(self, value):
self._exception_callback = value
def is_attached(self):
"""
Returns:
True if the protocol is attached.
"""
raise NotImplementedError()
def attach(self, callback=None):
"""Begins attaching the protocol to the instance.
Args:
callback: A function to call when the attaching completes.
Receives a boolean indicating success.
"""
raise NotImplementedError()
def detach(self, terminate, reason=None):
"""Detaches the protocol from the instance.
Args:
terminate: True to terminate the target.
reason: Reason for detaching, or None if user initiated.
"""
raise NotImplementedError()
def suspend(self, callback):
"""Suspends the target instance.
Note that this will not break in the target, but merely suspend execution.
Args:
callback: A function to call when the suspend completes.
"""
raise NotImplementedError()
def resume(self, callback):
"""Resumes the target instance.
If the target was at a breakpoint this will continue from there.
Args:
callback: A function to call when the resume completes.
"""
raise NotImplementedError()
def step(self, action, count, callback):
"""Steps the target instance.
Only valid when suspended at a breakpoint.
Args:
action: 'next', 'in', 'out'.
count: Number of steps to make.
callback: A function to call when the step completes.
"""
raise NotImplementedError()
def change_source(self, uri, new_source, callback):
"""Modifies source code at runtime.
Here be black magic, and it may not work.
Args:
uri: Source URI.
new_source: New source code contents.
callback: A function to call when the change completes.
"""
raise NotImplementedError()
def add_breakpoint(self, breakpoint, callback):
"""Adds a breakpoint to the debugger.
Args:
breakpoint: Breakpoint to add.
callback: A function to call when the add completes. Inspect for the
protocol ID used in change/remove requests.
"""
raise NotImplementedError()
def change_breakpoint(self, protocol_id, breakpoint, callback):
"""Updates a breakpoint that has changed.
Args:
protocol_id: Breakpoint protocol ID.
breakpoint: Breakpoint that changed.
callback: A function to call when the change completes.
"""
raise NotImplementedError()
def ignore_breakpoint(self, protocol_id, ignore_count, callback):
"""Ignores a breakpoint for a given number of hits.
Args:
protocol_id: Breakpoint protocol ID.
ignore_count: Number of hits to ignore.
callback: A function to call when the ignore acknowledges.
"""
raise NotImplementedError()
def remove_breakpoint(self, protocol_id, callback):
"""Removes a breakpoint from the debugger.
Args:
protocol_id: Breakpoint protocol ID.
callback: A function to call when the remove completes.
"""
raise NotImplementedError()
def query_values(self, handle_ids, callback):
"""Queries the values of a list of handles.
This is only valid while the remote debugger is paused after an event,
such as a break or exception.
Args:
handle_ids: A list of handle IDs.
callback: A function to call when the query completes.
"""
raise NotImplementedError()
def query_frame_scopes(self, frame, callback):
"""Queries the scopes for the given frame.
This is only valid while the remote debugger is paused after an event,
such as a break or exception.
Args:
frame: Frame to query.
callback: A function to call when the query completes.
"""
raise NotImplementedError()
class ProtocolResponse(object):
"""A response to a request made to a protocol.
"""
def __init__(self, protocol, is_running, is_success, error_message, body,
*args, **kwargs):
"""Initializes a protocol response.
Args:
protocol: The protocol that this response is from.
is_running: True if the VM is running.
      is_success: True if the request was successful.
error_message: An error message, if not successful.
body: Raw body. Implementation-specific.
"""
self._protocol = protocol
self._is_running = is_running
self._is_success = is_success
self._error_message = error_message
self._body = body
def is_running(self):
return self._is_running
def is_success(self):
return self._is_success
def error_message(self):
return self._error_message
def body(self):
return self._body
class SnapshotResponse(ProtocolResponse):
"""A response containing callstack information.
"""
def __init__(self, protocol, is_running, is_success, error_message, body,
handle_set, frames, *args, **kwargs):
"""Initializes a snapshot response.
Args:
protocol: The protocol that this response is from.
is_running: True if the VM is running.
is_success: True if the request was successful.
error_message: An error message, if not successful.
body: Raw body. Implementation-specific.
handle_set: Handle value set.
frames: A list of Frames.
"""
super(SnapshotResponse, self).__init__(
protocol, is_running, is_success, error_message, body, *args, **kwargs)
self._handle_set = handle_set
self._frames = frames
def handle_set(self):
return self._handle_set
def frames(self):
return self._frames
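# A minimal sketch (hypothetical names) of walking a snapshot in a callback,
# using the handle set that accompanies the frames:
#   def on_snapshot(response):
#     handles = response.handle_set()
#     for frame in response.frames():
#       print frame.formatted_call(handles)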
class QueryValuesResponse(ProtocolResponse):
"""A response to value requests.
"""
def __init__(self, protocol, is_running, is_success, error_message, body,
handle_set, *args, **kwargs):
"""Initializes a value query response.
Args:
protocol: The protocol that this response is from.
is_running: True if the VM is running.
is_success: True if the request was successful.
error_message: An error message, if not successful.
body: Raw body. Implementation-specific.
handle_set: Handle value set.
"""
super(QueryValuesResponse, self).__init__(
protocol, is_running, is_success, error_message, body, *args, **kwargs)
self._handle_set = handle_set
def handle_set(self):
return self._handle_set
class QueryFrameScopesResponse(ProtocolResponse):
"""A response to frame scope value requests.
"""
def __init__(self, protocol, is_running, is_success, error_message, body,
handle_set, scopes, *args, **kwargs):
"""Initializes a frame scope query response.
Args:
protocol: The protocol that this response is from.
is_running: True if the VM is running.
is_success: True if the request was successful.
error_message: An error message, if not successful.
body: Raw body. Implementation-specific.
handle_set: Handle value set.
scopes: A list of Scopes.
"""
super(QueryFrameScopesResponse, self).__init__(
protocol, is_running, is_success, error_message, body, *args, **kwargs)
self._handle_set = handle_set
self._scopes = scopes
def handle_set(self):
return self._handle_set
def scopes(self):
return self._scopes
class ChangeSourceResponse(ProtocolResponse):
"""A response to change source requests.
"""
def __init__(self, protocol, is_running, is_success, error_message, body,
step_in_required, *args, **kwargs):
"""Initializes a change source response.
Args:
protocol: The protocol that this response is from.
is_running: True if the VM is running.
is_success: True if the request was successful.
error_message: An error message, if not successful.
body: Raw body. Implementation-specific.
step_in_required: True if a step-in is required to apply the change.
"""
super(ChangeSourceResponse, self).__init__(
protocol, is_running, is_success, error_message, body, *args, **kwargs)
self._step_in_required = step_in_required
# change_log: [
# {
# 'break_points_update': [] ??
# },
# {
# 'function_patched': ''
# },
# {
# 'position_patched': [...]
# }
# ],
# result: {
# 'stack_modified': bool,
# 'updated': True,
# 'change_tree': {
# 'status': 'source changed',
# 'name': '',
# 'positions': {
# 'start_position': 0,
# 'end_position': 481
# },
# 'new_positions': {
# 'start_position': 0,
# 'end_position': 482
# },
# 'new_children': [],
# 'children': [ ... ]
# },
# 'textual_diff': {
# 'old_len': 481,
# 'new_len': 482,
# 'chunks': [325, 325, 326]
# },
# 'stack_update_needs_step_in': bool
# }
def step_in_required(self):
return self._step_in_required
class AddBreakpointResponse(ProtocolResponse):
"""A response to add breakpoint requests.
"""
def __init__(self, protocol, is_running, is_success, error_message, body,
protocol_id, *args, **kwargs):
"""Initializes an add breakpoint response.
Args:
protocol: The protocol that this response is from.
is_running: True if the VM is running.
is_success: True if the request was successful.
error_message: An error message, if not successful.
body: Raw body. Implementation-specific.
protocol_id: Breakpoint protocol ID.
"""
super(AddBreakpointResponse, self).__init__(
protocol, is_running, is_success, error_message, body, *args, **kwargs)
self._protocol_id = protocol_id
# TODO(benvanik): actual location line/col
def protocol_id(self):
return self._protocol_id
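# Illustrative add_breakpoint callback (hypothetical names): the returned
# protocol_id() is the ID that change_breakpoint/remove_breakpoint expect:
#   def on_added(response):
#     if response.is_success():
#       breakpoint_ids_by_breakpoint[breakpoint] = response.protocol_id()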
class ProtocolEvent(object):
"""An event fired by the protocol.
"""
def __init__(self, protocol, source, *args, **kwargs):
"""Initializes a protocol event.
Args:
protocol: The protocol that fired this event.
source: A tuple of (url, line, column).
"""
self._protocol = protocol
self._source = source
def source_url(self):
return self._source[0]
def source_line(self):
return self._source[1]
def source_column(self):
return self._source[2]
class BreakEvent(ProtocolEvent):
"""An event indicating that a break occurred.
"""
def __init__(self, protocol, source, breakpoint_ids, *args, **kwargs):
"""Initializes a break protocol event.
Args:
protocol: The protocol that fired this event.
source: A tuple of (url, line, column).
breakpoint_ids: A list of breakpoints that were hit, if any.
"""
super(BreakEvent, self).__init__(protocol, source, *args, **kwargs)
self._breakpoint_ids = breakpoint_ids
def breakpoint_ids(self):
return self._breakpoint_ids
class ExceptionEvent(ProtocolEvent):
"""An event indicating that an exception occurred.
"""
def __init__(self, protocol, source, is_uncaught, exception, *args, **kwargs):
"""Initializes an exception protocol event.
Args:
protocol: The protocol that fired this event.
source: A tuple of (url, line, column).
is_uncaught: True if the exception was uncaught.
exception: Exception object.
"""
super(ExceptionEvent, self).__init__(protocol, source, *args, **kwargs)
self._is_uncaught = is_uncaught
self._exception = exception
def is_uncaught(self):
return self._is_uncaught
def exception(self):
return self._exception
class Frame(object):
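"""A single call-stack frame captured by the debugger.
"""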
def __init__(self, ordinal, location, is_constructor, is_at_return,
function_ref, this_ref, argument_vars, local_vars):
self._ordinal = ordinal
self._location = location
self._is_constructor = is_constructor
self._is_at_return = is_at_return
self._function_ref = function_ref
self._this_ref = this_ref
self._arguments = argument_vars
self._locals = local_vars
def ordinal(self):
return self._ordinal
def location(self):
return self._location
def is_constructor(self):
return self._is_constructor
def is_at_return(self):
return self._is_at_return
def function_ref(self):
return self._function_ref
def this_ref(self):
return self._this_ref
def argument_refs(self):
return self._arguments
def local_refs(self):
return self._locals
def formatted_call(self, handle_set):
function = handle_set.get_value(self._function_ref)
s = '%s(' % (function.inferred_name() or function.name() or '<anonymous>')
for n in range(len(self._arguments)):
var = self._arguments[n]
var_name = var[0]
var_value = handle_set.get_value(var[1])
if var_name:
s += '%s=' % (var_name)
s += str(var_value)
if n < len(self._arguments) - 1:
s += ', '
s += ')'
return s
class ScopeType:
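"""Scope type constants, as returned by Scope.scope_type().
"""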
GLOBAL = 0
LOCAL = 1
WITH = 2
CLOSURE = 3
CATCH = 4
class Scope(object):
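"""A variable scope within a frame, referencing its contents by object handle.
"""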
def __init__(self, ordinal, scope_type, object_ref, *args, **kwargs):
self._ordinal = ordinal
self._scope_type = scope_type
self._object_ref = object_ref
def ordinal(self):
return self._ordinal
def scope_type(self):
return self._scope_type
def scope_name(self):
if self._scope_type == ScopeType.GLOBAL:
return 'Global'
elif self._scope_type == ScopeType.LOCAL:
return 'Local'
elif self._scope_type == ScopeType.WITH:
return 'With'
elif self._scope_type == ScopeType.CLOSURE:
return 'Closure'
elif self._scope_type == ScopeType.CATCH:
return 'Catch'
else:
return 'Unknown'
def object_ref(self):
return self._object_ref
class HandleSet(object):
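"""A set of remote value handles, keyed by handle ID.
"""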
def __init__(self, *args, **kwargs):
self._values = {}
def merge(self, other):
for value in other._values.values():
self.add_value(value)
def add_value(self, value):
self._values[value.handle_id()] = value
def has_value(self, handle_id):
return self._values.get(handle_id, None) is not None
def get_value(self, handle_id):
return self._values.get(handle_id, None)
def dump(self):
print 'handle set contains %s values:' % (len(self._values))
for (key, value) in self._values.items():
print ' %s: %s' % (key, value)
def print_value(self, key, handle_id):
dumper = _RecursiveDumper(self)
dumper.dump(key, self.get_value(handle_id))
return dumper.output()
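# Illustrative use (hypothetical names; assumes local_refs() yields
# (name, ref) pairs like the arguments used in Frame.formatted_call):
#   for (name, ref) in frame.local_refs():
#     print handle_set.print_value(name, ref)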
class _RecursiveDumper(object):
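"""Formats a value and its properties recursively, guarding against cycles.
"""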
def __init__(self, handle_set):
self._handle_set = handle_set
self._stack = []
self._output = ''
def output(self):
return self._output
def dump(self, key, value):
if key:
indent = ''.join([' ' for n in range(len(self._stack))])
self._output += '%s%s: %s\n' % (indent, key, value)
if value in self._stack:
return
if isinstance(value, JSObject):
self._stack.append(value)
if isinstance(value, JSFunction):
self._dump_function(value)
else:
self._dump_object(value)
self._stack.pop()
def _dump_function(self, value):
pass
def _dump_object(self, value):
for p in value.properties():
self.dump(p.name(), self._handle_set.get_value(p.ref()))
class JSHandle(object):
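"""Base class for values the debugger references by handle ID.
"""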
def __init__(self, handle_id, handle_type, *args, **kwargs):
self._handle_id = handle_id
self._handle_type = handle_type
def handle_id(self):
return self._handle_id
def handle_type(self):
return self._handle_type
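# The JS* subclasses below wrap individual remote value types.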
class JSUndefined(JSHandle):
def __init__(self, handle_id, *args, **kwargs):
super(JSUndefined, self).__init__(handle_id, 'undefined', *args, **kwargs)
def __repr__(self):
return 'undefined'
class JSNull(JSHandle):
def __init__(self, handle_id, *args, **kwargs):
super(JSNull, self).__init__(handle_id, 'null', *args, **kwargs)
def __repr__(self):
return 'null'
class JSBoolean(JSHandle):
def __init__(self, handle_id, value, *args, **kwargs):
super(JSBoolean, self).__init__(handle_id, 'boolean', *args, **kwargs)
self._value = value
def value(self):
return self._value
def __repr__(self):
return 'true' if self._value else 'false'
class JSNumber(JSHandle):
def __init__(self, handle_id, value, *args, **kwargs):
super(JSNumber, self).__init__(handle_id, 'number', *args, **kwargs)
self._value = value
def value(self):
return self._value
def __repr__(self):
return str(self._value)
class JSString(JSHandle):
def __init__(self, handle_id, value, *args, **kwargs):
super(JSString, self).__init__(handle_id, 'string', *args, **kwargs)
self._value = value
def value(self):
return self._value
def __repr__(self):
return '"%s"' % (self._value)
class JSScript(JSHandle):
def __init__(self, handle_id, uri, *args, **kwargs):
super(JSScript, self).__init__(handle_id, 'script', *args, **kwargs)
self._uri = uri
def uri(self):
return self._uri
def __repr__(self):
return self._uri
class JSObject(JSHandle):
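"""A remote object handle with class, constructor, prototype, and properties.
"""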
def __init__(self, handle_id, class_name, constructor_ref, prototype_ref,
properties, *args, **kwargs):
super(JSObject, self).__init__(handle_id, 'object', *args, **kwargs)
self._class_name = class_name
self._constructor_ref = constructor_ref
self._prototype_ref = prototype_ref
self._properties = properties
def class_name(self):
return self._class_name
def constructor_ref(self):
return self._constructor_ref
def prototype_ref(self):
return self._prototype_ref
def properties(self):
return self._properties
def __repr__(self):
return '<object %s>' % (self.handle_id())
class JSProperty(object):
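"""A named property on a JSObject, referencing its value by handle.
"""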
def __init__(self, name, ref, property_type, attributes, *args, **kwargs):
self._name = name
self._ref = ref
self._property_type = property_type
self._attributes = attributes
def name(self):
return self._name
def ref(self):
return self._ref
def property_type(self):
return self._property_type
def attributes(self):
return self._attributes
def __repr__(self):
return '%s = <%s>' % (self._name, self._ref)
class JSFunction(JSObject):
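"""A remote function handle with name and source location information.
"""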
def __init__(self, handle_id, class_name, constructor_ref, prototype_ref,
properties, name, inferred_name, location, *args, **kwargs):
super(JSFunction, self).__init__(handle_id, class_name, constructor_ref,
prototype_ref, properties, *args, **kwargs)
self._name = name
self._inferred_name = inferred_name
self._location = location
def name(self):
return self._name
def inferred_name(self):
return self._inferred_name
def location(self):
return self._location
def __repr__(self):
name = self._inferred_name or self._name
if self._location:
return '%s (%s@%s:%s)' % (name, self._location[0], self._location[1],
self._location[2])
else:
return name
|
"""Shared definitions"""
PROGRAMNAME = 'metaindexmanager'
ANY_SCOPE = 'any'
ALL_SCOPE = 'all'
|